Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Merge tag 'mm-nonmm-stable-2024-05-19-11-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull non-mm updates from Andrew Morton:
 "Mainly singleton patches, documented in their respective changelogs.
  Notable series include:

   - Some maintenance and performance work for ocfs2 in Heming Zhao's
     series "improve write IO performance when fragmentation is high".

   - Some ocfs2 bugfixes from Su Yue in the series "ocfs2 bugs fixes
     exposed by fstests".

   - kfifo header rework from Andy Shevchenko in the series "kfifo:
     Clean up kfifo.h".

   - GDB script fixes from Florian Rommel in the series "scripts/gdb:
     Fixes for $lx_current and $lx_per_cpu".

   - After much discussion, a coding-style update from Barry Song
     explaining one reason why inline functions are preferred over
     macros. The series is "codingstyle: avoid unused parameters for a
     function-like macro""

* tag 'mm-nonmm-stable-2024-05-19-11-56' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (62 commits)
  fs/proc: fix softlockup in __read_vmcore
  nilfs2: convert BUG_ON() in nilfs_finish_roll_forward() to WARN_ON()
  scripts: checkpatch: check unused parameters for function-like macro
  Documentation: coding-style: ask function-like macros to evaluate parameters
  nilfs2: use __field_struct() for a bitwise field
  selftests/kcmp: remove unused open mode
  nilfs2: remove calls to folio_set_error() and folio_clear_error()
  kernel/watchdog_perf.c: tidy up kerneldoc
  watchdog: allow nmi watchdog to use raw perf event
  watchdog: handle comma separated nmi_watchdog command line
  nilfs2: make superblock data array index computation sparse friendly
  squashfs: remove calls to set the folio error flag
  squashfs: convert squashfs_symlink_read_folio to use folio APIs
  scripts/gdb: fix detection of current CPU in KGDB
  scripts/gdb: make get_thread_info accept pointers
  scripts/gdb: fix parameter handling in $lx_per_cpu
  scripts/gdb: fix failing KGDB detection during probe
  kfifo: don't use "proxy" headers
  media: stih-cec: add missing io.h
  media: rc: add missing io.h
  ...
commit eb6a9339ef
@@ -136,10 +136,6 @@ System kernel config options

      CONFIG_KEXEC_CORE=y

-   Subsequently, CRASH_CORE is selected by KEXEC_CORE::
-
-      CONFIG_CRASH_CORE=y
-
2) Enable "sysfs file system support" in "Filesystem" -> "Pseudo
   filesystems." This is usually enabled by default::
@@ -168,6 +164,10 @@ Dump-capture kernel config options (Arch Independent)

      CONFIG_CRASH_DUMP=y

+   And this will select VMCORE_INFO and CRASH_RESERVE::
+
+      CONFIG_VMCORE_INFO=y
+      CONFIG_CRASH_RESERVE=y

2) Enable "/proc/vmcore support" under "Filesystems" -> "Pseudo filesystems"::

      CONFIG_PROC_VMCORE=y
@@ -3787,10 +3787,12 @@
                        Format: [state][,regs][,debounce][,die]

        nmi_watchdog=   [KNL,BUGS=X86] Debugging features for SMP kernels
-                       Format: [panic,][nopanic,][num]
+                       Format: [panic,][nopanic,][rNNN,][num]
                        Valid num: 0 or 1
                        0 - turn hardlockup detector in nmi_watchdog off
                        1 - turn hardlockup detector in nmi_watchdog on
+                       rNNN - configure the watchdog with raw perf event 0xNNN

                        When panic is specified, panic when an NMI watchdog
                        timeout occurs (or 'nopanic' to not panic on an NMI
                        watchdog, if CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is set)
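To illustrate how the comma-separated format composes, here is a userspace sketch (not the kernel's actual parser; it assumes glibc's strsep()) splitting a string such as "panic,r183,1" into its panic flag, raw perf event, and on/off value::

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            char arg[] = "panic,r183,1";   /* would come from nmi_watchdog= */
            char *rest = arg, *tok;

            while ((tok = strsep(&rest, ",")) != NULL) {
                    if (!strcmp(tok, "panic") || !strcmp(tok, "nopanic"))
                            printf("panic flag: %s\n", tok);
                    else if (tok[0] == 'r')         /* rNNN: hex event code */
                            printf("raw perf event: 0x%lx\n",
                                   strtoul(tok + 1, NULL, 16));
                    else                            /* num: 0 or 1 */
                            printf("hardlockup detector: %s\n",
                                   strcmp(tok, "0") ? "on" : "off");
            }
            return 0;
    }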
@@ -7507,4 +7509,3 @@
                        memory, and other data can't be written using
                        xmon commands.
                off     xmon is disabled.
@@ -906,6 +906,20 @@ Macros, Attributes and Symbols

    See: https://lore.kernel.org/lkml/1399671106.2912.21.camel@joe-AO725/

+  **MACRO_ARG_UNUSED**
+    If function-like macros do not utilize a parameter, it might result
+    in a build warning. We advocate for utilizing static inline functions
+    to replace such macros.
+    For example, for a macro such as the one below::
+
+      #define test(a) do { } while (0)
+
+    there would be a warning like below::
+
+      WARNING: Argument 'a' is not used in function-like macro.
+
+    See: https://www.kernel.org/doc/html/latest/process/coding-style.html#macros-enums-and-rtl
+
  **SINGLE_STATEMENT_DO_WHILE_MACRO**
    For the multi-statement macros, it is necessary to use the do-while
    loop to avoid unpredictable code paths. The do-while loop helps to
@@ -827,6 +827,29 @@ Macros with multiple statements should be enclosed in a do - while block:
                                do_this(b, c);          \
                        } while (0)

+Function-like macros with unused parameters should be replaced by static
+inline functions to avoid the issue of unused variables:
+
+.. code-block:: c
+
+       static inline void fun(struct foo *foo)
+       {
+       }
+
+Due to historical practices, many files still employ the "cast to (void)"
+approach to evaluate parameters. However, this method is not advisable.
+Inline functions address the issue of "expression with side effects
+evaluated more than once", circumvent unused-variable problems, and
+are generally better documented than macros for some reason.
+
+.. code-block:: c
+
+       /*
+        * Avoid doing this whenever possible and instead opt for static
+        * inline functions
+        */
+       #define macrofun(foo) do { (void) (foo); } while (0)
+
Things to avoid when using macros:

1) macros that affect control flow:
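The "evaluated more than once" hazard mentioned above can be seen with a small, self-contained example (illustrative, not part of the patch)::

    #include <stdio.h>

    static int calls;

    static int next(void)
    {
            return ++calls;
    }

    #define square(x)       ((x) * (x))     /* evaluates 'x' twice */

    static inline int square_fn(int x)      /* evaluates 'x' exactly once */
    {
            return x * x;
    }

    int main(void)
    {
            calls = 0;
            /* prints 2: next() ran twice (1 * 2), not the expected 1 */
            printf("macro: %d, calls: %d\n", square(next()), calls);
            calls = 0;
            /* prints 1: next() ran once */
            printf("inline: %d, calls: %d\n", square_fn(next()), calls);
            return 0;
    }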
@@ -4,6 +4,7 @@
 #include <linux/jump_label.h>
 #include <linux/uaccess.h>
 #include <linux/export.h>
+#include <linux/instrumented.h>
 #include <linux/string.h>
 #include <linux/types.h>
@@ -61,10 +62,20 @@ unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned
 */
unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
{
-       if (copy_mc_fragile_enabled)
-               return copy_mc_fragile(dst, src, len);
-       if (static_cpu_has(X86_FEATURE_ERMS))
-               return copy_mc_enhanced_fast_string(dst, src, len);
+       unsigned long ret;
+
+       if (copy_mc_fragile_enabled) {
+               instrument_memcpy_before(dst, src, len);
+               ret = copy_mc_fragile(dst, src, len);
+               instrument_memcpy_after(dst, src, len, ret);
+               return ret;
+       }
+       if (static_cpu_has(X86_FEATURE_ERMS)) {
+               instrument_memcpy_before(dst, src, len);
+               ret = copy_mc_enhanced_fast_string(dst, src, len);
+               instrument_memcpy_after(dst, src, len, ret);
+               return ret;
+       }
        memcpy(dst, src, len);
        return 0;
}
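The shape of the change is the usual instrumentation-wrapper pattern: announce the intended copy, perform it, then report how many bytes were left uncopied so the sanitizers track initialization precisely. A condensed sketch (arch_copy() is a hypothetical stand-in for the real primitives such as copy_mc_fragile())::

    #include <linux/instrumented.h>

    unsigned long arch_copy(void *dst, const void *src, unsigned len); /* hypothetical */

    static unsigned long checked_copy(void *dst, const void *src, unsigned len)
    {
            unsigned long ret;

            instrument_memcpy_before(dst, src, len);     /* KASAN/KCSAN checks */
            ret = arch_copy(dst, src, len);              /* the actual copy */
            instrument_memcpy_after(dst, src, len, ret); /* ret = bytes not copied */
            return ret;
    }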
@@ -75,6 +86,7 @@ unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, un
        unsigned long ret;

        if (copy_mc_fragile_enabled) {
+               instrument_copy_to_user(dst, src, len);
                __uaccess_begin();
                ret = copy_mc_fragile((__force void *)dst, src, len);
                __uaccess_end();
@@ -82,6 +94,7 @@ unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, un
        }

        if (static_cpu_has(X86_FEATURE_ERMS)) {
+               instrument_copy_to_user(dst, src, len);
                __uaccess_begin();
                ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
                __uaccess_end();
@@ -131,8 +131,7 @@ static bool ldm_parse_tocblock (const u8 *data, struct tocblock *toc)
                ldm_crit ("Cannot find TOCBLOCK, database may be corrupt.");
                return false;
        }
-       strncpy (toc->bitmap1_name, data + 0x24, sizeof (toc->bitmap1_name));
-       toc->bitmap1_name[sizeof (toc->bitmap1_name) - 1] = 0;
+       strscpy_pad(toc->bitmap1_name, data + 0x24, sizeof(toc->bitmap1_name));
        toc->bitmap1_start = get_unaligned_be64(data + 0x2E);
        toc->bitmap1_size  = get_unaligned_be64(data + 0x36);

@@ -142,8 +141,7 @@ static bool ldm_parse_tocblock (const u8 *data, struct tocblock *toc)
                                TOC_BITMAP1, toc->bitmap1_name);
                return false;
        }
-       strncpy (toc->bitmap2_name, data + 0x46, sizeof (toc->bitmap2_name));
-       toc->bitmap2_name[sizeof (toc->bitmap2_name) - 1] = 0;
+       strscpy_pad(toc->bitmap2_name, data + 0x46, sizeof(toc->bitmap2_name));
        toc->bitmap2_start = get_unaligned_be64(data + 0x50);
        toc->bitmap2_size  = get_unaligned_be64(data + 0x58);
        if (strncmp (toc->bitmap2_name, TOC_BITMAP2,
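For readers unfamiliar with the replacement helper, a userspace sketch of strscpy_pad()'s contract (illustrative only; the real kernel helper returns -E2BIG on truncation): unlike strncpy(), it guarantees NUL-termination even when truncating, which the old code needed a manual terminator for, and it zero-fills the rest of the destination::

    #include <stdio.h>
    #include <string.h>

    static long strscpy_pad_demo(char *dest, const char *src, size_t count)
    {
            size_t len = strnlen(src, count);

            if (len == count) {                     /* src does not fit */
                    memcpy(dest, src, count - 1);
                    dest[count - 1] = '\0';         /* still NUL-terminated */
                    return -1;                      /* kernel: -E2BIG */
            }
            memcpy(dest, src, len + 1);             /* string plus NUL */
            memset(dest + len + 1, 0, count - len - 1); /* zero-fill the tail */
            return (long)len;
    }

    int main(void)
    {
            char name[16];

            /* prints "9 [TOCBLOCK1]" */
            printf("%ld [%s]\n",
                   strscpy_pad_demo(name, "TOCBLOCK1", sizeof(name)), name);
            return 0;
    }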
@@ -871,7 +871,7 @@ intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata,
        if (!th)
                return ERR_PTR(-ENOMEM);

-       th->id = ida_simple_get(&intel_th_ida, 0, 0, GFP_KERNEL);
+       th->id = ida_alloc(&intel_th_ida, GFP_KERNEL);
        if (th->id < 0) {
                err = th->id;
                goto err_alloc;
@@ -931,7 +931,7 @@ intel_th_alloc(struct device *dev, const struct intel_th_drvdata *drvdata,
                            "intel_th/output");

err_ida:
-       ida_simple_remove(&intel_th_ida, th->id);
+       ida_free(&intel_th_ida, th->id);

err_alloc:
        kfree(th);
@@ -964,7 +964,7 @@ void intel_th_free(struct intel_th *th)
        __unregister_chrdev(th->major, 0, TH_POSSIBLE_OUTPUTS,
                            "intel_th/output");

-       ida_simple_remove(&intel_th_ida, th->id);
+       ida_free(&intel_th_ida, th->id);

        kfree(th);
}
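The conversion is mechanical: ida_simple_get(ida, 0, 0, gfp) requested an unbounded ID range, which is exactly what ida_alloc() provides, and ida_simple_remove() survives as ida_free(). A minimal sketch with a hypothetical "example" user (both IDA calls are the real <linux/idr.h> interfaces)::

    #include <linux/idr.h>

    static DEFINE_IDA(example_ida);

    static int example_get_id(void)
    {
            /*
             * Before: ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
             * start = 0 with end = 0 meant "no upper bound", which is
             * ida_alloc()'s default behaviour.
             */
            return ida_alloc(&example_ida, GFP_KERNEL); /* id >= 0 or -errno */
    }

    static void example_put_id(int id)
    {
            ida_free(&example_ida, id);     /* formerly ida_simple_remove() */
    }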
@@ -6,6 +6,7 @@
 */
 #include <linux/clk.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>

@@ -8,6 +8,7 @@
 #include <linux/clk.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/io.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>

@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/serial_reg.h>
 #include <linux/types.h>

@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/clk.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>

@@ -12,6 +12,7 @@

 #include <linux/clk.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -64,7 +64,7 @@ static void mux_chip_release(struct device *dev)
{
        struct mux_chip *mux_chip = to_mux_chip(dev);

-       ida_simple_remove(&mux_ida, mux_chip->id);
+       ida_free(&mux_ida, mux_chip->id);
        kfree(mux_chip);
}

@@ -111,7 +111,7 @@ struct mux_chip *mux_chip_alloc(struct device *dev,
        mux_chip->dev.of_node = dev->of_node;
        dev_set_drvdata(&mux_chip->dev, mux_chip);

-       mux_chip->id = ida_simple_get(&mux_ida, 0, 0, GFP_KERNEL);
+       mux_chip->id = ida_alloc(&mux_ida, GFP_KERNEL);
        if (mux_chip->id < 0) {
                int err = mux_chip->id;
@@ -148,7 +148,7 @@ static void parport_attach(struct parport *port)
                return;
        }

-       index = ida_simple_get(&pps_client_index, 0, 0, GFP_KERNEL);
+       index = ida_alloc(&pps_client_index, GFP_KERNEL);
        memset(&pps_client_cb, 0, sizeof(pps_client_cb));
        pps_client_cb.private = device;
        pps_client_cb.irq_func = parport_irq;
@@ -188,7 +188,7 @@ static void parport_attach(struct parport *port)
err_unregister_dev:
        parport_unregister_device(device->pardev);
err_free:
-       ida_simple_remove(&pps_client_index, index);
+       ida_free(&pps_client_index, index);
        kfree(device);
}

@@ -208,7 +208,7 @@ static void parport_detach(struct parport *port)
        pps_unregister_source(device->pps);
        parport_release(pardev);
        parport_unregister_device(pardev);
-       ida_simple_remove(&pps_client_index, device->index);
+       ida_free(&pps_client_index, device->index);
        kfree(device);
}
@@ -1934,7 +1934,7 @@ static void free_note_info(struct elf_note_info *info)
                threads = t->next;
                WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
                for (i = 1; i < info->thread_notes; ++i)
-                       kfree(t->notes[i].data);
+                       kvfree(t->notes[i].data);
                kfree(t);
        }
        kfree(info->psinfo.data);
fs/fat/dir.c

@@ -269,6 +269,18 @@ enum { PARSE_INVALID = 1, PARSE_NOT_LONGNAME, PARSE_EOF, };
 /**
  * fat_parse_long - Parse extended directory entry.
  *
+ * @dir:      Pointer to the inode that represents the directory.
+ * @pos:      On input, contains the starting position to read from.
+ *            On output, updated with the new position.
+ * @bh:       Pointer to the buffer head that may be used for reading directory
+ *            entries. May be updated.
+ * @de:       On input, points to the current directory entry.
+ *            On output, points to the next directory entry.
+ * @unicode:  Pointer to a buffer where the parsed Unicode long filename will be
+ *            stored.
+ * @nr_slots: Pointer to a variable that will store the number of longname
+ *            slots found.
+ *
  * This function returns zero on success, negative value on error, or one of
  * the following:
  *
@@ -1857,13 +1857,22 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree,
}

/**
- * nilfs_btree_convert_and_insert -
- * @bmap:
- * @key:
- * @ptr:
- * @keys:
- * @ptrs:
- * @n:
+ * nilfs_btree_convert_and_insert - Convert and insert entries into a B-tree
+ * @btree: NILFS B-tree structure
+ * @key: Key of the new entry to be inserted
+ * @ptr: Pointer (block number) associated with the key to be inserted
+ * @keys: Array of keys to be inserted in addition to @key
+ * @ptrs: Array of pointers associated with @keys
+ * @n: Number of keys and pointers in @keys and @ptrs
+ *
+ * This function is used to insert a new entry specified by @key and @ptr,
+ * along with additional entries specified by @keys and @ptrs arrays, into a
+ * NILFS B-tree.
+ * It prepares the necessary changes by allocating the required blocks and any
+ * necessary intermediate nodes. It converts configurations from other forms of
+ * block mapping (the one that currently exists is direct mapping) to a B-tree.
+ *
+ * Return: 0 on success or a negative error code on failure.
 */
int nilfs_btree_convert_and_insert(struct nilfs_bmap *btree,
                                   __u64 key, __u64 ptr,
@@ -174,7 +174,6 @@ static bool nilfs_check_folio(struct folio *folio, char *kaddr)
                   dir->i_ino, (folio->index << PAGE_SHIFT) + offs,
                   (unsigned long)le64_to_cpu(p->inode));
fail:
-       folio_set_error(folio);
        return false;
}
@@ -175,6 +175,7 @@ int nilfs_init_gcinode(struct inode *inode)

/**
 * nilfs_remove_all_gcinodes() - remove all unprocessed gc inodes
+ * @nilfs: NILFS filesystem instance
 */
void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
{
@@ -335,8 +335,8 @@ void __nilfs_error(struct super_block *sb, const char *function,

extern struct nilfs_super_block *
nilfs_read_super_block(struct super_block *, u64, int, struct buffer_head **);
-extern int nilfs_store_magic_and_option(struct super_block *,
-                                       struct nilfs_super_block *, char *);
+extern int nilfs_store_magic(struct super_block *sb,
+                            struct nilfs_super_block *sbp);
extern int nilfs_check_feature_compatibility(struct super_block *,
                                             struct nilfs_super_block *);
extern void nilfs_set_log_cursor(struct nilfs_super_block *,
@@ -563,6 +563,7 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
 *  checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
+ * @root: NILFS root instance
 * @ri: pointer to a nilfs_recovery_info
 */
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
@@ -698,7 +699,9 @@ static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
                return;

        bh = __getblk(nilfs->ns_bdev, ri->ri_lsegs_start, nilfs->ns_blocksize);
-       BUG_ON(!bh);
+       if (WARN_ON(!bh))
+               return; /* should never happen */

        memset(bh->b_data, 0, bh->b_size);
        set_buffer_dirty(bh);
        err = sync_dirty_buffer(bh);
@@ -1725,14 +1725,8 @@ static void nilfs_end_folio_io(struct folio *folio, int err)
                return;
        }

-       if (!err) {
-               if (!nilfs_folio_buffers_clean(folio))
-                       filemap_dirty_folio(folio->mapping, folio);
-               folio_clear_error(folio);
-       } else {
-               if (err || !nilfs_folio_buffers_clean(folio))
-                       filemap_dirty_folio(folio->mapping, folio);
-               folio_set_error(folio);
-       }
+       if (err || !nilfs_folio_buffers_clean(folio))
+               filemap_dirty_folio(folio->mapping, folio);

        folio_end_writeback(folio);
}
@@ -29,13 +29,13 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/blkdev.h>
-#include <linux/parser.h>
 #include <linux/crc32.h>
 #include <linux/vfs.h>
 #include <linux/writeback.h>
 #include <linux/seq_file.h>
 #include <linux/mount.h>
+#include <linux/fs_context.h>
+#include <linux/fs_parser.h>
 #include "nilfs.h"
 #include "export.h"
 #include "mdt.h"
@@ -61,7 +61,6 @@ struct kmem_cache *nilfs_segbuf_cachep;
 struct kmem_cache *nilfs_btree_path_cache;

 static int nilfs_setup_super(struct super_block *sb, int is_mount);
-static int nilfs_remount(struct super_block *sb, int *flags, char *data);

 void __nilfs_msg(struct super_block *sb, const char *fmt, ...)
 {
@@ -702,105 +701,98 @@ static const struct super_operations nilfs_sops = {
        .freeze_fs      = nilfs_freeze,
        .unfreeze_fs    = nilfs_unfreeze,
        .statfs         = nilfs_statfs,
-       .remount_fs     = nilfs_remount,
        .show_options   = nilfs_show_options
};

enum {
-       Opt_err_cont, Opt_err_panic, Opt_err_ro,
-       Opt_barrier, Opt_nobarrier, Opt_snapshot, Opt_order, Opt_norecovery,
-       Opt_discard, Opt_nodiscard, Opt_err,
+       Opt_err, Opt_barrier, Opt_snapshot, Opt_order, Opt_norecovery,
+       Opt_discard,
};

-static match_table_t tokens = {
-       {Opt_err_cont, "errors=continue"},
-       {Opt_err_panic, "errors=panic"},
-       {Opt_err_ro, "errors=remount-ro"},
-       {Opt_barrier, "barrier"},
-       {Opt_nobarrier, "nobarrier"},
-       {Opt_snapshot, "cp=%u"},
-       {Opt_order, "order=%s"},
-       {Opt_norecovery, "norecovery"},
-       {Opt_discard, "discard"},
-       {Opt_nodiscard, "nodiscard"},
-       {Opt_err, NULL}
+static const struct constant_table nilfs_param_err[] = {
+       {"continue",    NILFS_MOUNT_ERRORS_CONT},
+       {"panic",       NILFS_MOUNT_ERRORS_PANIC},
+       {"remount-ro",  NILFS_MOUNT_ERRORS_RO},
+       {}
};

-static int parse_options(char *options, struct super_block *sb, int is_remount)
+static const struct fs_parameter_spec nilfs_param_spec[] = {
+       fsparam_enum    ("errors", Opt_err, nilfs_param_err),
+       fsparam_flag_no ("barrier", Opt_barrier),
+       fsparam_u64     ("cp", Opt_snapshot),
+       fsparam_string  ("order", Opt_order),
+       fsparam_flag    ("norecovery", Opt_norecovery),
+       fsparam_flag_no ("discard", Opt_discard),
+       {}
+};
+
+struct nilfs_fs_context {
+       unsigned long ns_mount_opt;
+       __u64 cno;
+};
+
+static int nilfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
-       struct the_nilfs *nilfs = sb->s_fs_info;
-       char *p;
-       substring_t args[MAX_OPT_ARGS];
+       struct nilfs_fs_context *nilfs = fc->fs_private;
+       int is_remount = fc->purpose == FS_CONTEXT_FOR_RECONFIGURE;
+       struct fs_parse_result result;
+       int opt;

-       if (!options)
-               return 1;
+       opt = fs_parse(fc, nilfs_param_spec, param, &result);
+       if (opt < 0)
+               return opt;

-       while ((p = strsep(&options, ",")) != NULL) {
-               int token;
-
-               if (!*p)
-                       continue;
-
-               token = match_token(p, tokens, args);
-               switch (token) {
-               case Opt_barrier:
-                       nilfs_set_opt(nilfs, BARRIER);
-                       break;
-               case Opt_nobarrier:
-                       nilfs_clear_opt(nilfs, BARRIER);
-                       break;
-               case Opt_order:
-                       if (strcmp(args[0].from, "relaxed") == 0)
-                               /* Ordered data semantics */
-                               nilfs_clear_opt(nilfs, STRICT_ORDER);
-                       else if (strcmp(args[0].from, "strict") == 0)
-                               /* Strict in-order semantics */
-                               nilfs_set_opt(nilfs, STRICT_ORDER);
-                       else
-                               return 0;
-                       break;
-               case Opt_err_panic:
-                       nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_PANIC);
-                       break;
-               case Opt_err_ro:
-                       nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_RO);
-                       break;
-               case Opt_err_cont:
-                       nilfs_write_opt(nilfs, ERROR_MODE, ERRORS_CONT);
-                       break;
-               case Opt_snapshot:
-                       if (is_remount) {
-                               nilfs_err(sb,
-                                         "\"%s\" option is invalid for remount",
-                                         p);
-                               return 0;
-                       }
-                       break;
-               case Opt_norecovery:
-                       nilfs_set_opt(nilfs, NORECOVERY);
-                       break;
-               case Opt_discard:
-                       nilfs_set_opt(nilfs, DISCARD);
-                       break;
-               case Opt_nodiscard:
-                       nilfs_clear_opt(nilfs, DISCARD);
-                       break;
-               default:
-                       nilfs_err(sb, "unrecognized mount option \"%s\"", p);
-                       return 0;
-               }
-       }
-       return 1;
+       switch (opt) {
+       case Opt_barrier:
+               if (result.negated)
+                       nilfs_clear_opt(nilfs, BARRIER);
+               else
+                       nilfs_set_opt(nilfs, BARRIER);
+               break;
+       case Opt_order:
+               if (strcmp(param->string, "relaxed") == 0)
+                       /* Ordered data semantics */
+                       nilfs_clear_opt(nilfs, STRICT_ORDER);
+               else if (strcmp(param->string, "strict") == 0)
+                       /* Strict in-order semantics */
+                       nilfs_set_opt(nilfs, STRICT_ORDER);
+               else
+                       return -EINVAL;
+               break;
+       case Opt_err:
+               nilfs->ns_mount_opt &= ~NILFS_MOUNT_ERROR_MODE;
+               nilfs->ns_mount_opt |= result.uint_32;
+               break;
+       case Opt_snapshot:
+               if (is_remount) {
+                       struct super_block *sb = fc->root->d_sb;
+
+                       nilfs_err(sb,
+                                 "\"%s\" option is invalid for remount",
+                                 param->key);
+                       return -EINVAL;
+               }
+               if (result.uint_64 == 0) {
+                       nilfs_err(NULL,
+                                 "invalid option \"cp=0\": invalid checkpoint number 0");
+                       return -EINVAL;
+               }
+               nilfs->cno = result.uint_64;
+               break;
+       case Opt_norecovery:
+               nilfs_set_opt(nilfs, NORECOVERY);
+               break;
+       case Opt_discard:
+               if (result.negated)
+                       nilfs_clear_opt(nilfs, DISCARD);
+               else
+                       nilfs_set_opt(nilfs, DISCARD);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
}

-static inline void
-nilfs_set_default_options(struct super_block *sb,
-                         struct nilfs_super_block *sbp)
-{
-       struct the_nilfs *nilfs = sb->s_fs_info;
-
-       nilfs->ns_mount_opt =
-               NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER;
-}
-
static int nilfs_setup_super(struct super_block *sb, int is_mount)
@@ -857,9 +849,8 @@ struct nilfs_super_block *nilfs_read_super_block(struct super_block *sb,
        return (struct nilfs_super_block *)((char *)(*pbh)->b_data + offset);
}

-int nilfs_store_magic_and_option(struct super_block *sb,
-                                struct nilfs_super_block *sbp,
-                                char *data)
+int nilfs_store_magic(struct super_block *sb,
+                     struct nilfs_super_block *sbp)
{
        struct the_nilfs *nilfs = sb->s_fs_info;

@@ -870,14 +861,12 @@ int nilfs_store_magic_and_option(struct super_block *sb,
        sb->s_flags |= SB_NOATIME;
#endif

-       nilfs_set_default_options(sb, sbp);
-
        nilfs->ns_resuid = le16_to_cpu(sbp->s_def_resuid);
        nilfs->ns_resgid = le16_to_cpu(sbp->s_def_resgid);
        nilfs->ns_interval = le32_to_cpu(sbp->s_c_interval);
        nilfs->ns_watermark = le32_to_cpu(sbp->s_c_block_max);

-       return !parse_options(data, sb, 0) ? -EINVAL : 0;
+       return 0;
}

int nilfs_check_feature_compatibility(struct super_block *sb,
@@ -1035,17 +1024,17 @@ int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
/**
 * nilfs_fill_super() - initialize a super block instance
 * @sb: super_block
- * @data: mount options
- * @silent: silent mode flag
+ * @fc: filesystem context
 *
 * This function is called exclusively by nilfs->ns_mount_mutex.
 * So, the recovery process is protected from other simultaneous mounts.
 */
static int
-nilfs_fill_super(struct super_block *sb, void *data, int silent)
+nilfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
        struct the_nilfs *nilfs;
        struct nilfs_root *fsroot;
+       struct nilfs_fs_context *ctx = fc->fs_private;
        __u64 cno;
        int err;

@@ -1055,10 +1044,13 @@ nilfs_fill_super(struct super_block *sb, struct fs_context *fc)

        sb->s_fs_info = nilfs;

-       err = init_nilfs(nilfs, sb, (char *)data);
+       err = init_nilfs(nilfs, sb);
        if (err)
                goto failed_nilfs;

+       /* Copy in parsed mount options */
+       nilfs->ns_mount_opt = ctx->ns_mount_opt;
+
        sb->s_op = &nilfs_sops;
        sb->s_export_op = &nilfs_export_ops;
        sb->s_root = NULL;
@@ -1117,34 +1109,25 @@ nilfs_fill_super(struct super_block *sb, struct fs_context *fc)
        return err;
}

-static int nilfs_remount(struct super_block *sb, int *flags, char *data)
+static int nilfs_reconfigure(struct fs_context *fc)
{
+       struct nilfs_fs_context *ctx = fc->fs_private;
+       struct super_block *sb = fc->root->d_sb;
        struct the_nilfs *nilfs = sb->s_fs_info;
-       unsigned long old_sb_flags;
-       unsigned long old_mount_opt;
        int err;

        sync_filesystem(sb);
-       old_sb_flags = sb->s_flags;
-       old_mount_opt = nilfs->ns_mount_opt;
-
-       if (!parse_options(data, sb, 1)) {
-               err = -EINVAL;
-               goto restore_opts;
-       }
-       sb->s_flags = (sb->s_flags & ~SB_POSIXACL);

        err = -EINVAL;

        if (!nilfs_valid_fs(nilfs)) {
                nilfs_warn(sb,
                           "couldn't remount because the filesystem is in an incomplete recovery state");
-               goto restore_opts;
+               goto ignore_opts;
        }

-       if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
+       if ((bool)(fc->sb_flags & SB_RDONLY) == sb_rdonly(sb))
                goto out;
-       if (*flags & SB_RDONLY) {
+       if (fc->sb_flags & SB_RDONLY) {
                sb->s_flags |= SB_RDONLY;

                /*
@@ -1172,138 +1155,67 @@ static int nilfs_reconfigure(struct fs_context *fc)
                                   "couldn't remount RDWR because of unsupported optional features (%llx)",
                                   (unsigned long long)features);
                        err = -EROFS;
-                       goto restore_opts;
+                       goto ignore_opts;
                }

                sb->s_flags &= ~SB_RDONLY;

                root = NILFS_I(d_inode(sb->s_root))->i_root;
                err = nilfs_attach_log_writer(sb, root);
-               if (err)
-                       goto restore_opts;
+               if (err) {
+                       sb->s_flags |= SB_RDONLY;
+                       goto ignore_opts;
+               }

                down_write(&nilfs->ns_sem);
                nilfs_setup_super(sb, true);
                up_write(&nilfs->ns_sem);
        }
 out:
+       sb->s_flags = (sb->s_flags & ~SB_POSIXACL);
+
+       /* Copy over parsed remount options */
+       nilfs->ns_mount_opt = ctx->ns_mount_opt;
+
        return 0;

-restore_opts:
-       sb->s_flags = old_sb_flags;
-       nilfs->ns_mount_opt = old_mount_opt;
+ignore_opts:
        return err;
}

-struct nilfs_super_data {
-       __u64 cno;
-       int flags;
-};
-
-static int nilfs_parse_snapshot_option(const char *option,
-                                      const substring_t *arg,
-                                      struct nilfs_super_data *sd)
+static int
+nilfs_get_tree(struct fs_context *fc)
{
-       unsigned long long val;
-       const char *msg = NULL;
-       int err;
-
-       if (!(sd->flags & SB_RDONLY)) {
-               msg = "read-only option is not specified";
-               goto parse_error;
-       }
-
-       err = kstrtoull(arg->from, 0, &val);
-       if (err) {
-               if (err == -ERANGE)
-                       msg = "too large checkpoint number";
-               else
-                       msg = "malformed argument";
-               goto parse_error;
-       } else if (val == 0) {
-               msg = "invalid checkpoint number 0";
-               goto parse_error;
-       }
-       sd->cno = val;
-       return 0;
-
-parse_error:
-       nilfs_err(NULL, "invalid option \"%s\": %s", option, msg);
-       return 1;
-}
-
-/**
- * nilfs_identify - pre-read mount options needed to identify mount instance
- * @data: mount options
- * @sd: nilfs_super_data
- */
-static int nilfs_identify(char *data, struct nilfs_super_data *sd)
-{
-       char *p, *options = data;
-       substring_t args[MAX_OPT_ARGS];
-       int token;
-       int ret = 0;
-
-       do {
-               p = strsep(&options, ",");
-               if (p != NULL && *p) {
-                       token = match_token(p, tokens, args);
-                       if (token == Opt_snapshot)
-                               ret = nilfs_parse_snapshot_option(p, &args[0],
-                                                                 sd);
-               }
-               if (!options)
-                       break;
-               BUG_ON(options == data);
-               *(options - 1) = ',';
-       } while (!ret);
-       return ret;
-}
-
-static int nilfs_set_bdev_super(struct super_block *s, void *data)
-{
-       s->s_dev = *(dev_t *)data;
-       return 0;
-}
-
-static int nilfs_test_bdev_super(struct super_block *s, void *data)
-{
-       return !(s->s_iflags & SB_I_RETIRED) && s->s_dev == *(dev_t *)data;
-}
-
-static struct dentry *
-nilfs_mount(struct file_system_type *fs_type, int flags,
-           const char *dev_name, void *data)
-{
-       struct nilfs_super_data sd = { .flags = flags };
+       struct nilfs_fs_context *ctx = fc->fs_private;
        struct super_block *s;
        dev_t dev;
        int err;

-       if (nilfs_identify(data, &sd))
-               return ERR_PTR(-EINVAL);
+       if (ctx->cno && !(fc->sb_flags & SB_RDONLY)) {
+               nilfs_err(NULL,
+                         "invalid option \"cp=%llu\": read-only option is not specified",
+                         ctx->cno);
+               return -EINVAL;
+       }

-       err = lookup_bdev(dev_name, &dev);
+       err = lookup_bdev(fc->source, &dev);
        if (err)
-               return ERR_PTR(err);
+               return err;

-       s = sget(fs_type, nilfs_test_bdev_super, nilfs_set_bdev_super, flags,
-                &dev);
+       s = sget_dev(fc, dev);
        if (IS_ERR(s))
-               return ERR_CAST(s);
+               return PTR_ERR(s);

        if (!s->s_root) {
-               err = setup_bdev_super(s, flags, NULL);
+               err = setup_bdev_super(s, fc->sb_flags, fc);
                if (!err)
-                       err = nilfs_fill_super(s, data,
-                                              flags & SB_SILENT ? 1 : 0);
+                       err = nilfs_fill_super(s, fc);
                if (err)
                        goto failed_super;

                s->s_flags |= SB_ACTIVE;
-       } else if (!sd.cno) {
+       } else if (!ctx->cno) {
                if (nilfs_tree_is_busy(s->s_root)) {
-                       if ((flags ^ s->s_flags) & SB_RDONLY) {
+                       if ((fc->sb_flags ^ s->s_flags) & SB_RDONLY) {
                                nilfs_err(s,
                                          "the device already has a %s mount.",
                                          sb_rdonly(s) ? "read-only" : "read/write");
@@ -1312,37 +1224,75 @@ nilfs_get_tree(struct fs_context *fc)
                        }
                } else {
                        /*
-                        * Try remount to setup mount states if the current
+                        * Try reconfigure to setup mount states if the current
                         * tree is not mounted and only snapshots use this sb.
+                        *
+                        * Since nilfs_reconfigure() requires fc->root to be
+                        * set, set it first and release it on failure.
                         */
-                       err = nilfs_remount(s, &flags, data);
-                       if (err)
+                       fc->root = dget(s->s_root);
+                       err = nilfs_reconfigure(fc);
+                       if (err) {
+                               dput(fc->root);
+                               fc->root = NULL;        /* prevent double release */
                                goto failed_super;
+                       }
+                       return 0;
                }
        }

-       if (sd.cno) {
+       if (ctx->cno) {
                struct dentry *root_dentry;

-               err = nilfs_attach_snapshot(s, sd.cno, &root_dentry);
+               err = nilfs_attach_snapshot(s, ctx->cno, &root_dentry);
                if (err)
                        goto failed_super;
-               return root_dentry;
+               fc->root = root_dentry;
+               return 0;
        }

-       return dget(s->s_root);
+       fc->root = dget(s->s_root);
+       return 0;

failed_super:
        deactivate_locked_super(s);
-       return ERR_PTR(err);
+       return err;
}

+static void nilfs_free_fc(struct fs_context *fc)
+{
+       kfree(fc->fs_private);
+}
+
+static const struct fs_context_operations nilfs_context_ops = {
+       .parse_param    = nilfs_parse_param,
+       .get_tree       = nilfs_get_tree,
+       .reconfigure    = nilfs_reconfigure,
+       .free           = nilfs_free_fc,
+};
+
+static int nilfs_init_fs_context(struct fs_context *fc)
+{
+       struct nilfs_fs_context *ctx;
+
+       ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+       if (!ctx)
+               return -ENOMEM;
+
+       ctx->ns_mount_opt = NILFS_MOUNT_ERRORS_RO | NILFS_MOUNT_BARRIER;
+       fc->fs_private = ctx;
+       fc->ops = &nilfs_context_ops;
+
+       return 0;
+}
+
struct file_system_type nilfs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "nilfs2",
-       .mount          = nilfs_mount,
        .kill_sb        = kill_block_super,
        .fs_flags       = FS_REQUIRES_DEV,
+       .init_fs_context = nilfs_init_fs_context,
+       .parameters     = nilfs_param_spec,
};
MODULE_ALIAS_FS("nilfs2");
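One detail of the new parameter table worth calling out: a single fsparam_flag_no() entry accepts both the bare option and its "no"-prefixed form, and fs_parse() reports which was seen via result.negated. A minimal sketch for a hypothetical "examplefs" (the fs_parser.h helpers themselves are the real ones used above)::

    #include <linux/fs_context.h>
    #include <linux/fs_parser.h>
    #include <linux/printk.h>

    enum { Opt_discard };

    static const struct fs_parameter_spec examplefs_param_spec[] = {
            fsparam_flag_no("discard", Opt_discard), /* "discard" and "nodiscard" */
            {}
    };

    static int examplefs_parse_param(struct fs_context *fc,
                                     struct fs_parameter *param)
    {
            struct fs_parse_result result;
            int opt = fs_parse(fc, examplefs_param_spec, param, &result);

            if (opt < 0)
                    return opt;     /* unknown or malformed parameter */
            if (opt == Opt_discard)
                    pr_info("discard %sabled\n", result.negated ? "dis" : "en");
            return 0;
    }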
@@ -592,7 +592,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
        struct nilfs_super_block **sbp = nilfs->ns_sbp;
        struct buffer_head **sbh = nilfs->ns_sbh;
        u64 sb2off, devsize = bdev_nr_bytes(nilfs->ns_bdev);
-       int valid[2], swp = 0;
+       int valid[2], swp = 0, older;

        if (devsize < NILFS_SEG_MIN_BLOCKS * NILFS_MIN_BLOCK_SIZE + 4096) {
                nilfs_err(sb, "device size too small");
@@ -648,9 +648,25 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
        if (swp)
                nilfs_swap_super_block(nilfs);

+       /*
+        * Calculate the array index of the older superblock data.
+        * If one has been dropped, set index 0 pointing to the remaining one,
+        * otherwise set index 1 pointing to the old one (including if both
+        * are the same).
+        *
+        * Divided case             valid[0]  valid[1]  swp  ->  older
+        * -------------------------------------------------------------
+        * Both SBs are invalid        0         0      N/A    (Error)
+        * SB1 is invalid              0         1       1       0
+        * SB2 is invalid              1         0       0       0
+        * SB2 is newer                1         1       1       0
+        * SB2 is older or the same    1         1       0       1
+        */
+       older = valid[1] ^ swp;
+
        nilfs->ns_sbwcount = 0;
        nilfs->ns_sbwtime = le64_to_cpu(sbp[0]->s_wtime);
-       nilfs->ns_prot_seq = le64_to_cpu(sbp[valid[1] & !swp]->s_last_seq);
+       nilfs->ns_prot_seq = le64_to_cpu(sbp[older]->s_last_seq);
        *sbpp = sbp[0];
        return 0;
}
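The sparse-friendly expression can be checked against the table row by row (a throwaway userspace check, not part of the patch)::

    #include <assert.h>

    int main(void)
    {
            /* table rows: {valid[0], valid[1], swp, expected older} */
            static const int rows[][4] = {
                    {0, 1, 1, 0},   /* SB1 is invalid */
                    {1, 0, 0, 0},   /* SB2 is invalid */
                    {1, 1, 1, 0},   /* SB2 is newer */
                    {1, 1, 0, 1},   /* SB2 is older or the same */
            };

            for (unsigned i = 0; i < sizeof(rows) / sizeof(rows[0]); i++)
                    assert((rows[i][1] ^ rows[i][2]) == rows[i][3]);
            return 0;
    }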
@@ -659,7 +675,6 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
 * init_nilfs - initialize a NILFS instance.
 * @nilfs: the_nilfs structure
 * @sb: super block
- * @data: mount options
 *
 * init_nilfs() performs common initialization per block device (e.g.
 * reading the super block, getting disk layout information, initializing
@@ -668,7 +683,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
 * Return Value: On success, 0 is returned. On error, a negative error
 * code is returned.
 */
-int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data)
+int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
{
        struct nilfs_super_block *sbp;
        int blocksize;
@@ -686,7 +701,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
        if (err)
                goto out;

-       err = nilfs_store_magic_and_option(sb, sbp, data);
+       err = nilfs_store_magic(sb, sbp);
        if (err)
                goto failed_sbh;
@@ -219,10 +219,6 @@ THE_NILFS_FNS(PURGING, purging)
#define nilfs_set_opt(nilfs, opt)  \
        ((nilfs)->ns_mount_opt |= NILFS_MOUNT_##opt)
#define nilfs_test_opt(nilfs, opt) ((nilfs)->ns_mount_opt & NILFS_MOUNT_##opt)
-#define nilfs_write_opt(nilfs, mask, opt)                              \
-       ((nilfs)->ns_mount_opt =                                        \
-               (((nilfs)->ns_mount_opt & ~NILFS_MOUNT_##mask) |        \
-                NILFS_MOUNT_##opt))                                    \

/**
 * struct nilfs_root - nilfs root object
@@ -276,7 +272,7 @@ static inline int nilfs_sb_will_flip(struct the_nilfs *nilfs)
void nilfs_set_last_segment(struct the_nilfs *, sector_t, u64, __u64);
struct the_nilfs *alloc_nilfs(struct super_block *sb);
void destroy_nilfs(struct the_nilfs *nilfs);
-int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb, char *data);
+int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb);
int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb);
unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs);
void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs);
@@ -2283,8 +2283,6 @@ static int ocfs2_dio_wr_get_block(struct inode *inode, sector_t iblock,
        ocfs2_inode_unlock(inode, 1);
        brelse(di_bh);
out:
-       if (ret < 0)
-               ret = -EIO;
        return ret;
}
@@ -1274,7 +1274,7 @@ static int dlm_query_nodeinfo_handler(struct o2net_msg *msg, u32 len,
{
        struct dlm_query_nodeinfo *qn;
        struct dlm_ctxt *dlm = NULL;
-       int locked = 0, status = -EINVAL;
+       int status = -EINVAL;

        qn = (struct dlm_query_nodeinfo *) msg->buf;

@@ -1290,12 +1290,11 @@ static int dlm_query_nodeinfo_handler(struct o2net_msg *msg, u32 len,
        }

        spin_lock(&dlm->spinlock);
-       locked = 1;
        if (dlm->joining_node != qn->qn_nodenum) {
                mlog(ML_ERROR, "Node %d queried nodes on domain %s but "
                     "joining node is %d\n", qn->qn_nodenum, qn->qn_domain,
                     dlm->joining_node);
-               goto bail;
+               goto unlock;
        }

        /* Support for node query was added in 1.1 */
@@ -1305,14 +1304,14 @@ static int dlm_query_nodeinfo_handler(struct o2net_msg *msg, u32 len,
                     "but active dlm protocol is %d.%d\n", qn->qn_nodenum,
                     qn->qn_domain, dlm->dlm_locking_proto.pv_major,
                     dlm->dlm_locking_proto.pv_minor);
-               goto bail;
+               goto unlock;
        }

        status = dlm_match_nodes(dlm, qn);

+unlock:
        spin_unlock(&dlm->spinlock);
bail:
-       if (locked)
-               spin_unlock(&dlm->spinlock);
        spin_unlock(&dlm_domain_lock);

        return status;
@@ -1528,7 +1527,6 @@ static void dlm_send_join_asserts(struct dlm_ctxt *dlm,
{
        int status, node, live;

-       status = 0;
        node = -1;
        while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
                                     node + 1)) < O2NM_MAX_NODES) {
@@ -255,9 +255,9 @@ static struct dentry *ocfs2_fh_to_dentry(struct super_block *sb,
        if (fh_len < 3 || fh_type > 2)
                return NULL;

-       handle.ih_blkno = (u64)le32_to_cpu(fid->raw[0]) << 32;
-       handle.ih_blkno |= (u64)le32_to_cpu(fid->raw[1]);
-       handle.ih_generation = le32_to_cpu(fid->raw[2]);
+       handle.ih_blkno = (u64)le32_to_cpu((__force __le32)fid->raw[0]) << 32;
+       handle.ih_blkno |= (u64)le32_to_cpu((__force __le32)fid->raw[1]);
+       handle.ih_generation = le32_to_cpu((__force __le32)fid->raw[2]);
        return ocfs2_get_dentry(sb, &handle);
}

@@ -269,9 +269,9 @@ static struct dentry *ocfs2_fh_to_parent(struct super_block *sb,
        if (fh_type != 2 || fh_len < 6)
                return NULL;

-       parent.ih_blkno = (u64)le32_to_cpu(fid->raw[3]) << 32;
-       parent.ih_blkno |= (u64)le32_to_cpu(fid->raw[4]);
-       parent.ih_generation = le32_to_cpu(fid->raw[5]);
+       parent.ih_blkno = (u64)le32_to_cpu((__force __le32)fid->raw[3]) << 32;
+       parent.ih_blkno |= (u64)le32_to_cpu((__force __le32)fid->raw[4]);
+       parent.ih_generation = le32_to_cpu((__force __le32)fid->raw[5]);
        return ocfs2_get_dentry(sb, &parent);
}
|
@ -1936,6 +1936,8 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
|
||||
|
||||
inode_lock(inode);
|
||||
|
||||
/* Wait all existing dio workers, newcomers will block on i_rwsem */
|
||||
inode_dio_wait(inode);
|
||||
/*
|
||||
* This prevents concurrent writes on other nodes
|
||||
*/
|
||||
|
@@ -1621,6 +1621,7 @@ static struct super_block *ocfs2_inode_cache_get_super(struct ocfs2_caching_info
}

static void ocfs2_inode_cache_lock(struct ocfs2_caching_info *ci)
+__acquires(&oi->ip_lock)
{
        struct ocfs2_inode_info *oi = cache_info_to_inode(ci);

@@ -1628,6 +1629,7 @@ static void ocfs2_inode_cache_lock(struct ocfs2_caching_info *ci)
}

static void ocfs2_inode_cache_unlock(struct ocfs2_caching_info *ci)
+__releases(&oi->ip_lock)
{
        struct ocfs2_inode_info *oi = cache_info_to_inode(ci);
@@ -125,6 +125,7 @@ int ocfs2_fileattr_set(struct mnt_idmap *idmap,

        ocfs2_inode->ip_attr = flags;
        ocfs2_set_inode_flags(inode);
+       inode_set_ctime_current(inode);

        status = ocfs2_mark_inode_dirty(handle, inode, bh);
        if (status < 0)
@@ -212,14 +212,15 @@ static inline int ocfs2_la_state_enabled(struct ocfs2_super *osb)
void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb,
                                      unsigned int num_clusters)
{
-       spin_lock(&osb->osb_lock);
-       if (osb->local_alloc_state == OCFS2_LA_DISABLED ||
-           osb->local_alloc_state == OCFS2_LA_THROTTLED)
-               if (num_clusters >= osb->local_alloc_default_bits) {
+       if (num_clusters >= osb->local_alloc_default_bits) {
+               spin_lock(&osb->osb_lock);
+               if (osb->local_alloc_state == OCFS2_LA_DISABLED ||
+                   osb->local_alloc_state == OCFS2_LA_THROTTLED) {
                        cancel_delayed_work(&osb->la_enable_wq);
                        osb->local_alloc_state = OCFS2_LA_ENABLED;
                }
-       spin_unlock(&osb->osb_lock);
+               spin_unlock(&osb->osb_lock);
+       }
}

void ocfs2_la_enable_worker(struct work_struct *work)
@@ -335,7 +336,7 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
                     "found = %u, set = %u, taken = %u, off = %u\n",
                     num_used, le32_to_cpu(alloc->id1.bitmap1.i_used),
                     le32_to_cpu(alloc->id1.bitmap1.i_total),
-                    OCFS2_LOCAL_ALLOC(alloc)->la_bm_off);
+                    le32_to_cpu(OCFS2_LOCAL_ALLOC(alloc)->la_bm_off));

                status = -EINVAL;
                goto bail;
@@ -863,14 +864,8 @@ static int ocfs2_local_alloc_find_clear_bits(struct ocfs2_super *osb,

        numfound = bitoff = startoff = 0;
        left = le32_to_cpu(alloc->id1.bitmap1.i_total);
-       while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) != -1) {
-               if (bitoff == left) {
-                       /* mlog(0, "bitoff (%d) == left", bitoff); */
-                       break;
-               }
-               /* mlog(0, "Found a zero: bitoff = %d, startoff = %d, "
-                  "numfound = %d\n", bitoff, startoff, numfound);*/
-
+       while ((bitoff = ocfs2_find_next_zero_bit(bitmap, left, startoff)) <
+              left) {
                /* Ok, we found a zero bit... is it contig. or do we
                 * start over?*/
                if (bitoff == startoff) {
@@ -976,9 +971,9 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
        start = count = 0;
        left = le32_to_cpu(alloc->id1.bitmap1.i_total);

-       while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start))
-              != -1) {
-               if ((bit_off < left) && (bit_off == start)) {
+       while ((bit_off = ocfs2_find_next_zero_bit(bitmap, left, start)) <
+              left) {
+               if (bit_off == start) {
                        count++;
                        start++;
                        continue;
@@ -1002,8 +997,7 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
                                goto bail;
                        }
                }
-               if (bit_off >= left)
-                       break;
+
                count = 1;
                start = bit_off + 1;
        }
@@ -1220,7 +1214,7 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
                             OCFS2_LOCAL_ALLOC(alloc)->la_bitmap);

        trace_ocfs2_local_alloc_new_window_result(
-               OCFS2_LOCAL_ALLOC(alloc)->la_bm_off,
+               le32_to_cpu(OCFS2_LOCAL_ALLOC(alloc)->la_bm_off),
                le32_to_cpu(alloc->id1.bitmap1.i_total));

bail:
@@ -685,7 +685,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
        }

        ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh,
-                                        goal_bit, len);
+                                        goal_bit, len, 0, 0);
        if (ret) {
                ocfs2_rollback_alloc_dinode_counts(gb_inode, gb_bh, len,
                                               le16_to_cpu(gd->bg_chain));
@@ -566,7 +566,7 @@ static int __ocfs2_mknod_locked(struct inode *dir,
        fe->i_last_eb_blk = 0;
        strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE);
        fe->i_flags |= cpu_to_le32(OCFS2_VALID_FL);
-       ktime_get_real_ts64(&ts);
+       ktime_get_coarse_real_ts64(&ts);
        fe->i_atime = fe->i_ctime = fe->i_mtime =
                cpu_to_le64(ts.tv_sec);
        fe->i_mtime_nsec = fe->i_ctime_nsec = fe->i_atime_nsec =
@@ -797,6 +797,7 @@ static int ocfs2_link(struct dentry *old_dentry,
        ocfs2_set_links_count(fe, inode->i_nlink);
        fe->i_ctime = cpu_to_le64(inode_get_ctime_sec(inode));
        fe->i_ctime_nsec = cpu_to_le32(inode_get_ctime_nsec(inode));
+       ocfs2_update_inode_fsync_trans(handle, inode, 0);
        ocfs2_journal_dirty(handle, fe_bh);

        err = ocfs2_add_entry(handle, dentry, inode,
@@ -993,6 +994,7 @@ static int ocfs2_unlink(struct inode *dir,
                drop_nlink(inode);
        drop_nlink(inode);
        ocfs2_set_links_count(fe, inode->i_nlink);
+       ocfs2_update_inode_fsync_trans(handle, inode, 0);
        ocfs2_journal_dirty(handle, fe_bh);

        inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
@@ -883,7 +883,8 @@ struct ocfs2_group_desc
        __le16   bg_free_bits_count;    /* Free bits count */
        __le16   bg_chain;              /* What chain I am in. */
/*10*/  __le32   bg_generation;
-       __le32   bg_reserved1;
+       __le16   bg_contig_free_bits;   /* max contig free bits length */
+       __le16   bg_reserved1;
        __le64   bg_next_group;         /* Next group in my list, in
                                           blocks */
/*20*/  __le64   bg_parent_dinode;      /* dinode which owns me, in
@@ -630,7 +630,7 @@ static int ocfs2_create_refcount_tree(struct inode *inode,
        rb->rf_records.rl_count =
                        cpu_to_le16(ocfs2_refcount_recs_per_rb(osb->sb));
        spin_lock(&osb->osb_lock);
-       rb->rf_generation = osb->s_next_generation++;
+       rb->rf_generation = cpu_to_le32(osb->s_next_generation++);
        spin_unlock(&osb->osb_lock);

        ocfs2_journal_dirty(handle, new_bh);
@@ -414,7 +414,7 @@ static int ocfs2_resmap_find_free_bits(struct ocfs2_reservation_map *resmap,

        start = search_start;
        while ((offset = ocfs2_find_next_zero_bit(bitmap, resmap->m_bitmap_len,
-                                                 start)) != -1) {
+                                                 start)) < resmap->m_bitmap_len) {
                /* Search reached end of the region */
                if (offset >= (search_start + search_len))
                        break;
@@ -91,6 +91,8 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
        u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
        u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
        u16 old_bg_clusters;
+       u16 contig_bits;
+       __le16 old_bg_contig_free_bits;

        trace_ocfs2_update_last_group_and_inode(new_clusters,
                                                first_new_cluster);
@@ -122,6 +124,11 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
                le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
        }

+       contig_bits = ocfs2_find_max_contig_free_bits(group->bg_bitmap,
+                                       le16_to_cpu(group->bg_bits), 0);
+       old_bg_contig_free_bits = group->bg_contig_free_bits;
+       group->bg_contig_free_bits = cpu_to_le16(contig_bits);
+
        ocfs2_journal_dirty(handle, group_bh);

        /* update the inode accordingly. */
@@ -160,6 +167,7 @@ static int ocfs2_update_last_group_and_inode(handle_t *handle,
                le16_add_cpu(&group->bg_free_bits_count, backups);
                le16_add_cpu(&group->bg_bits, -1 * num_bits);
                le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
+               group->bg_contig_free_bits = old_bg_contig_free_bits;
        }
out:
        if (ret)
@ -50,6 +50,10 @@ struct ocfs2_suballoc_result {
|
||||
u64 sr_blkno; /* The first allocated block */
|
||||
unsigned int sr_bit_offset; /* The bit in the bg */
|
||||
unsigned int sr_bits; /* How many bits we claimed */
|
||||
unsigned int sr_max_contig_bits; /* The length for contiguous
|
||||
* free bits, only available
|
||||
* for cluster group
|
||||
*/
|
||||
};
|
||||
|
||||
static u64 ocfs2_group_from_res(struct ocfs2_suballoc_result *res)
|
||||
@ -1272,6 +1276,26 @@ static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
|
||||
return ret;
|
||||
}
|
||||
|
||||
u16 ocfs2_find_max_contig_free_bits(void *bitmap,
|
||||
u16 total_bits, u16 start)
|
||||
{
|
||||
u16 offset, free_bits;
|
||||
u16 contig_bits = 0;
|
||||
|
||||
while (start < total_bits) {
|
||||
offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start);
|
||||
if (offset == total_bits)
|
||||
break;
|
||||
|
||||
start = ocfs2_find_next_bit(bitmap, total_bits, offset);
|
||||
free_bits = start - offset;
|
||||
if (contig_bits < free_bits)
|
||||
contig_bits = free_bits;
|
||||
}
|
||||
|
||||
return contig_bits;
|
||||
}
|
||||
|
||||
static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
|
||||
struct buffer_head *bg_bh,
|
||||
unsigned int bits_wanted,
|
||||
@ -1280,6 +1304,7 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
|
||||
{
|
||||
void *bitmap;
|
||||
u16 best_offset, best_size;
|
||||
u16 prev_best_size = 0;
|
||||
int offset, start, found, status = 0;
|
||||
struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
|
||||
|
||||
@ -1290,10 +1315,8 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
|
||||
found = start = best_offset = best_size = 0;
|
||||
bitmap = bg->bg_bitmap;
|
||||
|
||||
while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) {
|
||||
if (offset == total_bits)
|
||||
break;
|
||||
|
||||
while ((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) <
|
||||
total_bits) {
|
||||
if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
|
||||
/* We found a zero, but we can't use it as it
|
||||
* hasn't been put to disk yet! */
|
||||
@ -1308,6 +1331,7 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
|
||||
/* got a zero after some ones */
|
||||
found = 1;
|
||||
start = offset + 1;
|
||||
prev_best_size = best_size;
|
||||
}
|
||||
if (found > best_size) {
|
||||
best_size = found;
|
||||
@ -1320,6 +1344,8 @@ static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
|
||||
}
|
||||
}
|
||||
|
||||
/* best_size will be allocated, we save prev_best_size */
|
||||
res->sr_max_contig_bits = prev_best_size;
|
||||
if (best_size) {
|
||||
 		res->sr_bit_offset = best_offset;
 		res->sr_bits = best_size;
@ -1337,11 +1363,16 @@ int ocfs2_block_group_set_bits(handle_t *handle,
 			       struct ocfs2_group_desc *bg,
 			       struct buffer_head *group_bh,
 			       unsigned int bit_off,
-			       unsigned int num_bits)
+			       unsigned int num_bits,
+			       unsigned int max_contig_bits,
+			       int fastpath)
 {
 	int status;
 	void *bitmap = bg->bg_bitmap;
 	int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
+	unsigned int start = bit_off + num_bits;
+	u16 contig_bits;
+	struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
 
 	/* All callers get the descriptor via
 	 * ocfs2_read_group_descriptor().  Any corruption is a code bug. */
@ -1373,6 +1404,29 @@ int ocfs2_block_group_set_bits(handle_t *handle,
 	while (num_bits--)
 		ocfs2_set_bit(bit_off++, bitmap);
 
+	/*
+	 * This is the fast path: the caller passes the old contig value
+	 * in max_contig_bits, so the rescan below can be skipped.
+	 */
+	if (fastpath) {
+		bg->bg_contig_free_bits = cpu_to_le16(max_contig_bits);
+	} else if (ocfs2_is_cluster_bitmap(alloc_inode)) {
+		/*
+		 * Usually, the block group bitmap allocates only 1 bit
+		 * at a time, while the cluster group allocates n bits
+		 * each time. Therefore, we only save the contig bits for
+		 * the cluster group.
+		 */
+		contig_bits = ocfs2_find_max_contig_free_bits(bitmap,
+				    le16_to_cpu(bg->bg_bits), start);
+		if (contig_bits > max_contig_bits)
+			max_contig_bits = contig_bits;
+		bg->bg_contig_free_bits = cpu_to_le16(max_contig_bits);
+		ocfs2_local_alloc_seen_free_bits(osb, max_contig_bits);
+	} else {
+		bg->bg_contig_free_bits = 0;
+	}
+
 	ocfs2_journal_dirty(handle, group_bh);
 
 bail:
@ -1486,7 +1540,12 @@ static int ocfs2_cluster_group_search(struct inode *inode,
 
 	BUG_ON(!ocfs2_is_cluster_bitmap(inode));
 
-	if (gd->bg_free_bits_count) {
+	if (le16_to_cpu(gd->bg_contig_free_bits) &&
+	    le16_to_cpu(gd->bg_contig_free_bits) < bits_wanted)
+		return -ENOSPC;
+
+	/* ->bg_contig_free_bits may be uninitialized, so compare again */
+	if (le16_to_cpu(gd->bg_free_bits_count) >= bits_wanted) {
 		max_bits = le16_to_cpu(gd->bg_bits);
 
 		/* Tail groups in cluster bitmaps which aren't cpg
@ -1530,13 +1589,6 @@ static int ocfs2_cluster_group_search(struct inode *inode,
 			 * of bits. */
 			if (min_bits <= res->sr_bits)
 				search = 0; /* success */
-			else if (res->sr_bits) {
-				/*
-				 * Don't show bits which we'll be returning
-				 * for allocation to the local alloc bitmap.
-				 */
-				ocfs2_local_alloc_seen_free_bits(osb, res->sr_bits);
-			}
 		}
 
 	return search;
@ -1555,7 +1607,7 @@ static int ocfs2_block_group_search(struct inode *inode,
 	BUG_ON(min_bits != 1);
 	BUG_ON(ocfs2_is_cluster_bitmap(inode));
 
-	if (bg->bg_free_bits_count) {
+	if (le16_to_cpu(bg->bg_free_bits_count) >= bits_wanted) {
 		ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
 							group_bh, bits_wanted,
 							le16_to_cpu(bg->bg_bits),
@ -1715,7 +1767,8 @@ static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
 	}
 
 	ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh,
-					 res->sr_bit_offset, res->sr_bits);
+					 res->sr_bit_offset, res->sr_bits,
+					 res->sr_max_contig_bits, 0);
 	if (ret < 0) {
 		ocfs2_rollback_alloc_dinode_counts(alloc_inode, ac->ac_bh,
 						   res->sr_bits,
@ -1849,7 +1902,9 @@ static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
 					    bg,
 					    group_bh,
 					    res->sr_bit_offset,
-					    res->sr_bits);
+					    res->sr_bits,
+					    res->sr_max_contig_bits,
+					    0);
 	if (status < 0) {
 		ocfs2_rollback_alloc_dinode_counts(alloc_inode,
 					ac->ac_bh, res->sr_bits, chain);
@ -1951,7 +2006,7 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
 	for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
 		if (i == victim)
 			continue;
-		if (!cl->cl_recs[i].c_free)
+		if (le32_to_cpu(cl->cl_recs[i].c_free) < bits_wanted)
 			continue;
 
 		ac->ac_chain = i;
@ -2163,7 +2218,9 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
 					 bg,
 					 bg_bh,
 					 res->sr_bit_offset,
-					 res->sr_bits);
+					 res->sr_bits,
+					 res->sr_max_contig_bits,
+					 0);
 	if (ret < 0) {
 		ocfs2_rollback_alloc_dinode_counts(ac->ac_inode,
 					ac->ac_bh, res->sr_bits, chain);
@ -2382,11 +2439,13 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
 					 struct buffer_head *group_bh,
 					 unsigned int bit_off,
 					 unsigned int num_bits,
+					 unsigned int max_contig_bits,
 					 void (*undo_fn)(unsigned int bit,
 							 unsigned long *bmap))
 {
 	int status;
 	unsigned int tmp;
+	u16 contig_bits;
 	struct ocfs2_group_desc *undo_bg = NULL;
 	struct journal_head *jh;
 
@ -2433,6 +2492,20 @@ static int ocfs2_block_group_clear_bits(handle_t *handle,
 				  num_bits);
 	}
 
+	/*
+	 * TODO: even when 'num_bits == 1' (the worst case: releasing a
+	 * single cluster), we still need to rescan the whole bitmap.
+	 */
+	if (ocfs2_is_cluster_bitmap(alloc_inode)) {
+		contig_bits = ocfs2_find_max_contig_free_bits(bg->bg_bitmap,
+				    le16_to_cpu(bg->bg_bits), 0);
+		if (contig_bits > max_contig_bits)
+			max_contig_bits = contig_bits;
+		bg->bg_contig_free_bits = cpu_to_le16(max_contig_bits);
+	} else {
+		bg->bg_contig_free_bits = 0;
+	}
+
 	if (undo_fn)
 		spin_unlock(&jh->b_state_lock);
 
@ -2459,6 +2532,7 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
 	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
 	struct buffer_head *group_bh = NULL;
 	struct ocfs2_group_desc *group;
+	__le16 old_bg_contig_free_bits = 0;
 
 	/* The alloc_bh comes from ocfs2_free_dinode() or
 	 * ocfs2_free_clusters().  The callers have all locked the
@ -2483,9 +2557,11 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
 
 	BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));
 
+	if (ocfs2_is_cluster_bitmap(alloc_inode))
+		old_bg_contig_free_bits = group->bg_contig_free_bits;
 	status = ocfs2_block_group_clear_bits(handle, alloc_inode,
 					      group, group_bh,
-					      start_bit, count, undo_fn);
+					      start_bit, count, 0, undo_fn);
 	if (status < 0) {
 		mlog_errno(status);
 		goto bail;
@ -2496,7 +2572,8 @@ static int _ocfs2_free_suballoc_bits(handle_t *handle,
 	if (status < 0) {
 		mlog_errno(status);
 		ocfs2_block_group_set_bits(handle, alloc_inode, group, group_bh,
-				start_bit, count);
+				start_bit, count,
+				le16_to_cpu(old_bg_contig_free_bits), 1);
 		goto bail;
 	}
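The new bg_contig_free_bits field caches the length of the longest contiguous free run in a group descriptor, letting searchers such as ocfs2_cluster_group_search() reject overly fragmented groups without walking their bitmaps. The helper that computes it, ocfs2_find_max_contig_free_bits(), is only declared in this excerpt; a minimal sketch of the idea (illustrative only, not the kernel's implementation) is:

    /* Longest run of zero (free) bits at or after 'start'. */
    static unsigned int max_contig_free_bits(const unsigned char *bitmap,
                                             unsigned int total_bits,
                                             unsigned int start)
    {
            unsigned int i, run = 0, best = 0;

            for (i = start; i < total_bits; i++) {
                    if (bitmap[i / 8] & (1u << (i % 8)))
                            run = 0;                /* bit set: in use */
                    else if (++run > best)
                            best = run;             /* extend the free run */
            }
            return best;
    }

The fastpath flag exists so that re-setting bits after a failed free does not pay for a second scan: the caller already holds the previous contig value and simply passes it back in.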
@ -79,12 +79,16 @@ void ocfs2_rollback_alloc_dinode_counts(struct inode *inode,
 			 struct buffer_head *di_bh,
 			 u32 num_bits,
 			 u16 chain);
+u16 ocfs2_find_max_contig_free_bits(void *bitmap,
+			 u16 total_bits, u16 start);
 int ocfs2_block_group_set_bits(handle_t *handle,
 			 struct inode *alloc_inode,
 			 struct ocfs2_group_desc *bg,
 			 struct buffer_head *group_bh,
 			 unsigned int bit_off,
-			 unsigned int num_bits);
+			 unsigned int num_bits,
+			 unsigned int max_contig_bits,
+			 int fastpath);
 
 int ocfs2_claim_metadata(handle_t *handle,
 			 struct ocfs2_alloc_context *ac,
@ -383,6 +383,8 @@ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
 		/* leave now if filled buffer already */
 		if (!iov_iter_count(iter))
 			return acc;
+
+		cond_resched();
 	}
 
 	list_for_each_entry(m, &vmcore_list, list) {
@ -1370,9 +1372,8 @@ static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
 	vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
 	vdd_hdr->n_type = NT_VMCOREDD;
 
-	strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
-		sizeof(vdd_hdr->name));
-	memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
+	strscpy_pad(vdd_hdr->name, VMCOREDD_NOTE_NAME);
+	strscpy_pad(vdd_hdr->dump_name, data->dump_name);
 }
 
 /**
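These hunks follow the same pattern as the blktrace conversion later in this series: strscpy_pad() always NUL-terminates and zero-fills the remainder of the destination, so no uninitialized bytes leak into the ELF note. A toy illustration (buffer and string invented for the example):

    char name[8];

    strscpy_pad(name, "VMCOREDD", sizeof(name));
    /* name = "VMCORED\0": truncated but terminated; strncpy() would
     * have filled all 8 bytes and left no terminating NUL. */

The two-argument form used above relies on the destination being a fixed-size array whose size the macro can deduce.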
@ -375,8 +375,6 @@ void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer,
 	flush_dcache_page(page);
 	if (copied == avail)
 		SetPageUptodate(page);
-	else
-		SetPageError(page);
 }
 
 /* Copy data into page cache  */
@ -471,7 +469,7 @@ static int squashfs_read_folio(struct file *file, struct folio *folio)
 
 	res = read_blocklist(inode, index, &block);
 	if (res < 0)
-		goto error_out;
+		goto out;
 
 	if (res == 0)
 		res = squashfs_readpage_sparse(page, expected);
@ -483,8 +481,6 @@ static int squashfs_read_folio(struct file *file, struct folio *folio)
 	if (!res)
 		return 0;
 
-error_out:
-	SetPageError(page);
 out:
 	pageaddr = kmap_atomic(page);
 	memset(pageaddr, 0, PAGE_SIZE);
@ -106,14 +106,13 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize,
 	return 0;
 
 mark_errored:
-	/* Decompression failed, mark pages as errored.  Target_page is
+	/* Decompression failed.  Target_page is
 	 * dealt with by the caller
 	 */
 	for (i = 0; i < pages; i++) {
 		if (page[i] == NULL || page[i] == target_page)
 			continue;
 		flush_dcache_page(page[i]);
-		SetPageError(page[i]);
 		unlock_page(page[i]);
 		put_page(page[i]);
 	}
@ -62,27 +62,21 @@
  */
 static int get_dir_index_using_name(struct super_block *sb,
 			u64 *next_block, int *next_offset, u64 index_start,
-			int index_offset, int i_count, const char *name,
-			int len)
+			int index_offset, int i_count, const char *name)
 {
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
 	int i, length = 0, err;
 	unsigned int size;
 	struct squashfs_dir_index *index;
-	char *str;
 
 	TRACE("Entered get_dir_index_using_name, i_count %d\n", i_count);
 
-	index = kmalloc(sizeof(*index) + SQUASHFS_NAME_LEN * 2 + 2, GFP_KERNEL);
+	index = kmalloc(sizeof(*index) + SQUASHFS_NAME_LEN + 1, GFP_KERNEL);
 	if (index == NULL) {
 		ERROR("Failed to allocate squashfs_dir_index\n");
 		goto out;
 	}
 
-	str = &index->name[SQUASHFS_NAME_LEN + 1];
-	strncpy(str, name, len);
-	str[len] = '\0';
-
 	for (i = 0; i < i_count; i++) {
 		err = squashfs_read_metadata(sb, index, &index_start,
 			&index_offset, sizeof(*index));
@ -101,7 +95,7 @@ static int get_dir_index_using_name(struct super_block *sb,
 
 		index->name[size] = '\0';
 
-		if (strcmp(index->name, str) > 0)
+		if (strcmp(index->name, name) > 0)
 			break;
 
 		length = le32_to_cpu(index->index);
@ -153,7 +147,7 @@ static struct dentry *squashfs_lookup(struct inode *dir, struct dentry *dentry,
 	length = get_dir_index_using_name(dir->i_sb, &block, &offset,
 				squashfs_i(dir)->dir_idx_start,
 				squashfs_i(dir)->dir_idx_offset,
-				squashfs_i(dir)->dir_idx_cnt, name, len);
+				squashfs_i(dir)->dir_idx_cnt, name);
 
 	while (length < i_size_read(dir)) {
 		/*
@ -32,20 +32,19 @@
 
 static int squashfs_symlink_read_folio(struct file *file, struct folio *folio)
 {
-	struct page *page = &folio->page;
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	struct super_block *sb = inode->i_sb;
 	struct squashfs_sb_info *msblk = sb->s_fs_info;
-	int index = page->index << PAGE_SHIFT;
+	int index = folio_pos(folio);
 	u64 block = squashfs_i(inode)->start;
 	int offset = squashfs_i(inode)->offset;
 	int length = min_t(int, i_size_read(inode) - index, PAGE_SIZE);
-	int bytes, copied;
+	int bytes, copied, error;
 	void *pageaddr;
 	struct squashfs_cache_entry *entry;
 
 	TRACE("Entered squashfs_symlink_readpage, page index %ld, start block "
-			"%llx, offset %x\n", page->index, block, offset);
+			"%llx, offset %x\n", folio->index, block, offset);
 
 	/*
 	 * Skip index bytes into symlink metadata.
@ -57,14 +56,15 @@ static int squashfs_symlink_read_folio(struct file *file, struct folio *folio)
 			ERROR("Unable to read symlink [%llx:%x]\n",
 				squashfs_i(inode)->start,
 				squashfs_i(inode)->offset);
-			goto error_out;
+			error = bytes;
+			goto out;
 		}
 	}
 
 	/*
 	 * Read length bytes from symlink metadata.  Squashfs_read_metadata
 	 * is not used here because it can sleep and we want to use
-	 * kmap_atomic to map the page.  Instead call the underlying
+	 * kmap_local to map the folio.  Instead call the underlying
 	 * squashfs_cache_get routine.  As length bytes may overlap metadata
 	 * blocks, we may need to call squashfs_cache_get multiple times.
 	 */
@ -75,29 +75,26 @@ static int squashfs_symlink_read_folio(struct file *file, struct folio *folio)
 				squashfs_i(inode)->start,
 				squashfs_i(inode)->offset);
 			squashfs_cache_put(entry);
-			goto error_out;
+			error = entry->error;
+			goto out;
 		}
 
-		pageaddr = kmap_atomic(page);
+		pageaddr = kmap_local_folio(folio, 0);
 		copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
 							length - bytes);
 		if (copied == length - bytes)
 			memset(pageaddr + length, 0, PAGE_SIZE - length);
 		else
 			block = entry->next_index;
-		kunmap_atomic(pageaddr);
+		kunmap_local(pageaddr);
 		squashfs_cache_put(entry);
 	}
 
-	flush_dcache_page(page);
-	SetPageUptodate(page);
-	unlock_page(page);
-	return 0;
-
-error_out:
-	SetPageError(page);
-	unlock_page(page);
-	return 0;
+	flush_dcache_folio(folio);
+	error = 0;
+out:
+	folio_end_read(folio, error == 0);
+	return error;
 }
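folio_end_read() collapses the old epilogue (set or skip the uptodate flag, then unlock) into a single call that also wakes waiters. A sketch of the resulting read_folio idiom, with fill_folio_contents() as a hypothetical stand-in for the actual I/O:

    static int my_read_folio(struct file *file, struct folio *folio)
    {
            int error = fill_folio_contents(folio);   /* hypothetical */

            /* Marks the folio uptodate only if the read succeeded, then
             * unlocks it; no separate SetPageError() step remains. */
            folio_end_read(folio, error == 0);
            return error;
    }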
@ -1057,11 +1057,6 @@ void init_cpu_present(const struct cpumask *src);
 void init_cpu_possible(const struct cpumask *src);
 void init_cpu_online(const struct cpumask *src);
 
-static inline void reset_cpu_possible_mask(void)
-{
-	bitmap_zero(cpumask_bits(&__cpu_possible_mask), NR_CPUS);
-}
-
 static inline void
 set_cpu_possible(unsigned int cpu, bool possible)
 {
@ -147,6 +147,41 @@ instrument_copy_from_user_after(const void *to, const void __user *from,
 	kmsan_unpoison_memory(to, n - left);
 }
 
+/**
+ * instrument_memcpy_before - add instrumentation before non-instrumented memcpy
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ *
+ * Instrument memory accesses that happen in custom memcpy implementations. The
+ * instrumentation should be inserted before the memcpy call.
+ */
+static __always_inline void instrument_memcpy_before(void *to, const void *from,
+						     unsigned long n)
+{
+	kasan_check_write(to, n);
+	kasan_check_read(from, n);
+	kcsan_check_write(to, n);
+	kcsan_check_read(from, n);
+}
+
+/**
+ * instrument_memcpy_after - add instrumentation after non-instrumented memcpy
+ * @to: destination address
+ * @from: source address
+ * @n: number of bytes to copy
+ * @left: number of bytes not copied (if known)
+ *
+ * Instrument memory accesses that happen in custom memcpy implementations. The
+ * instrumentation should be inserted after the memcpy call.
+ */
+static __always_inline void instrument_memcpy_after(void *to, const void *from,
+						    unsigned long n,
+						    unsigned long left)
+{
+	kmsan_memmove(to, from, n - left);
+}
+
 /**
  * instrument_get_user() - add instrumentation to get_user()-like macros
  * @to: destination variable, may not be address-taken
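A caller is expected to bracket a non-instrumented copy with these two helpers; arch_raw_memcpy() below is a made-up stand-in for an assembly routine:

    static void *instrumented_arch_memcpy(void *to, const void *from,
                                          unsigned long n)
    {
            instrument_memcpy_before(to, from, n);   /* KASAN/KCSAN checks */
            arch_raw_memcpy(to, from, n);            /* hypothetical raw copy */
            instrument_memcpy_after(to, from, n, 0); /* copy KMSAN metadata */
            return to;
    }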
@ -464,10 +464,8 @@ static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
 
 extern bool kexec_file_dbg_print;
 
-#define kexec_dprintk(fmt, ...)					\
-	printk("%s" fmt,					\
-	       kexec_file_dbg_print ? KERN_INFO : KERN_DEBUG,	\
-	       ##__VA_ARGS__)
+#define kexec_dprintk(fmt, arg...) \
+	do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
 
 #else /* !CONFIG_KEXEC_CORE */
 struct pt_regs;
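The do { ... } while (0) wrapper keeps the macro usable as an ordinary statement. Without it, the bare if inside the expansion would capture a following else; a contrived example of the hazard this avoids:

    if (image->type == KEXEC_TYPE_CRASH)
            kexec_dprintk("loading crash kernel\n");
    else
            kexec_dprintk("loading normal kernel\n");
    /* With a bare 'if (kexec_file_dbg_print) pr_info(...)' expansion,
     * the 'else' above would bind to the macro's internal 'if'. */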
@ -36,10 +36,15 @@
  * to lock the reader.
  */
 
-#include <linux/kernel.h>
+#include <linux/array_size.h>
 #include <linux/spinlock.h>
 #include <linux/stddef.h>
-#include <linux/scatterlist.h>
+#include <linux/types.h>
+
+#include <asm/barrier.h>
+#include <asm/errno.h>
+
+struct scatterlist;
 
 struct __kfifo {
 	unsigned int in;
@ -61,6 +61,17 @@ void kmsan_check_memory(const void *address, size_t size);
 void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
 			size_t left);
 
+/**
+ * kmsan_memmove() - Notify KMSAN about a data copy within kernel.
+ * @to: destination address in the kernel.
+ * @from: source address in the kernel.
+ * @to_copy: number of bytes to copy.
+ *
+ * Invoked after non-instrumented version (e.g. implemented using assembly
+ * code) of memmove()/memcpy() is called, in order to copy KMSAN's metadata.
+ */
+void kmsan_memmove(void *to, const void *from, size_t to_copy);
+
 #else
 
 static inline void kmsan_poison_memory(const void *address, size_t size,
@ -78,6 +89,10 @@ static inline void kmsan_copy_to_user(void __user *to, const void *from,
 {
 }
 
+static inline void kmsan_memmove(void *to, const void *from, size_t to_copy)
+{
+}
+
 #endif
 
 #endif /* _LINUX_KMSAN_CHECKS_H */
@ -105,10 +105,12 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs);
 extern void hardlockup_detector_perf_stop(void);
 extern void hardlockup_detector_perf_restart(void);
 extern void hardlockup_detector_perf_cleanup(void);
+extern void hardlockup_config_perf_event(const char *str);
 #else
 static inline void hardlockup_detector_perf_stop(void) { }
 static inline void hardlockup_detector_perf_restart(void) { }
 static inline void hardlockup_detector_perf_cleanup(void) { }
+static inline void hardlockup_config_perf_event(const char *str) { }
 #endif
 
 void watchdog_hardlockup_stop(void);
@ -200,7 +200,11 @@ TRACE_EVENT(nilfs2_mdt_submit_block,
 		__field(struct inode *, inode)
 		__field(unsigned long, ino)
 		__field(unsigned long, blkoff)
-		__field(enum req_op, mode)
+		/*
+		 * Use field_struct() to avoid is_signed_type() on the
+		 * bitwise type enum req_op.
+		 */
+		__field_struct(enum req_op, mode)
 	),
 
 	TP_fast_assign(
@ -29,7 +29,6 @@ static struct ctl_table kern_do_mounts_initrd_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec,
 	},
-	{ }
 };
 
 static __init int kernel_do_mounts_initrd_sysctls_init(void)
init/main.c
@ -345,6 +345,11 @@ static int __init xbc_snprint_cmdline(char *buf, size_t size,
 			continue;
 		}
 		xbc_array_for_each_value(vnode, val) {
+			/*
+			 * For prettier and more readable /proc/cmdline, only
+			 * quote the value when necessary, i.e. when it contains
+			 * whitespace.
+			 */
 			q = strpbrk(val, " \t\r\n") ? "\"" : "";
 			ret = snprintf(buf, rest(buf, end), "%s=%s%s%s ",
 				       xbc_namebuf, q, val, q);
@ -881,6 +886,19 @@ static void __init print_unknown_bootoptions(void)
 	memblock_free(unknown_options, len);
 }
 
+static void __init early_numa_node_init(void)
+{
+#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
+#ifndef cpu_to_node
+	int cpu;
+
+	/* The early_cpu_to_node() should be ready here. */
+	for_each_possible_cpu(cpu)
+		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
+#endif
+#endif
+}
+
 asmlinkage __visible __init __no_sanitize_address __noreturn __no_stack_protector
 void start_kernel(void)
 {
@ -911,6 +929,7 @@ void start_kernel(void)
 	setup_nr_cpu_ids();
 	setup_per_cpu_areas();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
+	early_numa_node_init();
 	boot_cpu_hotplug_init();
 
 	pr_notice("Kernel command line: %s\n", saved_command_line);
@ -178,7 +178,6 @@ static struct ctl_table ipc_sysctls[] = {
 		.extra2		= SYSCTL_INT_MAX,
 	},
 #endif
-	{}
 };
 
 static struct ctl_table_set *set_lookup(struct ctl_table_root *root)
@ -64,7 +64,6 @@ static struct ctl_table mq_sysctls[] = {
 		.extra1		= &msg_maxsize_limit_min,
 		.extra2		= &msg_maxsize_limit_max,
 	},
-	{}
 };
 
 static struct ctl_table_set *set_lookup(struct ctl_table_root *root)
@ -4,6 +4,8 @@
  * Copyright (C) 2002-2004 Eric Biederman  <ebiederm@xmission.com>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/buildid.h>
 #include <linux/init.h>
 #include <linux/utsname.h>
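With pr_fmt() defined before the printk machinery is pulled in, every pr_*() call in the file picks up the module-name prefix automatically:

    #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
    #include <linux/printk.h>

    static void report(void)
    {
            pr_warn("unusable memory range\n");
            /* prints "crash_core: unusable memory range", assuming
             * KBUILD_MODNAME is "crash_core" (name for illustration) */
    }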
@ -109,7 +109,7 @@ static int __init parse_crashkernel_mem(char *cmdline,
 
 		size = memparse(cur, &tmp);
 		if (cur == tmp) {
-			pr_warn("Memory value expected\n");
+			pr_warn("crashkernel: Memory value expected\n");
 			return -EINVAL;
 		}
 		cur = tmp;
@ -132,7 +132,7 @@ static int __init parse_crashkernel_mem(char *cmdline,
 			cur++;
 			*crash_base = memparse(cur, &tmp);
 			if (cur == tmp) {
-				pr_warn("Memory value expected after '@'\n");
+				pr_warn("crashkernel: Memory value expected after '@'\n");
 				return -EINVAL;
 			}
 		}
@ -627,7 +627,8 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 		mode = kcov_get_mode(remote_arg->trace_mode);
 		if (mode < 0)
 			return mode;
-		if (remote_arg->area_size > LONG_MAX / sizeof(unsigned long))
+		if ((unsigned long)remote_arg->area_size >
+		    LONG_MAX / sizeof(unsigned long))
 			return -EINVAL;
 		kcov->mode = mode;
 		t->kcov = kcov;
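area_size is a 32-bit field, and on 64-bit kernels LONG_MAX / sizeof(unsigned long) does not fit in 32 bits, so some compilers warn that the unmodified comparison is always false. The cast widens the operand first, keeping the check meaningful on 32-bit while silencing the warning; distilled into a standalone form (my reading of the hunk, not the kernel's code):

    #include <linux/errno.h>
    #include <linux/limits.h>
    #include <linux/types.h>

    static int check_area_size(u32 area_size)
    {
            /* can only trigger on 32-bit, where LONG_MAX / 4 < U32_MAX */
            if ((unsigned long)area_size > LONG_MAX / sizeof(unsigned long))
                    return -EINVAL;
            return 0;
    }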
@ -16,14 +16,14 @@ static int __regset_get(struct task_struct *target,
 	if (size > regset->n * regset->size)
 		size = regset->n * regset->size;
 	if (!p) {
-		to_free = p = kzalloc(size, GFP_KERNEL);
+		to_free = p = kvzalloc(size, GFP_KERNEL);
 		if (!p)
 			return -ENOMEM;
 	}
 	res = regset->regset_get(target, regset,
 				 (struct membuf){.p = p, .left = size});
 	if (res < 0) {
-		kfree(to_free);
+		kvfree(to_free);
 		return res;
 	}
 	*data = p;
@ -71,6 +71,6 @@ int copy_regset_to_user(struct task_struct *target,
 	ret = regset_get_alloc(target, regset, size, &buf);
 	if (ret > 0)
 		ret = copy_to_user(data, buf, ret) ? -EFAULT : 0;
-	kfree(buf);
+	kvfree(buf);
 	return ret;
 }
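kvzalloc() tries kmalloc() first and falls back to vmalloc() when the request is too large or too fragmented for the slab, which suits regsets whose buffer is regset->n * regset->size bytes; kvfree() releases either kind. The usual pairing:

    #include <linux/slab.h>    /* kvzalloc(), kvfree() */

    static int with_scratch_buffer(size_t size)
    {
            void *buf = kvzalloc(size, GFP_KERNEL); /* kmalloc, else vmalloc */

            if (!buf)
                    return -ENOMEM;
            /* ... fill and consume buf ... */
            kvfree(buf);    /* correct for either allocation path */
            return 0;
    }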
@ -524,8 +524,7 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 	if (!buts->buf_size || !buts->buf_nr)
 		return -EINVAL;
 
-	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
-	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';
+	strscpy_pad(buts->name, name, BLKTRACE_BDEV_SIZE);
 
 	/*
 	 * some device names have larger paths - convert the slashes
@ -78,6 +78,7 @@ void __init hardlockup_detector_disable(void)
 
 static int __init hardlockup_panic_setup(char *str)
 {
+next:
 	if (!strncmp(str, "panic", 5))
 		hardlockup_panic = 1;
 	else if (!strncmp(str, "nopanic", 7))
@ -86,6 +87,14 @@ static int __init hardlockup_panic_setup(char *str)
 		watchdog_hardlockup_user_enabled = 0;
 	else if (!strncmp(str, "1", 1))
 		watchdog_hardlockup_user_enabled = 1;
+	else if (!strncmp(str, "r", 1))
+		hardlockup_config_perf_event(str + 1);
+	while (*(str++)) {
+		if (*str == ',') {
+			str++;
+			goto next;
+		}
+	}
 	return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@ -90,6 +90,14 @@ static struct perf_event_attr wd_hw_attr = {
 	.disabled	= 1,
 };
 
+static struct perf_event_attr fallback_wd_hw_attr = {
+	.type		= PERF_TYPE_HARDWARE,
+	.config		= PERF_COUNT_HW_CPU_CYCLES,
+	.size		= sizeof(struct perf_event_attr),
+	.pinned		= 1,
+	.disabled	= 1,
+};
+
 /* Callback function for perf event subsystem */
 static void watchdog_overflow_callback(struct perf_event *event,
 				       struct perf_sample_data *data,
@ -122,6 +130,13 @@ static int hardlockup_detector_event_create(void)
 	/* Try to register using hardware perf events */
 	evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
 					       watchdog_overflow_callback, NULL);
+	if (IS_ERR(evt)) {
+		wd_attr = &fallback_wd_hw_attr;
+		wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
+		evt = perf_event_create_kernel_counter(wd_attr, cpu, NULL,
+						       watchdog_overflow_callback, NULL);
+	}
+
 	if (IS_ERR(evt)) {
 		pr_debug("Perf event create on CPU %d failed with %ld\n", cpu,
 			 PTR_ERR(evt));
@ -133,7 +148,6 @@ static int hardlockup_detector_event_create(void)
 
 /**
  * watchdog_hardlockup_enable - Enable the local event
- *
  * @cpu: The CPU to enable hard lockup on.
  */
 void watchdog_hardlockup_enable(unsigned int cpu)
@ -152,7 +166,6 @@ void watchdog_hardlockup_enable(unsigned int cpu)
 
 /**
  * watchdog_hardlockup_disable - Disable the local event
- *
  * @cpu: The CPU to disable hard lockup on.
 */
 void watchdog_hardlockup_disable(unsigned int cpu)
@ -259,3 +272,33 @@ int __init watchdog_hardlockup_probe(void)
 	}
 	return ret;
 }
+
+/**
+ * hardlockup_config_perf_event - Overwrite config of wd_hw_attr.
+ * @str: number which identifies the raw perf event to use
+ */
+void __init hardlockup_config_perf_event(const char *str)
+{
+	u64 config;
+	char buf[24];
+	char *comma = strchr(str, ',');
+
+	if (!comma) {
+		if (kstrtoull(str, 16, &config))
+			return;
+	} else {
+		unsigned int len = comma - str;
+
+		if (len >= sizeof(buf))
+			return;
+
+		if (strscpy(buf, str, sizeof(buf)) < 0)
+			return;
+		buf[len] = 0;
+		if (kstrtoull(buf, 16, &config))
+			return;
+	}
+
+	wd_hw_attr.type = PERF_TYPE_RAW;
+	wd_hw_attr.config = config;
+}
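Combined with the comma-separated parsing added to hardlockup_panic_setup() above, options can now be mixed on one command line, and "r" selects a raw PMU event, e.g. (event number invented for illustration):

    nmi_watchdog=panic,r0x3c

The hex digits after "r" are parsed with kstrtoull(..., 16, ...) and installed as a PERF_TYPE_RAW config before the per-CPU watchdog events are created.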
@ -122,6 +122,7 @@ config KDB_DEFAULT_ENABLE
 config KDB_KEYBOARD
 	bool "KGDB_KDB: keyboard as input device"
 	depends on VT && KGDB_KDB && !PARISC
+	depends on HAS_IOPORT
 	default n
 	help
 	  KDB can use a PS/2 type keyboard for an input device
@ -8,6 +8,7 @@
 #
 
 use strict;
+use Cwd qw(abs_path);
 
 my @names = ();
 my @oids = ();
@ -17,6 +18,8 @@ if ($#ARGV != 1) {
 	exit(2);
 }
 
+my $abs_srctree = abs_path($ENV{'srctree'});
+
 #
 # Open the file to read from
 #
@ -35,7 +38,7 @@ close IN_FILE || die;
 #
 open C_FILE, ">$ARGV[1]" or die;
 print C_FILE "/*\n";
-print C_FILE " * Automatically generated by ", $0, ". Do not edit\n";
+print C_FILE " * Automatically generated by ", $0 =~ s#^\Q$abs_srctree/\E##r, ". Do not edit\n";
 print C_FILE " */\n";
 
 #
lib/devres.c
@ -1,10 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <linux/bug.h>
 #include <linux/device.h>
 #include <linux/err.h>
-#include <linux/io.h>
-#include <linux/gfp.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/gfp_types.h>
+#include <linux/io.h>
 #include <linux/ioport.h>
 #include <linux/of_address.h>
+#include <linux/types.h>
 
 enum devm_ioremap_type {
 	DEVM_IOREMAP = 0,
@ -125,12 +128,13 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res,
 	resource_size_t size;
 	void __iomem *dest_ptr;
 	char *pretty_name;
+	int ret;
 
 	BUG_ON(!dev);
 
 	if (!res || resource_type(res) != IORESOURCE_MEM) {
-		dev_err(dev, "invalid resource %pR\n", res);
-		return IOMEM_ERR_PTR(-EINVAL);
+		ret = dev_err_probe(dev, -EINVAL, "invalid resource %pR\n", res);
+		return IOMEM_ERR_PTR(ret);
 	}
 
 	if (type == DEVM_IOREMAP && res->flags & IORESOURCE_MEM_NONPOSTED)
@ -144,20 +148,20 @@ __devm_ioremap_resource(struct device *dev, const struct resource *res,
 	else
 		pretty_name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
 	if (!pretty_name) {
-		dev_err(dev, "can't generate pretty name for resource %pR\n", res);
-		return IOMEM_ERR_PTR(-ENOMEM);
+		ret = dev_err_probe(dev, -ENOMEM, "can't generate pretty name for resource %pR\n", res);
+		return IOMEM_ERR_PTR(ret);
 	}
 
 	if (!devm_request_mem_region(dev, res->start, size, pretty_name)) {
-		dev_err(dev, "can't request region for resource %pR\n", res);
-		return IOMEM_ERR_PTR(-EBUSY);
+		ret = dev_err_probe(dev, -EBUSY, "can't request region for resource %pR\n", res);
+		return IOMEM_ERR_PTR(ret);
 	}
 
 	dest_ptr = __devm_ioremap(dev, res->start, size, type);
 	if (!dest_ptr) {
-		dev_err(dev, "ioremap failed for resource %pR\n", res);
 		devm_release_mem_region(dev, res->start, size);
-		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
+		ret = dev_err_probe(dev, -ENOMEM, "ioremap failed for resource %pR\n", res);
+		return IOMEM_ERR_PTR(ret);
 	}
 
 	return dest_ptr;
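dev_err_probe() both logs the message and returns the error code passed to it (staying quiet for -EPROBE_DEFER, which it records under /sys/kernel/debug/devices_deferred instead), so each error path shrinks to one statement. A typical probe-time use, sketched with an invented driver:

    #include <linux/clk.h>
    #include <linux/dev_printk.h>
    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct clk *clk = devm_clk_get(&pdev->dev, NULL);

            if (IS_ERR(clk))
                    /* logs (unless deferring) and returns the error */
                    return dev_err_probe(&pdev->dev, PTR_ERR(clk),
                                         "failed to get clock\n");
            return 0;
    }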
lib/kfifo.c
@ -5,13 +5,13 @@
  * Copyright (C) 2009/2010 Stefani Seibold <stefani@seibold.net>
  */
 
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/err.h>
-#include <linux/log2.h>
-#include <linux/uaccess.h>
+#include <linux/err.h>
+#include <linux/export.h>
 #include <linux/kfifo.h>
+#include <linux/log2.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
 
 /*
  * internal helper to calculate the unused elements in a fifo
@ -113,7 +113,7 @@ static void __init test_hexdump_prepare_test(size_t len, int rowsize,
 			*p++ = ' ';
 		} while (p < test + rs * 2 + rs / gs + 1);
 
-		strncpy(p, data_a, l);
+		memcpy(p, data_a, l);
 		p += l;
 	}
@ -285,6 +285,17 @@ void kmsan_copy_to_user(void __user *to, const void *from, size_t to_copy,
 }
 EXPORT_SYMBOL(kmsan_copy_to_user);
 
+void kmsan_memmove(void *to, const void *from, size_t size)
+{
+	if (!kmsan_enabled || kmsan_in_runtime())
+		return;
+
+	kmsan_enter_runtime();
+	kmsan_internal_memmove_metadata(to, (void *)from, size);
+	kmsan_leave_runtime();
+}
+EXPORT_SYMBOL(kmsan_memmove);
+
 /* Helper function to check an URB. */
 void kmsan_handle_urb(const struct urb *urb, bool is_out)
 {
@ -6,8 +6,9 @@
  */
 
 #include <linux/init.h>
-#include <linux/module.h>
 #include <linux/kfifo.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
 
 /*
  * This module shows how to handle fifo dma operations.
@ -6040,6 +6040,12 @@ sub process {
 					CHK("MACRO_ARG_PRECEDENCE",
 					    "Macro argument '$arg' may be better as '($arg)' to avoid precedence issues\n" . "$herectx");
 				}
+
+				# check if this is an unused argument
+				if ($define_stmt !~ /\b$arg\b/) {
+					WARN("MACRO_ARG_UNUSED",
+					     "Argument '$arg' is not used in function-like macro\n" . "$herectx");
+				}
 			}
 
 # check for macros with flow control, but without ## concatenation
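The new check warns when a function-like macro never references one of its parameters, since any side effects in the argument then silently disappear; this is the checkpatch side of the coding-style series mentioned in the changelog. A macro that would trip the warning, and the inline-function form that avoids the problem (both invented for illustration):

    /* WARN: "Argument 'x' is not used in function-like macro" */
    #define VALIDATE_FOO(x) do { } while (0)

    /* An inline function always evaluates and type-checks its argument. */
    static inline void validate_foo(int x)
    {
            (void)x;
    }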
@ -26,11 +26,7 @@ def get_current_cpu():
     if utils.get_gdbserver_type() == utils.GDBSERVER_QEMU:
         return gdb.selected_thread().num - 1
     elif utils.get_gdbserver_type() == utils.GDBSERVER_KGDB:
-        tid = gdb.selected_thread().ptid[2]
-        if tid > (0x100000000 - MAX_CPUS - 2):
-            return 0x100000000 - tid - 2
-        else:
-            return tasks.get_thread_info(tasks.get_task_by_pid(tid))['cpu']
+        return gdb.parse_and_eval("kgdb_active.counter")
     else:
         raise gdb.GdbError("Sorry, obtaining the current CPU is not yet "
                            "supported with this gdb server.")
@ -152,9 +148,8 @@ Note that VAR has to be quoted as string."""
     def __init__(self):
         super(PerCpu, self).__init__("lx_per_cpu")
 
-    def invoke(self, var_name, cpu=-1):
-        var_ptr = gdb.parse_and_eval("&" + var_name.string())
-        return per_cpu(var_ptr, cpu)
+    def invoke(self, var, cpu=-1):
+        return per_cpu(var.address, cpu)
 
 
 PerCpu()
@ -85,7 +85,7 @@ thread_info_type = utils.CachedType("struct thread_info")
 
 def get_thread_info(task):
     thread_info_ptr_type = thread_info_type.get_type().pointer()
-    if task.type.fields()[0].type == thread_info_type.get_type():
+    if task_type.get_type().fields()[0].type == thread_info_type.get_type():
         return task['thread_info']
     thread_info = task['stack'].cast(thread_info_ptr_type)
     return thread_info.dereference()
@ -196,7 +196,7 @@ def get_gdbserver_type():
     def probe_kgdb():
         try:
             thread_info = gdb.execute("info thread 2", to_string=True)
-            return "shadowCPU0" in thread_info
+            return "shadowCPU" in thread_info
         except gdb.error:
             return False
@ -158,13 +158,13 @@ RB_DECLARE_CALLBACKS(RBSTATIC, RBNAME,				\
 
 static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
 {
-	rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
+	rb->__rb_parent_color = rb_color(rb) + (unsigned long)p;
 }
 
 static inline void rb_set_parent_color(struct rb_node *rb,
 				       struct rb_node *p, int color)
 {
-	rb->__rb_parent_color = (unsigned long)p | color;
+	rb->__rb_parent_color = (unsigned long)p + color;
 }
 
 static inline void
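Because rb_node pointers are aligned, the low bits of (unsigned long)p are zero and the color bit can never carry into them, so '|' and '+' yield identical values; '+' gives compilers license to fold the operation into address arithmetic (an lea on x86, for instance). A standalone check of the equivalence:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uintptr_t p = 0x1000;   /* stands in for an aligned node pointer */
            unsigned long color;

            for (color = 0; color <= 1; color++)    /* RB_RED=0, RB_BLACK=1 */
                    assert((p | color) == (p + color));
            return 0;
    }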
@ -58,7 +58,7 @@
 
 static inline void rb_set_black(struct rb_node *rb)
 {
-	rb->__rb_parent_color |= RB_BLACK;
+	rb->__rb_parent_color += RB_BLACK;
 }
 
 static inline struct rb_node *rb_red_parent(struct rb_node *red)
@ -91,7 +91,7 @@ int main(int argc, char **argv)
 	ksft_print_header();
 	ksft_set_plan(3);
 
-	fd2 = open(kpath, O_RDWR, 0644);
+	fd2 = open(kpath, O_RDWR);
 	if (fd2 < 0) {
 		perror("Can't open file");
 		ksft_exit_fail();