diff --git a/.mailmap b/.mailmap index ea69b340168b..a12172b447e5 100644 --- a/.mailmap +++ b/.mailmap @@ -410,6 +410,7 @@ Liam Mark Linas Vepstas Linus Lüssing Linus Lüssing +Linus Lüssing Li Yang Li Yang diff --git a/CREDITS b/CREDITS index cda68f04d5f1..1f9f0f078b4a 100644 --- a/CREDITS +++ b/CREDITS @@ -4339,7 +4339,7 @@ D: Freescale Highspeed USB device driver D: Freescale QE SoC support and Ethernet driver S: B-1206 Jingmao Guojigongyu S: 16 Baliqiao Nanjie, Beijing 101100 -S: People's Repulic of China +S: People's Republic of China N: Vlad Yasevich E: vyasevich@gmail.com diff --git a/Documentation/accounting/delay-accounting.rst b/Documentation/accounting/delay-accounting.rst index f61c01fc376e..210c194d4a7b 100644 --- a/Documentation/accounting/delay-accounting.rst +++ b/Documentation/accounting/delay-accounting.rst @@ -100,29 +100,29 @@ Get delays, since system boot, for pid 10:: # ./getdelays -d -p 10 (output similar to next case) -Get sum of delays, since system boot, for all pids with tgid 5:: +Get sum and peak of delays, since system boot, for all pids with tgid 242:: - # ./getdelays -d -t 5 + bash-4.4# ./getdelays -d -t 242 print delayacct stats ON - TGID 5 + TGID 242 - CPU count real total virtual total delay total delay average - 8 7000000 6872122 3382277 0.423ms - IO count delay total delay average - 0 0 0.000ms - SWAP count delay total delay average - 0 0 0.000ms - RECLAIM count delay total delay average - 0 0 0.000ms - THRASHING count delay total delay average - 0 0 0.000ms - COMPACT count delay total delay average - 0 0 0.000ms - WPCOPY count delay total delay average - 0 0 0.000ms - IRQ count delay total delay average - 0 0 0.000ms + CPU count real total virtual total delay total delay average delay max delay min + 39 156000000 156576579 2111069 0.054ms 0.212296ms 0.031307ms + IO count delay total delay average delay max delay min + 0 0 0.000ms 0.000000ms 0.000000ms + SWAP count delay total delay average delay max delay min + 0 0 0.000ms 0.000000ms 0.000000ms + RECLAIM count delay total delay average delay max delay min + 0 0 0.000ms 0.000000ms 0.000000ms + THRASHING count delay total delay average delay max delay min + 0 0 0.000ms 0.000000ms 0.000000ms + COMPACT count delay total delay average delay max delay min + 0 0 0.000ms 0.000000ms 0.000000ms + WPCOPY count delay total delay average delay max delay min + 156 11215873 0.072ms 0.207403ms 0.033913ms + IRQ count delay total delay average delay max delay min + 0 0 0.000ms 0.000000ms 0.000000ms Get IO accounting for pid 1, it works only with -p:: diff --git a/Documentation/core-api/min_heap.rst b/Documentation/core-api/min_heap.rst index 0c636c8b7aa5..683bc6d09f00 100644 --- a/Documentation/core-api/min_heap.rst +++ b/Documentation/core-api/min_heap.rst @@ -4,6 +4,8 @@ Min Heap API ============ +:Author: Kuan-Wei Chiu + Introduction ============ diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index 77e0ece2b1d6..f6a3eef4fe7f 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -42,8 +42,8 @@ call xa_tag_pointer() to create an entry with a tag, xa_untag_pointer() to turn a tagged entry back into an untagged pointer and xa_pointer_tag() to retrieve the tag of an entry. Tagged pointers use the same bits that are used to distinguish value entries from normal pointers, so you must -decide whether they want to store value entries or tagged pointers in -any particular XArray. 
+decide whether you want to store value entries or tagged pointers in any +particular XArray. The XArray does not support storing IS_ERR() pointers as some conflict with value entries or internal entries. @@ -52,8 +52,9 @@ An unusual feature of the XArray is the ability to create entries which occupy a range of indices. Once stored to, looking up any index in the range will return the same entry as looking up any other index in the range. Storing to any index will store to all of them. Multi-index -entries can be explicitly split into smaller entries, or storing ``NULL`` -into any entry will cause the XArray to forget about the range. +entries can be explicitly split into smaller entries. Unsetting (using +xa_erase() or xa_store() with ``NULL``) any entry will cause the XArray +to forget about the range. Normal API ========== @@ -63,13 +64,14 @@ for statically allocated XArrays or xa_init() for dynamically allocated ones. A freshly-initialised XArray contains a ``NULL`` pointer at every index. -You can then set entries using xa_store() and get entries -using xa_load(). xa_store will overwrite any entry with the -new entry and return the previous entry stored at that index. You can -use xa_erase() instead of calling xa_store() with a -``NULL`` entry. There is no difference between an entry that has never -been stored to, one that has been erased and one that has most recently -had ``NULL`` stored to it. +You can then set entries using xa_store() and get entries using +xa_load(). xa_store() will overwrite any entry with the new entry and +return the previous entry stored at that index. You can unset entries +using xa_erase() or by setting the entry to ``NULL`` using xa_store(). +There is no difference between an entry that has never been stored to +and one that has been erased with xa_erase(); an entry that has most +recently had ``NULL`` stored to it is also equivalent except if the +XArray was initialized with ``XA_FLAGS_ALLOC``. You can conditionally replace an entry at an index by using xa_cmpxchg(). Like cmpxchg(), it will only succeed if diff --git a/Documentation/filesystems/squashfs.rst b/Documentation/filesystems/squashfs.rst index 4af8d6207509..45653b3228f9 100644 --- a/Documentation/filesystems/squashfs.rst +++ b/Documentation/filesystems/squashfs.rst @@ -6,7 +6,7 @@ Squashfs 4.0 Filesystem Squashfs is a compressed read-only filesystem for Linux. -It uses zlib, lz4, lzo, or xz compression to compress files, inodes and +It uses zlib, lz4, lzo, xz or zstd compression to compress files, inodes and directories. Inodes in the system are very small and all blocks are packed to minimise data overhead. Block sizes greater than 4K are supported up to a maximum of 1Mbytes (default block size 128K). @@ -16,8 +16,8 @@ use (i.e. in cases where a .tar.gz file may be used), and in constrained block device/memory systems (e.g. embedded systems) where low overhead is needed. -Mailing list: squashfs-devel@lists.sourceforge.net -Web site: www.squashfs.org +Mailing list (kernel code): linux-fsdevel@vger.kernel.org +Web site: github.com/plougher/squashfs-tools 1. Filesystem Features ---------------------- @@ -58,11 +58,9 @@ inodes have different sizes). As squashfs is a read-only filesystem, the mksquashfs program must be used to create populated squashfs filesystems. This and other squashfs utilities -can be obtained from http://www.squashfs.org. Usage instructions can be -obtained from this site also. 
- -The squashfs-tools development tree is now located on kernel.org - git://git.kernel.org/pub/scm/fs/squashfs/squashfs-tools.git +are very likely packaged by your linux distribution (called squashfs-tools). +The source code can be obtained from github.com/plougher/squashfs-tools. +Usage instructions can also be obtained from this site. 2.1 Mount options ----------------- diff --git a/MAINTAINERS b/MAINTAINERS index 0fa7c5728f1e..19800134a9dd 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2827,7 +2827,7 @@ ARM/NXP S32G ARCHITECTURE R: Chester Lin R: Matthias Brugger R: Ghennadi Procopciuc -L: NXP S32 Linux Team +R: NXP S32 Linux Team L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm64/boot/dts/freescale/s32g*.dts* @@ -16600,8 +16600,8 @@ F: arch/nios2/ NITRO ENCLAVES (NE) M: Alexandru Ciobotaru +R: The AWS Nitro Enclaves Team L: linux-kernel@vger.kernel.org -L: The AWS Nitro Enclaves Team S: Supported W: https://aws.amazon.com/ec2/nitro/nitro-enclaves/ F: Documentation/virt/ne_overview.rst @@ -16612,8 +16612,8 @@ F: samples/nitro_enclaves/ NITRO SECURE MODULE (NSM) M: Alexander Graf +R: The AWS Nitro Enclaves Team L: linux-kernel@vger.kernel.org -L: The AWS Nitro Enclaves Team S: Supported W: https://aws.amazon.com/ec2/nitro/nitro-enclaves/ F: drivers/misc/nsm.c @@ -18425,8 +18425,8 @@ M: Fabio Estevam M: Shawn Guo M: Jacky Bai R: Pengutronix Kernel Team +R: NXP S32 Linux Team L: linux-gpio@vger.kernel.org -L: NXP S32 Linux Team S: Maintained F: Documentation/devicetree/bindings/pinctrl/fsl,* F: Documentation/devicetree/bindings/pinctrl/nxp,s32* @@ -19561,7 +19561,7 @@ F: drivers/ras/amd/fmpm.c RASPBERRY PI PISP BACK END M: Jacopo Mondi -L: Raspberry Pi Kernel Maintenance +R: Raspberry Pi Kernel Maintenance L: linux-media@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/media/raspberrypi,pispbe.yaml diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c index 9a238e7536ae..3d32165043f8 100644 --- a/arch/alpha/lib/fpreg.c +++ b/arch/alpha/lib/fpreg.c @@ -10,7 +10,6 @@ #include #include #include -#include #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val)); diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c index d2f5ceaaed1b..3b2d8b1bd271 100644 --- a/arch/arc/kernel/unaligned.c +++ b/arch/arc/kernel/unaligned.c @@ -200,7 +200,6 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs, struct callee_regs *cregs) { struct disasm_state state; - char buf[TASK_COMM_LEN]; /* handle user mode only and only if enabled by sysadmin */ if (!user_mode(regs) || !unaligned_enabled) @@ -212,11 +211,11 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs, " performance significantly\n. 
To enable further" " logging of such instances, please \n" " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n", - get_task_comm(buf, current), task_pid_nr(current)); + current->comm, task_pid_nr(current)); } else { /* Add rate limiting if it gets down to it */ pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n", - get_task_comm(buf, current), task_pid_nr(current), + current->comm, task_pid_nr(current), address, regs->ret); } diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c index 0c8d9000df5a..dd930e3a61a4 100644 --- a/arch/arm/mach-pxa/sharpsl_pm.c +++ b/arch/arm/mach-pxa/sharpsl_pm.c @@ -31,10 +31,10 @@ /* * Constants */ -#define SHARPSL_CHARGE_ON_TIME_INTERVAL (msecs_to_jiffies(1*60*1000)) /* 1 min */ -#define SHARPSL_CHARGE_FINISH_TIME (msecs_to_jiffies(10*60*1000)) /* 10 min */ -#define SHARPSL_BATCHK_TIME (msecs_to_jiffies(15*1000)) /* 15 sec */ -#define SHARPSL_BATCHK_TIME_SUSPEND (60*10) /* 10 min */ +#define SHARPSL_CHARGE_ON_TIME_INTERVAL (secs_to_jiffies(60)) +#define SHARPSL_CHARGE_FINISH_TIME (secs_to_jiffies(10*60)) +#define SHARPSL_BATCHK_TIME (secs_to_jiffies(15)) +#define SHARPSL_BATCHK_TIME_SUSPEND (60*10) /* 10 min */ #define SHARPSL_WAIT_CO_TIME 15 /* 15 sec */ #define SHARPSL_WAIT_DISCHARGE_ON 100 /* 100 msec */ diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index c705247e7b5b..581f0080814e 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -629,7 +629,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 6d62b9187a58..25628a1e8fa1 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -586,7 +586,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index c3c644df852d..503a9ea526b5 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -606,7 +606,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 20261f819691..3358349898ef 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -578,7 +578,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index ce4fe93a0f70..a5e933a7fdf9 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -588,7 +588,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 040ae75f47c3..a90676c04da6 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -605,7 +605,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m 
CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 20d877cb4e30..f28f7783b090 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -692,7 +692,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 5e1c8d0d3da5..61308c2cd96c 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -578,7 +578,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 5d1409e6a137..9ec8eb9ea614 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -579,7 +579,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index e4c30e2b9bbb..5fd094391b9e 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -595,7 +595,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 980843a9ea1e..5e9c9d704c2e 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -575,7 +575,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 38681cc6b598..b2f5f398fe42 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -576,7 +576,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index f39c0d000c43..bc48063fd860 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -451,7 +451,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 25429905ae90..86bff159c51e 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4957,7 +4957,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, * states are synchronized from L0 to L1. L1 needs to inform L0 about * MER=1 only when there are pending external interrupts. * In the above if check, MER bit is set if there are pending - * external interrupts. Hence, explicity mask off MER bit + * external interrupts. 
Hence, explicitly mask off MER bit * here as otherwise it may generate spurious interrupts in L2 KVM * causing an endless loop, which results in L2 guest getting hung. */ diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index f84ac9fbe203..f7c9271bda58 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -544,7 +544,7 @@ static int drc_pmem_query_health(struct papr_scm_priv *p) /* Jiffies offset for which the health data is assumed to be same */ cache_timeout = p->lasthealth_jiffies + - msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000); + secs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL); /* Fetch new health info is its older than MIN_HEALTH_QUERY_INTERVAL */ if (time_after(jiffies, cache_timeout)) diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c index 6652e54cf3db..6d1ffca5f798 100644 --- a/arch/s390/kernel/lgr.c +++ b/arch/s390/kernel/lgr.c @@ -166,7 +166,7 @@ static struct timer_list lgr_timer; */ static void lgr_timer_set(void) { - mod_timer(&lgr_timer, jiffies + msecs_to_jiffies(LGR_TIMER_INTERVAL_SECS * MSEC_PER_SEC)); + mod_timer(&lgr_timer, jiffies + secs_to_jiffies(LGR_TIMER_INTERVAL_SECS)); } /* diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 34a65c141ea0..e9f47c3a6197 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -662,12 +662,12 @@ static void stp_check_leap(void) if (ret < 0) pr_err("failed to set leap second flags\n"); /* arm Timer to clear leap second flags */ - mod_timer(&stp_timer, jiffies + msecs_to_jiffies(14400 * MSEC_PER_SEC)); + mod_timer(&stp_timer, jiffies + secs_to_jiffies(14400)); } else { /* The day the leap second is scheduled for hasn't been reached. Retry * in one hour. */ - mod_timer(&stp_timer, jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC)); + mod_timer(&stp_timer, jiffies + secs_to_jiffies(3600)); } } diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 4f9c301a705b..0fd56a1cadbd 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -371,7 +371,7 @@ static void set_topology_timer(void) if (atomic_add_unless(&topology_poll, -1, 0)) mod_timer(&topology_timer, jiffies + msecs_to_jiffies(100)); else - mod_timer(&topology_timer, jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC)); + mod_timer(&topology_timer, jiffies + secs_to_jiffies(60)); } void topology_expect_change(void) diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index d01724a715d0..7bf0f691827b 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -204,7 +204,7 @@ static void cmm_set_timer(void) del_timer(&cmm_timer); return; } - mod_timer(&cmm_timer, jiffies + msecs_to_jiffies(cmm_timeout_seconds * MSEC_PER_SEC)); + mod_timer(&cmm_timer, jiffies + secs_to_jiffies(cmm_timeout_seconds)); } static void cmm_timer_fn(struct timer_list *unused) diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index e9e803a4d44c..e6cc84143f3e 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -246,9 +246,8 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) /* VM86_SCREEN_BITMAP had numerous bugs and appears to have no users. 
*/ if (v.flags & VM86_SCREEN_BITMAP) { - char comm[TASK_COMM_LEN]; - - pr_info_once("vm86: '%s' uses VM86_SCREEN_BITMAP, which is no longer supported\n", get_task_comm(comm, current)); + pr_info_once("vm86: '%s' uses VM86_SCREEN_BITMAP, which is no longer supported\n", + current->comm); return -EINVAL; } diff --git a/drivers/accel/habanalabs/common/context.c b/drivers/accel/habanalabs/common/context.c index b83141f58319..9f212b17611a 100644 --- a/drivers/accel/habanalabs/common/context.c +++ b/drivers/accel/habanalabs/common/context.c @@ -199,7 +199,6 @@ out_err: int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx) { - char task_comm[TASK_COMM_LEN]; int rc = 0, i; ctx->hdev = hdev; @@ -272,7 +271,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx) mutex_init(&ctx->ts_reg_lock); dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n", - get_task_comm(task_comm, current), ctx->asid); + current->comm, ctx->asid); } return 0; diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c index e0cf3b4343bb..30277ae410d4 100644 --- a/drivers/accel/habanalabs/common/device.c +++ b/drivers/accel/habanalabs/common/device.c @@ -817,7 +817,7 @@ static void device_hard_reset_pending(struct work_struct *work) } queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work, - msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000)); + secs_to_jiffies(HL_PENDING_RESET_PER_SEC)); } } diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c index 708dfd10f39c..5ec13f3a46f9 100644 --- a/drivers/accel/habanalabs/common/habanalabs_drv.c +++ b/drivers/accel/habanalabs/common/habanalabs_drv.c @@ -362,8 +362,7 @@ static void fixup_device_params_per_asic(struct hl_device *hdev, int timeout) * a different default timeout for Gaudi */ if (timeout == HL_DEFAULT_TIMEOUT_LOCKED) - hdev->timeout_jiffies = msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED * - MSEC_PER_SEC); + hdev->timeout_jiffies = secs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED); hdev->reset_upon_device_release = 0; break; diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c index 1dd6e23172ca..8729a0c57d78 100644 --- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c @@ -1279,13 +1279,10 @@ static long _hl_ioctl(struct hl_fpriv *hpriv, unsigned int cmd, unsigned long ar retcode = -EFAULT; out_err: - if (retcode) { - char task_comm[TASK_COMM_LEN]; - + if (retcode) dev_dbg_ratelimited(dev, "error in ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n", - task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr); - } + task_pid_nr(current), current->comm, cmd, nr); if (kdata != stack_kdata) kfree(kdata); @@ -1308,11 +1305,9 @@ long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg) if (nr == _IOC_NR(DRM_IOCTL_HL_INFO)) { ioctl = &hl_ioctls_control[nr - HL_COMMAND_START]; } else { - char task_comm[TASK_COMM_LEN]; - dev_dbg_ratelimited(hdev->dev_ctrl, "invalid ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n", - task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr); + task_pid_nr(current), current->comm, cmd, nr); return -ENOTTY; } diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 838064593f62..a7c2b04ab943 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ 
-544,7 +544,7 @@ static void print_stats(struct xen_blkif_ring *ring) ring->st_rd_req, ring->st_wr_req, ring->st_f_req, ring->st_ds_req, ring->persistent_gnt_c, max_pgrants); - ring->st_print = jiffies + msecs_to_jiffies(10 * 1000); + ring->st_print = jiffies + secs_to_jiffies(10); ring->st_rd_req = 0; ring->st_wr_req = 0; ring->st_oo_req = 0; diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 56b78cf6b854..62596424a9aa 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -391,7 +391,6 @@ void intel_display_driver_resume_access(struct drm_i915_private *i915) */ bool intel_display_driver_check_access(struct drm_i915_private *i915) { - char comm[TASK_COMM_LEN]; char current_task[TASK_COMM_LEN + 16]; char allowed_task[TASK_COMM_LEN + 16] = "none"; @@ -400,12 +399,11 @@ bool intel_display_driver_check_access(struct drm_i915_private *i915) return true; snprintf(current_task, sizeof(current_task), "%s[%d]", - get_task_comm(comm, current), - task_pid_vnr(current)); + current->comm, task_pid_vnr(current)); if (i915->display.access.allowed_task) snprintf(allowed_task, sizeof(allowed_task), "%s[%d]", - get_task_comm(comm, i915->display.access.allowed_task), + i915->display.access.allowed_task->comm, task_pid_vnr(i915->display.access.allowed_task)); drm_dbg_kms(&i915->drm, diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index 2cb2e5675807..cd659b9fd1d9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -279,7 +279,6 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm, const u64 plength = 0x10000; const u64 ioffset = plength; const u64 ilength = 0x02000; - char name[TASK_COMM_LEN]; int cid, ret; u64 size; @@ -338,8 +337,7 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm, chan->userd = &chan->user; } - get_task_comm(name, current); - snprintf(args.name, sizeof(args.name), "%s[%d]", name, task_pid_nr(current)); + snprintf(args.name, sizeof(args.name), "%s[%d]", current->comm, task_pid_nr(current)); ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass, &args, sizeof(args), &chan->user); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 107f63f08bd9..ea7206484d22 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1159,7 +1159,7 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_cli *cli; - char name[32], tmpname[TASK_COMM_LEN]; + char name[32]; int ret; /* need to bring up power immediately if opening device */ @@ -1169,10 +1169,9 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) return ret; } - get_task_comm(tmpname, current); rcu_read_lock(); snprintf(name, sizeof(name), "%s[%d]", - tmpname, pid_nr(rcu_dereference(fpriv->pid))); + current->comm, pid_nr(rcu_dereference(fpriv->pid))); rcu_read_unlock(); if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) { diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 06d6db8b50f9..f260e21fa283 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -514,7 +514,7 @@ static int wait_for_lmem_ready(struct xe_device *xe) drm_dbg(&xe->drm, "Waiting for lmem initialization\n"); start = jiffies; - timeout = start + 
msecs_to_jiffies(60 * 1000); /* 60 sec! */ + timeout = start + secs_to_jiffies(60); /* 60 sec! */ do { if (signal_pending(current)) diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h index 49805a24bb0a..7259f4f55700 100644 --- a/drivers/infiniband/hw/hfi1/iowait.h +++ b/drivers/infiniband/hw/hfi1/iowait.h @@ -92,7 +92,7 @@ struct iowait_work { * * The lock field is used by waiters to record * the seqlock_t that guards the list head. - * Waiters explicity know that, but the destroy + * Waiters explicitly know that, but the destroy * code that unwaits QPs does not. */ struct iowait { diff --git a/drivers/infiniband/hw/usnic/usnic_abi.h b/drivers/infiniband/hw/usnic/usnic_abi.h index 7fe9502ce8d3..86a82a4da0aa 100644 --- a/drivers/infiniband/hw/usnic/usnic_abi.h +++ b/drivers/infiniband/hw/usnic/usnic_abi.h @@ -72,7 +72,7 @@ struct usnic_ib_create_qp_resp { u64 bar_bus_addr; u32 bar_len; /* - * WQ, RQ, CQ are explicity specified bc exposing a generic resources inteface + * WQ, RQ, CQ are explicitly specified bc exposing a generic resources inteface * expands the scope of ABI to many files. */ u32 wq_cnt; diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index 57281a135dd7..bf192529e3fe 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -178,7 +178,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar, * received 'update stats' event, we keep a 3 seconds timeout in case, * fw_stats_done is not marked yet */ - timeout = jiffies + msecs_to_jiffies(3 * 1000); + timeout = jiffies + secs_to_jiffies(3); ath11k_debugfs_fw_stats_reset(ar); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 0949e7975ff1..b70d20128f98 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -1810,7 +1810,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) rfi->cur_idx = cur_idx; } } else { - /* explicity window move updating the expected index */ + /* explicitly window move updating the expected index */ exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET]; brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n", diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 35860c61468b..fd797e278549 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -1044,7 +1044,7 @@ static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb) static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb) { timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0); - pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000); + pacb->refresh_timer.expires = jiffies + secs_to_jiffies(60); add_timer(&pacb->refresh_timer); } diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index b375509d1470..97631f48e19d 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -966,7 +966,7 @@ static int cxlflash_disk_detach(struct scsi_device *sdev, void *detach) * * This routine is the release handler for the fops registered with * the CXL services on an initial attach for a context. 
It is called - * when a close (explicity by the user or as part of a process tear + * when a close (explicitly by the user or as part of a process tear * down) is performed on the adapter file descriptor returned to the * user. The user should be aware that explicitly performing a close * considered catastrophic and subsequent usage of the superpipe API diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 7f57397d91a9..4fed2e1243e0 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -598,7 +598,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) jiffies + msecs_to_jiffies(1000 * timeout)); /* Set up heart beat (HB) timer */ mod_timer(&phba->hb_tmofunc, - jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); + jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL)); clear_bit(HBA_HBEAT_INP, &phba->hba_flag); clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); phba->last_completion_time = jiffies; @@ -1267,7 +1267,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) !test_bit(FC_UNLOADING, &phba->pport->load_flag)) mod_timer(&phba->hb_tmofunc, jiffies + - msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL)); return; } @@ -1555,7 +1555,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) /* If IOs are completing, no need to issue a MBX_HEARTBEAT */ spin_lock_irq(&phba->pport->work_port_lock); if (time_after(phba->last_completion_time + - msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL), jiffies)) { spin_unlock_irq(&phba->pport->work_port_lock); if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) @@ -3354,7 +3354,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) spin_unlock_irqrestore(&phba->hbalock, iflag); if (mbx_action == LPFC_MBX_NO_WAIT) return; - timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; + timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies; spin_lock_irqsave(&phba->hbalock, iflag); if (phba->sli.mbox_active) { actcmd = phba->sli.mbox_active->u.mb.mbxCommand; @@ -4924,14 +4924,14 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) stat = 1; goto finished; } - if (time >= msecs_to_jiffies(30 * 1000)) { + if (time >= secs_to_jiffies(30)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0461 Scanning longer than 30 " "seconds. 
Continuing initialization\n"); stat = 1; goto finished; } - if (time >= msecs_to_jiffies(15 * 1000) && + if (time >= secs_to_jiffies(15) && phba->link_state <= LPFC_LINK_DOWN) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0465 Link down longer than 15 " @@ -4945,7 +4945,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) if (vport->num_disc_nodes || vport->fc_prli_sent) goto finished; if (!atomic_read(&vport->fc_map_cnt) && - time < msecs_to_jiffies(2 * 1000)) + time < secs_to_jiffies(2)) goto finished; if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) goto finished; @@ -5179,8 +5179,8 @@ lpfc_vmid_poll(struct timer_list *t) lpfc_worker_wake_up(phba); /* restart the timer for the next iteration */ - mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 * - LPFC_VMID_TIMER)); + mod_timer(&phba->inactive_vmid_poll, + jiffies + secs_to_jiffies(LPFC_VMID_TIMER)); } /** diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 4d88cfe71cae..08a7f5c6157f 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -906,7 +906,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, (ndlp->nlp_state >= NLP_STE_ADISC_ISSUE || ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) { mod_timer(&ndlp->nlp_delayfunc, - jiffies + msecs_to_jiffies(1000 * 1)); + jiffies + secs_to_jiffies(1)); set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_printf_vlog(vport, KERN_INFO, @@ -1332,7 +1332,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); + mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1)); set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; @@ -1936,7 +1936,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, - jiffies + msecs_to_jiffies(1000 * 1)); + jiffies + secs_to_jiffies(1)); set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; @@ -2743,7 +2743,7 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { mod_timer(&ndlp->nlp_delayfunc, - jiffies + msecs_to_jiffies(1000 * 1)); + jiffies + secs_to_jiffies(1)); set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 43dc1da4a156..b1adb9f59097 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -2237,7 +2237,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, * wait. Print a message if a 10 second wait expires and renew the * wait. This is unexpected. 
*/ - wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); + wait_tmo = secs_to_jiffies(LPFC_NVME_WAIT_TMO); while (true) { ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); if (unlikely(!ret)) { diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 874644b31a3e..3fd9723cd271 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -9012,7 +9012,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Start heart beat timer */ mod_timer(&phba->hb_tmofunc, - jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); + jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL)); clear_bit(HBA_HBEAT_INP, &phba->hba_flag); clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); phba->last_completion_time = jiffies; @@ -13323,7 +13323,7 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) lpfc_sli_mbox_sys_flush(phba); return; } - timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; + timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies; /* Disable softirqs, including timers from obtaining phba->hbalock */ local_bh_disable(); diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c index cc3e4736f2fe..14dbfe954e42 100644 --- a/drivers/scsi/lpfc/lpfc_vmid.c +++ b/drivers/scsi/lpfc/lpfc_vmid.c @@ -278,7 +278,7 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) { mod_timer(&vport->phba->inactive_vmid_poll, jiffies + - msecs_to_jiffies(1000 * LPFC_VMID_TIMER)); + secs_to_jiffies(LPFC_VMID_TIMER)); vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD; } } diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index f8c81e53e93f..22e0e79e88ab 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -736,7 +736,7 @@ static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) return -EIO; } time_remaining = wait_for_completion_timeout(&completion, - msecs_to_jiffies(60*1000)); // 1 min + secs_to_jiffies(60)); // 1 min if (!time_remaining) { kfree(payload.func_specific); pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n"); diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c index dc0d715ed970..0dbe76ee5570 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c @@ -59,7 +59,7 @@ static int bcm2835_audio_send_msg_locked(struct bcm2835_audio_instance *instance if (wait) { if (!wait_for_completion_timeout(&instance->msg_avail_comp, - msecs_to_jiffies(10 * 1000))) { + secs_to_jiffies(10))) { dev_err(instance->dev, "vchi message timeout, msg=%d\n", m->type); return -ETIMEDOUT; diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index dcb1769c3625..50c0c23ae678 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2622,14 +2622,13 @@ static int tty_tiocgicount(struct tty_struct *tty, void __user *arg) static int tty_set_serial(struct tty_struct *tty, struct serial_struct *ss) { - char comm[TASK_COMM_LEN]; int flags; flags = ss->flags & ASYNC_DEPRECATED; if (flags) pr_warn_ratelimited("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n", - __func__, get_task_comm(comm, current), flags); + __func__, current->comm, flags); if (!tty->ops->set_serial) return -ENOTTY; diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c index 06ee397e0c3a..d90eda19bcc4 100644 --- a/fs/ceph/quota.c +++ b/fs/ceph/quota.c @@ -166,7 
+166,7 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc, if (IS_ERR(in)) { doutc(cl, "Can't lookup inode %llx (err: %ld)\n", realm->ino, PTR_ERR(in)); - qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */ + qri->timeout = jiffies + secs_to_jiffies(60); /* XXX */ } else { qri->timeout = 0; qri->inode = in; diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index ba3e1f591f36..6b506995818d 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c @@ -21,6 +21,8 @@ * nilfs_palloc_groups_per_desc_block - get the number of groups that a group * descriptor block can maintain * @inode: inode of metadata file using this allocator + * + * Return: Number of groups that a group descriptor block can maintain. */ static inline unsigned long nilfs_palloc_groups_per_desc_block(const struct inode *inode) @@ -32,6 +34,8 @@ nilfs_palloc_groups_per_desc_block(const struct inode *inode) /** * nilfs_palloc_groups_count - get maximum number of groups * @inode: inode of metadata file using this allocator + * + * Return: Maximum number of groups. */ static inline unsigned long nilfs_palloc_groups_count(const struct inode *inode) @@ -43,6 +47,8 @@ nilfs_palloc_groups_count(const struct inode *inode) * nilfs_palloc_init_blockgroup - initialize private variables for allocator * @inode: inode of metadata file using this allocator * @entry_size: size of the persistent object + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size) { @@ -78,6 +84,9 @@ int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size) * @inode: inode of metadata file using this allocator * @nr: serial number of the entry (e.g. inode number) * @offset: pointer to store offset number in the group + * + * Return: Number of the group that contains the entry with the index + * specified by @nr. */ static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr, unsigned long *offset) @@ -93,8 +102,8 @@ static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr, * @inode: inode of metadata file using this allocator * @group: group number * - * nilfs_palloc_desc_blkoff() returns block offset of the descriptor - * block which contains a descriptor of the specified group. + * Return: Index number in the metadata file of the descriptor block of + * the group specified by @group. */ static unsigned long nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group) @@ -111,6 +120,9 @@ nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group) * * nilfs_palloc_bitmap_blkoff() returns block offset of the bitmap * block used to allocate/deallocate entries in the specified group. + * + * Return: Index number in the metadata file of the bitmap block of + * the group specified by @group. */ static unsigned long nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group) @@ -125,6 +137,8 @@ nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group) * nilfs_palloc_group_desc_nfrees - get the number of free entries in a group * @desc: pointer to descriptor structure for the group * @lock: spin lock protecting @desc + * + * Return: Number of free entries written in the group descriptor @desc. 
*/ static unsigned long nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc, @@ -143,6 +157,9 @@ nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc, * @desc: pointer to descriptor structure for the group * @lock: spin lock protecting @desc * @n: delta to be added + * + * Return: Number of free entries after adjusting the group descriptor + * @desc. */ static u32 nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc, @@ -161,6 +178,9 @@ nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc, * nilfs_palloc_entry_blkoff - get block offset of an entry block * @inode: inode of metadata file using this allocator * @nr: serial number of the entry (e.g. inode number) + * + * Return: Index number in the metadata file of the block containing + * the entry specified by @nr. */ static unsigned long nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr) @@ -238,6 +258,12 @@ static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff, * @blkoff: block offset * @prev: nilfs_bh_assoc struct of the last used buffer * @lock: spin lock protecting @prev + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Non-existent block. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff, struct nilfs_bh_assoc *prev, @@ -258,6 +284,8 @@ static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff, * @group: group number * @create: create flag * @bhp: pointer to store the resultant buffer head + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_palloc_get_desc_block(struct inode *inode, unsigned long group, @@ -277,6 +305,8 @@ static int nilfs_palloc_get_desc_block(struct inode *inode, * @group: group number * @create: create flag * @bhp: pointer to store the resultant buffer head + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_palloc_get_bitmap_block(struct inode *inode, unsigned long group, @@ -294,6 +324,8 @@ static int nilfs_palloc_get_bitmap_block(struct inode *inode, * nilfs_palloc_delete_bitmap_block - delete a bitmap block * @inode: inode of metadata file using this allocator * @group: group number + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_palloc_delete_bitmap_block(struct inode *inode, unsigned long group) @@ -312,6 +344,8 @@ static int nilfs_palloc_delete_bitmap_block(struct inode *inode, * @nr: serial number of the entry (e.g. inode number) * @create: create flag * @bhp: pointer to store the resultant buffer head + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr, int create, struct buffer_head **bhp) @@ -328,6 +362,8 @@ int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr, * nilfs_palloc_delete_entry_block - delete an entry block * @inode: inode of metadata file using this allocator * @nr: serial number of the entry + * + * Return: 0 on success, or a negative error code on failure. 
*/ static int nilfs_palloc_delete_entry_block(struct inode *inode, __u64 nr) { @@ -397,6 +433,9 @@ size_t nilfs_palloc_entry_offset(const struct inode *inode, __u64 nr, * @bsize: size in bits * @lock: spin lock protecting @bitmap * @wrap: whether to wrap around + * + * Return: Offset number within the group of the found free entry, or + * %-ENOSPC if not found. */ static int nilfs_palloc_find_available_slot(unsigned char *bitmap, unsigned long target, @@ -438,6 +477,9 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap, * @inode: inode of metadata file using this allocator * @curr: current group number * @max: maximum number of groups + * + * Return: Number of remaining descriptors (= groups) managed by the descriptor + * block. */ static unsigned long nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode, @@ -453,6 +495,8 @@ nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode, * nilfs_palloc_count_desc_blocks - count descriptor blocks number * @inode: inode of metadata file using this allocator * @desc_blocks: descriptor blocks number [out] + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_palloc_count_desc_blocks(struct inode *inode, unsigned long *desc_blocks) @@ -473,6 +517,8 @@ static int nilfs_palloc_count_desc_blocks(struct inode *inode, * MDT file growing * @inode: inode of metadata file using this allocator * @desc_blocks: known current descriptor blocks count + * + * Return: true if a group can be added in the metadata file, false if not. */ static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode, unsigned long desc_blocks) @@ -487,6 +533,12 @@ static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode, * @inode: inode of metadata file using this allocator * @nused: current number of used entries * @nmaxp: max number of entries [out] + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ERANGE - Number of entries in use is out of range. */ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp) { @@ -518,6 +570,13 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp) * @inode: inode of metadata file using this allocator * @req: nilfs_palloc_req structure exchanged for the allocation * @wrap: whether to wrap around + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - Entries exhausted (No entries available for allocation). + * * %-EROFS - Read only filesystem */ int nilfs_palloc_prepare_alloc_entry(struct inode *inode, struct nilfs_palloc_req *req, bool wrap) @@ -710,6 +769,8 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode, * nilfs_palloc_prepare_free_entry - prepare to deallocate a persistent object * @inode: inode of metadata file using this allocator * @req: nilfs_palloc_req structure exchanged for the removal + * + * Return: 0 on success, or a negative error code on failure. 
*/ int nilfs_palloc_prepare_free_entry(struct inode *inode, struct nilfs_palloc_req *req) @@ -754,6 +815,8 @@ void nilfs_palloc_abort_free_entry(struct inode *inode, * @inode: inode of metadata file using this allocator * @entry_nrs: array of entry numbers to be deallocated * @nitems: number of entries stored in @entry_nrs + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) { diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h index 3f115ab7e9a7..046d876ea3e0 100644 --- a/fs/nilfs2/alloc.h +++ b/fs/nilfs2/alloc.h @@ -21,6 +21,8 @@ * * The number of entries per group is defined by the number of bits * that a bitmap block can maintain. + * + * Return: Number of entries per group. */ static inline unsigned long nilfs_palloc_entries_per_group(const struct inode *inode) diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index c9e8d9a7d820..ccc1a7aa52d2 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -47,17 +47,14 @@ static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap, * @ptrp: place to store the value associated to @key * * Description: nilfs_bmap_lookup_at_level() finds a record whose key - * matches @key in the block at @level of the bmap. + * matches @key in the block at @level of the bmap. The record associated + * with @key is stored in the place pointed to by @ptrp. * - * Return Value: On success, 0 is returned and the record associated with @key - * is stored in the place pointed by @ptrp. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - A record associated with @key does not exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - A record associated with @key does not exist. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level, __u64 *ptrp) @@ -138,14 +135,11 @@ static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) * Description: nilfs_bmap_insert() inserts the new key-record pair specified * by @key and @rec into @bmap. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EEXIST - A record associated with @key already exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EEXIST - A record associated with @key already exists. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec) { @@ -193,14 +187,11 @@ static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key) * Description: nilfs_bmap_seek_key() seeks a valid key on @bmap * starting from @start, and stores it to @keyp if found. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - No valid entry was found + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - No valid entry was found. 
+ * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp) { @@ -236,14 +227,11 @@ int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp) * Description: nilfs_bmap_delete() deletes the key-record pair specified by * @key from @bmap. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - A record associated with @key does not exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - A record associated with @key does not exist. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key) { @@ -290,12 +278,10 @@ static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, __u64 key) * Description: nilfs_bmap_truncate() removes key-record pairs whose keys are * greater than or equal to @key from @bmap. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key) { @@ -330,12 +316,10 @@ void nilfs_bmap_clear(struct nilfs_bmap *bmap) * Description: nilfs_bmap_propagate() marks the buffers that directly or * indirectly refer to the block specified by @bh dirty. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh) { @@ -362,22 +346,22 @@ void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *bmap, /** * nilfs_bmap_assign - assign a new block number to a block - * @bmap: bmap - * @bh: pointer to buffer head + * @bmap: bmap + * @bh: place to store a pointer to the buffer head to which a block + * address is assigned (in/out) * @blocknr: block number - * @binfo: block information + * @binfo: block information * * Description: nilfs_bmap_assign() assigns the block number @blocknr to the - * buffer specified by @bh. + * buffer specified by @bh. The block information is stored in the memory + * pointed to by @binfo, and the buffer head may be replaced as a block + * address is assigned, in which case a pointer to the new buffer head is + * stored in the memory pointed to by @bh. * - * Return Value: On success, 0 is returned and the buffer head of a newly - * create buffer and the block information associated with the buffer are - * stored in the place pointed by @bh and @binfo, respectively. On error, one - * of the following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). 
+ * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_assign(struct nilfs_bmap *bmap, struct buffer_head **bh, @@ -402,12 +386,10 @@ int nilfs_bmap_assign(struct nilfs_bmap *bmap, * Description: nilfs_bmap_mark() marks the block specified by @key and @level * as dirty. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level) { @@ -430,7 +412,7 @@ int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level) * Description: nilfs_test_and_clear() is the atomic operation to test and * clear the dirty state of @bmap. * - * Return Value: 1 is returned if @bmap is dirty, or 0 if clear. + * Return: 1 if @bmap is dirty, or 0 if clear. */ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap) { @@ -490,10 +472,10 @@ static struct lock_class_key nilfs_bmap_mdt_lock_key; * * Description: nilfs_bmap_read() initializes the bmap @bmap. * - * Return Value: On success, 0 is returned. On error, the following negative - * error code is returned. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (corrupted bmap). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) { diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 54a3fa0cf67e..568367129092 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -201,7 +201,8 @@ void nilfs_btnode_delete(struct buffer_head *bh) * Note that the current implementation does not support folio sizes larger * than the page size. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EIO - I/O error (metadata corruption). * * %-ENOMEM - Insufficient memory available. */ diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index ef5061bb56da..0d8f7fb15c2e 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -334,7 +334,7 @@ static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node, * @inode: host inode of btree * @blocknr: block number * - * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. + * Return: 0 if normal, 1 if the node is broken. */ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, size_t size, struct inode *inode, @@ -366,7 +366,7 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, * @node: btree root node to be examined * @inode: host inode of btree * - * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. + * Return: 0 if normal, 1 if the root node is broken. */ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, struct inode *inode) @@ -652,8 +652,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree, * @minlevel: start level * @nextkey: place to store the next valid key * - * Return Value: If a next key was found, 0 is returned. Otherwise, - * -ENOENT is returned. + * Return: 0 if the next key was found, %-ENOENT if not found. 
*/ static int nilfs_btree_get_next_key(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path, diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index c20207d7a989..bcc7d76269ac 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c @@ -191,14 +191,11 @@ static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile, * @cnop: place to store the next checkpoint number * @bhp: place to store a pointer to buffer_head struct * - * Return Value: On success, it returns 0. On error, the following negative - * error code is returned. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error - * - * %-ENOENT - no block exists in the range. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - No block exists in the range. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile, __u64 start_cno, __u64 end_cno, @@ -239,7 +236,8 @@ static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile, * stores it to the inode file given by @ifile and the nilfs root object * given by @root. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINVAL - Invalid checkpoint. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption). @@ -307,7 +305,8 @@ out_sem: * In either case, the buffer of the block containing the checkpoint entry * and the cpfile inode are made dirty for inclusion in the write log. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption). * * %-EROFS - Read only filesystem @@ -376,7 +375,8 @@ out_sem: * cpfile with the data given by the arguments @root, @blkinc, @ctime, and * @minor. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption). */ @@ -447,14 +447,11 @@ error: * the period from @start to @end, excluding @end itself. The checkpoints * which have been already deleted are ignored. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - invalid checkpoints. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Invalid checkpoints. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, __u64 start, @@ -718,7 +715,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, * number to continue searching. * * Return: Count of checkpoint info items stored in the output buffer on - * success, or the following negative error code on failure. + * success, or one of the following negative error codes on failure: * * %-EINVAL - Invalid checkpoint mode. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption).
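The cpfile.c hunks above and below all converge on the same kernel-doc convention that this series adopts: a structured "Return:" section with one ReST-style bullet per error code, listed alphabetically by code name. For reference, the target shape on a hypothetical helper (the function name and its codes are purely illustrative, not part of this patch) looks like:

/**
 * nilfs_example_lookup - look up an example entry (illustrative only)
 * @inode: host metadata file inode
 * @key: key of the entry to look up
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EIO - I/O error (including metadata corruption).
 * * %-ENOENT - No entry associated with @key exists.
 * * %-ENOMEM - Insufficient memory available.
 */
int nilfs_example_lookup(struct inode *inode, __u64 key);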
@@ -743,7 +740,8 @@ ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode, * @cpfile: checkpoint file inode * @cno: checkpoint number to delete * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EBUSY - Checkpoint in use (snapshot specified). * * %-EIO - I/O error (including metadata corruption). * * %-ENOENT - No valid checkpoint found. @@ -1011,7 +1009,7 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno) * @cno: checkpoint number * * Return: 1 if the checkpoint specified by @cno is a snapshot, 0 if not, or - * the following negative error code on failure. + * one of the following negative error codes on failure: * * %-EIO - I/O error (including metadata corruption). * * %-ENOENT - No such checkpoint. * * %-ENOMEM - Insufficient memory available. @@ -1058,14 +1056,11 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno) * Description: nilfs_change_cpmode() changes the mode of the checkpoint * specified by @cno. The mode @mode is NILFS_CHECKPOINT or NILFS_SNAPSHOT. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - No such checkpoint. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - No such checkpoint. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode) { @@ -1097,14 +1092,12 @@ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode) * @cpstat: pointer to a structure of checkpoint statistics * * Description: nilfs_cpfile_get_stat() returns information about checkpoints. + * The checkpoint statistics are stored in the location pointed to by @cpstat. * - * Return Value: On success, 0 is returned, and checkpoints information is - * stored in the place pointed by @cpstat. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat) { @@ -1135,6 +1128,8 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat) * @cpsize: size of a checkpoint entry * @raw_inode: on-disk cpfile inode * @inodep: buffer to store the inode + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize, struct nilfs_inode *raw_inode, struct inode **inodep) diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index e220dcb08aa6..c664daba56ae 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c @@ -276,7 +276,8 @@ void nilfs_dat_abort_update(struct inode *dat, * @dat: DAT file inode * @vblocknr: virtual block number * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINVAL - Invalid DAT entry (internal code). * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. 
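A practical payoff of enumerating the codes this way is that callers can branch on each documented value instead of treating every nonzero result alike. A minimal caller-side sketch using nilfs_dat_translate() as documented later in this dat.c diff (the wrapper function and its log messages are hypothetical, added only for illustration):

/* Resolve a virtual block number; a missing mapping is not fatal here. */
static int nilfs_example_resolve(struct inode *dat, __u64 vblocknr,
				 sector_t *blocknrp)
{
	int err = nilfs_dat_translate(dat, vblocknr, blocknrp);

	if (likely(!err))
		return 0;		/* *blocknrp holds the block number */
	if (err == -ENOENT)		/* @vblocknr has no current mapping */
		pr_debug("vblocknr %llu is unallocated\n",
			 (unsigned long long)vblocknr);
	else				/* -EIO or -ENOMEM: hard failure */
		pr_err("DAT translation failed (err=%d)\n", err);
	return err;
}

Per the converted Return: sections, only %-EIO and %-ENOMEM indicate real trouble here; %-ENOENT merely reports an unmapped virtual block number, which a caller may treat as a soft miss.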
@@ -302,14 +303,11 @@ int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr) * Description: nilfs_dat_freev() frees the virtual block numbers specified by * @vblocknrs and @nitems. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - The virtual block number have not been allocated. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - The virtual block numbers have not been allocated. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems) { @@ -325,12 +323,10 @@ int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems) * Description: nilfs_dat_move() changes the block number associated with * @vblocknr to @blocknr. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) { @@ -390,17 +386,14 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) * @blocknrp: pointer to a block number * * Description: nilfs_dat_translate() maps the virtual block number @vblocknr - * to the corresponding block number. + * to the corresponding block number. The block number associated with + * @vblocknr is stored in the place pointed to by @blocknrp. * - * Return Value: On success, 0 is returned and the block number associated - * with @vblocknr is stored in the place pointed by @blocknrp. On error, one - * of the following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - A block number associated with @vblocknr does not exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - A block number associated with @vblocknr does not exist. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp) { @@ -489,6 +482,8 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz, * @entry_size: size of a dat entry * @raw_inode: on-disk dat inode * @inodep: buffer to store the inode + * + * Return: 0 on success, or a negative error code on failure.
*/ int nilfs_dat_read(struct super_block *sb, size_t entry_size, struct nilfs_inode *raw_inode, struct inode **inodep) diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index 0a3aea6c416b..9b7f8e9655a2 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -400,7 +400,7 @@ int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino) return 0; } -void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, +int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, struct folio *folio, struct inode *inode) { size_t from = offset_in_folio(folio, de); @@ -410,11 +410,15 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, folio_lock(folio); err = nilfs_prepare_chunk(folio, from, to); - BUG_ON(err); + if (unlikely(err)) { + folio_unlock(folio); + return err; + } de->inode = cpu_to_le64(inode->i_ino); de->file_type = fs_umode_to_ftype(inode->i_mode); nilfs_commit_chunk(folio, mapping, from, to); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); + return 0; } /* @@ -543,7 +547,10 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct folio *folio) from = (char *)pde - kaddr; folio_lock(folio); err = nilfs_prepare_chunk(folio, from, to); - BUG_ON(err); + if (unlikely(err)) { + folio_unlock(folio); + goto out; + } if (pde) pde->rec_len = nilfs_rec_len_to_disk(to - from); dir->inode = 0; diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 2dbb15767df1..561c220799c7 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -46,14 +46,11 @@ * specified by @pbn to the GC pagecache with the key @blkoff. * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer. * - * Return Value: On success, 0 is returned. On Error, one of the following - * negative error code is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - The block specified with @pbn does not exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - The block specified with @pbn does not exist. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, sector_t pbn, __u64 vbn, @@ -114,12 +111,11 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, * specified by @vbn to the GC pagecache. @pbn can be supplied by the * caller to avoid translation of the disk block address. * - * Return Value: On success, 0 is returned. On Error, one of the following - * negative error code is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Invalid virtual block address. + * * %-ENOMEM - Insufficient memory available. 
*/ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, __u64 vbn, struct buffer_head **out_bh) diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index e7339eb3c08a..c4cd4a4dedd0 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c @@ -38,17 +38,16 @@ static inline struct nilfs_ifile_info *NILFS_IFILE_I(struct inode *ifile) * @out_ino: pointer to a variable to store inode number * @out_bh: buffer_head contains newly allocated disk inode * - * Return Value: On success, 0 is returned and the newly allocated inode - * number is stored in the place pointed by @ino, and buffer_head pointer - * that contains newly allocated disk inode structure is stored in the - * place pointed by @out_bh - * On error, one of the following negative error codes is returned. + * nilfs_ifile_create_inode() allocates a new inode in the ifile metadata + * file and stores the inode number in the variable pointed to by @out_ino, + * as well as storing the ifile's buffer with the disk inode in the location + * pointed to by @out_bh. * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOSPC - No inode left. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No inode left. */ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino, struct buffer_head **out_bh) @@ -83,14 +82,11 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino, * @ifile: ifile inode * @ino: inode number * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - The inode number @ino have not been allocated. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Inode number unallocated. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino) { @@ -150,6 +146,8 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino, * @ifile: ifile inode * @nmaxinodes: current maximum of available inodes count [out] * @nfreeinodes: free inodes count [out] + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_ifile_count_free_inodes(struct inode *ifile, u64 *nmaxinodes, u64 *nfreeinodes) @@ -174,7 +172,8 @@ int nilfs_ifile_count_free_inodes(struct inode *ifile, * @cno: number of checkpoint entry to read * @inode_size: size of an inode * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINVAL - Invalid checkpoint. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption). diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 23f3a75edd50..e8015d24a82c 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -68,6 +68,8 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n) * * This function does not issue actual read request of the specified data * block. It is done by VFS. + * + * Return: 0 on success, or a negative error code on failure. 
*/ int nilfs_get_block(struct inode *inode, sector_t blkoff, struct buffer_head *bh_result, int create) { @@ -141,6 +143,8 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, * address_space_operations. * @file: file struct of the file to be read * @folio: the folio to be read + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_read_folio(struct file *file, struct folio *folio) { @@ -598,10 +602,7 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino, * or does nothing if the inode already has it. This function allocates * an additional inode to maintain page cache of B-tree nodes one-on-one. * - * Return Value: On success, 0 is returned. On errors, one of the following - * negative error code is returned. - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or %-ENOMEM if memory is insufficient. */ int nilfs_attach_btree_node_cache(struct inode *inode) { @@ -660,11 +661,8 @@ void nilfs_detach_btree_node_cache(struct inode *inode) * in one inode and the one for b-tree node pages is set up in the * other inode, which is attached to the former inode. * - * Return Value: On success, a pointer to the inode for data pages is - * returned. On errors, one of the following negative error code is returned - * in a pointer type. - * - * %-ENOMEM - Insufficient memory available. + * Return: A pointer to the inode for data pages on success, or %-ENOMEM + * if memory is insufficient. */ struct inode *nilfs_iget_for_shadow(struct inode *inode) { diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index fa77f78df681..a66d62a51f77 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -33,17 +33,14 @@ * @dofunc: concrete function of get/set metadata info * * Description: nilfs_ioctl_wrap_copy() gets/sets metadata info by means of - * calling dofunc() function on the basis of @argv argument. + * calling dofunc() function on the basis of @argv argument. If successful, + * the requested metadata information is copied to userspace memory. * - * Return Value: On success, 0 is returned and requested metadata info - * is copied into userspace. On error, one of the following - * negative error codes is returned. - * - * %-EINVAL - Invalid arguments from userspace. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during execution of requested operation. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during execution of requested operation. + * * %-EINVAL - Invalid arguments from userspace. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs, struct nilfs_argv *argv, int dir, @@ -190,13 +187,10 @@ static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp) * given checkpoint between checkpoint and snapshot state. This ioctl * is used in chcp and mkcp utilities. * - * Return Value: On success, 0 is returned and mode of a checkpoint is - * changed. On error, one of the following negative error codes - * is returned. - * - * %-EPERM - Operation not permitted. - * - * %-EFAULT - Failure during checkpoint mode changing. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during checkpoint mode changing. + * * %-EPERM - Operation not permitted.
*/ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -244,13 +238,10 @@ out: * checkpoint from NILFS2 file system. This ioctl is used in rmcp * utility. * - * Return Value: On success, 0 is returned and a checkpoint is - * removed. On error, one of the following negative error codes - * is returned. - * - * %-EPERM - Operation not permitted. - * - * %-EFAULT - Failure during checkpoint removing. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during checkpoint removing. + * * %-EPERM - Operation not permitted. */ static int nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp, @@ -296,7 +287,7 @@ out: * requested checkpoints. The NILFS_IOCTL_GET_CPINFO ioctl is used in * lscp utility and by nilfs_cleanerd daemon. * - * Return value: count of nilfs_cpinfo structures in output buffer. + * Return: Count of nilfs_cpinfo structures in output buffer. */ static ssize_t nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, @@ -320,17 +311,14 @@ nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, * * Description: nilfs_ioctl_get_cpstat() returns information about checkpoints. * The NILFS_IOCTL_GET_CPSTAT ioctl is used by lscp, rmcp utilities - * and by nilfs_cleanerd daemon. + * and by nilfs_cleanerd daemon. The checkpoint statistics are copied to + * the userspace memory pointed to by @argp. * - * Return Value: On success, 0 is returned, and checkpoints information is - * copied into userspace pointer @argp. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during getting checkpoints statistics. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during getting checkpoints statistics. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -363,7 +351,8 @@ static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp, * info about requested segments. The NILFS_IOCTL_GET_SUINFO ioctl is used * in lssu, nilfs_resize utilities and by nilfs_cleanerd daemon. * - * Return value: count of nilfs_suinfo structures in output buffer. + * Return: Count of nilfs_suinfo structures in output buffer on success, + * or a negative error code on failure. */ static ssize_t nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, @@ -387,17 +376,14 @@ nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, * * Description: nilfs_ioctl_get_sustat() returns segment usage statistics. * The NILFS_IOCTL_GET_SUSTAT ioctl is used in lssu, nilfs_resize utilities - * and by nilfs_cleanerd daemon. + * and by nilfs_cleanerd daemon. The requested segment usage information is + * copied to the userspace memory pointed to by @argp. * - * Return Value: On success, 0 is returned, and segment usage information is - * copied into userspace pointer @argp. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during getting segment usage statistics.
+ * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during getting segment usage statistics. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -430,7 +416,8 @@ static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp, * on virtual block addresses. The NILFS_IOCTL_GET_VINFO ioctl is used * by nilfs_cleanerd daemon. * - * Return value: count of nilfs_vinfo structures in output buffer. + * Return: Count of nilfs_vinfo structures in output buffer on success, or + * a negative error code on failure. */ static ssize_t nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, @@ -457,7 +444,8 @@ nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl * is used by nilfs_cleanerd daemon. * - * Return value: count of nilfs_bdescs structures in output buffer. + * Return: Count of nilfs_bdescs structures in output buffer on success, or + * a negative error code on failure. */ static ssize_t nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, @@ -494,19 +482,15 @@ nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, * * Description: nilfs_ioctl_do_get_bdescs() function returns information * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl - * is used by nilfs_cleanerd daemon. + * is used by nilfs_cleanerd daemon. If successful, disk block descriptors + * are copied to the userspace memory pointed to by @argp. * - * Return Value: On success, 0 is returned, and disk block descriptors are - * copied into userspace pointer @argp. On error, one of the following - * negative error codes is returned. - * - * %-EINVAL - Invalid arguments from userspace. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during getting disk block descriptors. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during getting disk block descriptors. + * * %-EINVAL - Invalid arguments from userspace. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -540,16 +524,12 @@ static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp, * Description: nilfs_ioctl_move_inode_block() function registers data/node * buffer in the GC pagecache and submit read request. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - Requested block doesn't exist. - * - * %-EEXIST - Blocks conflict is detected. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EEXIST - Block conflict detected. + * * %-EIO - I/O error. + * * %-ENOENT - Requested block doesn't exist. + * * %-ENOMEM - Insufficient memory available.
*/ static int nilfs_ioctl_move_inode_block(struct inode *inode, struct nilfs_vdesc *vdesc, @@ -604,8 +584,8 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode, * blocks that garbage collector specified with the array of nilfs_vdesc * structures and stores them into page caches of GC inodes. * - * Return Value: Number of processed nilfs_vdesc structures or - * error code, otherwise. + * Return: Number of processed nilfs_vdesc structures on success, or + * a negative error code on failure. */ static int nilfs_ioctl_move_blocks(struct super_block *sb, struct nilfs_argv *argv, void *buf) @@ -682,14 +662,11 @@ static int nilfs_ioctl_move_blocks(struct super_block *sb, * in the period from p_start to p_end, excluding p_end itself. The checkpoints * which have been already deleted are ignored. * - * Return Value: Number of processed nilfs_period structures or - * error code, otherwise. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - invalid checkpoints. + * Return: Number of processed nilfs_period structures on success, or one of + * the following negative error codes on failure: + * * %-EINVAL - Invalid checkpoints. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) @@ -717,14 +694,11 @@ static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, * Description: nilfs_ioctl_free_vblocknrs() function frees * the virtual block numbers specified by @buf and @argv->v_nmembs. * - * Return Value: Number of processed virtual block numbers or - * error code, otherwise. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - The virtual block number have not been allocated. + * Return: Number of processed virtual block numbers on success, or one of the + * following negative error codes on failure: + * * %-EIO - I/O error. + * * %-ENOENT - Unallocated virtual block number. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) @@ -746,14 +720,11 @@ static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, * Description: nilfs_ioctl_mark_blocks_dirty() function marks * metadata file or data blocks as dirty. * - * Return Value: Number of processed block descriptors or - * error code, otherwise. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error - * - * %-ENOENT - the specified block does not exist (hole block) + * Return: Number of processed block descriptors on success, or one of the + * following negative error codes on failure: + * * %-EIO - I/O error. + * * %-ENOENT - Non-existent block (hole block). + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) @@ -852,7 +823,7 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, * from userspace. The NILFS_IOCTL_CLEAN_SEGMENTS ioctl is used by * nilfs_cleanerd daemon. * - * Return Value: On success, 0 is returned or error code, otherwise. + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -976,20 +947,14 @@ out: * and metadata are written out to the device when it successfully * returned. * - * Return Value: On success, 0 is retured.
On errors, one of the following - * negative error code is returned. - * - * %-EROFS - Read only filesystem. - * - * %-EIO - I/O error - * - * %-ENOSPC - No space left on device (only in a panic state). - * - * %-ERESTARTSYS - Interrupted. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EFAULT - Failure during execution of requested operation. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during execution of requested operation. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (only in a panic state). + * * %-ERESTARTSYS - Interrupted. + * * %-EROFS - Read only filesystem. */ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -1023,7 +988,7 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, * @filp: file object * @argp: pointer on argument from userspace * - * Return Value: On success, 0 is returned or error code, otherwise. + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_ioctl_resize(struct inode *inode, struct file *filp, void __user *argp) @@ -1059,7 +1024,7 @@ out: * checks the arguments from userspace and calls nilfs_sufile_trim_fs, which * performs the actual trim operation. * - * Return Value: On success, 0 is returned or negative error code, otherwise. + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp) { @@ -1101,7 +1066,7 @@ static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp) * of segments in bytes and upper limit of segments in bytes. * The NILFS_IOCTL_SET_ALLOC_RANGE is used by nilfs_resize utility. * - * Return Value: On success, 0 is returned or error code, otherwise. + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp) { @@ -1152,17 +1117,15 @@ out: * @dofunc: concrete function of getting metadata info * * Description: nilfs_ioctl_get_info() gets metadata info by means of - * calling dofunc() function. + * calling dofunc() function. The requested metadata information is copied + * to userspace memory @argp. * - * Return Value: On success, 0 is returned and requested metadata info - * is copied into userspace. On error, one of the following - * negative error codes is returned. - * - * %-EINVAL - Invalid arguments from userspace. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during execution of requested operation. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during execution of requested operation. + * * %-EINVAL - Invalid arguments from userspace. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp, @@ -1202,18 +1165,14 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, * encapsulated in nilfs_argv and updates the segment usage info * according to the flags in nilfs_suinfo_update. * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-EPERM - Not enough permissions - * - * %-EFAULT - Error copying input data - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. 
- * - * %-EINVAL - Invalid values in input (segment number, flags or nblocks) + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EEXIST - Block conflict detected. + * * %-EFAULT - Error copying input data. + * * %-EINVAL - Invalid values in input (segment number, flags or nblocks). + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. + * * %-EPERM - Not enough permissions. */ static int nilfs_ioctl_set_suinfo(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -1309,7 +1268,8 @@ static int nilfs_ioctl_get_fslabel(struct super_block *sb, void __user *argp) * @filp: file object * @argp: pointer to userspace memory that contains the volume name * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EFAULT - Error copying input data. * * %-EINVAL - Label length exceeds record size in superblock. * * %-EIO - I/O error. diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 965b5ad1c0df..2f850a18d6e7 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -226,20 +226,21 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block, * @out_bh: output of a pointer to the buffer_head * * nilfs_mdt_get_block() looks up the specified buffer and tries to create - * a new buffer if @create is not zero. On success, the returned buffer is - * assured to be either existing or formatted using a buffer lock on success. - * @out_bh is substituted only when zero is returned. + * a new buffer if @create is not zero. If (and only if) this function + * succeeds, it stores a pointer to the retrieved buffer head in the location + * pointed to by @out_bh. * - * Return Value: On success, it returns 0. On error, the following negative - * error code is returned. + * The retrieved buffer may be either an existing one or a newly allocated one. + * For a newly created buffer, if the callback function argument @init_block + * is non-NULL, the callback will be called with the buffer locked to format + * the block. * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error - * - * %-ENOENT - the specified block does not exist (hole block) - * - * %-EROFS - Read only filesystem (for create mode) + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - The specified block does not exist (hole block). + * * %-ENOMEM - Insufficient memory available. + * * %-EROFS - Read only filesystem (for create mode). */ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create, void (*init_block)(struct inode *, @@ -275,14 +276,11 @@ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create, * @out_bh, and block offset to @blkoff, respectively. @out_bh and * @blkoff are substituted only when zero is returned. * - * Return Value: On success, it returns 0. On error, the following negative - * error code is returned. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error - * - * %-ENOENT - no block was found in the range + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - No block was found in the range. + * * %-ENOMEM - Insufficient memory available. 
*/ int nilfs_mdt_find_block(struct inode *inode, unsigned long start, unsigned long end, unsigned long *blkoff, @@ -321,12 +319,11 @@ out: * @inode: inode of the meta data file * @block: block offset * - * Return Value: On success, zero is returned. - * On error, one of the following negative error code is returned. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Non-existent block. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block) { @@ -349,12 +346,10 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block) * nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and * tries to release the page including the buffer from a page cache. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error code is returned. - * - * %-EBUSY - page has an active buffer. - * - * %-ENOENT - page cache has no page addressed by the offset. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EBUSY - Page has an active buffer. + * * %-ENOENT - Page cache has no page addressed by the offset. */ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block) { @@ -524,6 +519,8 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size, * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file * @inode: inode of the metadata file * @shadow: shadow mapping + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_mdt_setup_shadow_map(struct inode *inode, struct nilfs_shadow_map *shadow) @@ -545,6 +542,8 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode, /** * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map * @inode: inode of the metadata file + * + * Return: 0 on success, or a negative error code on failure. 
*/ int nilfs_mdt_save_to_shadow_map(struct inode *inode) { diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 1d836a5540f3..953fbd5f0851 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -370,6 +370,7 @@ static int nilfs_rename(struct mnt_idmap *idmap, struct folio *old_folio; struct nilfs_dir_entry *old_de; struct nilfs_transaction_info ti; + bool old_is_dir = S_ISDIR(old_inode->i_mode); int err; if (flags & ~RENAME_NOREPLACE) @@ -385,7 +386,7 @@ static int nilfs_rename(struct mnt_idmap *idmap, goto out; } - if (S_ISDIR(old_inode->i_mode)) { + if (old_is_dir && old_dir != new_dir) { err = -EIO; dir_de = nilfs_dotdot(old_inode, &dir_folio); if (!dir_de) @@ -397,7 +398,7 @@ static int nilfs_rename(struct mnt_idmap *idmap, struct nilfs_dir_entry *new_de; err = -ENOTEMPTY; - if (dir_de && !nilfs_empty_dir(new_inode)) + if (old_is_dir && !nilfs_empty_dir(new_inode)) goto out_dir; new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, @@ -406,11 +407,13 @@ static int nilfs_rename(struct mnt_idmap *idmap, err = PTR_ERR(new_de); goto out_dir; } - nilfs_set_link(new_dir, new_de, new_folio, old_inode); + err = nilfs_set_link(new_dir, new_de, new_folio, old_inode); folio_release_kmap(new_folio, new_de); + if (unlikely(err)) + goto out_dir; nilfs_mark_inode_dirty(new_dir); inode_set_ctime_current(new_inode); - if (dir_de) + if (old_is_dir) drop_nlink(new_inode); drop_nlink(new_inode); nilfs_mark_inode_dirty(new_inode); @@ -418,7 +421,7 @@ static int nilfs_rename(struct mnt_idmap *idmap, err = nilfs_add_link(new_dentry, old_inode); if (err) goto out_dir; - if (dir_de) { + if (old_is_dir) { inc_nlink(new_dir); nilfs_mark_inode_dirty(new_dir); } @@ -430,28 +433,28 @@ static int nilfs_rename(struct mnt_idmap *idmap, */ inode_set_ctime_current(old_inode); - nilfs_delete_entry(old_de, old_folio); - - if (dir_de) { - nilfs_set_link(old_inode, dir_de, dir_folio, new_dir); - folio_release_kmap(dir_folio, dir_de); - drop_nlink(old_dir); + err = nilfs_delete_entry(old_de, old_folio); + if (likely(!err)) { + if (old_is_dir) { + if (old_dir != new_dir) + err = nilfs_set_link(old_inode, dir_de, + dir_folio, new_dir); + drop_nlink(old_dir); + } + nilfs_mark_inode_dirty(old_dir); } - folio_release_kmap(old_folio, old_de); - - nilfs_mark_inode_dirty(old_dir); nilfs_mark_inode_dirty(old_inode); - err = nilfs_transaction_commit(old_dir->i_sb); - return err; - out_dir: if (dir_de) folio_release_kmap(dir_folio, dir_de); out_old: folio_release_kmap(old_folio, old_de); out: - nilfs_transaction_abort(old_dir->i_sb); + if (likely(!err)) + err = nilfs_transaction_commit(old_dir->i_sb); + else + nilfs_transaction_abort(old_dir->i_sb); return err; } diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index dff241c53fc5..cb6ed54accd7 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -261,8 +261,8 @@ struct nilfs_dir_entry *nilfs_find_entry(struct inode *, const struct qstr *, int nilfs_delete_entry(struct nilfs_dir_entry *, struct folio *); int nilfs_empty_dir(struct inode *); struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct folio **); -void nilfs_set_link(struct inode *, struct nilfs_dir_entry *, - struct folio *, struct inode *); +int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, + struct folio *folio, struct inode *inode); /* file.c */ extern int nilfs_sync_file(struct file *, loff_t, loff_t, int); diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 9de2a494a069..806b056d2260 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -135,8 +135,7 @@ void 
nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh) * nilfs_folio_buffers_clean - Check if a folio has dirty buffers or not. * @folio: Folio to be checked. * - * nilfs_folio_buffers_clean() returns false if the folio has dirty buffers. - * Otherwise, it returns true. + * Return: false if the folio has dirty buffers, true otherwise. */ bool nilfs_folio_buffers_clean(struct folio *folio) { @@ -392,6 +391,11 @@ void nilfs_clear_dirty_pages(struct address_space *mapping) /** * nilfs_clear_folio_dirty - discard dirty folio * @folio: dirty folio that will be discarded + * + * nilfs_clear_folio_dirty() clears working states including dirty state for + * the folio and its buffers. If the folio has buffers, they are cleared only + * after confirming that none of the buffer heads are busy (none have valid + * references and none are locked). */ void nilfs_clear_folio_dirty(struct folio *folio) { @@ -399,10 +403,6 @@ void nilfs_clear_folio_dirty(struct folio *folio) BUG_ON(!folio_test_locked(folio)); - folio_clear_uptodate(folio); - folio_clear_mappedtodisk(folio); - folio_clear_checked(folio); - head = folio_buffers(folio); if (head) { const unsigned long clear_bits = @@ -410,6 +410,25 @@ void nilfs_clear_folio_dirty(struct folio *folio) BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) | BIT(BH_Delay)); + bool busy, invalidated = false; + +recheck_buffers: + busy = false; + bh = head; + do { + if (atomic_read(&bh->b_count) | buffer_locked(bh)) { + busy = true; + break; + } + } while (bh = bh->b_this_page, bh != head); + + if (busy) { + if (invalidated) + return; + invalidate_bh_lrus(); + invalidated = true; + goto recheck_buffers; + } bh = head; do { @@ -419,6 +438,9 @@ ... } while (bh = bh->b_this_page, bh != head); } + folio_clear_uptodate(folio); + folio_clear_mappedtodisk(folio); + folio_clear_checked(folio); __nilfs_clear_folio_dirty(folio); } @@ -477,8 +499,9 @@ void __nilfs_clear_folio_dirty(struct folio *folio) * This function searches an extent of buffers marked "delayed" which * starts from a block offset equal to or larger than @start_blk. If * such an extent was found, this will store the start offset in - * @blkoff and return its length in blocks. Otherwise, zero is - * returned. + * @blkoff and return its length in blocks. + * + * Return: Length in blocks of the found extent, or 0 if none was found. */ unsigned long nilfs_find_uncommitted_extent(struct inode *inode, sector_t start_blk, diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index e43405bf521e..22aecf6e2344 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -88,6 +88,8 @@ static int nilfs_warn_segment_error(struct super_block *sb, int err) * @check_bytes: number of bytes to be checked * @start: DBN of start block * @nblock: number of blocks to be checked + * + * Return: 0 on success, or %-EIO if an I/O error occurs. */ static int nilfs_compute_checksum(struct the_nilfs *nilfs, struct buffer_head *bhs, u32 *sum, @@ -126,6 +128,11 @@ static int nilfs_compute_checksum(struct the_nilfs *nilfs, * @sr_block: disk block number of the super root block * @pbh: address of a buffer_head pointer to return super root buffer * @check: CRC check flag + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Super root block corrupted. + * * %-EIO - I/O error.
*/ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block, struct buffer_head **pbh, int check) @@ -176,6 +183,8 @@ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block, * @nilfs: nilfs object * @start_blocknr: start block number of the log * @sum: pointer to return segment summary structure + * + * Return: Buffer head pointer, or NULL if an I/O error occurs. */ static struct buffer_head * nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr, @@ -195,6 +204,13 @@ nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr, * @seg_seq: sequence number of segment * @bh_sum: buffer head of summary block * @sum: segment summary struct + * + * Return: 0 on success, or one of the following internal codes on failure: + * * %NILFS_SEG_FAIL_MAGIC - Magic number mismatch. + * * %NILFS_SEG_FAIL_SEQ - Sequence number mismatch. + * * %NILFS_SEG_FAIL_CONSISTENCY - Block count out of range. + * * %NILFS_SEG_FAIL_IO - I/O error. + * * %NILFS_SEG_FAIL_CHECKSUM_FULL - Full log checksum verification failed. */ static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq, struct buffer_head *bh_sum, @@ -238,6 +254,9 @@ out: * @pbh: the current buffer head on summary blocks [in, out] * @offset: the current byte offset on summary blocks [in, out] * @bytes: byte size of the item to be read + * + * Return: Kernel space address of current segment summary entry, or + * NULL if an I/O error occurs. */ static void *nilfs_read_summary_info(struct the_nilfs *nilfs, struct buffer_head **pbh, @@ -300,6 +319,11 @@ static void nilfs_skip_summary_info(struct the_nilfs *nilfs, * @start_blocknr: start block number of the log * @sum: log summary information * @head: list head to add nilfs_recovery_block struct + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr, struct nilfs_segment_summary *sum, @@ -571,6 +595,12 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, * @sb: super block instance * @root: NILFS root instance * @ri: pointer to a nilfs_recovery_info + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Log format error. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, struct super_block *sb, @@ -754,18 +784,13 @@ static void nilfs_abort_roll_forward(struct the_nilfs *nilfs) * @sb: super block instance * @ri: pointer to a nilfs_recovery_info struct to store search results. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error code is returned. - * - * %-EINVAL - Inconsistent filesystem state. - * - * %-EIO - I/O error - * - * %-ENOSPC - No space left on device (only in a panic state). - * - * %-ERESTARTSYS - Interrupted. - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Inconsistent filesystem state. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (only in a panic state). + * * %-ERESTARTSYS - Interrupted. */ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, struct super_block *sb, @@ -830,14 +855,11 @@ failed: * segment pointed by the superblock.
It sets up struct the_nilfs through * this search. It fills nilfs_recovery_info (ri) required for recovery. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error code is returned. - * - * %-EINVAL - No valid segment found - * - * %-EIO - I/O error - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - No valid segment found. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_recovery_info *ri) diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index e08cab03366b..a8bdf3d318ea 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -406,12 +406,7 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf, * @segbuf: buffer storing a log to be written * @nilfs: nilfs object * - * Return Value: On Success, 0 is returned. On Error, one of the following - * negative error code is returned. - * - * %-EIO - I/O error - * - * %-ENOMEM - Insufficient memory available. + * Return: Always 0. */ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, struct the_nilfs *nilfs) @@ -452,10 +447,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, * nilfs_segbuf_wait - wait for completion of requested BIOs * @segbuf: segment buffer * - * Return Value: On Success, 0 is returned. On Error, one of the following - * negative error code is returned. - * - * %-EIO - I/O error + * Return: 0 on success, or %-EIO if I/O error is detected. */ static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf) { diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 587251830897..3a202e51b360 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -191,12 +191,10 @@ static int nilfs_prepare_segment_lock(struct super_block *sb, * When @vacancy_check flag is set, this function will check the amount of * free space, and will wait for the GC to reclaim disk space if low capacity. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error code is returned. - * - * %-ENOMEM - Insufficient memory available. - * - * %-ENOSPC - No space left on device + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (if checking free space). */ int nilfs_transaction_begin(struct super_block *sb, struct nilfs_transaction_info *ti, @@ -252,6 +250,8 @@ int nilfs_transaction_begin(struct super_block *sb, * nilfs_transaction_commit() sets a timer to start the segment * constructor. If a sync flag is set, it starts construction * directly. + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_transaction_commit(struct super_block *sb) { @@ -407,6 +407,8 @@ static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci, /** * nilfs_segctor_reset_segment_buffer - reset the current segment buffer * @sci: nilfs_sc_info + * + * Return: 0 on success, or a negative error code on failure. 
*/ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci) { @@ -734,7 +736,6 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, if (!head) head = create_empty_buffers(folio, i_blocksize(inode), 0); - folio_unlock(folio); bh = head; do { @@ -744,11 +745,14 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, list_add_tail(&bh->b_assoc_buffers, listp); ndirties++; if (unlikely(ndirties >= nlimit)) { + folio_unlock(folio); folio_batch_release(&fbatch); cond_resched(); return ndirties; } } while (bh = bh->b_this_page, bh != head); + + folio_unlock(folio); } folio_batch_release(&fbatch); cond_resched(); @@ -1118,7 +1122,8 @@ static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci, * a super root block containing this sufile change is complete, and it can * be canceled with nilfs_sufile_cancel_freev() until then. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINVAL - Invalid segment number. * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. @@ -1315,6 +1320,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) * nilfs_segctor_begin_construction - setup segment buffer to make a new log * @sci: nilfs_sc_info * @nilfs: nilfs object + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) @@ -2312,18 +2319,13 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force) * nilfs_construct_segment - construct a logical segment * @sb: super block * - * Return Value: On success, 0 is returned. On errors, one of the following - * negative error code is returned. - * - * %-EROFS - Read only filesystem. - * - * %-EIO - I/O error - * - * %-ENOSPC - No space left on device (only in a panic state). - * - * %-ERESTARTSYS - Interrupted. - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (only in a panic state). + * * %-ERESTARTSYS - Interrupted. + * * %-EROFS - Read only filesystem. */ int nilfs_construct_segment(struct super_block *sb) { @@ -2347,18 +2349,13 @@ int nilfs_construct_segment(struct super_block *sb) * @start: start byte offset * @end: end byte offset (inclusive) * - * Return Value: On success, 0 is returned. On errors, one of the following - * negative error code is returned. - * - * %-EROFS - Read only filesystem. - * - * %-EIO - I/O error - * - * %-ENOSPC - No space left on device (only in a panic state). - * - * %-ERESTARTSYS - Interrupted. - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (only in a panic state). + * * %-ERESTARTSYS - Interrupted. + * * %-EROFS - Read only filesystem. 
*/ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode, loff_t start, loff_t end) @@ -2464,6 +2461,8 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err) * nilfs_segctor_construct - form logs and write them to disk * @sci: segment constructor object * @mode: mode of log forming + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode) { @@ -2836,7 +2835,8 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) * This allocates a log writer object, initializes it, and starts the * log writer. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINTR - Log writer thread creation failed due to interruption. * * %-ENOMEM - Insufficient memory available. */ diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index d3ecc813d633..330f269abedf 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c @@ -133,6 +133,8 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh, /** * nilfs_sufile_get_ncleansegs - return the number of clean segments * @sufile: inode of segment usage file + * + * Return: Number of clean segments. */ unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile) { @@ -155,17 +157,13 @@ unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile) * of successfully modified segments from the head is stored in the * place @ndone points to. * - * Return Value: On success, zero is returned. On error, one of the - * following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - Given segment usage is in hole block (may be returned if - * @create is zero) - * - * %-EINVAL - Invalid segment usage number + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Invalid segment usage number. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Given segment usage is in hole block (may be returned if + * @create is zero). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs, int create, size_t *ndone, @@ -272,10 +270,7 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create, * @start: minimum segment number of allocatable region (inclusive) * @end: maximum segment number of allocatable region (inclusive) * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-ERANGE - invalid segment region + * Return: 0 on success, or %-ERANGE if segment range is invalid. */ int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end) { @@ -300,17 +295,14 @@ int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end) * @sufile: inode of segment usage file * @segnump: pointer to segment number * - * Description: nilfs_sufile_alloc() allocates a clean segment. + * Description: nilfs_sufile_alloc() allocates a clean segment, and stores + * its segment number in the place pointed to by @segnump. * - * Return Value: On success, 0 is returned and the segment number of the - * allocated segment is stored in the place pointed by @segnump. On error, one - * of the following negative error codes is returned. - * - * %-EIO - I/O error.
- * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOSPC - No clean segment left. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No clean segment left. */ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) { @@ -510,6 +502,8 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty * @sufile: inode of segment usage file * @segnum: segment number + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum) { @@ -569,6 +563,8 @@ out_sem: * @segnum: segment number * @nblocks: number of live blocks in the segment * @modtime: modification time (option) + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, unsigned long nblocks, time64_t modtime) @@ -610,16 +606,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, * @sufile: inode of segment usage file * @sustat: pointer to a structure of segment usage statistics * - * Description: nilfs_sufile_get_stat() returns information about segment - * usage. + * Description: nilfs_sufile_get_stat() retrieves segment usage statistics + * and stores them in the location pointed to by @sustat. * - * Return Value: On success, 0 is returned, and segment usage information is - * stored in the place pointed by @sustat. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat) { @@ -683,16 +676,12 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum, * @start: start segment number (inclusive) * @end: end segment number (inclusive) * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - Invalid number of segments specified - * - * %-EBUSY - Dirty or active segments are present in the range + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EBUSY - Dirty or active segments are present in the range. + * * %-EINVAL - Invalid number of segments specified. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_sufile_truncate_range(struct inode *sufile, __u64 start, __u64 end) @@ -787,16 +776,12 @@ out: * @sufile: inode of segment usage file * @newnsegs: new number of segments * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. 
- * - * %-ENOSPC - Enough free space is not left for shrinking - * - * %-EBUSY - Dirty or active segments exist in the region to be truncated + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EBUSY - Dirty or active segments exist in the region to be truncated. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - Enough free space is not left for shrinking. */ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs) { @@ -865,7 +850,7 @@ out: * @nsi: size of suinfo array * * Return: Count of segment usage info items stored in the output buffer on - * success, or the following negative error code on failure. + * success, or one of the following negative error codes on failure: * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. */ @@ -939,14 +924,11 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, * segment usage accordingly. Only the fields indicated by the sup_flags * are updated. * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - Invalid values in input (segment number, flags or nblocks) + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Invalid values in input (segment number, flags or nblocks). + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf, unsigned int supsz, size_t nsup) @@ -1073,7 +1055,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf, * and start+len is rounded down. For each clean segment blkdev_issue_discard * function is invoked. * - * Return Value: On success, 0 is returned or negative error code, otherwise. + * Return: 0 on success, or a negative error code on failure. */ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range) { @@ -1219,6 +1201,8 @@ out_sem: * @susize: size of a segment usage entry * @raw_inode: on-disk sufile inode * @inodep: buffer to store the inode + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_sufile_read(struct super_block *sb, size_t susize, struct nilfs_inode *raw_inode, struct inode **inodep) diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h index 8e8a1a5a0402..cd6f28ab3521 100644 --- a/fs/nilfs2/sufile.h +++ b/fs/nilfs2/sufile.h @@ -58,6 +58,8 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range); * nilfs_sufile_scrap - make a segment garbage * @sufile: inode of segment usage file * @segnum: segment number to be freed + * + * Return: 0 on success, or a negative error code on failure. */ static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum) { @@ -68,6 +70,8 @@ static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum) * nilfs_sufile_free - free segment * @sufile: inode of segment usage file * @segnum: segment number to be freed + * + * Return: 0 on success, or a negative error code on failure. 
*/ static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum) { @@ -80,6 +84,8 @@ static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum) * @segnumv: array of segment numbers * @nsegs: size of @segnumv array * @ndone: place to store the number of freed segments + * + * Return: 0 on success, or a negative error code on failure. */ static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv, size_t nsegs, size_t *ndone) @@ -95,8 +101,7 @@ static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv, * @nsegs: size of @segnumv array * @ndone: place to store the number of cancelled segments * - * Return Value: On success, 0 is returned. On error, a negative error codes - * is returned. + * Return: 0 on success, or a negative error code on failure. */ static inline int nilfs_sufile_cancel_freev(struct inode *sufile, __u64 *segnumv, size_t nsegs, @@ -114,14 +119,11 @@ static inline int nilfs_sufile_cancel_freev(struct inode *sufile, * Description: nilfs_sufile_set_error() marks the segment specified by * @segnum as erroneous. The error segment will never be used again. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - Invalid segment usage number. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Invalid segment usage number. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ static inline int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum) { diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index eca79cca3803..badc2cbc895e 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -309,6 +309,8 @@ int nilfs_commit_super(struct super_block *sb, int flag) * This function restores state flags in the on-disk super block. * This will set "clean" flag (i.e. NILFS_VALID_FS) unless the * filesystem was not clean previously. + * + * Return: 0 on success, %-EIO if I/O error or superblock is corrupted. */ int nilfs_cleanup_super(struct super_block *sb) { @@ -339,6 +341,8 @@ int nilfs_cleanup_super(struct super_block *sb) * nilfs_move_2nd_super - relocate secondary super block * @sb: super block instance * @sb2off: new offset of the secondary super block (in bytes) + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off) { @@ -420,6 +424,8 @@ out: * nilfs_resize_fs - resize the filesystem * @sb: super block instance * @newsize: new size of the filesystem (in bytes) + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_resize_fs(struct super_block *sb, __u64 newsize) { @@ -987,7 +993,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno, * nilfs_tree_is_busy() - try to shrink dentries of a checkpoint * @root_dentry: root dentry of the tree to be shrunk * - * This function returns true if the tree was in-use. + * Return: true if the tree was in-use, false otherwise. */ static bool nilfs_tree_is_busy(struct dentry *root_dentry) { @@ -1033,6 +1039,8 @@ int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno) * * This function is called exclusively by nilfs->ns_mount_mutex. * So, the recovery process is protected from other simultaneous mounts. + * + * Return: 0 on success, or a negative error code on failure. 
*/ static int nilfs_fill_super(struct super_block *sb, struct fs_context *fc) diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index ac03fd3c330c..cb01ea81724d 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -49,8 +49,8 @@ void nilfs_set_last_segment(struct the_nilfs *nilfs, * alloc_nilfs - allocate a nilfs object * @sb: super block instance * - * Return Value: On success, pointer to the_nilfs is returned. - * On error, NULL is returned. + * Return: a pointer to the allocated nilfs object on success, or NULL on + * failure. */ struct the_nilfs *alloc_nilfs(struct super_block *sb) { @@ -165,6 +165,9 @@ static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri) * containing a super root from a given super block, and initializes * relevant information on the nilfs object preparatory for log * scanning and recovery. + * + * Return: 0 on success, or %-EINVAL if current segment number is out + * of range. */ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, struct nilfs_super_block *sbp) @@ -200,8 +203,7 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, * exponent information written in @sbp and stores it in @blocksize, * or aborts with an error message if it's too large. * - * Return Value: On success, 0 is returned. If the block size is too - * large, -EINVAL is returned. + * Return: 0 on success, or %-EINVAL if the block size is too large. */ static int nilfs_get_blocksize(struct super_block *sb, struct nilfs_super_block *sbp, int *blocksize) @@ -226,6 +228,13 @@ static int nilfs_get_blocksize(struct super_block *sb, * load_nilfs() searches and load the latest super root, * attaches the last segment, and does recovery if needed. * The caller must call this exclusively for simultaneous mounts. + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - No valid segment found. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. + * * %-EROFS - Read only device or RO compat mode (if recovery is required) */ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) { @@ -395,6 +404,8 @@ static unsigned long long nilfs_max_size(unsigned int blkbits) * nilfs_nrsvsegs - calculate the number of reserved segments * @nilfs: nilfs object * @nsegs: total number of segments + * + * Return: Number of reserved segments. */ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs) { @@ -406,6 +417,8 @@ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs) /** * nilfs_max_segment_count - calculate the maximum number of segments * @nilfs: nilfs object + * + * Return: Maximum number of segments */ static u64 nilfs_max_segment_count(struct the_nilfs *nilfs) { @@ -538,7 +551,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp) * area, or if the parameters themselves are not normal, it is * determined to be invalid. * - * Return Value: true if invalid, false if valid. + * Return: true if invalid, false if valid. */ static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset) { @@ -684,8 +697,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, * reading the super block, getting disk layout information, initializing * shared fields in the_nilfs). * - * Return Value: On success, 0 is returned. On error, a negative error - * code is returned. + * Return: 0 on success, or a negative error code on failure. 
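[Editor's note: all of the Return-section hunks above converge on one kernel-doc shape. For reference, that shape on a hypothetical helper: "Return:" is a section keyword recognized by scripts/kernel-doc, and the "* *" lines render as a reStructuredText bullet list, with the error codes sorted alphabetically as in the hunks.]

/**
 * nilfs_example_op - perform one metadata operation (hypothetical helper)
 * @inode: target metadata file inode
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EIO - I/O error (including metadata corruption).
 * * %-ENOMEM - Insufficient memory available.
 */
static int nilfs_example_op(struct inode *inode);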
*/ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb) { diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 395e23920632..4414743b638e 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -566,7 +566,7 @@ static void ocfs2_adjust_rightmost_records(handle_t *handle, struct ocfs2_path *path, struct ocfs2_extent_rec *insert_rec); /* - * Reset the actual path elements so that we can re-use the structure + * Reset the actual path elements so that we can reuse the structure * to build another path. Generally, this involves freeing the buffer * heads. */ @@ -1182,7 +1182,7 @@ static int ocfs2_add_branch(handle_t *handle, /* * If there is a gap before the root end and the real end - * of the righmost leaf block, we need to remove the gap + * of the rightmost leaf block, we need to remove the gap * between new_cpos and root_end first so that the tree * is consistent after we add a new branch(it will start * from new_cpos). @@ -1238,7 +1238,7 @@ static int ocfs2_add_branch(handle_t *handle, /* Note: new_eb_bhs[new_blocks - 1] is the guy which will be * linked with the rest of the tree. - * conversly, new_eb_bhs[0] is the new bottommost leaf. + * conversely, new_eb_bhs[0] is the new bottommost leaf. * * when we leave the loop, new_last_eb_blk will point to the * newest leaf, and next_blkno will point to the topmost extent @@ -3712,7 +3712,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle, * update split_index here. * * When the split_index is zero, we need to merge it to the - * prevoius extent block. It is more efficient and easier + * previous extent block. It is more efficient and easier * if we do merge_right first and merge_left later. */ ret = ocfs2_merge_rec_right(path, handle, et, split_rec, @@ -4517,7 +4517,7 @@ static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et, } /* - * This should only be called against the righmost leaf extent list. + * This should only be called against the rightmost leaf extent list. * * ocfs2_figure_appending_type() will figure out whether we'll have to * insert at the tail of the rightmost leaf. 
@@ -6154,6 +6154,9 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, int status; struct inode *inode = NULL; struct buffer_head *bh = NULL; + struct ocfs2_dinode *di; + struct ocfs2_truncate_log *tl; + unsigned int tl_count; inode = ocfs2_get_system_file_inode(osb, TRUNCATE_LOG_SYSTEM_INODE, @@ -6171,6 +6174,18 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, goto bail; } + di = (struct ocfs2_dinode *)bh->b_data; + tl = &di->id2.i_dealloc; + tl_count = le16_to_cpu(tl->tl_count); + if (unlikely(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) || + tl_count == 0)) { + status = -EFSCORRUPTED; + iput(inode); + brelse(bh); + mlog_errno(status); + goto bail; + } + *tl_inode = inode; *tl_bh = bh; bail: @@ -6808,27 +6823,27 @@ static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh) return 0; } -void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, - unsigned int from, unsigned int to, - struct page *page, int zero, u64 *phys) +void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle, + size_t from, size_t to, struct folio *folio, int zero, + u64 *phys) { int ret, partial = 0; - loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from; + loff_t start_byte = folio_pos(folio) + from; loff_t length = to - from; - ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0); + ret = ocfs2_map_folio_blocks(folio, phys, inode, from, to, 0); if (ret) mlog_errno(ret); if (zero) - zero_user_segment(page, from, to); + folio_zero_segment(folio, from, to); /* * Need to set the buffers we zero'd into uptodate * here if they aren't - ocfs2_map_page_blocks() * might've skipped some */ - ret = walk_page_buffers(handle, page_buffers(page), + ret = walk_page_buffers(handle, folio_buffers(folio), from, to, &partial, ocfs2_zero_func); if (ret < 0) @@ -6841,92 +6856,88 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, } if (!partial) - SetPageUptodate(page); + folio_mark_uptodate(folio); - flush_dcache_page(page); + flush_dcache_folio(folio); } -static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, - loff_t end, struct page **pages, - int numpages, u64 phys, handle_t *handle) +static void ocfs2_zero_cluster_folios(struct inode *inode, loff_t start, + loff_t end, struct folio **folios, int numfolios, + u64 phys, handle_t *handle) { int i; - struct page *page; - unsigned int from, to = PAGE_SIZE; struct super_block *sb = inode->i_sb; BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb))); - if (numpages == 0) + if (numfolios == 0) goto out; - to = PAGE_SIZE; - for(i = 0; i < numpages; i++) { - page = pages[i]; + for (i = 0; i < numfolios; i++) { + struct folio *folio = folios[i]; + size_t to = folio_size(folio); + size_t from = offset_in_folio(folio, start); - from = start & (PAGE_SIZE - 1); - if ((end >> PAGE_SHIFT) == page->index) - to = end & (PAGE_SIZE - 1); + if (to > end - folio_pos(folio)) + to = end - folio_pos(folio); - BUG_ON(from > PAGE_SIZE); - BUG_ON(to > PAGE_SIZE); + ocfs2_map_and_dirty_folio(inode, handle, from, to, folio, 1, + &phys); - ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1, - &phys); - - start = (page->index + 1) << PAGE_SHIFT; + start = folio_next_index(folio) << PAGE_SHIFT; } out: - if (pages) - ocfs2_unlock_and_free_pages(pages, numpages); + if (folios) + ocfs2_unlock_and_free_folios(folios, numfolios); } -int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, - struct page **pages, int *num) +static int ocfs2_grab_folios(struct inode *inode, loff_t start, 
loff_t end, + struct folio **folios, int *num) { - int numpages, ret = 0; + int numfolios, ret = 0; struct address_space *mapping = inode->i_mapping; unsigned long index; loff_t last_page_bytes; BUG_ON(start > end); - numpages = 0; + numfolios = 0; last_page_bytes = PAGE_ALIGN(end); index = start >> PAGE_SHIFT; do { - pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS); - if (!pages[numpages]) { - ret = -ENOMEM; + folios[numfolios] = __filemap_get_folio(mapping, index, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); + if (IS_ERR(folios[numfolios])) { + ret = PTR_ERR(folios[numfolios]); mlog_errno(ret); goto out; } - numpages++; - index++; + index = folio_next_index(folios[numfolios]); + numfolios++; } while (index < (last_page_bytes >> PAGE_SHIFT)); out: if (ret != 0) { - if (pages) - ocfs2_unlock_and_free_pages(pages, numpages); - numpages = 0; + if (folios) + ocfs2_unlock_and_free_folios(folios, numfolios); + numfolios = 0; } - *num = numpages; + *num = numfolios; return ret; } -static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end, - struct page **pages, int *num) +static int ocfs2_grab_eof_folios(struct inode *inode, loff_t start, loff_t end, + struct folio **folios, int *num) { struct super_block *sb = inode->i_sb; BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits != (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits); - return ocfs2_grab_pages(inode, start, end, pages, num); + return ocfs2_grab_folios(inode, start, end, folios, num); } /* @@ -6940,8 +6951,8 @@ static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end, int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, u64 range_start, u64 range_end) { - int ret = 0, numpages; - struct page **pages = NULL; + int ret = 0, numfolios; + struct folio **folios = NULL; u64 phys; unsigned int ext_flags; struct super_block *sb = inode->i_sb; @@ -6954,17 +6965,17 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, return 0; /* - * Avoid zeroing pages fully beyond current i_size. It is pointless as - * underlying blocks of those pages should be already zeroed out and + * Avoid zeroing folios fully beyond current i_size. It is pointless as + * underlying blocks of those folios should be already zeroed out and * page writeback will skip them anyway. */ range_end = min_t(u64, range_end, i_size_read(inode)); if (range_start >= range_end) return 0; - pages = kcalloc(ocfs2_pages_per_cluster(sb), - sizeof(struct page *), GFP_NOFS); - if (pages == NULL) { + folios = kcalloc(ocfs2_pages_per_cluster(sb), + sizeof(struct folio *), GFP_NOFS); + if (folios == NULL) { ret = -ENOMEM; mlog_errno(ret); goto out; @@ -6985,18 +6996,18 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN) goto out; - ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages, - &numpages); + ret = ocfs2_grab_eof_folios(inode, range_start, range_end, folios, + &numfolios); if (ret) { mlog_errno(ret); goto out; } - ocfs2_zero_cluster_pages(inode, range_start, range_end, pages, - numpages, phys, handle); + ocfs2_zero_cluster_folios(inode, range_start, range_end, folios, + numfolios, phys, handle); /* - * Initiate writeout of the pages we zero'd here. We don't + * Initiate writeout of the folios we zero'd here. We don't * wait on them - the truncate_inode_pages() call later will * do that for us. 
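[Editor's note: in isolation, the page-cache lookup idiom the conversions above adopt. Unlike find_or_create_page(), __filemap_get_folio() never returns NULL; failure comes back as an ERR_PTR() value, so the caller propagates PTR_ERR() instead of hard-coding -ENOMEM. A minimal sketch; the wrapper name is illustrative.]

#include <linux/pagemap.h>

static int grab_locked_folio(struct address_space *mapping, pgoff_t index,
			     struct folio **foliop)
{
	struct folio *folio;

	/* FGP_CREAT allocates a missing folio; FGP_LOCK returns it locked */
	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				    GFP_NOFS);
	if (IS_ERR(folio))
		return PTR_ERR(folio);	/* e.g. -ENOMEM, never a bare NULL */

	*foliop = folio;	/* locked and referenced; caller unlocks and puts */
	return 0;
}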
*/ @@ -7006,7 +7017,7 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, mlog_errno(ret); out: - kfree(pages); + kfree(folios); return ret; } @@ -7059,7 +7070,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di) int ocfs2_convert_inline_data_to_extents(struct inode *inode, struct buffer_head *di_bh) { - int ret, has_data, num_pages = 0; + int ret, has_data, num_folios = 0; int need_free = 0; u32 bit_off, num; handle_t *handle; @@ -7068,7 +7079,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_alloc_context *data_ac = NULL; - struct page *page = NULL; + struct folio *folio = NULL; struct ocfs2_extent_tree et; int did_quota = 0; @@ -7119,12 +7130,12 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, /* * Save two copies, one for insert, and one that can - * be changed by ocfs2_map_and_dirty_page() below. + * be changed by ocfs2_map_and_dirty_folio() below. */ block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); - ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page, - &num_pages); + ret = ocfs2_grab_eof_folios(inode, 0, page_end, &folio, + &num_folios); if (ret) { mlog_errno(ret); need_free = 1; @@ -7135,15 +7146,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, * This should populate the 1st page for us and mark * it up to date. */ - ret = ocfs2_read_inline_data(inode, page, di_bh); + ret = ocfs2_read_inline_data(inode, folio, di_bh); if (ret) { mlog_errno(ret); need_free = 1; goto out_unlock; } - ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0, - &phys); + ocfs2_map_and_dirty_folio(inode, handle, 0, page_end, folio, 0, + &phys); } spin_lock(&oi->ip_lock); @@ -7174,8 +7185,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, } out_unlock: - if (page) - ocfs2_unlock_and_free_pages(&page, num_pages); + if (folio) + ocfs2_unlock_and_free_folios(&folio, num_folios); out_commit: if (ret < 0 && did_quota) diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 4af7abaa6e40..1c0c83362904 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -254,11 +254,9 @@ static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec) return !rec->e_leaf_clusters; } -int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, - struct page **pages, int *num); -void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, - unsigned int from, unsigned int to, - struct page *page, int zero, u64 *phys); +void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle, + size_t from, size_t to, struct folio *folio, int zero, + u64 *phys); /* * Structures which describe a path through a btree, and functions to * manipulate them. 
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index db72b3e924b3..5bbeb6fbb1ac 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -215,10 +215,9 @@ bail: return err; } -int ocfs2_read_inline_data(struct inode *inode, struct page *page, +int ocfs2_read_inline_data(struct inode *inode, struct folio *folio, struct buffer_head *di_bh) { - void *kaddr; loff_t size; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; @@ -230,7 +229,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page, size = i_size_read(inode); - if (size > PAGE_SIZE || + if (size > folio_size(folio) || size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) { ocfs2_error(inode->i_sb, "Inode %llu has with inline data has bad size: %Lu\n", @@ -239,25 +238,18 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page, return -EROFS; } - kaddr = kmap_atomic(page); - if (size) - memcpy(kaddr, di->id2.i_data.id_data, size); - /* Clear the remaining part of the page */ - memset(kaddr + size, 0, PAGE_SIZE - size); - flush_dcache_page(page); - kunmap_atomic(kaddr); - - SetPageUptodate(page); + folio_fill_tail(folio, 0, di->id2.i_data.id_data, size); + folio_mark_uptodate(folio); return 0; } -static int ocfs2_readpage_inline(struct inode *inode, struct page *page) +static int ocfs2_readpage_inline(struct inode *inode, struct folio *folio) { int ret; struct buffer_head *di_bh = NULL; - BUG_ON(!PageLocked(page)); + BUG_ON(!folio_test_locked(folio)); BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)); ret = ocfs2_read_inode_block(inode, &di_bh); @@ -266,9 +258,9 @@ static int ocfs2_readpage_inline(struct inode *inode, struct page *page) goto out; } - ret = ocfs2_read_inline_data(inode, page, di_bh); + ret = ocfs2_read_inline_data(inode, folio, di_bh); out: - unlock_page(page); + folio_unlock(folio); brelse(di_bh); return ret; @@ -283,7 +275,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio) trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index); - ret = ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page); + ret = ocfs2_inode_lock_with_folio(inode, NULL, 0, folio); if (ret != 0) { if (ret == AOP_TRUNCATED_PAGE) unlock = 0; @@ -305,7 +297,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio) } /* - * i_size might have just been updated as we grabed the meta lock. We + * i_size might have just been updated as we grabbed the meta lock. We * might now be discovering a truncate that hit on another node. * block_read_full_folio->get_block freaks out if it is asked to read * beyond the end of a file, so we check here. 
Callers @@ -322,7 +314,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio) } if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) - ret = ocfs2_readpage_inline(inode, &folio->page); + ret = ocfs2_readpage_inline(inode, folio); else ret = block_read_full_folio(folio, ocfs2_get_block); unlock = 0; @@ -534,7 +526,7 @@ static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, * * from == to == 0 is code for "zero the entire cluster region" */ -static void ocfs2_clear_page_regions(struct page *page, +static void ocfs2_clear_folio_regions(struct folio *folio, struct ocfs2_super *osb, u32 cpos, unsigned from, unsigned to) { @@ -543,7 +535,7 @@ static void ocfs2_clear_page_regions(struct page *page, ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end); - kaddr = kmap_atomic(page); + kaddr = kmap_local_folio(folio, 0); if (from || to) { if (from > cluster_start) @@ -554,13 +546,13 @@ static void ocfs2_clear_page_regions(struct page *page, memset(kaddr + cluster_start, 0, cluster_end - cluster_start); } - kunmap_atomic(kaddr); + kunmap_local(kaddr); } /* * Nonsparse file systems fully allocate before we get to the write * code. This prevents ocfs2_write() from tagging the write as an - * allocating one, which means ocfs2_map_page_blocks() might try to + * allocating one, which means ocfs2_map_folio_blocks() might try to * read-in the blocks at the tail of our file. Avoid reading them by * testing i_size against each block offset. */ @@ -585,11 +577,10 @@ static int ocfs2_should_read_blk(struct inode *inode, struct folio *folio, * * This will also skip zeroing, which is handled externally. */ -int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, +int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno, struct inode *inode, unsigned int from, unsigned int to, int new) { - struct folio *folio = page_folio(page); int ret = 0; struct buffer_head *head, *bh, *wait[2], **wait_bh = wait; unsigned int block_end, block_start; @@ -729,24 +720,24 @@ struct ocfs2_write_ctxt { unsigned int w_large_pages; /* - * Pages involved in this write. + * Folios involved in this write. * - * w_target_page is the page being written to by the user. + * w_target_folio is the folio being written to by the user. * - * w_pages is an array of pages which always contains - * w_target_page, and in the case of an allocating write with + * w_folios is an array of folios which always contains + * w_target_folio, and in the case of an allocating write with * page_size < cluster size, it will contain zero'd and mapped - * pages adjacent to w_target_page which need to be written + * pages adjacent to w_target_folio which need to be written * out in so that future reads from that region will get * zero's. */ - unsigned int w_num_pages; - struct page *w_pages[OCFS2_MAX_CTXT_PAGES]; - struct page *w_target_page; + unsigned int w_num_folios; + struct folio *w_folios[OCFS2_MAX_CTXT_PAGES]; + struct folio *w_target_folio; /* * w_target_locked is used for page_mkwrite path indicating no unlocking - * against w_target_page in ocfs2_write_end_nolock. + * against w_target_folio in ocfs2_write_end_nolock. 
*/ unsigned int w_target_locked:1; @@ -771,40 +762,40 @@ struct ocfs2_write_ctxt { unsigned int w_unwritten_count; }; -void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) +void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios) { int i; - for(i = 0; i < num_pages; i++) { - if (pages[i]) { - unlock_page(pages[i]); - mark_page_accessed(pages[i]); - put_page(pages[i]); - } + for(i = 0; i < num_folios; i++) { + if (!folios[i]) + continue; + folio_unlock(folios[i]); + folio_mark_accessed(folios[i]); + folio_put(folios[i]); } } -static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) +static void ocfs2_unlock_folios(struct ocfs2_write_ctxt *wc) { int i; /* * w_target_locked is only set to true in the page_mkwrite() case. * The intent is to allow us to lock the target page from write_begin() - * to write_end(). The caller must hold a ref on w_target_page. + * to write_end(). The caller must hold a ref on w_target_folio. */ if (wc->w_target_locked) { - BUG_ON(!wc->w_target_page); - for (i = 0; i < wc->w_num_pages; i++) { - if (wc->w_target_page == wc->w_pages[i]) { - wc->w_pages[i] = NULL; + BUG_ON(!wc->w_target_folio); + for (i = 0; i < wc->w_num_folios; i++) { + if (wc->w_target_folio == wc->w_folios[i]) { + wc->w_folios[i] = NULL; break; } } - mark_page_accessed(wc->w_target_page); - put_page(wc->w_target_page); + folio_mark_accessed(wc->w_target_folio); + folio_put(wc->w_target_folio); } - ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); + ocfs2_unlock_and_free_folios(wc->w_folios, wc->w_num_folios); } static void ocfs2_free_unwritten_list(struct inode *inode, @@ -826,7 +817,7 @@ static void ocfs2_free_write_ctxt(struct inode *inode, struct ocfs2_write_ctxt *wc) { ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list); - ocfs2_unlock_pages(wc); + ocfs2_unlock_folios(wc); brelse(wc->w_di_bh); kfree(wc); } @@ -869,29 +860,30 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, * and dirty so they'll be written out (in order to prevent uninitialised * block data from leaking). And clear the new bit. 
*/ -static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to) +static void ocfs2_zero_new_buffers(struct folio *folio, size_t from, size_t to) { unsigned int block_start, block_end; struct buffer_head *head, *bh; - BUG_ON(!PageLocked(page)); - if (!page_has_buffers(page)) + BUG_ON(!folio_test_locked(folio)); + head = folio_buffers(folio); + if (!head) return; - bh = head = page_buffers(page); + bh = head; block_start = 0; do { block_end = block_start + bh->b_size; if (buffer_new(bh)) { if (block_end > from && block_start < to) { - if (!PageUptodate(page)) { + if (!folio_test_uptodate(folio)) { unsigned start, end; start = max(from, block_start); end = min(to, block_end); - zero_user_segment(page, start, end); + folio_zero_segment(folio, start, end); set_buffer_uptodate(bh); } @@ -916,29 +908,26 @@ static void ocfs2_write_failure(struct inode *inode, int i; unsigned from = user_pos & (PAGE_SIZE - 1), to = user_pos + user_len; - struct page *tmppage; - if (wc->w_target_page) - ocfs2_zero_new_buffers(wc->w_target_page, from, to); + if (wc->w_target_folio) + ocfs2_zero_new_buffers(wc->w_target_folio, from, to); - for(i = 0; i < wc->w_num_pages; i++) { - tmppage = wc->w_pages[i]; + for (i = 0; i < wc->w_num_folios; i++) { + struct folio *folio = wc->w_folios[i]; - if (tmppage && page_has_buffers(tmppage)) { + if (folio && folio_buffers(folio)) { if (ocfs2_should_order_data(inode)) ocfs2_jbd2_inode_add_write(wc->w_handle, inode, user_pos, user_len); - block_commit_write(tmppage, from, to); + block_commit_write(&folio->page, from, to); } } } -static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, - struct ocfs2_write_ctxt *wc, - struct page *page, u32 cpos, - loff_t user_pos, unsigned user_len, - int new) +static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno, + struct ocfs2_write_ctxt *wc, struct folio *folio, u32 cpos, + loff_t user_pos, unsigned user_len, int new) { int ret; unsigned int map_from = 0, map_to = 0; @@ -951,20 +940,19 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, /* treat the write as new if the a hole/lseek spanned across * the page boundary. */ - new = new | ((i_size_read(inode) <= page_offset(page)) && - (page_offset(page) <= user_pos)); + new = new | ((i_size_read(inode) <= folio_pos(folio)) && + (folio_pos(folio) <= user_pos)); - if (page == wc->w_target_page) { + if (folio == wc->w_target_folio) { map_from = user_pos & (PAGE_SIZE - 1); map_to = map_from + user_len; if (new) - ret = ocfs2_map_page_blocks(page, p_blkno, inode, - cluster_start, cluster_end, - new); + ret = ocfs2_map_folio_blocks(folio, p_blkno, inode, + cluster_start, cluster_end, new); else - ret = ocfs2_map_page_blocks(page, p_blkno, inode, - map_from, map_to, new); + ret = ocfs2_map_folio_blocks(folio, p_blkno, inode, + map_from, map_to, new); if (ret) { mlog_errno(ret); goto out; @@ -978,7 +966,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, } } else { /* - * If we haven't allocated the new page yet, we + * If we haven't allocated the new folio yet, we * shouldn't be writing it out without copying user * data. This is likely a math error from the caller. 
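[Editor's note: a skeletal version of the buffer-granular zeroing ocfs2_zero_new_buffers() performs above. Only buffers flagged "new" (freshly allocated, never read) that overlap [from, to) are zeroed, so stale block contents cannot leak out; the folio-uptodate case is skipped because those bytes were already zeroed on read-in. The helper name is illustrative.]

#include <linux/buffer_head.h>
#include <linux/highmem.h>
#include <linux/minmax.h>

static void zero_new_buffers_sketch(struct folio *folio, size_t from, size_t to)
{
	struct buffer_head *bh, *head = folio_buffers(folio);
	size_t block_start = 0, block_end;

	if (!head)
		return;
	bh = head;
	do {
		block_end = block_start + bh->b_size;
		if (buffer_new(bh) && block_end > from && block_start < to) {
			if (!folio_test_uptodate(folio)) {
				folio_zero_segment(folio,
						   max(from, block_start),
						   min(to, block_end));
				set_buffer_uptodate(bh);
			}
			clear_buffer_new(bh);
			mark_buffer_dirty(bh);	/* ensure it gets written out */
		}
		block_start = block_end;
	} while (bh = bh->b_this_page, bh != head);
}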
*/ @@ -987,8 +975,8 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, map_from = cluster_start; map_to = cluster_end; - ret = ocfs2_map_page_blocks(page, p_blkno, inode, - cluster_start, cluster_end, new); + ret = ocfs2_map_folio_blocks(folio, p_blkno, inode, + cluster_start, cluster_end, new); if (ret) { mlog_errno(ret); goto out; @@ -996,20 +984,20 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, } /* - * Parts of newly allocated pages need to be zero'd. + * Parts of newly allocated folios need to be zero'd. * * Above, we have also rewritten 'to' and 'from' - as far as * the rest of the function is concerned, the entire cluster - * range inside of a page needs to be written. + * range inside of a folio needs to be written. * - * We can skip this if the page is up to date - it's already + * We can skip this if the folio is uptodate - it's already * been zero'd from being read in as a hole. */ - if (new && !PageUptodate(page)) - ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb), + if (new && !folio_test_uptodate(folio)) + ocfs2_clear_folio_regions(folio, OCFS2_SB(inode->i_sb), cpos, user_data_from, user_data_to); - flush_dcache_page(page); + flush_dcache_folio(folio); out: return ret; @@ -1018,11 +1006,9 @@ out: /* * This function will only grab one clusters worth of pages. */ -static int ocfs2_grab_pages_for_write(struct address_space *mapping, - struct ocfs2_write_ctxt *wc, - u32 cpos, loff_t user_pos, - unsigned user_len, int new, - struct page *mmap_page) +static int ocfs2_grab_folios_for_write(struct address_space *mapping, + struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos, + unsigned user_len, int new, struct folio *mmap_folio) { int ret = 0, i; unsigned long start, target_index, end_index, index; @@ -1039,7 +1025,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, * last page of the write. */ if (new) { - wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb); + wc->w_num_folios = ocfs2_pages_per_cluster(inode->i_sb); start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos); /* * We need the index *past* the last page we could possibly @@ -1049,15 +1035,15 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, last_byte = max(user_pos + user_len, i_size_read(inode)); BUG_ON(last_byte < 1); end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1; - if ((start + wc->w_num_pages) > end_index) - wc->w_num_pages = end_index - start; + if ((start + wc->w_num_folios) > end_index) + wc->w_num_folios = end_index - start; } else { - wc->w_num_pages = 1; + wc->w_num_folios = 1; start = target_index; } end_index = (user_pos + user_len - 1) >> PAGE_SHIFT; - for(i = 0; i < wc->w_num_pages; i++) { + for(i = 0; i < wc->w_num_folios; i++) { index = start + i; if (index >= target_index && index <= end_index && @@ -1067,37 +1053,38 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, * and wants us to directly use the page * passed in. 
*/ - lock_page(mmap_page); + folio_lock(mmap_folio); /* Exit and let the caller retry */ - if (mmap_page->mapping != mapping) { - WARN_ON(mmap_page->mapping); - unlock_page(mmap_page); + if (mmap_folio->mapping != mapping) { + WARN_ON(mmap_folio->mapping); + folio_unlock(mmap_folio); ret = -EAGAIN; goto out; } - get_page(mmap_page); - wc->w_pages[i] = mmap_page; + folio_get(mmap_folio); + wc->w_folios[i] = mmap_folio; wc->w_target_locked = true; } else if (index >= target_index && index <= end_index && wc->w_type == OCFS2_WRITE_DIRECT) { /* Direct write has no mapping page. */ - wc->w_pages[i] = NULL; + wc->w_folios[i] = NULL; continue; } else { - wc->w_pages[i] = find_or_create_page(mapping, index, - GFP_NOFS); - if (!wc->w_pages[i]) { - ret = -ENOMEM; + wc->w_folios[i] = __filemap_get_folio(mapping, index, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, + GFP_NOFS); + if (IS_ERR(wc->w_folios[i])) { + ret = PTR_ERR(wc->w_folios[i]); mlog_errno(ret); goto out; } } - wait_for_stable_page(wc->w_pages[i]); + folio_wait_stable(wc->w_folios[i]); if (index == target_index) - wc->w_target_page = wc->w_pages[i]; + wc->w_target_folio = wc->w_folios[i]; } out: if (ret) @@ -1181,19 +1168,18 @@ static int ocfs2_write_cluster(struct address_space *mapping, if (!should_zero) p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1); - for(i = 0; i < wc->w_num_pages; i++) { + for (i = 0; i < wc->w_num_folios; i++) { int tmpret; /* This is the direct io target page. */ - if (wc->w_pages[i] == NULL) { + if (wc->w_folios[i] == NULL) { p_blkno += (1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits)); continue; } - tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc, - wc->w_pages[i], cpos, - user_pos, user_len, - should_zero); + tmpret = ocfs2_prepare_folio_for_write(inode, &p_blkno, wc, + wc->w_folios[i], cpos, user_pos, user_len, + should_zero); if (tmpret) { mlog_errno(tmpret); if (ret == 0) @@ -1472,7 +1458,7 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, { int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct page *page; + struct folio *folio; handle_t *handle; struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; @@ -1483,19 +1469,21 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, goto out; } - page = find_or_create_page(mapping, 0, GFP_NOFS); - if (!page) { + folio = __filemap_get_folio(mapping, 0, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); + if (IS_ERR(folio)) { ocfs2_commit_trans(osb, handle); - ret = -ENOMEM; + ret = PTR_ERR(folio); mlog_errno(ret); goto out; } /* - * If we don't set w_num_pages then this page won't get unlocked + * If we don't set w_num_folios then this folio won't get unlocked * and freed on cleanup of the write context. 
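[Editor's note: the lock-and-revalidate step for the page_mkwrite folio, isolated from the loop above. The folio can be truncated while it was unlocked, so its mapping is rechecked under the folio lock, and -EAGAIN tells the page_mkwrite() caller to retry the fault. The wrapper name is illustrative.]

#include <linux/pagemap.h>

static int lock_mmap_folio(struct folio *folio, struct address_space *mapping)
{
	folio_lock(folio);
	if (folio->mapping != mapping) {
		/* truncated or invalidated while unlocked */
		WARN_ON(folio->mapping);
		folio_unlock(folio);
		return -EAGAIN;	/* caller exits and retries the fault */
	}
	folio_get(folio);	/* hold it across write_begin()/write_end() */
	return 0;
}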
*/ - wc->w_pages[0] = wc->w_target_page = page; - wc->w_num_pages = 1; + wc->w_target_folio = folio; + wc->w_folios[0] = folio; + wc->w_num_folios = 1; ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); @@ -1509,8 +1497,8 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) ocfs2_set_inode_data_inline(inode, di); - if (!PageUptodate(page)) { - ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh); + if (!folio_test_uptodate(folio)) { + ret = ocfs2_read_inline_data(inode, folio, wc->w_di_bh); if (ret) { ocfs2_commit_trans(osb, handle); @@ -1533,9 +1521,8 @@ int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size) } static int ocfs2_try_to_write_inline_data(struct address_space *mapping, - struct inode *inode, loff_t pos, - unsigned len, struct page *mmap_page, - struct ocfs2_write_ctxt *wc) + struct inode *inode, loff_t pos, size_t len, + struct folio *mmap_folio, struct ocfs2_write_ctxt *wc) { int ret, written = 0; loff_t end = pos + len; @@ -1550,7 +1537,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping, * Handle inodes which already have inline data 1st. */ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { - if (mmap_page == NULL && + if (mmap_folio == NULL && ocfs2_size_fits_inline_data(wc->w_di_bh, end)) goto do_inline_write; @@ -1574,7 +1561,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping, * Check whether the write can fit. */ di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; - if (mmap_page || + if (mmap_folio || end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) return 0; @@ -1641,9 +1628,9 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh, } int ocfs2_write_begin_nolock(struct address_space *mapping, - loff_t pos, unsigned len, ocfs2_write_type_t type, - struct folio **foliop, void **fsdata, - struct buffer_head *di_bh, struct page *mmap_page) + loff_t pos, unsigned len, ocfs2_write_type_t type, + struct folio **foliop, void **fsdata, + struct buffer_head *di_bh, struct folio *mmap_folio) { int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS; unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0; @@ -1666,7 +1653,7 @@ try_again: if (ocfs2_supports_inline_data(osb)) { ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len, - mmap_page, wc); + mmap_folio, wc); if (ret == 1) { ret = 0; goto success; @@ -1718,7 +1705,7 @@ try_again: (unsigned long long)OCFS2_I(inode)->ip_blkno, (long long)i_size_read(inode), le32_to_cpu(di->i_clusters), - pos, len, type, mmap_page, + pos, len, type, mmap_folio, clusters_to_alloc, extents_to_split); /* @@ -1789,21 +1776,21 @@ try_again: } /* - * Fill our page array first. That way we've grabbed enough so + * Fill our folio array first. That way we've grabbed enough so * that we can zero and flush if we error after adding the * extent. */ - ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, - cluster_of_pages, mmap_page); + ret = ocfs2_grab_folios_for_write(mapping, wc, wc->w_cpos, pos, len, + cluster_of_pages, mmap_folio); if (ret) { /* - * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock - * the target page. In this case, we exit with no error and no target - * page. This will trigger the caller, page_mkwrite(), to re-try - * the operation. + * ocfs2_grab_folios_for_write() returns -EAGAIN if it + * could not lock the target folio. 
In this case, we exit + * with no error and no target folio. This will trigger + * the caller, page_mkwrite(), to re-try the operation. */ if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) { - BUG_ON(wc->w_target_page); + BUG_ON(wc->w_target_folio); ret = 0; goto out_quota; } @@ -1826,7 +1813,7 @@ try_again: success: if (foliop) - *foliop = page_folio(wc->w_target_page); + *foliop = wc->w_target_folio; *fsdata = wc; return 0; out_quota: @@ -1845,7 +1832,7 @@ out: * to VM code. */ if (wc->w_target_locked) - unlock_page(mmap_page); + folio_unlock(mmap_folio); ocfs2_free_write_ctxt(inode, wc); @@ -1924,18 +1911,15 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos, struct ocfs2_dinode *di, struct ocfs2_write_ctxt *wc) { - void *kaddr; - if (unlikely(*copied < len)) { - if (!PageUptodate(wc->w_target_page)) { + if (!folio_test_uptodate(wc->w_target_folio)) { *copied = 0; return; } } - kaddr = kmap_atomic(wc->w_target_page); - memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied); - kunmap_atomic(kaddr); + memcpy_from_folio(di->id2.i_data.id_data + pos, wc->w_target_folio, + pos, *copied); trace_ocfs2_write_end_inline( (unsigned long long)OCFS2_I(inode)->ip_blkno, @@ -1944,17 +1928,16 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos, le16_to_cpu(di->i_dyn_features)); } -int ocfs2_write_end_nolock(struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, void *fsdata) +int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos, + unsigned len, unsigned copied, void *fsdata) { int i, ret; - unsigned from, to, start = pos & (PAGE_SIZE - 1); + size_t from, to, start = pos & (PAGE_SIZE - 1); struct inode *inode = mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_write_ctxt *wc = fsdata; struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; handle_t *handle = wc->w_handle; - struct page *tmppage; BUG_ON(!list_empty(&wc->w_unwritten_list)); @@ -1973,44 +1956,44 @@ int ocfs2_write_end_nolock(struct address_space *mapping, goto out_write_size; } - if (unlikely(copied < len) && wc->w_target_page) { + if (unlikely(copied < len) && wc->w_target_folio) { loff_t new_isize; - if (!PageUptodate(wc->w_target_page)) + if (!folio_test_uptodate(wc->w_target_folio)) copied = 0; new_isize = max_t(loff_t, i_size_read(inode), pos + copied); - if (new_isize > page_offset(wc->w_target_page)) - ocfs2_zero_new_buffers(wc->w_target_page, start+copied, + if (new_isize > folio_pos(wc->w_target_folio)) + ocfs2_zero_new_buffers(wc->w_target_folio, start+copied, start+len); else { /* - * When page is fully beyond new isize (data copy - * failed), do not bother zeroing the page. Invalidate + * When folio is fully beyond new isize (data copy + * failed), do not bother zeroing the folio. Invalidate * it instead so that writeback does not get confused * put page & buffer dirty bits into inconsistent * state. */ - block_invalidate_folio(page_folio(wc->w_target_page), - 0, PAGE_SIZE); + block_invalidate_folio(wc->w_target_folio, 0, + folio_size(wc->w_target_folio)); } } - if (wc->w_target_page) - flush_dcache_page(wc->w_target_page); + if (wc->w_target_folio) + flush_dcache_folio(wc->w_target_folio); - for(i = 0; i < wc->w_num_pages; i++) { - tmppage = wc->w_pages[i]; + for (i = 0; i < wc->w_num_folios; i++) { + struct folio *folio = wc->w_folios[i]; - /* This is the direct io target page. 
*/ - if (tmppage == NULL) + /* This is the direct io target folio */ + if (folio == NULL) continue; - if (tmppage == wc->w_target_page) { + if (folio == wc->w_target_folio) { from = wc->w_target_from; to = wc->w_target_to; - BUG_ON(from > PAGE_SIZE || - to > PAGE_SIZE || + BUG_ON(from > folio_size(folio) || + to > folio_size(folio) || to < from); } else { /* @@ -2019,19 +2002,17 @@ int ocfs2_write_end_nolock(struct address_space *mapping, * to flush their entire range. */ from = 0; - to = PAGE_SIZE; + to = folio_size(folio); } - if (page_has_buffers(tmppage)) { + if (folio_buffers(folio)) { if (handle && ocfs2_should_order_data(inode)) { - loff_t start_byte = - ((loff_t)tmppage->index << PAGE_SHIFT) + - from; + loff_t start_byte = folio_pos(folio) + from; loff_t length = to - from; ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length); } - block_commit_write(tmppage, from, to); + block_commit_write(&folio->page, from, to); } } @@ -2060,7 +2041,7 @@ out: * this lock and will ask for the page lock when flushing the data. * put it here to preserve the unlock order. */ - ocfs2_unlock_pages(wc); + ocfs2_unlock_folios(wc); if (handle) ocfs2_commit_trans(osb, handle); diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 1d1b4b7edba0..114efc9111e4 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -8,16 +8,11 @@ #include -handle_t *ocfs2_start_walk_page_trans(struct inode *inode, - struct page *page, - unsigned from, - unsigned to); - -int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, +int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno, struct inode *inode, unsigned int from, unsigned int to, int new); -void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages); +void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios); int walk_page_buffers( handle_t *handle, struct buffer_head *head, @@ -37,11 +32,11 @@ typedef enum { } ocfs2_write_type_t; int ocfs2_write_begin_nolock(struct address_space *mapping, - loff_t pos, unsigned len, ocfs2_write_type_t type, - struct folio **foliop, void **fsdata, - struct buffer_head *di_bh, struct page *mmap_page); + loff_t pos, unsigned len, ocfs2_write_type_t type, + struct folio **foliop, void **fsdata, + struct buffer_head *di_bh, struct folio *mmap_folio); -int ocfs2_read_inline_data(struct inode *inode, struct page *page, +int ocfs2_read_inline_data(struct inode *inode, struct folio *folio, struct buffer_head *di_bh); int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size); diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 4200a0341343..724350925aff 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -3,6 +3,7 @@ * Copyright (C) 2004, 2005 Oracle. All rights reserved. */ +#include "linux/kstrtox.h" #include #include #include @@ -1020,7 +1021,7 @@ fire_callbacks: if (list_empty(&slot->ds_live_item)) goto out; - /* live nodes only go dead after enough consequtive missed + /* live nodes only go dead after enough consecutive missed * samples.. reset the missed counter whenever we see * activity */ if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) { @@ -1535,10 +1536,11 @@ static int o2hb_read_block_input(struct o2hb_region *reg, { unsigned long bytes; char *p = (char *)page; + int ret; - bytes = simple_strtoul(p, &p, 0); - if (!p || (*p && (*p != '\n'))) - return -EINVAL; + ret = kstrtoul(p, 0, &bytes); + if (ret) + return ret; /* Heartbeat and fs min / max block sizes are the same. 
*/ if (bytes > 4096 || bytes < 512) @@ -1622,13 +1624,14 @@ static ssize_t o2hb_region_blocks_store(struct config_item *item, struct o2hb_region *reg = to_o2hb_region(item); unsigned long tmp; char *p = (char *)page; + int ret; if (reg->hr_bdev_file) return -EINVAL; - tmp = simple_strtoul(p, &p, 0); - if (!p || (*p && (*p != '\n'))) - return -EINVAL; + ret = kstrtoul(p, 0, &tmp); + if (ret) + return ret; if (tmp > O2NM_MAX_NODES || tmp == 0) return -ERANGE; @@ -1776,8 +1779,8 @@ static ssize_t o2hb_region_dev_store(struct config_item *item, if (o2nm_this_node() == O2NM_MAX_NODES) return -EINVAL; - fd = simple_strtol(p, &p, 0); - if (!p || (*p && (*p != '\n'))) + ret = kstrtol(p, 0, &fd); + if (ret < 0) return -EINVAL; if (fd < 0 || fd >= INT_MAX) @@ -2136,10 +2139,11 @@ static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *ite { unsigned long tmp; char *p = (char *)page; + int ret; - tmp = simple_strtoul(p, &p, 10); - if (!p || (*p && (*p != '\n'))) - return -EINVAL; + ret = kstrtoul(p, 10, &tmp); + if (ret) + return ret; /* this will validate ranges for us. */ o2hb_dead_threshold_set((unsigned int) tmp); diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h index b73fc42e46ff..630bd5a3dd0d 100644 --- a/fs/ocfs2/cluster/masklog.h +++ b/fs/ocfs2/cluster/masklog.h @@ -29,7 +29,7 @@ * just calling printk() so that this can eventually make its way through * relayfs along with the debugging messages. Everything else gets KERN_DEBUG. * The inline tests and macro dance give GCC the opportunity to quite cleverly - * only emit the appropriage printk() when the caller passes in a constant + * only emit the appropriate printk() when the caller passes in a constant * mask, as is almost always the case. * * All this bitmask nonsense is managed from the files under diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c index 8bf17231d7b7..bfb8b456876c 100644 --- a/fs/ocfs2/cluster/quorum.c +++ b/fs/ocfs2/cluster/quorum.c @@ -23,7 +23,7 @@ * race between when we see a node start heartbeating and when we connect * to it. * - * So nodes that are in this transtion put a hold on the quorum decision + * So nodes that are in this transition put a hold on the quorum decision * with a counter. As they fall out of this transition they drop the count * and if they're the last, they fire off the decision. */ @@ -189,7 +189,7 @@ static void o2quo_clear_hold(struct o2quo_state *qs, u8 node) } /* as a node comes up we delay the quorum decision until we know the fate of - * the connection. the hold will be droped in conn_up or hb_down. it might be + * the connection. the hold will be dropped in conn_up or hb_down. it might be * perpetuated by con_err until hb_down. if we already have a conn, we might * be dropping a hold that conn_up got. */ void o2quo_hb_up(u8 node) @@ -256,7 +256,7 @@ void o2quo_hb_still_up(u8 node) } /* This is analogous to hb_up. as a node's connection comes up we delay the - * quorum decision until we see it heartbeating. the hold will be droped in + * quorum decision until we see it heartbeating. the hold will be dropped in * hb_up or hb_down. it might be perpetuated by con_err until hb_down. if * it's already heartbeating we might be dropping a hold that conn_up got. 
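[Editor's note: the conversion pattern applied to the heartbeat configfs attributes above, in stand-alone form; the attribute and its bounds are illustrative. kstrtoul() fails cleanly on trailing garbage and on overflow, where the old simple_strtoul() end-pointer dance only approximated that; a single trailing newline, as configfs writes deliver, is tolerated.]

#include <linux/kstrtox.h>

static ssize_t example_block_bytes_store(const char *page, size_t count)
{
	unsigned long bytes;
	int ret;

	ret = kstrtoul(page, 0, &bytes);  /* base 0 auto-detects 0x/0 prefixes */
	if (ret)
		return ret;		  /* -EINVAL or -ERANGE */

	if (bytes > 4096 || bytes < 512)  /* mirror the hb block-size bounds */
		return -ERANGE;
	return count;
}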
* */ diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 2b8fa3e782fb..0f46b22561d6 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -5,13 +5,13 @@ * * ---- * - * Callers for this were originally written against a very simple synchronus + * Callers for this were originally written against a very simple synchronous * API. This implementation reflects those simple callers. Some day I'm sure * we'll need to move to a more robust posting/callback mechanism. * * Transmit calls pass in kernel virtual addresses and block copying this into * the socket's tx buffers via a usual blocking sendmsg. They'll block waiting - * for a failed socket to timeout. TX callers can also pass in a poniter to an + * for a failed socket to timeout. TX callers can also pass in a pointer to an * 'int' which gets filled with an errno off the wire in response to the * message they send. * @@ -101,7 +101,7 @@ static struct socket *o2net_listen_sock; * o2net_wq. teardown detaches the callbacks before destroying the workqueue. * quorum work is queued as sock containers are shutdown.. stop_listening * tears down all the node's sock containers, preventing future shutdowns - * and queued quroum work, before canceling delayed quorum work and + * and queued quorum work, before canceling delayed quorum work and * destroying the work queue. */ static struct workqueue_struct *o2net_wq; @@ -1419,7 +1419,7 @@ out: return ret; } -/* this work func is triggerd by data ready. it reads until it can read no +/* this work func is triggered by data ready. it reads until it can read no * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing * our work the work struct will be marked and we'll be called again. */ static void o2net_rx_until_empty(struct work_struct *work) diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h index 847a52dcbe7d..1969db8ffa9c 100644 --- a/fs/ocfs2/dlm/dlmapi.h +++ b/fs/ocfs2/dlm/dlmapi.h @@ -118,7 +118,7 @@ struct dlm_lockstatus { #define LKM_VALBLK 0x00000100 /* lock value block request */ #define LKM_NOQUEUE 0x00000200 /* non blocking request */ #define LKM_CONVERT 0x00000400 /* conversion request */ -#define LKM_NODLCKWT 0x00000800 /* this lock wont deadlock (U) */ +#define LKM_NODLCKWT 0x00000800 /* this lock won't deadlock (U) */ #define LKM_UNLOCK 0x00001000 /* deallocate this lock */ #define LKM_CANCEL 0x00002000 /* cancel conversion request */ #define LKM_DEQALL 0x00004000 /* remove all locks held by proc (U) */ diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 50da8af988c1..54c548ef037a 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -207,7 +207,7 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm) * 1) all recovery threads cluster wide will work on recovering * ONE node at a time * 2) negotiate who will take over all the locks for the dead node. - * thats right... ALL the locks. + * that's right... ALL the locks. * 3) once a new master is chosen, everyone scans all locks * and moves aside those mastered by the dead guy * 4) each of these locks should be locked until recovery is done @@ -1469,7 +1469,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, * The first one is handled at the end of this function. The * other two are handled in the worker thread after locks have * been attached. Yes, we don't wait for purge time to match - * kref_init. The lockres will still have atleast one ref + * kref_init. 
The lockres will still have at least one ref * added because it is in the hash __dlm_insert_lockres() */ extra_refs++; @@ -1735,7 +1735,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, spin_unlock(&res->spinlock); } } else { - /* put.. incase we are not the master */ + /* put.. in case we are not the master */ spin_unlock(&res->spinlock); dlm_lockres_put(res); } diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 7fc0e920eda7..2a7f36643895 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -20,6 +20,7 @@ #include #include +#include #include #include #include @@ -506,9 +507,7 @@ bail: return status; } -static int dlmfs_fill_super(struct super_block * sb, - void * data, - int silent) +static int dlmfs_fill_super(struct super_block *sb, struct fs_context *fc) { sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_SIZE; @@ -556,17 +555,27 @@ static const struct inode_operations dlmfs_file_inode_operations = { .setattr = dlmfs_file_setattr, }; -static struct dentry *dlmfs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) +static int dlmfs_get_tree(struct fs_context *fc) { - return mount_nodev(fs_type, flags, data, dlmfs_fill_super); + return get_tree_nodev(fc, dlmfs_fill_super); +} + +static const struct fs_context_operations dlmfs_context_ops = { + .get_tree = dlmfs_get_tree, +}; + +static int dlmfs_init_fs_context(struct fs_context *fc) +{ + fc->ops = &dlmfs_context_ops; + + return 0; } static struct file_system_type dlmfs_fs_type = { .owner = THIS_MODULE, .name = "ocfs2_dlmfs", - .mount = dlmfs_mount, .kill_sb = kill_litter_super, + .init_fs_context = dlmfs_init_fs_context, }; MODULE_ALIAS_FS("ocfs2_dlmfs"); diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 764ecbd5ad41..c9b62a6d8673 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -794,7 +794,7 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res) /* * Keep a list of processes who have interest in a lockres. - * Note: this is now only uesed for check recursive cluster locking. + * Note: this is now only used for check recursive cluster locking. */ static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres, struct ocfs2_lock_holder *oh) @@ -2529,30 +2529,28 @@ bail: /* * This is working around a lock inversion between tasks acquiring DLM - * locks while holding a page lock and the downconvert thread which - * blocks dlm lock acquiry while acquiring page locks. + * locks while holding a folio lock and the downconvert thread which + * blocks dlm lock acquiry while acquiring folio locks. * - * ** These _with_page variantes are only intended to be called from aop - * methods that hold page locks and return a very specific *positive* error + * ** These _with_folio variants are only intended to be called from aop + * methods that hold folio locks and return a very specific *positive* error * code that aop methods pass up to the VFS -- test for errors with != 0. ** * * The DLM is called such that it returns -EAGAIN if it would have * blocked waiting for the downconvert thread. In that case we unlock - * our page so the downconvert thread can make progress. Once we've + * our folio so the downconvert thread can make progress. Once we've * done this we have to return AOP_TRUNCATED_PAGE so the aop method * that called us can bubble that back up into the VFS who will then * immediately retry the aop call. 
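The dlmfs hunk above is close to the minimal form of the new mount API for a nodev filesystem: fill_super takes an fs_context, get_tree delegates to get_tree_nodev(), and init_fs_context just installs the operations table. Condensed into a self-contained sketch (the "examplefs" names are hypothetical):

    #include <linux/fs.h>
    #include <linux/fs_context.h>
    #include <linux/module.h>

    static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
    {
        /* set sb->s_maxbytes, s_blocksize, s_op, allocate the root inode... */
        return 0;
    }

    static int examplefs_get_tree(struct fs_context *fc)
    {
        return get_tree_nodev(fc, examplefs_fill_super);
    }

    static const struct fs_context_operations examplefs_context_ops = {
        .get_tree = examplefs_get_tree,
    };

    static int examplefs_init_fs_context(struct fs_context *fc)
    {
        fc->ops = &examplefs_context_ops;
        return 0;
    }

    static struct file_system_type examplefs_fs_type = {
        .owner           = THIS_MODULE,
        .name            = "examplefs",
        .kill_sb         = kill_litter_super,
        .init_fs_context = examplefs_init_fs_context,
    };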
*/ -int ocfs2_inode_lock_with_page(struct inode *inode, - struct buffer_head **ret_bh, - int ex, - struct page *page) +int ocfs2_inode_lock_with_folio(struct inode *inode, + struct buffer_head **ret_bh, int ex, struct folio *folio) { int ret; ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK); if (ret == -EAGAIN) { - unlock_page(page); + folio_unlock(folio); /* * If we can't get inode lock immediately, we should not return * directly here, since this will lead to a softlockup problem. @@ -2630,7 +2628,7 @@ void ocfs2_inode_unlock(struct inode *inode, } /* - * This _tracker variantes are introduced to deal with the recursive cluster + * This _tracker variants are introduced to deal with the recursive cluster * locking issue. The idea is to keep track of a lock holder on the stack of * the current process. If there's a lock holder on the stack, we know the * task context is already protected by cluster locking. Currently, they're @@ -2735,7 +2733,7 @@ void ocfs2_inode_unlock_tracker(struct inode *inode, struct ocfs2_lock_res *lockres; lockres = &OCFS2_I(inode)->ip_inode_lockres; - /* had_lock means that the currect process already takes the cluster + /* had_lock means that the current process already takes the cluster * lock previously. * If had_lock is 1, we have nothing to do here. * If had_lock is 0, we will release the lock. @@ -3802,9 +3800,9 @@ recheck: * set when the ast is received for an upconvert just before the * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast * on the heels of the ast, we want to delay the downconvert just - * enough to allow the up requestor to do its task. Because this + * enough to allow the up requester to do its task. Because this * lock is in the blocked queue, the lock will be downconverted - * as soon as the requestor is done with the lock. + * as soon as the requester is done with the lock. 
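From the caller's side, the protocol described in the comment above looks roughly like the sketch below (a hypothetical aop method, not the actual ocfs2 call sites, and assuming the ocfs2 internal headers): a nonzero return is passed straight up, and a positive AOP_TRUNCATED_PAGE tells the VFS to re-lock the folio and retry, which is what gives the downconvert thread room to run.

    /* Sketch only; per the comment above, errors are tested with != 0
     * because the _with_folio helper can return a positive code. */
    static int example_read_folio(struct file *file, struct folio *folio)
    {
        struct inode *inode = folio->mapping->host;
        struct buffer_head *di_bh = NULL;
        int ret;

        ret = ocfs2_inode_lock_with_folio(inode, &di_bh, 0, folio);
        if (ret != 0)           /* may be AOP_TRUNCATED_PAGE (positive) */
            return ret;

        /* ... fill the folio under the cluster lock ... */

        ocfs2_inode_unlock(inode, 0);
        brelse(di_bh);
        return 0;
    }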
*/ if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) goto leave_requeue; diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index e5da5809ed95..a3ebd7303ea2 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h @@ -137,10 +137,8 @@ int ocfs2_inode_lock_full_nested(struct inode *inode, int ex, int arg_flags, int subclass); -int ocfs2_inode_lock_with_page(struct inode *inode, - struct buffer_head **ret_bh, - int ex, - struct page *page); +int ocfs2_inode_lock_with_folio(struct inode *inode, + struct buffer_head **ret_bh, int ex, struct folio *folio); /* Variants without special locking class or flags */ #define ocfs2_inode_lock_full(i, r, e, f)\ ocfs2_inode_lock_full_nested(i, r, e, f, OI_LS_NORMAL) diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index f7672472fa82..930150ed5db1 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -435,6 +435,16 @@ static int ocfs2_get_clusters_nocache(struct inode *inode, } } + if (le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count)) { + ocfs2_error(inode->i_sb, + "Inode %lu has an invalid extent (next_free_rec %u, count %u)\n", + inode->i_ino, + le16_to_cpu(el->l_next_free_rec), + le16_to_cpu(el->l_count)); + ret = -EROFS; + goto out; + } + i = ocfs2_search_extent_list(el, v_cluster); if (i == -1) { /* diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 957ced628eb1..e54f2c4b5a90 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -782,11 +782,11 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, goto out_commit_trans; } - /* Get the offsets within the page that we want to zero */ - zero_from = abs_from & (PAGE_SIZE - 1); - zero_to = abs_to & (PAGE_SIZE - 1); + /* Get the offsets within the folio that we want to zero */ + zero_from = offset_in_folio(folio, abs_from); + zero_to = offset_in_folio(folio, abs_to); if (!zero_to) - zero_to = PAGE_SIZE; + zero_to = folio_size(folio); trace_ocfs2_write_zero_page( (unsigned long long)OCFS2_I(inode)->ip_blkno, diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 2cc5c99fe941..12e5d1f73325 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -200,6 +200,20 @@ bail: return inode; } +static int ocfs2_dinode_has_extents(struct ocfs2_dinode *di) +{ + /* inodes flagged with other stuff in id2 */ + if (di->i_flags & (OCFS2_SUPER_BLOCK_FL | OCFS2_LOCAL_ALLOC_FL | + OCFS2_CHAIN_FL | OCFS2_DEALLOC_FL)) + return 0; + /* i_flags doesn't indicate when id2 is a fast symlink */ + if (S_ISLNK(di->i_mode) && di->i_size && di->i_clusters == 0) + return 0; + if (di->i_dyn_features & OCFS2_INLINE_DATA_FL) + return 0; + + return 1; +} /* * here's how inodes get read from disk: @@ -1122,7 +1136,7 @@ static void ocfs2_clear_inode(struct inode *inode) dquot_drop(inode); - /* To preven remote deletes we hold open lock before, now it + /* To prevent remote deletes we hold open lock before, now it * is time to unlock PR and EX open locks. */ ocfs2_open_unlock(inode); @@ -1437,7 +1451,7 @@ static int ocfs2_filecheck_validate_inode_block(struct super_block *sb, * Call ocfs2_validate_meta_ecc() first since it has ecc repair * function, but we should not return error immediately when ecc * validation fails, because the reason is quite likely the invalid - * inode number inputed. + * inode number inputted. 
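The new ocfs2_get_clusters_nocache() check above rejects an extent list whose next_free_rec claims more records than the list can hold, before ocfs2_search_extent_list() indexes into l_recs[]. The shape of the check in isolation (a hedged sketch; the real code also reports the corruption via ocfs2_error()):

    /* Both fields are little-endian on disk, so convert before comparing;
     * a corrupted next_free_rec would otherwise walk past l_recs[]. */
    static int example_validate_extent_list(struct ocfs2_extent_list *el)
    {
        if (le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count))
            return -EROFS;      /* treat as corruption, fail read-only */
        return 0;
    }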
*/ rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check); if (rc) { @@ -1547,6 +1561,16 @@ static int ocfs2_filecheck_repair_inode_block(struct super_block *sb, le32_to_cpu(di->i_fs_generation)); } + if (ocfs2_dinode_has_extents(di) && + le16_to_cpu(di->id2.i_list.l_next_free_rec) > le16_to_cpu(di->id2.i_list.l_count)) { + di->id2.i_list.l_next_free_rec = di->id2.i_list.l_count; + changed = 1; + mlog(ML_ERROR, + "Filecheck: reset dinode #%llu: l_next_free_rec to %u\n", + (unsigned long long)bh->b_blocknr, + le16_to_cpu(di->id2.i_list.l_next_free_rec)); + } + if (changed || ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check)) { ocfs2_compute_meta_ecc(sb, bh->b_data, &di->i_check); mark_buffer_dirty(bh); diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 71beef7f8a60..7ae96fb8807a 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -796,7 +796,7 @@ bail: /* * OCFS2_IOC_INFO handles an array of requests passed from userspace. * - * ocfs2_info_handle() recevies a large info aggregation, grab and + * ocfs2_info_handle() receives a large info aggregation, grab and * validate the request count from header, then break it into small * pieces, later specific handlers can handle them one by one. * diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 1bf188b6866a..f1b4b3e611cb 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1956,7 +1956,7 @@ bail: /* * Scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some - * randomness to the timeout to minimize multple nodes firing the timer at the + * randomness to the timeout to minimize multiple nodes firing the timer at the * same time. */ static inline unsigned long ocfs2_orphan_scan_timeout(void) diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 6ef4cb045ccd..6a314e9f2b49 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -44,13 +44,13 @@ static vm_fault_t ocfs2_fault(struct vm_fault *vmf) } static vm_fault_t __ocfs2_page_mkwrite(struct file *file, - struct buffer_head *di_bh, struct page *page) + struct buffer_head *di_bh, struct folio *folio) { int err; vm_fault_t ret = VM_FAULT_NOPAGE; struct inode *inode = file_inode(file); struct address_space *mapping = inode->i_mapping; - loff_t pos = page_offset(page); + loff_t pos = folio_pos(folio); unsigned int len = PAGE_SIZE; pgoff_t last_index; struct folio *locked_folio = NULL; @@ -72,9 +72,9 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file, * * Let VM retry with these cases. */ - if ((page->mapping != inode->i_mapping) || - (!PageUptodate(page)) || - (page_offset(page) >= size)) + if ((folio->mapping != inode->i_mapping) || + !folio_test_uptodate(folio) || + (pos >= size)) goto out; /* @@ -87,11 +87,11 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file, * worry about ocfs2_write_begin() skipping some buffer reads * because the "write" would invalidate their data. 
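The __ocfs2_page_mkwrite() conversion above keeps the same re-validation logic, just phrased in folio terms: between the fault and taking ip_alloc_sem the folio may have been truncated or invalidated, so the handler re-checks it and returns VM_FAULT_NOPAGE to make the VM retry the fault. A condensed sketch of that check (the helper name is hypothetical):

    static vm_fault_t example_mkwrite_revalidate(struct inode *inode,
                                                 struct folio *folio)
    {
        loff_t size = i_size_read(inode);

        /* folio_pos() replaces page_offset(); same byte offset, folio form */
        if (folio->mapping != inode->i_mapping ||
            !folio_test_uptodate(folio) ||
            folio_pos(folio) >= size)
            return VM_FAULT_NOPAGE;     /* let the VM retry the fault */

        return 0;
    }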
*/ - if (page->index == last_index) + if (folio->index == last_index) len = ((size - 1) & ~PAGE_MASK) + 1; err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP, - &locked_folio, &fsdata, di_bh, page); + &locked_folio, &fsdata, di_bh, folio); if (err) { if (err != -ENOSPC) mlog_errno(err); @@ -112,7 +112,7 @@ out: static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf) { - struct page *page = vmf->page; + struct folio *folio = page_folio(vmf->page); struct inode *inode = file_inode(vmf->vma->vm_file); struct buffer_head *di_bh = NULL; sigset_t oldset; @@ -141,7 +141,7 @@ static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf) */ down_write(&OCFS2_I(inode)->ip_alloc_sem); - ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page); + ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, folio); up_write(&OCFS2_I(inode)->ip_alloc_sem); diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index f9d6a4f9ca92..369c7d27befd 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -492,7 +492,7 @@ static int ocfs2_validate_and_adjust_move_goal(struct inode *inode, bg = (struct ocfs2_group_desc *)gd_bh->b_data; /* - * moving goal is not allowd to start with a group desc blok(#0 blk) + * moving goal is not allowed to start with a group desc blok(#0 blk) * let's compromise to the latter cluster. */ if (range->me_goal == le64_to_cpu(bg->bg_blkno)) @@ -658,7 +658,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context, /* * probe the victim cluster group to find a proper - * region to fit wanted movement, it even will perfrom + * region to fit wanted movement, it even will perform * a best-effort attempt by compromising to a threshold * around the goal. */ @@ -920,7 +920,7 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context) } /* - * rememer ip_xattr_sem also needs to be held if necessary + * remember ip_xattr_sem also needs to be held if necessary */ down_write(&OCFS2_I(inode)->ip_alloc_sem); @@ -1022,7 +1022,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp) context->range = &range; /* - * ok, the default theshold for the defragmentation + * ok, the default threshold for the defragmentation * is 1M, since our maximum clustersize was 1M also. * any thought? */ diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 5550f8afa438..0ec63a1a94b8 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -508,7 +508,6 @@ static int __ocfs2_mknod_locked(struct inode *dir, struct inode *inode, dev_t dev, struct buffer_head **new_fe_bh, - struct buffer_head *parent_fe_bh, handle_t *handle, struct ocfs2_alloc_context *inode_ac, u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit) @@ -641,8 +640,8 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, } return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, - parent_fe_bh, handle, inode_ac, - fe_blkno, suballoc_loc, suballoc_bit); + handle, inode_ac, fe_blkno, + suballoc_loc, suballoc_bit); } static int ocfs2_mkdir(struct mnt_idmap *idmap, @@ -2576,7 +2575,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, clear_nlink(inode); /* do the real work now. */ status = __ocfs2_mknod_locked(dir, inode, - 0, &new_di_bh, parent_di_bh, handle, + 0, &new_di_bh, handle, inode_ac, di_blkno, suballoc_loc, suballoc_bit); if (status < 0) { diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index c93689b568fe..e8e94599e907 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -132,7 +132,7 @@ * well as the name of the cluster being joined. 
* mount.ocfs2 must pass in a matching stack name. * - * If not set, the classic stack will be used. This is compatbile with + * If not set, the classic stack will be used. This is compatible with * all older versions. */ #define OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK 0x0080 @@ -143,7 +143,7 @@ /* Support for extended attributes */ #define OCFS2_FEATURE_INCOMPAT_XATTR 0x0200 -/* Support for indexed directores */ +/* Support for indexed directories */ #define OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS 0x0400 /* Metadata checksum and error correction */ @@ -156,7 +156,7 @@ #define OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG 0x2000 /* - * Incompat bit to indicate useable clusterinfo with stackflags for all + * Incompat bit to indicate usable clusterinfo with stackflags for all * cluster stacks (userspace adnd o2cb). If this bit is set, * INCOMPAT_USERSPACE_STACK becomes superfluous and thus should not be set. */ @@ -1083,7 +1083,7 @@ struct ocfs2_xattr_block { struct ocfs2_xattr_header xb_header; /* xattr header if this block contains xattr */ struct ocfs2_xattr_tree_root xb_root;/* xattr tree root if this - block cotains xattr + block contains xattr tree. */ } xb_attrs; }; diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h index 9680797bc531..2de2f8733283 100644 --- a/fs/ocfs2/ocfs2_ioctl.h +++ b/fs/ocfs2/ocfs2_ioctl.h @@ -215,7 +215,7 @@ struct ocfs2_move_extents { movement less likely to fail, may make fs even more fragmented */ -#define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmenation +#define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmentation completely gets done. */ diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h index 8ac357ce6a30..9b234c03d693 100644 --- a/fs/ocfs2/ocfs2_lockid.h +++ b/fs/ocfs2/ocfs2_lockid.h @@ -93,7 +93,7 @@ static char *ocfs2_lock_type_strings[] = { [OCFS2_LOCK_TYPE_DATA] = "Data", [OCFS2_LOCK_TYPE_SUPER] = "Super", [OCFS2_LOCK_TYPE_RENAME] = "Rename", - /* Need to differntiate from [R]ename.. serializing writes is the + /* Need to differentiate from [R]ename.. serializing writes is the * important job it does, anyway. 
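Feature bits like the ones above are what decide whether a given kernel may mount a volume: compat bits can be ignored, ro-compat bits force a read-only mount, and any unrecognised incompat bit must fail the mount outright, because "incompat" means the on-disk format cannot even be read safely. A generic sketch of the incompat gate (illustrative only; the mask of bits a real kernel supports differs and this is not the actual ocfs2 helper):

    #define EXAMPLE_SUPPORTED_INCOMPAT \
        (OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK | \
         OCFS2_FEATURE_INCOMPAT_XATTR | \
         OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS)

    static int example_check_incompat(u32 disk_incompat)
    {
        if (disk_incompat & ~EXAMPLE_SUPPORTED_INCOMPAT)
            return -EINVAL;     /* unknown incompat bit: refuse the mount */
        return 0;
    }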
*/ [OCFS2_LOCK_TYPE_RW] = "Write/Read", [OCFS2_LOCK_TYPE_DENTRY] = "Dentry", diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h index 0511c69c9fde..54ed1495de9a 100644 --- a/fs/ocfs2/ocfs2_trace.h +++ b/fs/ocfs2/ocfs2_trace.h @@ -1658,34 +1658,34 @@ TRACE_EVENT(ocfs2_remount, ); TRACE_EVENT(ocfs2_fill_super, - TP_PROTO(void *sb, void *data, int silent), - TP_ARGS(sb, data, silent), + TP_PROTO(void *sb, void *fc, int silent), + TP_ARGS(sb, fc, silent), TP_STRUCT__entry( __field(void *, sb) - __field(void *, data) + __field(void *, fc) __field(int, silent) ), TP_fast_assign( __entry->sb = sb; - __entry->data = data; + __entry->fc = fc; __entry->silent = silent; ), TP_printk("%p %p %d", __entry->sb, - __entry->data, __entry->silent) + __entry->fc, __entry->silent) ); TRACE_EVENT(ocfs2_parse_options, - TP_PROTO(int is_remount, char *options), - TP_ARGS(is_remount, options), + TP_PROTO(int is_remount, const char *option), + TP_ARGS(is_remount, option), TP_STRUCT__entry( __field(int, is_remount) - __string(options, options) + __string(option, option) ), TP_fast_assign( __entry->is_remount = is_remount; - __assign_str(options); + __assign_str(option); ), - TP_printk("%d %s", __entry->is_remount, __get_str(options)) + TP_printk("%d %s", __entry->is_remount, __get_str(option)) ); DEFINE_OCFS2_POINTER_EVENT(ocfs2_put_super); diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 3404e7a30c33..15d9acd456ec 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -761,6 +761,11 @@ static int ocfs2_release_dquot(struct dquot *dquot) handle = ocfs2_start_trans(osb, ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type)); if (IS_ERR(handle)) { + /* + * Mark dquot as inactive to avoid endless cycle in + * quota_release_workfn(). + */ + clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); status = PTR_ERR(handle); mlog_errno(status); goto out_ilock; diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 004393b13c0a..8f732742b26e 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2420,7 +2420,7 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, * * If we will insert a new one, this is easy and only happens * during adding refcounted flag to the extent, so we don't - * have a chance of spliting. We just need one record. + * have a chance of splitting. We just need one record. * * If the refcount rec already exists, that would be a little * complicated. we may have to: @@ -2610,11 +2610,11 @@ static inline unsigned int ocfs2_cow_align_length(struct super_block *sb, /* * Calculate out the start and number of virtual clusters we need to CoW. * - * cpos is vitual start cluster position we want to do CoW in a + * cpos is virtual start cluster position we want to do CoW in a * file and write_len is the cluster length. * max_cpos is the place where we want to stop CoW intentionally. * - * Normal we will start CoW from the beginning of extent record cotaining cpos. + * Normal we will start CoW from the beginning of extent record containing cpos. * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we * get good I/O from the resulting extent tree. 
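The tracepoint rework above switches ocfs2_parse_options from logging the whole option string to logging one record per parsed option; the __string()/__assign_str()/__get_str() trio is the standard way to carry a variable-length string in a trace record. Stripped to the bare pattern, with a hypothetical event name:

    TRACE_EVENT(example_parse_option,
        TP_PROTO(int is_remount, const char *option),
        TP_ARGS(is_remount, option),
        TP_STRUCT__entry(
            __field(int, is_remount)
            __string(option, option)    /* sized per record */
        ),
        TP_fast_assign(
            __entry->is_remount = is_remount;
            __assign_str(option);       /* copies the string in */
        ),
        TP_printk("%d %s", __entry->is_remount, __get_str(option))
    );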
*/ @@ -2902,7 +2902,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, int ret = 0, partial; struct super_block *sb = inode->i_sb; u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); - struct page *page; pgoff_t page_index; unsigned int from, to; loff_t offset, end, map_end; @@ -2921,6 +2920,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, end = i_size_read(inode); while (offset < end) { + struct folio *folio; page_index = offset >> PAGE_SHIFT; map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; if (map_end > end) @@ -2933,9 +2933,10 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, to = map_end & (PAGE_SIZE - 1); retry: - page = find_or_create_page(mapping, page_index, GFP_NOFS); - if (!page) { - ret = -ENOMEM; + folio = __filemap_get_folio(mapping, page_index, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); + if (IS_ERR(folio)) { + ret = PTR_ERR(folio); mlog_errno(ret); break; } @@ -2945,9 +2946,9 @@ retry: * page, so write it back. */ if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) { - if (PageDirty(page)) { - unlock_page(page); - put_page(page); + if (folio_test_dirty(folio)) { + folio_unlock(folio); + folio_put(folio); ret = filemap_write_and_wait_range(mapping, offset, map_end - 1); @@ -2955,9 +2956,7 @@ retry: } } - if (!PageUptodate(page)) { - struct folio *folio = page_folio(page); - + if (!folio_test_uptodate(folio)) { ret = block_read_full_folio(folio, ocfs2_get_block); if (ret) { mlog_errno(ret); @@ -2966,8 +2965,8 @@ retry: folio_lock(folio); } - if (page_has_buffers(page)) { - ret = walk_page_buffers(handle, page_buffers(page), + if (folio_buffers(folio)) { + ret = walk_page_buffers(handle, folio_buffers(folio), from, to, &partial, ocfs2_clear_cow_buffer); if (ret) { @@ -2976,14 +2975,12 @@ retry: } } - ocfs2_map_and_dirty_page(inode, - handle, from, to, - page, 0, &new_block); - mark_page_accessed(page); + ocfs2_map_and_dirty_folio(inode, handle, from, to, + folio, 0, &new_block); + folio_mark_accessed(folio); unlock: - unlock_page(page); - put_page(page); - page = NULL; + folio_unlock(folio); + folio_put(folio); offset = map_end; if (ret) break; diff --git a/fs/ocfs2/reservations.h b/fs/ocfs2/reservations.h index ec8101ef5717..4fce17180342 100644 --- a/fs/ocfs2/reservations.h +++ b/fs/ocfs2/reservations.h @@ -31,7 +31,7 @@ struct ocfs2_alloc_reservation { #define OCFS2_RESV_FLAG_INUSE 0x01 /* Set when r_node is part of a btree */ #define OCFS2_RESV_FLAG_TMP 0x02 /* Temporary reservation, will be - * destroyed immedately after use */ + * destroyed immediately after use */ #define OCFS2_RESV_FLAG_DIR 0x04 /* Reservation is for an unindexed * directory btree */ @@ -125,7 +125,7 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap, /** * ocfs2_resmap_claimed_bits() - Tell the reservation code that bits were used. * @resmap: reservations bitmap - * @resv: optional reservation to recalulate based on new bitmap + * @resv: optional reservation to recalculate based on new bitmap * @cstart: start of allocation in clusters * @clen: end of allocation in clusters. * diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c index 10157d9d7a9c..f58e891aa2da 100644 --- a/fs/ocfs2/stack_o2cb.c +++ b/fs/ocfs2/stack_o2cb.c @@ -227,7 +227,7 @@ static int o2cb_dlm_lock_status(struct ocfs2_dlm_lksb *lksb) } /* - * o2dlm aways has a "valid" LVB. If the dlm loses track of the LVB + * o2dlm always has a "valid" LVB. If the dlm loses track of the LVB * contents, it will zero out the LVB. Thus the caller can always trust * the contents. 
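A detail worth noting in the conversion above: find_or_create_page() signalled failure with NULL, while __filemap_get_folio() returns an ERR_PTR, so the error check flips from a NULL test to IS_ERR()/PTR_ERR(). The lookup step in isolation (a hypothetical wrapper):

    #include <linux/pagemap.h>

    static int example_with_locked_folio(struct address_space *mapping,
                                         pgoff_t index)
    {
        struct folio *folio;

        folio = __filemap_get_folio(mapping, index,
                        FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
        if (IS_ERR(folio))              /* never NULL: -ENOMEM etc. */
            return PTR_ERR(folio);

        /* folio is locked and referenced here; do the work, then: */
        folio_unlock(folio);
        folio_put(folio);
        return 0;
    }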
*/ diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h index 02ab072c528a..5486a6dce70a 100644 --- a/fs/ocfs2/stackglue.h +++ b/fs/ocfs2/stackglue.h @@ -210,7 +210,7 @@ struct ocfs2_stack_operations { struct file_lock *fl); /* - * This is an optoinal debugging hook. If provided, the + * This is an optional debugging hook. If provided, the * stack can dump debugging information about this lock. */ void (*dump_lksb)(struct ocfs2_dlm_lksb *lksb); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index c79b4291777f..e0b91dbaa0ac 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -19,10 +19,10 @@ #include #include #include -#include +#include +#include #include #include -#include #include #include #include @@ -80,17 +80,15 @@ struct mount_options unsigned int resv_level; int dir_resv_level; char cluster_stack[OCFS2_STACK_LABEL_LEN + 1]; + bool user_stack; }; -static int ocfs2_parse_options(struct super_block *sb, char *options, - struct mount_options *mopt, - int is_remount); +static int ocfs2_parse_param(struct fs_context *fc, struct fs_parameter *param); static int ocfs2_check_set_options(struct super_block *sb, struct mount_options *options); static int ocfs2_show_options(struct seq_file *s, struct dentry *root); static void ocfs2_put_super(struct super_block *sb); static int ocfs2_mount_volume(struct super_block *sb); -static int ocfs2_remount(struct super_block *sb, int *flags, char *data); static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err); static int ocfs2_initialize_mem_caches(void); static void ocfs2_free_mem_caches(void); @@ -135,7 +133,6 @@ static const struct super_operations ocfs2_sops = { .evict_inode = ocfs2_evict_inode, .sync_fs = ocfs2_sync_fs, .put_super = ocfs2_put_super, - .remount_fs = ocfs2_remount, .show_options = ocfs2_show_options, .quota_read = ocfs2_quota_read, .quota_write = ocfs2_quota_write, @@ -144,15 +141,10 @@ static const struct super_operations ocfs2_sops = { enum { Opt_barrier, - Opt_err_panic, - Opt_err_ro, + Opt_errors, Opt_intr, - Opt_nointr, - Opt_hb_none, - Opt_hb_local, - Opt_hb_global, - Opt_data_ordered, - Opt_data_writeback, + Opt_heartbeat, + Opt_data, Opt_atime_quantum, Opt_slot, Opt_commit, @@ -160,52 +152,64 @@ enum { Opt_localflocks, Opt_stack, Opt_user_xattr, - Opt_nouser_xattr, Opt_inode64, Opt_acl, - Opt_noacl, Opt_usrquota, Opt_grpquota, - Opt_coherency_buffered, - Opt_coherency_full, + Opt_coherency, Opt_resv_level, Opt_dir_resv_level, Opt_journal_async_commit, - Opt_err_cont, - Opt_err, }; -static const match_table_t tokens = { - {Opt_barrier, "barrier=%u"}, - {Opt_err_panic, "errors=panic"}, - {Opt_err_ro, "errors=remount-ro"}, - {Opt_intr, "intr"}, - {Opt_nointr, "nointr"}, - {Opt_hb_none, OCFS2_HB_NONE}, - {Opt_hb_local, OCFS2_HB_LOCAL}, - {Opt_hb_global, OCFS2_HB_GLOBAL}, - {Opt_data_ordered, "data=ordered"}, - {Opt_data_writeback, "data=writeback"}, - {Opt_atime_quantum, "atime_quantum=%u"}, - {Opt_slot, "preferred_slot=%u"}, - {Opt_commit, "commit=%u"}, - {Opt_localalloc, "localalloc=%d"}, - {Opt_localflocks, "localflocks"}, - {Opt_stack, "cluster_stack=%s"}, - {Opt_user_xattr, "user_xattr"}, - {Opt_nouser_xattr, "nouser_xattr"}, - {Opt_inode64, "inode64"}, - {Opt_acl, "acl"}, - {Opt_noacl, "noacl"}, - {Opt_usrquota, "usrquota"}, - {Opt_grpquota, "grpquota"}, - {Opt_coherency_buffered, "coherency=buffered"}, - {Opt_coherency_full, "coherency=full"}, - {Opt_resv_level, "resv_level=%u"}, - {Opt_dir_resv_level, "dir_resv_level=%u"}, - {Opt_journal_async_commit, "journal_async_commit"}, - {Opt_err_cont, 
"errors=continue"}, - {Opt_err, NULL} +static const struct constant_table ocfs2_param_errors[] = { + {"panic", OCFS2_MOUNT_ERRORS_PANIC}, + {"remount-ro", OCFS2_MOUNT_ERRORS_ROFS}, + {"continue", OCFS2_MOUNT_ERRORS_CONT}, + {} +}; + +static const struct constant_table ocfs2_param_heartbeat[] = { + {"local", OCFS2_MOUNT_HB_LOCAL}, + {"none", OCFS2_MOUNT_HB_NONE}, + {"global", OCFS2_MOUNT_HB_GLOBAL}, + {} +}; + +static const struct constant_table ocfs2_param_data[] = { + {"writeback", OCFS2_MOUNT_DATA_WRITEBACK}, + {"ordered", 0}, + {} +}; + +static const struct constant_table ocfs2_param_coherency[] = { + {"buffered", OCFS2_MOUNT_COHERENCY_BUFFERED}, + {"full", 0}, + {} +}; + +static const struct fs_parameter_spec ocfs2_param_spec[] = { + fsparam_u32 ("barrier", Opt_barrier), + fsparam_enum ("errors", Opt_errors, ocfs2_param_errors), + fsparam_flag_no ("intr", Opt_intr), + fsparam_enum ("heartbeat", Opt_heartbeat, ocfs2_param_heartbeat), + fsparam_enum ("data", Opt_data, ocfs2_param_data), + fsparam_u32 ("atime_quantum", Opt_atime_quantum), + fsparam_u32 ("preferred_slot", Opt_slot), + fsparam_u32 ("commit", Opt_commit), + fsparam_s32 ("localalloc", Opt_localalloc), + fsparam_flag ("localflocks", Opt_localflocks), + fsparam_string ("cluster_stack", Opt_stack), + fsparam_flag_no ("user_xattr", Opt_user_xattr), + fsparam_flag ("inode64", Opt_inode64), + fsparam_flag_no ("acl", Opt_acl), + fsparam_flag ("usrquota", Opt_usrquota), + fsparam_flag ("grpquota", Opt_grpquota), + fsparam_enum ("coherency", Opt_coherency, ocfs2_param_coherency), + fsparam_u32 ("resv_level", Opt_resv_level), + fsparam_u32 ("dir_resv_level", Opt_dir_resv_level), + fsparam_flag ("journal_async_commit", Opt_journal_async_commit), + {} }; #ifdef CONFIG_DEBUG_FS @@ -600,32 +604,32 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits, return (((unsigned long long)bytes) << bitshift) - trim; } -static int ocfs2_remount(struct super_block *sb, int *flags, char *data) +static int ocfs2_reconfigure(struct fs_context *fc) { int incompat_features; int ret = 0; - struct mount_options parsed_options; + struct mount_options *parsed_options = fc->fs_private; + struct super_block *sb = fc->root->d_sb; struct ocfs2_super *osb = OCFS2_SB(sb); u32 tmp; sync_filesystem(sb); - if (!ocfs2_parse_options(sb, data, &parsed_options, 1) || - !ocfs2_check_set_options(sb, &parsed_options)) { + if (!ocfs2_check_set_options(sb, parsed_options)) { ret = -EINVAL; goto out; } tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | OCFS2_MOUNT_HB_NONE; - if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) { + if ((osb->s_mount_opt & tmp) != (parsed_options->mount_opt & tmp)) { ret = -EINVAL; mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n"); goto out; } if ((osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK) != - (parsed_options.mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) { + (parsed_options->mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) { ret = -EINVAL; mlog(ML_ERROR, "Cannot change data mode on remount\n"); goto out; @@ -634,16 +638,16 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) /* Probably don't want this on remount; it might * mess with other nodes */ if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64) && - (parsed_options.mount_opt & OCFS2_MOUNT_INODE64)) { + (parsed_options->mount_opt & OCFS2_MOUNT_INODE64)) { ret = -EINVAL; mlog(ML_ERROR, "Cannot enable inode64 on remount\n"); goto out; } /* We're going to/from readonly mode. 
*/ - if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) { + if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) { /* Disable quota accounting before remounting RO */ - if (*flags & SB_RDONLY) { + if (fc->sb_flags & SB_RDONLY) { ret = ocfs2_susp_quotas(osb, 0); if (ret < 0) goto out; @@ -657,7 +661,7 @@ goto unlock_osb; } - if (*flags & SB_RDONLY) { + if (fc->sb_flags & SB_RDONLY) { sb->s_flags |= SB_RDONLY; osb->osb_flags |= OCFS2_OSB_SOFT_RO; } else { @@ -678,11 +682,11 @@ sb->s_flags &= ~SB_RDONLY; osb->osb_flags &= ~OCFS2_OSB_SOFT_RO; } - trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags); + trace_ocfs2_remount(sb->s_flags, osb->osb_flags, fc->sb_flags); unlock_osb: spin_unlock(&osb->osb_lock); /* Enable quota accounting after remounting RW */ - if (!ret && !(*flags & SB_RDONLY)) { + if (!ret && !(fc->sb_flags & SB_RDONLY)) { if (sb_any_quota_suspended(sb)) ret = ocfs2_susp_quotas(osb, 1); else @@ -701,11 +705,11 @@ unlock_osb: if (!ret) { /* Only save off the new mount options in case of a successful * remount. */ - osb->s_mount_opt = parsed_options.mount_opt; - osb->s_atime_quantum = parsed_options.atime_quantum; - osb->preferred_slot = parsed_options.slot; - if (parsed_options.commit_interval) - osb->osb_commit_interval = parsed_options.commit_interval; + osb->s_mount_opt = parsed_options->mount_opt; + osb->s_atime_quantum = parsed_options->atime_quantum; + osb->preferred_slot = parsed_options->slot; + if (parsed_options->commit_interval) + osb->osb_commit_interval = parsed_options->commit_interval; if (!ocfs2_is_hard_readonly(osb)) ocfs2_set_journal_params(osb); @@ -966,23 +970,18 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb) } } -static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) +static int ocfs2_fill_super(struct super_block *sb, struct fs_context *fc) { struct dentry *root; int status, sector_size; - struct mount_options parsed_options; + struct mount_options *parsed_options = fc->fs_private; struct inode *inode = NULL; struct ocfs2_super *osb = NULL; struct buffer_head *bh = NULL; char nodestr[12]; struct ocfs2_blockcheck_stats stats; - trace_ocfs2_fill_super(sb, data, silent); - - if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) { - status = -EINVAL; - goto out; - } + trace_ocfs2_fill_super(sb, fc, fc->sb_flags & SB_SILENT); /* probe for superblock */ status = ocfs2_sb_probe(sb, &bh, &sector_size, &stats); @@ -999,24 +998,24 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) osb = OCFS2_SB(sb); - if (!ocfs2_check_set_options(sb, &parsed_options)) { + if (!ocfs2_check_set_options(sb, parsed_options)) { status = -EINVAL; goto out_super; } - osb->s_mount_opt = parsed_options.mount_opt; - osb->s_atime_quantum = parsed_options.atime_quantum; - osb->preferred_slot = parsed_options.slot; - osb->osb_commit_interval = parsed_options.commit_interval; + osb->s_mount_opt = parsed_options->mount_opt; + osb->s_atime_quantum = parsed_options->atime_quantum; + osb->preferred_slot = parsed_options->slot; + osb->osb_commit_interval = parsed_options->commit_interval; - ocfs2_la_set_sizes(osb, parsed_options.localalloc_opt); - osb->osb_resv_level = parsed_options.resv_level; - osb->osb_dir_resv_level = parsed_options.resv_level; - if (parsed_options.dir_resv_level == -1) - osb->osb_dir_resv_level = parsed_options.resv_level; + ocfs2_la_set_sizes(osb, 
parsed_options->localalloc_opt); + osb->osb_resv_level = parsed_options->resv_level; + osb->osb_dir_resv_level = parsed_options->resv_level; + if (parsed_options->dir_resv_level == -1) + osb->osb_dir_resv_level = parsed_options->resv_level; else - osb->osb_dir_resv_level = parsed_options.dir_resv_level; + osb->osb_dir_resv_level = parsed_options->dir_resv_level; - status = ocfs2_verify_userspace_stack(osb, &parsed_options); + status = ocfs2_verify_userspace_stack(osb, parsed_options); if (status) goto out_super; @@ -1180,27 +1179,72 @@ out: return status; } -static struct dentry *ocfs2_mount(struct file_system_type *fs_type, - int flags, - const char *dev_name, - void *data) +static int ocfs2_get_tree(struct fs_context *fc) { - return mount_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super); + return get_tree_bdev(fc, ocfs2_fill_super); +} + +static void ocfs2_free_fc(struct fs_context *fc) +{ + kfree(fc->fs_private); +} + +static const struct fs_context_operations ocfs2_context_ops = { + .parse_param = ocfs2_parse_param, + .get_tree = ocfs2_get_tree, + .reconfigure = ocfs2_reconfigure, + .free = ocfs2_free_fc, +}; + +static int ocfs2_init_fs_context(struct fs_context *fc) +{ + struct mount_options *mopt; + + mopt = kzalloc(sizeof(struct mount_options), GFP_KERNEL); + if (!mopt) + return -EINVAL; + + mopt->commit_interval = 0; + mopt->mount_opt = OCFS2_MOUNT_NOINTR; + mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; + mopt->slot = OCFS2_INVALID_SLOT; + mopt->localalloc_opt = -1; + mopt->cluster_stack[0] = '\0'; + mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL; + mopt->dir_resv_level = -1; + + fc->fs_private = mopt; + fc->ops = &ocfs2_context_ops; + + return 0; } static struct file_system_type ocfs2_fs_type = { .owner = THIS_MODULE, .name = "ocfs2", - .mount = ocfs2_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE, - .next = NULL + .next = NULL, + .init_fs_context = ocfs2_init_fs_context, + .parameters = ocfs2_param_spec, }; MODULE_ALIAS_FS("ocfs2"); static int ocfs2_check_set_options(struct super_block *sb, struct mount_options *options) { + if (options->user_stack == 0) { + u32 tmp; + + /* Ensure only one heartbeat mode */ + tmp = options->mount_opt & (OCFS2_MOUNT_HB_LOCAL | + OCFS2_MOUNT_HB_GLOBAL | + OCFS2_MOUNT_HB_NONE); + if (hweight32(tmp) != 1) { + mlog(ML_ERROR, "Invalid heartbeat mount options\n"); + return 0; + } + } if (options->mount_opt & OCFS2_MOUNT_USRQUOTA && !OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { @@ -1232,241 +1276,142 @@ static int ocfs2_check_set_options(struct super_block *sb, return 1; } -static int ocfs2_parse_options(struct super_block *sb, - char *options, - struct mount_options *mopt, - int is_remount) +static int ocfs2_parse_param(struct fs_context *fc, struct fs_parameter *param) { - int status, user_stack = 0; - char *p; - u32 tmp; - int token, option; - substring_t args[MAX_OPT_ARGS]; + struct fs_parse_result result; + int opt; + struct mount_options *mopt = fc->fs_private; + bool is_remount = (fc->purpose & FS_CONTEXT_FOR_RECONFIGURE); - trace_ocfs2_parse_options(is_remount, options ? 
options : "(none)"); + trace_ocfs2_parse_options(is_remount, param->key); - mopt->commit_interval = 0; - mopt->mount_opt = OCFS2_MOUNT_NOINTR; - mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; - mopt->slot = OCFS2_INVALID_SLOT; - mopt->localalloc_opt = -1; - mopt->cluster_stack[0] = '\0'; - mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL; - mopt->dir_resv_level = -1; + opt = fs_parse(fc, ocfs2_param_spec, param, &result); + if (opt < 0) + return opt; - if (!options) { - status = 1; - goto bail; - } - - while ((p = strsep(&options, ",")) != NULL) { - if (!*p) - continue; - - token = match_token(p, tokens, args); - switch (token) { - case Opt_hb_local: - mopt->mount_opt |= OCFS2_MOUNT_HB_LOCAL; - break; - case Opt_hb_none: - mopt->mount_opt |= OCFS2_MOUNT_HB_NONE; - break; - case Opt_hb_global: - mopt->mount_opt |= OCFS2_MOUNT_HB_GLOBAL; - break; - case Opt_barrier: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option) - mopt->mount_opt |= OCFS2_MOUNT_BARRIER; - else - mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER; - break; - case Opt_intr: - mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR; - break; - case Opt_nointr: + switch (opt) { + case Opt_heartbeat: + mopt->mount_opt |= result.uint_32; + break; + case Opt_barrier: + if (result.uint_32) + mopt->mount_opt |= OCFS2_MOUNT_BARRIER; + else + mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER; + break; + case Opt_intr: + if (result.negated) mopt->mount_opt |= OCFS2_MOUNT_NOINTR; - break; - case Opt_err_panic: - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT; - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS; - mopt->mount_opt |= OCFS2_MOUNT_ERRORS_PANIC; - break; - case Opt_err_ro: - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT; - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC; - mopt->mount_opt |= OCFS2_MOUNT_ERRORS_ROFS; - break; - case Opt_err_cont: - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS; - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC; - mopt->mount_opt |= OCFS2_MOUNT_ERRORS_CONT; - break; - case Opt_data_ordered: - mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK; - break; - case Opt_data_writeback: - mopt->mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK; - break; - case Opt_user_xattr: - mopt->mount_opt &= ~OCFS2_MOUNT_NOUSERXATTR; - break; - case Opt_nouser_xattr: + else + mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR; + break; + case Opt_errors: + mopt->mount_opt &= ~(OCFS2_MOUNT_ERRORS_CONT | + OCFS2_MOUNT_ERRORS_ROFS | + OCFS2_MOUNT_ERRORS_PANIC); + mopt->mount_opt |= result.uint_32; + break; + case Opt_data: + mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK; + mopt->mount_opt |= result.uint_32; + break; + case Opt_user_xattr: + if (result.negated) mopt->mount_opt |= OCFS2_MOUNT_NOUSERXATTR; - break; - case Opt_atime_quantum: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option >= 0) - mopt->atime_quantum = option; - break; - case Opt_slot: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option) - mopt->slot = (u16)option; - break; - case Opt_commit: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option < 0) - return 0; - if (option == 0) - option = JBD2_DEFAULT_MAX_COMMIT_AGE; - mopt->commit_interval = HZ * option; - break; - case Opt_localalloc: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option >= 0) - mopt->localalloc_opt = option; - break; - case Opt_localflocks: - /* - * Changing this during remount could race - * flock() requests, or "unbalance" existing - * ones (e.g., a lock is taken in one mode but - * dropped in the other). 
If users care enough - * to flip locking modes during remount, we - * could add a "local" flag to individual - * flock structures for proper tracking of - * state. - */ - if (!is_remount) - mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS; - break; - case Opt_stack: - /* Check both that the option we were passed - * is of the right length and that it is a proper - * string of the right length. - */ - if (((args[0].to - args[0].from) != - OCFS2_STACK_LABEL_LEN) || - (strnlen(args[0].from, - OCFS2_STACK_LABEL_LEN) != - OCFS2_STACK_LABEL_LEN)) { - mlog(ML_ERROR, - "Invalid cluster_stack option\n"); - status = 0; - goto bail; - } - memcpy(mopt->cluster_stack, args[0].from, - OCFS2_STACK_LABEL_LEN); - mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; - /* - * Open code the memcmp here as we don't have - * an osb to pass to - * ocfs2_userspace_stack(). - */ - if (memcmp(mopt->cluster_stack, - OCFS2_CLASSIC_CLUSTER_STACK, - OCFS2_STACK_LABEL_LEN)) - user_stack = 1; - break; - case Opt_inode64: - mopt->mount_opt |= OCFS2_MOUNT_INODE64; - break; - case Opt_usrquota: - mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA; - break; - case Opt_grpquota: - mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA; - break; - case Opt_coherency_buffered: - mopt->mount_opt |= OCFS2_MOUNT_COHERENCY_BUFFERED; - break; - case Opt_coherency_full: - mopt->mount_opt &= ~OCFS2_MOUNT_COHERENCY_BUFFERED; - break; - case Opt_acl: - mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL; - mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL; - break; - case Opt_noacl: + else + mopt->mount_opt &= ~OCFS2_MOUNT_NOUSERXATTR; + break; + case Opt_atime_quantum: + mopt->atime_quantum = result.uint_32; + break; + case Opt_slot: + if (result.uint_32) + mopt->slot = (u16)result.uint_32; + break; + case Opt_commit: + if (result.uint_32 == 0) + mopt->commit_interval = HZ * JBD2_DEFAULT_MAX_COMMIT_AGE; + else + mopt->commit_interval = HZ * result.uint_32; + break; + case Opt_localalloc: + if (result.int_32 >= 0) + mopt->localalloc_opt = result.int_32; + break; + case Opt_localflocks: + /* + * Changing this during remount could race flock() requests, or + * "unbalance" existing ones (e.g., a lock is taken in one mode + * but dropped in the other). If users care enough to flip + * locking modes during remount, we could add a "local" flag to + * individual flock structures for proper tracking of state. + */ + if (!is_remount) + mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS; + break; + case Opt_stack: + /* Check both that the option we were passed is of the right + * length and that it is a proper string of the right length. + */ + if (strlen(param->string) != OCFS2_STACK_LABEL_LEN) { + mlog(ML_ERROR, "Invalid cluster_stack option\n"); + return -EINVAL; + } + memcpy(mopt->cluster_stack, param->string, OCFS2_STACK_LABEL_LEN); + mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; + /* + * Open code the memcmp here as we don't have an osb to pass + * to ocfs2_userspace_stack(). 
+ */ + if (memcmp(mopt->cluster_stack, + OCFS2_CLASSIC_CLUSTER_STACK, + OCFS2_STACK_LABEL_LEN)) + mopt->user_stack = 1; + break; + case Opt_inode64: + mopt->mount_opt |= OCFS2_MOUNT_INODE64; + break; + case Opt_usrquota: + mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA; + break; + case Opt_grpquota: + mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA; + break; + case Opt_coherency: + mopt->mount_opt &= ~OCFS2_MOUNT_COHERENCY_BUFFERED; + mopt->mount_opt |= result.uint_32; + break; + case Opt_acl: + if (result.negated) { mopt->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL; mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL; - break; - case Opt_resv_level: - if (is_remount) - break; - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option >= OCFS2_MIN_RESV_LEVEL && - option < OCFS2_MAX_RESV_LEVEL) - mopt->resv_level = option; - break; - case Opt_dir_resv_level: - if (is_remount) - break; - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option >= OCFS2_MIN_RESV_LEVEL && - option < OCFS2_MAX_RESV_LEVEL) - mopt->dir_resv_level = option; - break; - case Opt_journal_async_commit: - mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT; - break; - default: - mlog(ML_ERROR, - "Unrecognized mount option \"%s\" " - "or missing value\n", p); - status = 0; - goto bail; + } else { + mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL; + mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL; } + break; + case Opt_resv_level: + if (is_remount) + break; + if (result.uint_32 >= OCFS2_MIN_RESV_LEVEL && + result.uint_32 < OCFS2_MAX_RESV_LEVEL) + mopt->resv_level = result.uint_32; + break; + case Opt_dir_resv_level: + if (is_remount) + break; + if (result.uint_32 >= OCFS2_MIN_RESV_LEVEL && + result.uint_32 < OCFS2_MAX_RESV_LEVEL) + mopt->dir_resv_level = result.uint_32; + break; + case Opt_journal_async_commit: + mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT; + break; + default: + return -EINVAL; } - if (user_stack == 0) { - /* Ensure only one heartbeat mode */ - tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | - OCFS2_MOUNT_HB_GLOBAL | - OCFS2_MOUNT_HB_NONE); - if (hweight32(tmp) != 1) { - mlog(ML_ERROR, "Invalid heartbeat mount options\n"); - status = 0; - goto bail; - } - } - - status = 1; - -bail: - return status; + return 0; } static int ocfs2_show_options(struct seq_file *s, struct dentry *root) @@ -1858,7 +1803,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) osb = OCFS2_SB(sb); BUG_ON(!osb); - /* Remove file check sysfs related directores/files, + /* Remove file check sysfs related directories/files, * and wait for the pending file check operations */ ocfs2_filecheck_remove_sysfs(osb); diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index d4c5fdcfa1e4..ad8be3300b49 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -54,31 +54,27 @@ static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio) { - struct page *page = &folio->page; - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; struct buffer_head *bh = NULL; int status = ocfs2_read_inode_block(inode, &bh); struct ocfs2_dinode *fe; const char *link; - void *kaddr; size_t len; if (status < 0) { mlog_errno(status); - return status; + goto out; } fe = (struct ocfs2_dinode *) bh->b_data; link = (char *) fe->id2.i_symlink; /* will be less than a page size */ len = strnlen(link, ocfs2_fast_symlink_chars(inode->i_sb)); - kaddr = kmap_atomic(page); - memcpy(kaddr, link, len + 1); - kunmap_atomic(kaddr); - SetPageUptodate(page); - unlock_page(page); + 
memcpy_to_folio(folio, 0, link, len + 1); +out: + folio_end_read(folio, status == 0); brelse(bh); - return 0; + return status; } const struct address_space_operations ocfs2_fast_symlink_aops = { diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 73a6f6fd8a8e..d70a20d29e3e 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -648,7 +648,7 @@ int ocfs2_calc_xattr_init(struct inode *dir, * 256(name) + 80(value) + 16(entry) = 352 bytes, * The max space of acl xattr taken inline is * 80(value) + 16(entry) * 2(if directory) = 192 bytes, - * when blocksize = 512, may reserve one more cluser for + * when blocksize = 512, may reserve one more cluster for * xattr bucket, otherwise reserve one metadata block * for them is ok. * If this is a new directory with inline data, @@ -4371,7 +4371,7 @@ static int cmp_xe_offset(const void *a, const void *b) /* * defrag a xattr bucket if we find that the bucket has some - * holes beteen name/value pairs. + * holes between name/value pairs. * We will move all the name/value pairs to the end of the bucket * so that we can spare some space for insertion. */ @@ -5011,7 +5011,7 @@ static int ocfs2_divide_xattr_cluster(struct inode *inode, * 2. If cluster_size == bucket_size: * a) If the previous extent rec has more than one cluster and the insert * place isn't in the last cluster, copy the entire last cluster to the - * new one. This time, we don't need to upate the first_bh and header_bh + * new one. This time, we don't need to update the first_bh and header_bh * since they will not be moved into the new cluster. * b) Otherwise, move the bottom half of the xattrs in the last cluster into * the new one. And we set the extend flag to zero if the insert place is @@ -6189,7 +6189,7 @@ struct ocfs2_xattr_reflink { /* * Given a xattr header and xe offset, * return the proper xv and the corresponding bh. - * xattr in inode, block and xattr tree have different implementaions. + * xattr in inode, block and xattr tree have different implementations. */ typedef int (get_xattr_value_root)(struct super_block *sb, struct buffer_head *bh, @@ -6269,7 +6269,7 @@ static int ocfs2_get_xattr_value_root(struct super_block *sb, } /* - * Lock the meta_ac and caculate how much credits we need for reflink xattrs. + * Lock the meta_ac and calculate how much credits we need for reflink xattrs. * It is only used for inline xattr and xattr block. */ static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb, diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig index 60fc98bdf421..b1091e70434a 100644 --- a/fs/squashfs/Kconfig +++ b/fs/squashfs/Kconfig @@ -5,8 +5,8 @@ config SQUASHFS help Saying Y here includes support for SquashFS 4.0 (a Compressed Read-Only File System). Squashfs is a highly compressed read-only - filesystem for Linux. It uses zlib, lzo or xz compression to - compress both files, inodes and directories. Inodes in the system + filesystem for Linux. It uses zlib, lz4, lzo, xz or zstd compression + to compress both files, inodes and directories. Inodes in the system are very small and all blocks are packed to minimise data overhead. Block sizes greater than 4K are supported up to a maximum of 1 Mbytes (default block size 128K). SquashFS 4.0 supports 64 bit filesystems @@ -16,7 +16,7 @@ config SQUASHFS Squashfs is intended for general read-only filesystem use, for archival use (i.e. in cases where a .tar.gz file may be used), and in embedded systems where low overhead is needed. 
Further information - and tools are available from http://squashfs.sourceforge.net. + and tools are available from github.com/plougher/squashfs-tools. If you want to compile this as a module ( = code which can be inserted in and removed from the running kernel whenever you want), diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c index 5062326d0efb..4db0d2b0aab8 100644 --- a/fs/squashfs/cache.c +++ b/fs/squashfs/cache.c @@ -224,11 +224,15 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries, int block_size) { int i, j; - struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL); + struct squashfs_cache *cache; + if (entries == 0) + return NULL; + + cache = kzalloc(sizeof(*cache), GFP_KERNEL); if (cache == NULL) { ERROR("Failed to allocate %s cache\n", name); - return NULL; + return ERR_PTR(-ENOMEM); } cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL); @@ -281,7 +285,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries, cleanup: squashfs_cache_delete(cache); - return NULL; + return ERR_PTR(-ENOMEM); } diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index 21aaa96856c1..5ca2baa16dc2 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -362,29 +362,33 @@ static int read_blocklist(struct inode *inode, int index, u64 *block) return squashfs_block_size(size); } -void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail) +static bool squashfs_fill_page(struct folio *folio, + struct squashfs_cache_entry *buffer, size_t offset, + size_t avail) { - int copied; + size_t copied; void *pageaddr; - pageaddr = kmap_atomic(page); + pageaddr = kmap_local_folio(folio, 0); copied = squashfs_copy_data(pageaddr, buffer, offset, avail); memset(pageaddr + copied, 0, PAGE_SIZE - copied); - kunmap_atomic(pageaddr); + kunmap_local(pageaddr); - flush_dcache_page(page); - if (copied == avail) - SetPageUptodate(page); + flush_dcache_folio(folio); + + return copied == avail; } /* Copy data into page cache */ -void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, - int bytes, int offset) +void squashfs_copy_cache(struct folio *folio, + struct squashfs_cache_entry *buffer, size_t bytes, + size_t offset) { - struct inode *inode = page->mapping->host; + struct address_space *mapping = folio->mapping; + struct inode *inode = mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; - int start_index = page->index & ~mask, end_index = start_index | mask; + int start_index = folio->index & ~mask, end_index = start_index | mask; /* * Loop copying datablock into pages. As the datablock likely covers @@ -394,32 +398,35 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, */ for (i = start_index; i <= end_index && bytes > 0; i++, bytes -= PAGE_SIZE, offset += PAGE_SIZE) { - struct page *push_page; - int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0; + struct folio *push_folio; + size_t avail = buffer ? min(bytes, PAGE_SIZE) : 0; + bool updated = false; - TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail); + TRACE("bytes %zu, i %d, available_bytes %zu\n", bytes, i, avail); - push_page = (i == page->index) ? page : - grab_cache_page_nowait(page->mapping, i); + push_folio = (i == folio->index) ? 
folio : + __filemap_get_folio(mapping, i, + FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, + mapping_gfp_mask(mapping)); - if (!push_page) + if (IS_ERR(push_folio)) continue; - if (PageUptodate(push_page)) - goto skip_page; + if (folio_test_uptodate(push_folio)) + goto skip_folio; - squashfs_fill_page(push_page, buffer, offset, avail); -skip_page: - unlock_page(push_page); - if (i != page->index) - put_page(push_page); + updated = squashfs_fill_page(push_folio, buffer, offset, avail); +skip_folio: + folio_end_read(push_folio, updated); + if (i != folio->index) + folio_put(push_folio); } } /* Read datablock stored packed inside a fragment (tail-end packed block) */ -static int squashfs_readpage_fragment(struct page *page, int expected) +static int squashfs_readpage_fragment(struct folio *folio, int expected) { - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, squashfs_i(inode)->fragment_block, squashfs_i(inode)->fragment_size); @@ -430,36 +437,34 @@ static int squashfs_readpage_fragment(struct page *page, int expected) squashfs_i(inode)->fragment_block, squashfs_i(inode)->fragment_size); else - squashfs_copy_cache(page, buffer, expected, + squashfs_copy_cache(folio, buffer, expected, squashfs_i(inode)->fragment_offset); squashfs_cache_put(buffer); return res; } -static int squashfs_readpage_sparse(struct page *page, int expected) +static int squashfs_readpage_sparse(struct folio *folio, int expected) { - squashfs_copy_cache(page, NULL, expected, 0); + squashfs_copy_cache(folio, NULL, expected, 0); return 0; } static int squashfs_read_folio(struct file *file, struct folio *folio) { - struct page *page = &folio->page; - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; - int index = page->index >> (msblk->block_log - PAGE_SHIFT); + int index = folio->index >> (msblk->block_log - PAGE_SHIFT); int file_end = i_size_read(inode) >> msblk->block_log; int expected = index == file_end ? 
(i_size_read(inode) & (msblk->block_size - 1)) : msblk->block_size; int res = 0; - void *pageaddr; TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n", - page->index, squashfs_i(inode)->start); + folio->index, squashfs_i(inode)->start); - if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >> + if (folio->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT)) goto out; @@ -472,23 +477,18 @@ static int squashfs_read_folio(struct file *file, struct folio *folio) goto out; if (res == 0) - res = squashfs_readpage_sparse(page, expected); + res = squashfs_readpage_sparse(folio, expected); else - res = squashfs_readpage_block(page, block, res, expected); + res = squashfs_readpage_block(folio, block, res, expected); } else - res = squashfs_readpage_fragment(page, expected); + res = squashfs_readpage_fragment(folio, expected); if (!res) return 0; out: - pageaddr = kmap_atomic(page); - memset(pageaddr, 0, PAGE_SIZE); - kunmap_atomic(pageaddr); - flush_dcache_page(page); - if (res == 0) - SetPageUptodate(page); - unlock_page(page); + folio_zero_segment(folio, 0, folio_size(folio)); + folio_end_read(folio, res == 0); return res; } diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c index 54c17b7c85fd..40e59a43d098 100644 --- a/fs/squashfs/file_cache.c +++ b/fs/squashfs/file_cache.c @@ -18,9 +18,9 @@ #include "squashfs.h" /* Read separately compressed datablock and memcopy into page cache */ -int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected) +int squashfs_readpage_block(struct folio *folio, u64 block, int bsize, int expected) { - struct inode *i = page->mapping->host; + struct inode *i = folio->mapping->host; struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, block, bsize); int res = buffer->error; @@ -29,7 +29,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expecte ERROR("Unable to read page, block %llx, size %x\n", block, bsize); else - squashfs_copy_cache(page, buffer, expected, 0); + squashfs_copy_cache(folio, buffer, expected, 0); squashfs_cache_put(buffer); return res; diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c index d19d4db74af8..2c3e809d6891 100644 --- a/fs/squashfs/file_direct.c +++ b/fs/squashfs/file_direct.c @@ -19,12 +19,11 @@ #include "page_actor.h" /* Read separately compressed datablock directly into page cache */ -int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, - int expected) - +int squashfs_readpage_block(struct folio *folio, u64 block, int bsize, + int expected) { - struct folio *folio = page_folio(target_page); - struct inode *inode = target_page->mapping->host; + struct page *target_page = &folio->page; + struct inode *inode = folio->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT; int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; @@ -48,7 +47,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, /* Try to grab all the pages covered by the Squashfs block */ for (i = 0, index = start_index; index <= end_index; index++) { page[i] = (index == folio->index) ? 
target_page : - grab_cache_page_nowait(target_page->mapping, index); + grab_cache_page_nowait(folio->mapping, index); if (page[i] == NULL) continue; diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index 5a756e6790b5..218868b20f16 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h @@ -14,6 +14,12 @@ #define WARNING(s, args...) pr_warn("SQUASHFS: "s, ## args) +#ifdef CONFIG_SQUASHFS_FILE_CACHE +#define SQUASHFS_READ_PAGES msblk->max_thread_num +#else +#define SQUASHFS_READ_PAGES 0 +#endif + /* block.c */ extern int squashfs_read_data(struct super_block *, u64, int, u64 *, struct squashfs_page_actor *); @@ -67,12 +73,11 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *, u64, u64, unsigned int); /* file.c */ -void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int); -void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int, - int); +void squashfs_copy_cache(struct folio *, struct squashfs_cache_entry *, + size_t bytes, size_t offset); /* file_xxx.c */ -extern int squashfs_readpage_block(struct page *, u64, int, int); +int squashfs_readpage_block(struct folio *, u64 block, int bsize, int expected); /* id.c */ extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 22e812808e5c..67c55fe32ce8 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -314,26 +314,29 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_flags |= SB_RDONLY; sb->s_op = &squashfs_super_ops; - err = -ENOMEM; - msblk->block_cache = squashfs_cache_init("metadata", SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE); - if (msblk->block_cache == NULL) + if (IS_ERR(msblk->block_cache)) { + err = PTR_ERR(msblk->block_cache); goto failed_mount; + } /* Allocate read_page block */ msblk->read_page = squashfs_cache_init("data", - msblk->max_thread_num, msblk->block_size); - if (msblk->read_page == NULL) { + SQUASHFS_READ_PAGES, msblk->block_size); + if (IS_ERR(msblk->read_page)) { errorf(fc, "Failed to allocate read_page block"); + err = PTR_ERR(msblk->read_page); goto failed_mount; } if (msblk->devblksize == PAGE_SIZE) { struct inode *cache = new_inode(sb); - if (cache == NULL) + if (cache == NULL) { + err = -ENOMEM; goto failed_mount; + } set_nlink(cache, 1); cache->i_size = OFFSET_MAX; @@ -405,9 +408,9 @@ handle_fragments: goto check_directory_table; msblk->fragment_cache = squashfs_cache_init("fragment", - SQUASHFS_CACHED_FRAGMENTS, msblk->block_size); - if (msblk->fragment_cache == NULL) { - err = -ENOMEM; + min(SQUASHFS_CACHED_FRAGMENTS, fragments), msblk->block_size); + if (IS_ERR(msblk->fragment_cache)) { + err = PTR_ERR(msblk->fragment_cache); goto failed_mount; } diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index 5a80fe728dc8..182b039ce5fa 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h @@ -5,7 +5,7 @@ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. * * This file is a stub providing documentation for what functions - * asm-ARCH/syscall.h files need to define. Most arch definitions + * arch/ARCH/include/asm/syscall.h files need to define. Most arch definitions * will be simple inlines. 
* * All of these functions expect to be called with no locks, diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 262b6596eca5..2026953e2c4e 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -23,7 +23,7 @@ struct device; * * Function implementations generic to all architectures are in * lib/bitmap.c. Function implementations that are architecture - * specific are in various include/asm-<arch>/bitops.h headers + * specific are in various arch/<arch>/include/asm/bitops.h headers * and other arch/<arch> specific files. * * See lib/bitmap.c for more details. diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 6639f48dac36..800dcc360db2 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -29,25 +29,39 @@ struct task_delay_info { * XXX_delay contains the accumulated delay time in nanoseconds. */ u64 blkio_start; + u64 blkio_delay_max; + u64 blkio_delay_min; u64 blkio_delay; /* wait for sync block io completion */ u64 swapin_start; + u64 swapin_delay_max; + u64 swapin_delay_min; u64 swapin_delay; /* wait for swapin */ u32 blkio_count; /* total count of the number of sync block */ /* io operations performed */ u32 swapin_count; /* total count of swapin */ u64 freepages_start; + u64 freepages_delay_max; + u64 freepages_delay_min; u64 freepages_delay; /* wait for memory reclaim */ u64 thrashing_start; + u64 thrashing_delay_max; + u64 thrashing_delay_min; u64 thrashing_delay; /* wait for thrashing page */ u64 compact_start; + u64 compact_delay_max; + u64 compact_delay_min; u64 compact_delay; /* wait for memory compact */ u64 wpcopy_start; + u64 wpcopy_delay_max; + u64 wpcopy_delay_min; u64 wpcopy_delay; /* wait for write-protect copy */ + u64 irq_delay_max; + u64 irq_delay_min; u64 irq_delay; /* wait for IRQ/SOFTIRQ */ u32 freepages_count; /* total count of memory reclaim */ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 6bbfc8aa42e8..56465af31044 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -153,7 +153,7 @@ static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache, void __kasan_poison_new_object(struct kmem_cache *cache, void *object); /** - * kasan_unpoison_new_object - Repoison a new slab object. + * kasan_poison_new_object - Repoison a new slab object. * @cache: Cache the object belongs to. * @object: Pointer to the object. * diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h index e781727c8916..55bfe670bbb9 100644 --- a/include/linux/min_heap.h +++ b/include/linux/min_heap.h @@ -6,6 +6,17 @@ #include <linux/string.h> #include <linux/types.h> +/* + * The Min Heap API provides utilities for managing min-heaps, a binary tree + * structure where each node's value is less than or equal to its children's + * values, ensuring the smallest element is at the root. + * + * Users should avoid directly calling functions prefixed with __min_heap_*(). + * Instead, use the provided macro wrappers. + * + * For further details and examples, refer to Documentation/core-api/min_heap.rst. + */ + /** * Data structure to hold a min-heap. * @nr: Number of elements currently in the heap. @@ -218,7 +229,7 @@ void __min_heap_init_inline(min_heap_char *heap, void *data, int size) } #define min_heap_init_inline(_heap, _data, _size) \ - __min_heap_init_inline((min_heap_char *)_heap, _data, _size) + __min_heap_init_inline(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size) /* Get the minimum element from the heap.
*/ static __always_inline @@ -228,7 +239,8 @@ void *__min_heap_peek_inline(struct min_heap_char *heap) } #define min_heap_peek_inline(_heap) \ - (__minheap_cast(_heap) __min_heap_peek_inline((min_heap_char *)_heap)) + (__minheap_cast(_heap) \ + __min_heap_peek_inline(container_of(&(_heap)->nr, min_heap_char, nr))) /* Check if the heap is full. */ static __always_inline @@ -238,7 +250,7 @@ bool __min_heap_full_inline(min_heap_char *heap) } #define min_heap_full_inline(_heap) \ - __min_heap_full_inline((min_heap_char *)_heap) + __min_heap_full_inline(container_of(&(_heap)->nr, min_heap_char, nr)) /* Sift the element at pos down the heap. */ static __always_inline @@ -277,8 +289,8 @@ void __min_heap_sift_down_inline(min_heap_char *heap, int pos, size_t elem_size, } #define min_heap_sift_down_inline(_heap, _pos, _func, _args) \ - __min_heap_sift_down_inline((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), \ - _func, _args) + __min_heap_sift_down_inline(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \ + __minheap_obj_size(_heap), _func, _args) /* Sift up ith element from the heap, O(log2(nr)). */ static __always_inline @@ -304,8 +316,8 @@ void __min_heap_sift_up_inline(min_heap_char *heap, size_t elem_size, size_t idx } #define min_heap_sift_up_inline(_heap, _idx, _func, _args) \ - __min_heap_sift_up_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \ - _func, _args) + __min_heap_sift_up_inline(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _idx, _func, _args) /* Floyd's approach to heapification that is O(nr). */ static __always_inline @@ -319,7 +331,8 @@ void __min_heapify_all_inline(min_heap_char *heap, size_t elem_size, } #define min_heapify_all_inline(_heap, _func, _args) \ - __min_heapify_all_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) + __min_heapify_all_inline(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _func, _args) /* Remove minimum element from the heap, O(log2(nr)). */ static __always_inline @@ -340,7 +353,8 @@ bool __min_heap_pop_inline(min_heap_char *heap, size_t elem_size, } #define min_heap_pop_inline(_heap, _func, _args) \ - __min_heap_pop_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) + __min_heap_pop_inline(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _func, _args) /* * Remove the minimum element and then push the given element. The @@ -356,8 +370,8 @@ void __min_heap_pop_push_inline(min_heap_char *heap, const void *element, size_t } #define min_heap_pop_push_inline(_heap, _element, _func, _args) \ - __min_heap_pop_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ - _func, _args) + __min_heap_pop_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \ + __minheap_obj_size(_heap), _func, _args) /* Push an element on to the heap, O(log2(nr)). */ static __always_inline @@ -382,8 +396,8 @@ bool __min_heap_push_inline(min_heap_char *heap, const void *element, size_t ele } #define min_heap_push_inline(_heap, _element, _func, _args) \ - __min_heap_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ - _func, _args) + __min_heap_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \ + __minheap_obj_size(_heap), _func, _args) /* Remove ith element from the heap, O(log2(nr)). 
*/ static __always_inline @@ -411,8 +425,8 @@ bool __min_heap_del_inline(min_heap_char *heap, size_t elem_size, size_t idx, } #define min_heap_del_inline(_heap, _idx, _func, _args) \ - __min_heap_del_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \ - _func, _args) + __min_heap_del_inline(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _idx, _func, _args) void __min_heap_init(min_heap_char *heap, void *data, int size); void *__min_heap_peek(struct min_heap_char *heap); @@ -433,25 +447,31 @@ bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx, const struct min_heap_callbacks *func, void *args); #define min_heap_init(_heap, _data, _size) \ - __min_heap_init((min_heap_char *)_heap, _data, _size) + __min_heap_init(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size) #define min_heap_peek(_heap) \ - (__minheap_cast(_heap) __min_heap_peek((min_heap_char *)_heap)) + (__minheap_cast(_heap) __min_heap_peek(container_of(&(_heap)->nr, min_heap_char, nr))) #define min_heap_full(_heap) \ - __min_heap_full((min_heap_char *)_heap) + __min_heap_full(container_of(&(_heap)->nr, min_heap_char, nr)) #define min_heap_sift_down(_heap, _pos, _func, _args) \ - __min_heap_sift_down((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), _func, _args) + __min_heap_sift_down(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_sift_up(_heap, _idx, _func, _args) \ - __min_heap_sift_up((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) + __min_heap_sift_up(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _idx, _func, _args) #define min_heapify_all(_heap, _func, _args) \ - __min_heapify_all((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) + __min_heapify_all(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_pop(_heap, _func, _args) \ - __min_heap_pop((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) + __min_heap_pop(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_pop_push(_heap, _element, _func, _args) \ - __min_heap_pop_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ - _func, _args) + __min_heap_pop_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_push(_heap, _element, _func, _args) \ - __min_heap_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args) + __min_heap_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_del(_heap, _idx, _func, _args) \ - __min_heap_del((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) + __min_heap_del(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _idx, _func, _args) #endif /* _LINUX_MIN_HEAP_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 64934e0830af..155012467b21 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -398,6 +398,12 @@ struct sched_info { /* Time spent waiting on a runqueue: */ unsigned long long run_delay; + /* Max time spent waiting on a runqueue: */ + unsigned long long max_run_delay; + + /* Min time spent waiting on a runqueue: */ + unsigned long long min_run_delay; + /* Timestamps: */ /* When did we last run on a CPU? 
*/ diff --git a/include/linux/types.h b/include/linux/types.h index 2d7b9ae8714c..1c509ce8f7f6 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -43,7 +43,7 @@ typedef unsigned long uintptr_t; typedef long intptr_t; #ifdef CONFIG_HAVE_UID16 -/* This is defined by include/asm-{arch}/posix_types.h */ +/* This is defined by arch/{arch}/include/asm/posix_types.h */ typedef __kernel_old_uid_t old_uid_t; typedef __kernel_old_gid_t old_gid_t; #endif /* CONFIG_UID16 */ diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h index b50b2eb257a0..934e20ef7f79 100644 --- a/include/uapi/linux/taskstats.h +++ b/include/uapi/linux/taskstats.h @@ -72,6 +72,8 @@ struct taskstats { */ __u64 cpu_count __attribute__((aligned(8))); __u64 cpu_delay_total; + __u64 cpu_delay_max; + __u64 cpu_delay_min; /* Following four fields atomically updated using task->delays->lock */ @@ -80,10 +82,14 @@ struct taskstats { */ __u64 blkio_count; __u64 blkio_delay_total; + __u64 blkio_delay_max; + __u64 blkio_delay_min; /* Delay waiting for page fault I/O (swap in only) */ __u64 swapin_count; __u64 swapin_delay_total; + __u64 swapin_delay_max; + __u64 swapin_delay_min; /* cpu "wall-clock" running time * On some architectures, value will adjust for cpu time stolen @@ -166,10 +172,14 @@ struct taskstats { /* Delay waiting for memory reclaim */ __u64 freepages_count; __u64 freepages_delay_total; + __u64 freepages_delay_max; + __u64 freepages_delay_min; /* Delay waiting for thrashing page */ __u64 thrashing_count; __u64 thrashing_delay_total; + __u64 thrashing_delay_max; + __u64 thrashing_delay_min; /* v10: 64-bit btime to avoid overflow */ __u64 ac_btime64; /* 64-bit begin time */ @@ -177,6 +187,8 @@ struct taskstats { /* v11: Delay waiting for memory compact */ __u64 compact_count; __u64 compact_delay_total; + __u64 compact_delay_max; + __u64 compact_delay_min; /* v12 begin */ __u32 ac_tgid; /* thread group ID */ @@ -198,10 +210,15 @@ struct taskstats { /* v13: Delay waiting for write-protect copy */ __u64 wpcopy_count; __u64 wpcopy_delay_total; + __u64 wpcopy_delay_max; + __u64 wpcopy_delay_min; /* v14: Delay waiting for IRQ/SOFTIRQ */ __u64 irq_count; __u64 irq_delay_total; + __u64 irq_delay_max; + __u64 irq_delay_min; + /* v15: add Delay max */ }; diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index 22c7f41ff642..f86ef92a6c46 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -89,7 +89,7 @@ static void __init handle_initrd(char *root_device_name) extern char *envp_init[]; int error; - pr_warn("using deprecated initrd support, will be removed in 2021.\n"); + pr_warn("using deprecated initrd support, will be removed soon.\n"); real_root_dev = new_encode_dev(ROOT_DEV); create_dev("/dev/root.old", Root_RAM0); diff --git a/kernel/capability.c b/kernel/capability.c index dac4df77e376..e089d2628c29 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -38,10 +38,8 @@ __setup("no_file_caps", file_caps_disable); static void warn_legacy_capability_use(void) { - char name[sizeof(current->comm)]; - pr_info_once("warning: `%s' uses 32-bit capabilities (legacy support in use)\n", - get_task_comm(name, current)); + current->comm); } /* @@ -62,10 +60,8 @@ static void warn_legacy_capability_use(void) static void warn_deprecated_v2(void) { - char name[sizeof(current->comm)]; - pr_info_once("warning: `%s' uses deprecated v2 capabilities in a way that may be insecure\n", - get_task_comm(name, current)); + current->comm); } /* diff --git a/kernel/delayacct.c 
b/kernel/delayacct.c index dead51de8eb5..b238eb8c6573 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -93,9 +93,9 @@ void __delayacct_tsk_init(struct task_struct *tsk) /* * Finish delay accounting for a statistic using its timestamps (@start), - * accumalator (@total) and @count + * accumulator (@total) and @count */ -static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count) +static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count, u64 *max, u64 *min) { s64 ns = local_clock() - *start; unsigned long flags; @@ -104,6 +104,10 @@ static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *cou raw_spin_lock_irqsave(lock, flags); *total += ns; (*count)++; + if (ns > *max) + *max = ns; + if (*min == 0 || ns < *min) + *min = ns; raw_spin_unlock_irqrestore(lock, flags); } } @@ -122,7 +126,9 @@ void __delayacct_blkio_end(struct task_struct *p) delayacct_end(&p->delays->lock, &p->delays->blkio_start, &p->delays->blkio_delay, - &p->delays->blkio_count); + &p->delays->blkio_count, + &p->delays->blkio_delay_max, + &p->delays->blkio_delay_min); } int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) @@ -153,10 +159,12 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) d->cpu_count += t1; + d->cpu_delay_max = tsk->sched_info.max_run_delay; + d->cpu_delay_min = tsk->sched_info.min_run_delay; tmp = (s64)d->cpu_delay_total + t2; d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp; - tmp = (s64)d->cpu_run_virtual_total + t3; + d->cpu_run_virtual_total = (tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp; @@ -164,20 +172,33 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) return 0; /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */ - raw_spin_lock_irqsave(&tsk->delays->lock, flags); + d->blkio_delay_max = tsk->delays->blkio_delay_max; + d->blkio_delay_min = tsk->delays->blkio_delay_min; tmp = d->blkio_delay_total + tsk->delays->blkio_delay; d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp; + d->swapin_delay_max = tsk->delays->swapin_delay_max; + d->swapin_delay_min = tsk->delays->swapin_delay_min; tmp = d->swapin_delay_total + tsk->delays->swapin_delay; d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp; + d->freepages_delay_max = tsk->delays->freepages_delay_max; + d->freepages_delay_min = tsk->delays->freepages_delay_min; tmp = d->freepages_delay_total + tsk->delays->freepages_delay; d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp; + d->thrashing_delay_max = tsk->delays->thrashing_delay_max; + d->thrashing_delay_min = tsk->delays->thrashing_delay_min; tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay; d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp; + d->compact_delay_max = tsk->delays->compact_delay_max; + d->compact_delay_min = tsk->delays->compact_delay_min; tmp = d->compact_delay_total + tsk->delays->compact_delay; d->compact_delay_total = (tmp < d->compact_delay_total) ? 0 : tmp; + d->wpcopy_delay_max = tsk->delays->wpcopy_delay_max; + d->wpcopy_delay_min = tsk->delays->wpcopy_delay_min; tmp = d->wpcopy_delay_total + tsk->delays->wpcopy_delay; d->wpcopy_delay_total = (tmp < d->wpcopy_delay_total) ? 0 : tmp; + d->irq_delay_max = tsk->delays->irq_delay_max; + d->irq_delay_min = tsk->delays->irq_delay_min; tmp = d->irq_delay_total + tsk->delays->irq_delay; d->irq_delay_total = (tmp < d->irq_delay_total) ? 
0 : tmp; d->blkio_count += tsk->delays->blkio_count; @@ -213,7 +234,9 @@ void __delayacct_freepages_end(void) delayacct_end(&current->delays->lock, &current->delays->freepages_start, &current->delays->freepages_delay, - &current->delays->freepages_count); + &current->delays->freepages_count, + &current->delays->freepages_delay_max, + &current->delays->freepages_delay_min); } void __delayacct_thrashing_start(bool *in_thrashing) @@ -235,7 +258,9 @@ void __delayacct_thrashing_end(bool *in_thrashing) delayacct_end(&current->delays->lock, &current->delays->thrashing_start, &current->delays->thrashing_delay, - &current->delays->thrashing_count); + &current->delays->thrashing_count, + &current->delays->thrashing_delay_max, + &current->delays->thrashing_delay_min); } void __delayacct_swapin_start(void) @@ -248,7 +273,9 @@ void __delayacct_swapin_end(void) delayacct_end(&current->delays->lock, &current->delays->swapin_start, &current->delays->swapin_delay, - &current->delays->swapin_count); + &current->delays->swapin_count, + &current->delays->swapin_delay_max, + &current->delays->swapin_delay_min); } void __delayacct_compact_start(void) @@ -261,7 +288,9 @@ void __delayacct_compact_end(void) delayacct_end(&current->delays->lock, &current->delays->compact_start, &current->delays->compact_delay, - &current->delays->compact_count); + &current->delays->compact_count, + &current->delays->compact_delay_max, + &current->delays->compact_delay_min); } void __delayacct_wpcopy_start(void) @@ -274,7 +303,9 @@ void __delayacct_wpcopy_end(void) delayacct_end(&current->delays->lock, &current->delays->wpcopy_start, &current->delays->wpcopy_delay, - &current->delays->wpcopy_count); + &current->delays->wpcopy_count, + &current->delays->wpcopy_delay_max, + &current->delays->wpcopy_delay_min); } void __delayacct_irq(struct task_struct *task, u32 delta) @@ -284,6 +315,10 @@ raw_spin_lock_irqsave(&task->delays->lock, flags); task->delays->irq_delay += delta; task->delays->irq_count++; + if (delta > task->delays->irq_delay_max) + task->delays->irq_delay_max = delta; + if (delta && (!task->delays->irq_delay_min || delta < task->delays->irq_delay_min)) + task->delays->irq_delay_min = delta; raw_spin_unlock_irqrestore(&task->delays->lock, flags); } diff --git a/kernel/fork.c b/kernel/fork.c index 9b301180fd41..19d7fe31869b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1514,12 +1514,13 @@ struct file *get_task_exe_file(struct task_struct *task) struct file *exe_file = NULL; struct mm_struct *mm; + if (task->flags & PF_KTHREAD) + return NULL; + task_lock(task); mm = task->mm; - if (mm) { - if (!(task->flags & PF_KTHREAD)) - exe_file = get_mm_exe_file(mm); - } + if (mm) + exe_file = get_mm_exe_file(mm); task_unlock(task); return exe_file; } diff --git a/kernel/futex/waitwake.c b/kernel/futex/waitwake.c index 3a10375d9521..eb86a7ade06a 100644 --- a/kernel/futex/waitwake.c +++ b/kernel/futex/waitwake.c @@ -210,13 +210,12 @@ static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) { if (oparg < 0 || oparg > 31) { - char comm[sizeof(current->comm)]; /* * kill this print and return -EINVAL when userspace * is sane again */ pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n", - get_task_comm(comm, current), oparg); + current->comm, oparg); oparg &= 31; } oparg = 1 << oparg; diff --git a/kernel/hung_task.c b/kernel/hung_task.c index c18717189f32..953169893a95 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -147,6 +147,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) print_tainted(), init_utsname()->release,
(int)strcspn(init_utsname()->version, " "), init_utsname()->version); + if (t->flags & PF_POSTCOREDUMP) + pr_err(" Blocked by coredump.\n"); pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" " disables this message.\n"); sched_show_task(t); diff --git a/kernel/kthread.c b/kernel/kthread.c index a5ac612b1609..2fd0daa6b3b6 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -1015,7 +1015,7 @@ static void kthread_insert_work(struct kthread_worker *worker, * @work: kthread_work to queue * * Queue @work to work processor @task for async execution. @task - * must have been created with kthread_worker_create(). Returns %true + * must have been created with kthread_create_worker(). Returns %true * if @work was successfully queued, %false if it was already pending. * * Reinitialize the work if it needs to be used by another worker. diff --git a/kernel/resource.c b/kernel/resource.c index b7c0e24d9398..12004452d999 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -1683,8 +1683,7 @@ void __devm_release_region(struct device *dev, struct resource *parent, { struct region_devres match_data = { parent, start, n }; - __release_region(parent, start, n); - WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match, + WARN_ON(devres_release(dev, devm_region_release, devm_region_match, &match_data)); } EXPORT_SYMBOL(__devm_release_region); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3e5a6bf587f9..109b5df48e8b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7701,9 +7701,9 @@ void sched_show_task(struct task_struct *p) if (pid_alive(p)) ppid = task_pid_nr(rcu_dereference(p->real_parent)); rcu_read_unlock(); - pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n", + pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n", free, task_pid_nr(p), task_tgid_nr(p), - ppid, read_task_thread_flags(p)); + ppid, p->flags, read_task_thread_flags(p)); print_worker_info(KERN_INFO, p); print_stop_info(KERN_INFO, p); diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index 8ee0add5a48a..693537b908a1 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -244,7 +244,10 @@ static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t) delta = rq_clock(rq) - t->sched_info.last_queued; t->sched_info.last_queued = 0; t->sched_info.run_delay += delta; - + if (delta > t->sched_info.max_run_delay) + t->sched_info.max_run_delay = delta; + if (delta && (!t->sched_info.min_run_delay || delta < t->sched_info.min_run_delay)) + t->sched_info.min_run_delay = delta; rq_sched_info_dequeue(rq, delta); } @@ -266,6 +269,10 @@ static void sched_info_arrive(struct rq *rq, struct task_struct *t) t->sched_info.run_delay += delta; t->sched_info.last_arrival = now; t->sched_info.pcount++; + if (delta > t->sched_info.max_run_delay) + t->sched_info.max_run_delay = delta; + if (delta && (!t->sched_info.min_run_delay || delta < t->sched_info.min_run_delay)) + t->sched_info.min_run_delay = delta; rq_sched_info_arrive(rq, delta); } diff --git a/kernel/ucount.c b/kernel/ucount.c index f950b5e59d63..86c5f1c0bad9 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c @@ -164,8 +164,8 @@ struct ucounts *get_ucounts(struct ucounts *ucounts) struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid) { struct hlist_head *hashent = ucounts_hashentry(ns, uid); - struct ucounts *ucounts, *new; bool wrapped; + struct ucounts *ucounts, *new = NULL; spin_lock_irq(&ucounts_lock); ucounts = find_ucounts(ns, uid, hashent); @@ 
-182,17 +182,17 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid) spin_lock_irq(&ucounts_lock); ucounts = find_ucounts(ns, uid, hashent); - if (ucounts) { - kfree(new); - } else { + if (!ucounts) { hlist_add_head(&new->node, hashent); get_user_ns(new->ns); spin_unlock_irq(&ucounts_lock); return new; } } + wrapped = !get_ucounts_or_wrap(ucounts); spin_unlock_irq(&ucounts_lock); + kfree(new); if (wrapped) { put_ucounts(ucounts); return NULL; diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 41e0f7e9fa35..177abb7d0d4e 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -190,7 +190,7 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs) * with printk_cpu_sync_get_irqsave() that we can still at least * get the message about the lockup out. */ - pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", cpu); + pr_emerg("CPU%u: Watchdog detected hard LOCKUP on cpu %u\n", this_cpu, cpu); printk_cpu_sync_get_irqsave(flags); print_modules(); diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f3d723705879..d597930d381f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2269,7 +2269,6 @@ config TEST_LIST_SORT config TEST_MIN_HEAP tristate "Min heap test" depends on DEBUG_KERNEL || m - select MIN_HEAP help Enable this to turn on min heap function tests. This test is executed only once during system boot (so affects only boot time), @@ -2457,8 +2456,22 @@ config TEST_BITMAP config TEST_UUID tristate "Test functions located in the uuid module at runtime" -config TEST_XARRAY - tristate "Test the XArray code at runtime" +config XARRAY_KUNIT + tristate "KUnit test XArray code at runtime" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Enable this option to test the XArray code at boot. + + KUnit tests run during boot and output the results to the debug log + in TAP format (http://testanything.org/). Only useful for kernel devs + running the KUnit test harness, and not intended for inclusion into a + production build. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. config TEST_MAPLE_TREE tristate "Test the Maple Tree code at runtime or module load" @@ -3161,6 +3174,21 @@ config INT_POW_TEST If unsure, say N +config INT_SQRT_KUNIT_TEST + tristate "Integer square root test" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + This option enables the KUnit test suite for the int_sqrt() function, + which performs square root calculation. The test suite checks + various scenarios, including edge cases, to ensure the accuracy and + reliability of the square root function.
+ + If unsure, say N + endif # RUNTIME_TESTING_MENU config ARCH_USE_MEMTEST diff --git a/lib/Makefile b/lib/Makefile index a8155c972f02..c0458ff841fe 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -94,7 +94,6 @@ GCOV_PROFILE_test_bitmap.o := n endif obj-$(CONFIG_TEST_UUID) += test_uuid.o -obj-$(CONFIG_TEST_XARRAY) += test_xarray.o obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o obj-$(CONFIG_TEST_PARMAN) += test_parman.o obj-$(CONFIG_TEST_KMOD) += test_kmod.o @@ -375,6 +374,7 @@ CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN) obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o +obj-$(CONFIG_XARRAY_KUNIT) += test_xarray.o obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o diff --git a/lib/fault-inject.c b/lib/fault-inject.c index 52eb6ba29698..999053fa133e 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/init.h> -#include <linux/random.h> +#include <linux/prandom.h> #include <linux/sched.h> #include <linux/stat.h> #include <linux/types.h> @@ -12,6 +12,24 @@ #include <linux/debugfs.h> #include <linux/ratelimit.h> +/* + * The should_fail() functions use prandom instead of the normal Linux RNG + * since they don't need cryptographically secure random numbers. + */ +static DEFINE_PER_CPU(struct rnd_state, fault_rnd_state); + +static u32 fault_prandom_u32_below_100(void) +{ + struct rnd_state *state; + u32 res; + + state = &get_cpu_var(fault_rnd_state); + res = prandom_u32_state(state); + put_cpu_var(fault_rnd_state); + + return res % 100; +} + /* * setup_fault_attr() is a helper function for various __setup handlers, so it * returns 0 on error, because that is what __setup handlers do.
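The fault_rnd_state hunk above is the heart of this change: each CPU keeps a private struct rnd_state, so drawing a failure-probability roll never touches the crypto-grade RNG and never contends on shared state. The same pattern, reduced to a self-contained sketch (my_rnd_state and my_should_inject() are hypothetical names, not part of this patch)::

	#include <linux/percpu.h>
	#include <linux/prandom.h>

	static DEFINE_PER_CPU(struct rnd_state, my_rnd_state);

	/* Return true with the given probability (0-100 percent). */
	static bool my_should_inject(unsigned int probability)
	{
		struct rnd_state *state;
		u32 roll;

		state = &get_cpu_var(my_rnd_state);	/* disables preemption */
		roll = prandom_u32_state(state) % 100;
		put_cpu_var(my_rnd_state);

		/* Assumes prandom_init_once(&my_rnd_state) already ran. */
		return roll < probability;
	}

Note that the hunks below seed fault_rnd_state lazily with prandom_init_once() in every entry point (setup_fault_attr(), fault_create_debugfs_attr() and fault_config_init()), so the state is initialised before the first roll is drawn.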
@@ -31,6 +49,8 @@ int setup_fault_attr(struct fault_attr *attr, char *str) return 0; } + prandom_init_once(&fault_rnd_state); + attr->probability = probability; attr->interval = interval; atomic_set(&attr->times, times); @@ -146,7 +166,7 @@ bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags) return false; } - if (attr->probability <= get_random_u32_below(100)) + if (attr->probability <= fault_prandom_u32_below_100()) return false; fail: @@ -219,6 +239,8 @@ struct dentry *fault_create_debugfs_attr(const char *name, if (IS_ERR(dir)) return dir; + prandom_init_once(&fault_rnd_state); + debugfs_create_ul("probability", mode, dir, &attr->probability); debugfs_create_ul("interval", mode, dir, &attr->interval); debugfs_create_atomic_t("times", mode, dir, &attr->times); @@ -431,6 +453,8 @@ static const struct config_item_type fault_config_type = { void fault_config_init(struct fault_config *config, const char *name) { + prandom_init_once(&fault_rnd_state); + config_group_init_type_name(&config->group, name, &fault_config_type); } EXPORT_SYMBOL_GPL(fault_config_init); diff --git a/lib/inflate.c b/lib/inflate.c index fbaf03c1748d..eab886baa1b4 100644 --- a/lib/inflate.c +++ b/lib/inflate.c @@ -1257,8 +1257,6 @@ static int INIT gunzip(void) /* Decompress */ if ((res = inflate())) { switch (res) { - case 0: - break; case 1: error("invalid compressed format (err=1)"); break; diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c index 13e15687675a..497d86e039f6 100644 --- a/lib/kunit_iov_iter.c +++ b/lib/kunit_iov_iter.c @@ -63,9 +63,6 @@ static void *__init iov_kunit_create_buffer(struct kunit *test, KUNIT_ASSERT_EQ(test, got, npages); } - for (int i = 0; i < npages; i++) - pages[i]->index = i; - buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer); diff --git a/lib/list_sort.c b/lib/list_sort.c index 8d3f623536fe..a310ecb7ccc0 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c @@ -108,6 +108,13 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head, * and list_sort is a stable sort, so it is not necessary to distinguish * the @a < @b and @a == @b cases. * + * The comparison function must adhere to specific mathematical properties + * to ensure correct and stable sorting: + * - Antisymmetry: cmp(@a, @b) must return the opposite sign of + * cmp(@b, @a). + * - Transitivity: if cmp(@a, @b) <= 0 and cmp(@b, @c) <= 0, then + * cmp(@a, @c) <= 0. + * * This is compatible with two styles of @cmp function: * - The traditional style which returns <0 / =0 / >0, or * - Returning a boolean 0/1. 
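Both this comment and the matching one added to lib/sort.c below spell out the comparator contract. A minimal conforming callback might look like this (sketch only; struct item and item_cmp() are illustrative, not part of the patch)::

	#include <linux/container_of.h>
	#include <linux/list.h>
	#include <linux/list_sort.h>

	struct item {
		struct list_head node;
		int key;
	};

	/* Three-way compare: antisymmetric and transitive by construction. */
	static int item_cmp(void *priv, const struct list_head *a,
			    const struct list_head *b)
	{
		const struct item *ia = container_of(a, struct item, node);
		const struct item *ib = container_of(b, struct item, node);

		return ia->key < ib->key ? -1 : ia->key > ib->key ? 1 : 0;
	}

	/* Usage: list_sort(NULL, &my_list, item_cmp); */

By contrast, the tempting "return ia->key - ib->key;" can overflow for large key differences and then violates the antisymmetry rule documented above.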
diff --git a/lib/math/Makefile b/lib/math/Makefile index 3ef11305f8d2..853f023ae537 100644 --- a/lib/math/Makefile +++ b/lib/math/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_INT_POW_TEST) += tests/int_pow_kunit.o obj-$(CONFIG_TEST_DIV64) += test_div64.o obj-$(CONFIG_TEST_MULDIV64) += test_mul_u64_u64_div_u64.o obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o +obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += tests/int_sqrt_kunit.o \ No newline at end of file diff --git a/lib/math/tests/Makefile b/lib/math/tests/Makefile index 6a169123320a..e1a79f093b2d 100644 --- a/lib/math/tests/Makefile +++ b/lib/math/tests/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_INT_POW_TEST) += int_pow_kunit.o +obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += int_sqrt_kunit.o diff --git a/lib/math/tests/int_sqrt_kunit.c b/lib/math/tests/int_sqrt_kunit.c new file mode 100644 index 000000000000..1798e1312eb7 --- /dev/null +++ b/lib/math/tests/int_sqrt_kunit.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <kunit/test.h> +#include <linux/limits.h> +#include <linux/math.h> +#include <linux/module.h> +#include <linux/string.h> + +struct test_case_params { + unsigned long x; + unsigned long expected_result; + const char *name; +}; + +static const struct test_case_params params[] = { + { 0, 0, "edge case: square root of 0" }, + { 1, 1, "perfect square: square root of 1" }, + { 2, 1, "non-perfect square: square root of 2" }, + { 3, 1, "non-perfect square: square root of 3" }, + { 4, 2, "perfect square: square root of 4" }, + { 5, 2, "non-perfect square: square root of 5" }, + { 6, 2, "non-perfect square: square root of 6" }, + { 7, 2, "non-perfect square: square root of 7" }, + { 8, 2, "non-perfect square: square root of 8" }, + { 9, 3, "perfect square: square root of 9" }, + { 15, 3, "non-perfect square: square root of 15 (N-1 from 16)" }, + { 16, 4, "perfect square: square root of 16" }, + { 17, 4, "non-perfect square: square root of 17 (N+1 from 16)" }, + { 80, 8, "non-perfect square: square root of 80 (N-1 from 81)" }, + { 81, 9, "perfect square: square root of 81" }, + { 82, 9, "non-perfect square: square root of 82 (N+1 from 81)" }, + { 255, 15, "non-perfect square: square root of 255 (N-1 from 256)" }, + { 256, 16, "perfect square: square root of 256" }, + { 257, 16, "non-perfect square: square root of 257 (N+1 from 256)" }, + { 2147483648, 46340, "large input: square root of 2147483648" }, + { 4294967295, 65535, "edge case: ULONG_MAX for 32-bit" }, +}; + +static void get_desc(const struct test_case_params *tc, char *desc) +{ + strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE); +} + +KUNIT_ARRAY_PARAM(int_sqrt, params, get_desc); + +static void int_sqrt_test(struct kunit *test) +{ + const struct test_case_params *tc = (const struct test_case_params *)test->param_value; + + KUNIT_EXPECT_EQ(test, tc->expected_result, int_sqrt(tc->x)); +} + +static struct kunit_case math_int_sqrt_test_cases[] = { + KUNIT_CASE_PARAM(int_sqrt_test, int_sqrt_gen_params), + {} +}; + +static struct kunit_suite int_sqrt_test_suite = { + .name = "math-int_sqrt", + .test_cases = math_int_sqrt_test_cases, +}; + +kunit_test_suites(&int_sqrt_test_suite); + +MODULE_DESCRIPTION("math.int_sqrt KUnit test suite"); +MODULE_LICENSE("GPL"); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 6c902639728b..0682c9a8de82 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -665,7 +665,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow); * structure outside the hash table.
* * This function may be called from any process context, including - * non-preemptable context, but cannot be called from softirq or + * non-preemptible context, but cannot be called from softirq or * hardirq context. * * You must call rhashtable_walk_exit after this function returns. diff --git a/lib/sort.c b/lib/sort.c index 048b7a6ef967..8e73dc55476b 100644 --- a/lib/sort.c +++ b/lib/sort.c @@ -200,6 +200,13 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size) * copy (e.g. fix up pointers or auxiliary data), but the built-in swap * avoids a slow retpoline and so is significantly faster. * + * The comparison function must adhere to specific mathematical + * properties to ensure correct and stable sorting: + * - Antisymmetry: cmp_func(a, b) must return the opposite sign of + * cmp_func(b, a). + * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then + * cmp_func(a, c) <= 0. + * * Sorting time is O(n log n) both on average and worst-case. While * quicksort is slightly faster on average, it suffers from exploitable * O(n*n) worst-case behavior and extra memory requirements that make diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c index e6fbb798558b..a9c4a74d3898 100644 --- a/lib/test_min_heap.c +++ b/lib/test_min_heap.c @@ -32,7 +32,7 @@ static __init int pop_verify_heap(bool min_heap, int last; last = values[0]; - min_heap_pop(heap, funcs, NULL); + min_heap_pop_inline(heap, funcs, NULL); while (heap->nr > 0) { if (min_heap) { if (last > values[0]) { @@ -48,7 +48,7 @@ static __init int pop_verify_heap(bool min_heap, } } last = values[0]; - min_heap_pop(heap, funcs, NULL); + min_heap_pop_inline(heap, funcs, NULL); } return err; } @@ -69,7 +69,7 @@ static __init int test_heapify_all(bool min_heap) int i, err; /* Test with known set of values. */ - min_heapify_all(&heap, &funcs, NULL); + min_heapify_all_inline(&heap, &funcs, NULL); err = pop_verify_heap(min_heap, &heap, &funcs); @@ -78,7 +78,7 @@ static __init int test_heapify_all(bool min_heap) for (i = 0; i < heap.nr; i++) values[i] = get_random_u32(); - min_heapify_all(&heap, &funcs, NULL); + min_heapify_all_inline(&heap, &funcs, NULL); err += pop_verify_heap(min_heap, &heap, &funcs); return err; @@ -102,14 +102,14 @@ static __init int test_heap_push(bool min_heap) /* Test with known set of values copied from data. */ for (i = 0; i < ARRAY_SIZE(data); i++) - min_heap_push(&heap, &data[i], &funcs, NULL); + min_heap_push_inline(&heap, &data[i], &funcs, NULL); err = pop_verify_heap(min_heap, &heap, &funcs); /* Test with randomly generated values. */ while (heap.nr < heap.size) { temp = get_random_u32(); - min_heap_push(&heap, &temp, &funcs, NULL); + min_heap_push_inline(&heap, &temp, &funcs, NULL); } err += pop_verify_heap(min_heap, &heap, &funcs); @@ -135,22 +135,22 @@ static __init int test_heap_pop_push(bool min_heap) /* Fill values with data to pop and replace. */ temp = min_heap ? 0x80000000 : 0x7FFFFFFF; for (i = 0; i < ARRAY_SIZE(data); i++) - min_heap_push(&heap, &temp, &funcs, NULL); + min_heap_push_inline(&heap, &temp, &funcs, NULL); /* Test with known set of values copied from data. */ for (i = 0; i < ARRAY_SIZE(data); i++) - min_heap_pop_push(&heap, &data[i], &funcs, NULL); + min_heap_pop_push_inline(&heap, &data[i], &funcs, NULL); err = pop_verify_heap(min_heap, &heap, &funcs); heap.nr = 0; for (i = 0; i < ARRAY_SIZE(data); i++) - min_heap_push(&heap, &temp, &funcs, NULL); + min_heap_push_inline(&heap, &temp, &funcs, NULL); /* Test with randomly generated values. 
*/ for (i = 0; i < ARRAY_SIZE(data); i++) { temp = get_random_u32(); - min_heap_pop_push(&heap, &temp, &funcs, NULL); + min_heap_pop_push_inline(&heap, &temp, &funcs, NULL); } err += pop_verify_heap(min_heap, &heap, &funcs); @@ -163,7 +163,7 @@ static __init int test_heap_del(bool min_heap) -3, -1, -2, -4, 0x8000000, 0x7FFFFFF }; struct min_heap_test heap; - min_heap_init(&heap, values, ARRAY_SIZE(values)); + min_heap_init_inline(&heap, values, ARRAY_SIZE(values)); heap.nr = ARRAY_SIZE(values); struct min_heap_callbacks funcs = { .less = min_heap ? less_than : greater_than, @@ -172,9 +172,9 @@ static __init int test_heap_del(bool min_heap) int i, err; /* Test with known set of values. */ - min_heapify_all(&heap, &funcs, NULL); + min_heapify_all_inline(&heap, &funcs, NULL); for (i = 0; i < ARRAY_SIZE(values) / 2; i++) - min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL); + min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL); err = pop_verify_heap(min_heap, &heap, &funcs); @@ -182,10 +182,10 @@ heap.nr = ARRAY_SIZE(values); for (i = 0; i < heap.nr; i++) values[i] = get_random_u32(); - min_heapify_all(&heap, &funcs, NULL); + min_heapify_all_inline(&heap, &funcs, NULL); for (i = 0; i < ARRAY_SIZE(values) / 2; i++) - min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL); + min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL); err += pop_verify_heap(min_heap, &heap, &funcs); return err; diff --git a/lib/test_xarray.c b/lib/test_xarray.c index d5c5cbba33ed..b6cac747ec46 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -6,11 +6,10 @@ * Author: Matthew Wilcox <willy@infradead.org> */ -#include <linux/xarray.h> -#include <linux/module.h> +#include <kunit/test.h> -static unsigned int tests_run; -static unsigned int tests_passed; +#include <linux/module.h> +#include <linux/xarray.h> static const unsigned int order_limit = IS_ENABLED(CONFIG_XARRAY_MULTI) ?
BITS_PER_LONG : 1; @@ -20,15 +19,12 @@ static const unsigned int order_limit = void xa_dump(const struct xarray *xa) { } # endif #undef XA_BUG_ON -#define XA_BUG_ON(xa, x) do { \ - tests_run++; \ - if (x) { \ - printk("BUG at %s:%d\n", __func__, __LINE__); \ - xa_dump(xa); \ - dump_stack(); \ - } else { \ - tests_passed++; \ - } \ +#define XA_BUG_ON(xa, x) do { \ + if (x) { \ + KUNIT_FAIL(test, #x); \ + xa_dump(xa); \ + dump_stack(); \ + } \ } while (0) #endif @@ -42,13 +38,13 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) return xa_store(xa, index, xa_mk_index(index), gfp); } -static void xa_insert_index(struct xarray *xa, unsigned long index) +static void xa_insert_index(struct kunit *test, struct xarray *xa, unsigned long index) { XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index), GFP_KERNEL) != 0); } -static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) +static void xa_alloc_index(struct kunit *test, struct xarray *xa, unsigned long index, gfp_t gfp) { u32 id; @@ -57,7 +53,7 @@ static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) XA_BUG_ON(xa, id != index); } -static void xa_erase_index(struct xarray *xa, unsigned long index) +static void xa_erase_index(struct kunit *test, struct xarray *xa, unsigned long index) { XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index)); XA_BUG_ON(xa, xa_load(xa, index) != NULL); @@ -83,8 +79,15 @@ static void *xa_store_order(struct xarray *xa, unsigned long index, return curr; } -static noinline void check_xa_err(struct xarray *xa) +static inline struct xarray *xa_param(struct kunit *test) { + return *(struct xarray **)test->param_value; +} + +static noinline void check_xa_err(struct kunit *test) +{ + struct xarray *xa = xa_param(test); + XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0); XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0); #ifndef __KERNEL__ @@ -99,8 +102,10 @@ static noinline void check_xa_err(struct xarray *xa) // XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL); } -static noinline void check_xas_retry(struct xarray *xa) +static noinline void check_xas_retry(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); void *entry; @@ -109,7 +114,7 @@ static noinline void check_xas_retry(struct xarray *xa) rcu_read_lock(); XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0)); - xa_erase_index(xa, 1); + xa_erase_index(test, xa, 1); XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas))); XA_BUG_ON(xa, xas_retry(&xas, NULL)); XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0))); @@ -140,12 +145,14 @@ static noinline void check_xas_retry(struct xarray *xa) } xas_unlock(&xas); - xa_erase_index(xa, 0); - xa_erase_index(xa, 1); + xa_erase_index(test, xa, 0); + xa_erase_index(test, xa, 1); } -static noinline void check_xa_load(struct xarray *xa) +static noinline void check_xa_load(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned long i, j; for (i = 0; i < 1024; i++) { @@ -167,13 +174,15 @@ static noinline void check_xa_load(struct xarray *xa) else XA_BUG_ON(xa, entry); } - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); } XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) +static noinline void check_xa_mark_1(struct kunit *test, unsigned long index) { + struct xarray *xa = xa_param(test); + unsigned int order; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
8 : 1; @@ -193,7 +202,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1)); /* Storing NULL clears marks, and they can't be set again */ - xa_erase_index(xa, index); + xa_erase_index(test, xa, index); XA_BUG_ON(xa, !xa_empty(xa)); XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); xa_set_mark(xa, index, XA_MARK_0); @@ -244,15 +253,17 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1)); XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2)); - xa_erase_index(xa, index); - xa_erase_index(xa, next); + xa_erase_index(test, xa, index); + xa_erase_index(test, xa, next); XA_BUG_ON(xa, !xa_empty(xa)); } XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_xa_mark_2(struct xarray *xa) +static noinline void check_xa_mark_2(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); unsigned long index; unsigned int count = 0; @@ -289,9 +300,11 @@ static noinline void check_xa_mark_2(struct xarray *xa) xa_destroy(xa); } -static noinline void check_xa_mark_3(struct xarray *xa) +static noinline void check_xa_mark_3(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0x41); void *entry; int count = 0; @@ -310,19 +323,21 @@ static noinline void check_xa_mark_3(struct xarray *xa) #endif } -static noinline void check_xa_mark(struct xarray *xa) +static noinline void check_xa_mark(struct kunit *test) { unsigned long index; for (index = 0; index < 16384; index += 4) - check_xa_mark_1(xa, index); + check_xa_mark_1(test, index); - check_xa_mark_2(xa); - check_xa_mark_3(xa); + check_xa_mark_2(test); + check_xa_mark_3(test); } -static noinline void check_xa_shrink(struct xarray *xa) +static noinline void check_xa_shrink(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 1); struct xa_node *node; unsigned int order; @@ -347,7 +362,7 @@ static noinline void check_xa_shrink(struct xarray *xa) XA_BUG_ON(xa, xas_load(&xas) != NULL); xas_unlock(&xas); XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); - xa_erase_index(xa, 0); + xa_erase_index(test, xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); for (order = 0; order < max_order; order++) { @@ -364,45 +379,49 @@ static noinline void check_xa_shrink(struct xarray *xa) XA_BUG_ON(xa, xa_head(xa) == node); rcu_read_unlock(); XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); - xa_erase_index(xa, ULONG_MAX); + xa_erase_index(test, xa, ULONG_MAX); XA_BUG_ON(xa, xa->xa_head != node); - xa_erase_index(xa, 0); + xa_erase_index(test, xa, 0); } } -static noinline void check_insert(struct xarray *xa) +static noinline void check_insert(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned long i; for (i = 0; i < 1024; i++) { - xa_insert_index(xa, i); + xa_insert_index(test, xa, i); XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL); XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL); - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); } for (i = 10; i < BITS_PER_LONG; i++) { - xa_insert_index(xa, 1UL << i); + xa_insert_index(test, xa, 1UL << i); XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL); XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL); - xa_erase_index(xa, 1UL << i); + xa_erase_index(test, xa, 1UL << i); - xa_insert_index(xa, (1UL << i) - 1); + xa_insert_index(test, xa, (1UL << i) - 1); XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL); XA_BUG_ON(xa, xa_load(xa, 1UL << i) != 
NULL); - xa_erase_index(xa, (1UL << i) - 1); + xa_erase_index(test, xa, (1UL << i) - 1); } - xa_insert_index(xa, ~0UL); + xa_insert_index(test, xa, ~0UL); XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL); XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL); - xa_erase_index(xa, ~0UL); + xa_erase_index(test, xa, ~0UL); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_cmpxchg(struct xarray *xa) +static noinline void check_cmpxchg(struct kunit *test) { + struct xarray *xa = xa_param(test); + void *FIVE = xa_mk_value(5); void *SIX = xa_mk_value(6); void *LOTS = xa_mk_value(12345678); @@ -418,14 +437,16 @@ static noinline void check_cmpxchg(struct xarray *xa) XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY); XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE); XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY); - xa_erase_index(xa, 12345678); - xa_erase_index(xa, 5); + xa_erase_index(test, xa, 12345678); + xa_erase_index(test, xa, 5); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_cmpxchg_order(struct xarray *xa) +static noinline void check_cmpxchg_order(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI + struct xarray *xa = xa_param(test); + void *FIVE = xa_mk_value(5); unsigned int i, order = 3; @@ -476,8 +497,10 @@ static noinline void check_cmpxchg_order(struct xarray *xa) #endif } -static noinline void check_reserve(struct xarray *xa) +static noinline void check_reserve(struct kunit *test) { + struct xarray *xa = xa_param(test); + void *entry; unsigned long index; int count; @@ -494,7 +517,7 @@ static noinline void check_reserve(struct xarray *xa) XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL); xa_release(xa, 12345678); - xa_erase_index(xa, 12345678); + xa_erase_index(test, xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* cmpxchg sees a reserved entry as ZERO */ @@ -502,7 +525,7 @@ static noinline void check_reserve(struct xarray *xa) XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY, xa_mk_value(12345678), GFP_NOWAIT) != NULL); xa_release(xa, 12345678); - xa_erase_index(xa, 12345678); + xa_erase_index(test, xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* xa_insert treats it as busy */ @@ -542,8 +565,10 @@ static noinline void check_reserve(struct xarray *xa) xa_destroy(xa); } -static noinline void check_xas_erase(struct xarray *xa) +static noinline void check_xas_erase(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); void *entry; unsigned long i, j; @@ -581,9 +606,11 @@ static noinline void check_xas_erase(struct xarray *xa) } #ifdef CONFIG_XARRAY_MULTI -static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, +static noinline void check_multi_store_1(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, index); unsigned long min = index & ~((1UL << order) - 1); unsigned long max = min + (1UL << order); @@ -602,13 +629,15 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, XA_BUG_ON(xa, xa_load(xa, max) != NULL); XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); - xa_erase_index(xa, min); + xa_erase_index(test, xa, min); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, +static noinline void check_multi_store_2(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, index); xa_store_order(xa, index, 
order, xa_mk_value(0), GFP_KERNEL); @@ -620,9 +649,11 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, +static noinline void check_multi_store_3(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); void *entry; int n = 0; @@ -647,9 +678,11 @@ static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, } #endif -static noinline void check_multi_store(struct xarray *xa) +static noinline void check_multi_store(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI + struct xarray *xa = xa_param(test); + unsigned long i, j, k; unsigned int max_order = (sizeof(long) == 4) ? 30 : 60; @@ -714,26 +747,28 @@ static noinline void check_multi_store(struct xarray *xa) } for (i = 0; i < 20; i++) { - check_multi_store_1(xa, 200, i); - check_multi_store_1(xa, 0, i); - check_multi_store_1(xa, (1UL << i) + 1, i); + check_multi_store_1(test, 200, i); + check_multi_store_1(test, 0, i); + check_multi_store_1(test, (1UL << i) + 1, i); } - check_multi_store_2(xa, 4095, 9); + check_multi_store_2(test, 4095, 9); for (i = 1; i < 20; i++) { - check_multi_store_3(xa, 0, i); - check_multi_store_3(xa, 1UL << i, i); + check_multi_store_3(test, 0, i); + check_multi_store_3(test, 1UL << i, i); } #endif } #ifdef CONFIG_XARRAY_MULTI /* mimics page cache __filemap_add_folio() */ -static noinline void check_xa_multi_store_adv_add(struct xarray *xa, +static noinline void check_xa_multi_store_adv_add(struct kunit *test, unsigned long index, unsigned int order, void *p) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, index); unsigned int nrpages = 1UL << order; @@ -761,10 +796,12 @@ static noinline void check_xa_multi_store_adv_add(struct xarray *xa, } /* mimics page_cache_delete() */ -static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa, +static noinline void check_xa_multi_store_adv_del_entry(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, index); xas_set_order(&xas, index, order); @@ -772,12 +809,14 @@ static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa, xas_init_marks(&xas); } -static noinline void check_xa_multi_store_adv_delete(struct xarray *xa, +static noinline void check_xa_multi_store_adv_delete(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + xa_lock_irq(xa); - check_xa_multi_store_adv_del_entry(xa, index, order); + check_xa_multi_store_adv_del_entry(test, index, order); xa_unlock_irq(xa); } @@ -814,10 +853,12 @@ static unsigned long some_val = 0xdeadbeef; static unsigned long some_val_2 = 0xdeaddead; /* mimics the page cache usage */ -static noinline void check_xa_multi_store_adv(struct xarray *xa, +static noinline void check_xa_multi_store_adv(struct kunit *test, unsigned long pos, unsigned int order) { + struct xarray *xa = xa_param(test); + unsigned int nrpages = 1UL << order; unsigned long index, base, next_index, next_next_index; unsigned int i; @@ -827,7 +868,7 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, next_index = round_down(base + nrpages, nrpages); next_next_index = round_down(next_index + nrpages, nrpages); - check_xa_multi_store_adv_add(xa, base, order, &some_val); + check_xa_multi_store_adv_add(test, base, order, &some_val); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, 
test_get_entry(xa, base + i) != &some_val); @@ -835,20 +876,20 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL); /* Use order 0 for the next item */ - check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2); + check_xa_multi_store_adv_add(test, next_index, 0, &some_val_2); XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2); /* Remove the next item */ - check_xa_multi_store_adv_delete(xa, next_index, 0); + check_xa_multi_store_adv_delete(test, next_index, 0); /* Now use order for a new pointer */ - check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2); + check_xa_multi_store_adv_add(test, next_index, order, &some_val_2); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != &some_val_2); - check_xa_multi_store_adv_delete(xa, next_index, order); - check_xa_multi_store_adv_delete(xa, base, order); + check_xa_multi_store_adv_delete(test, next_index, order); + check_xa_multi_store_adv_delete(test, base, order); XA_BUG_ON(xa, !xa_empty(xa)); /* starting fresh again */ @@ -856,7 +897,7 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, /* let's test some holes now */ /* hole at base and next_next */ - check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2); + check_xa_multi_store_adv_add(test, next_index, order, &some_val_2); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); @@ -867,12 +908,12 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL); - check_xa_multi_store_adv_delete(xa, next_index, order); + check_xa_multi_store_adv_delete(test, next_index, order); XA_BUG_ON(xa, !xa_empty(xa)); /* hole at base and next */ - check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2); + check_xa_multi_store_adv_add(test, next_next_index, order, &some_val_2); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); @@ -883,12 +924,12 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2); - check_xa_multi_store_adv_delete(xa, next_next_index, order); + check_xa_multi_store_adv_delete(test, next_next_index, order); XA_BUG_ON(xa, !xa_empty(xa)); } #endif -static noinline void check_multi_store_advanced(struct xarray *xa) +static noinline void check_multi_store_advanced(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
20 : 1; @@ -900,59 +941,59 @@ static noinline void check_multi_store_advanced(struct xarray *xa) */ for (pos = 7; pos < end; pos = (pos * pos) + 564) { for (i = 0; i < max_order; i++) { - check_xa_multi_store_adv(xa, pos, i); - check_xa_multi_store_adv(xa, pos + 157, i); + check_xa_multi_store_adv(test, pos, i); + check_xa_multi_store_adv(test, pos + 157, i); } } #endif } -static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) +static noinline void check_xa_alloc_1(struct kunit *test, struct xarray *xa, unsigned int base) { int i; u32 id; XA_BUG_ON(xa, !xa_empty(xa)); /* An empty array should assign %base to the first alloc */ - xa_alloc_index(xa, base, GFP_KERNEL); + xa_alloc_index(test, xa, base, GFP_KERNEL); /* Erasing it should make the array empty again */ - xa_erase_index(xa, base); + xa_erase_index(test, xa, base); XA_BUG_ON(xa, !xa_empty(xa)); /* And it should assign %base again */ - xa_alloc_index(xa, base, GFP_KERNEL); + xa_alloc_index(test, xa, base, GFP_KERNEL); /* Allocating and then erasing a lot should not lose base */ for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++) - xa_alloc_index(xa, i, GFP_KERNEL); + xa_alloc_index(test, xa, i, GFP_KERNEL); for (i = base; i < 2 * XA_CHUNK_SIZE; i++) - xa_erase_index(xa, i); - xa_alloc_index(xa, base, GFP_KERNEL); + xa_erase_index(test, xa, i); + xa_alloc_index(test, xa, base, GFP_KERNEL); /* Destroying the array should do the same as erasing */ xa_destroy(xa); /* And it should assign %base again */ - xa_alloc_index(xa, base, GFP_KERNEL); + xa_alloc_index(test, xa, base, GFP_KERNEL); /* The next assigned ID should be base+1 */ - xa_alloc_index(xa, base + 1, GFP_KERNEL); - xa_erase_index(xa, base + 1); + xa_alloc_index(test, xa, base + 1, GFP_KERNEL); + xa_erase_index(test, xa, base + 1); /* Storing a value should mark it used */ xa_store_index(xa, base + 1, GFP_KERNEL); - xa_alloc_index(xa, base + 2, GFP_KERNEL); + xa_alloc_index(test, xa, base + 2, GFP_KERNEL); /* If we then erase base, it should be free */ - xa_erase_index(xa, base); - xa_alloc_index(xa, base, GFP_KERNEL); + xa_erase_index(test, xa, base); + xa_alloc_index(test, xa, base, GFP_KERNEL); - xa_erase_index(xa, base + 1); - xa_erase_index(xa, base + 2); + xa_erase_index(test, xa, base + 1); + xa_erase_index(test, xa, base + 2); for (i = 1; i < 5000; i++) { - xa_alloc_index(xa, base + i, GFP_KERNEL); + xa_alloc_index(test, xa, base + i, GFP_KERNEL); } xa_destroy(xa); @@ -975,14 +1016,14 @@ static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), GFP_KERNEL) != -EBUSY); - XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0); + XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != NULL); XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), GFP_KERNEL) != -EBUSY); - xa_erase_index(xa, 3); + xa_erase_index(test, xa, 3); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) +static noinline void check_xa_alloc_2(struct kunit *test, struct xarray *xa, unsigned int base) { unsigned int i, id; unsigned long index; @@ -1018,7 +1059,7 @@ static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) XA_BUG_ON(xa, id != 5); xa_for_each(xa, index, entry) { - xa_erase_index(xa, index); + xa_erase_index(test, xa, index); } for (i = base; i < base + 9; i++) { @@ -1033,7 +1074,7 @@ static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) xa_destroy(xa); } -static noinline void 
check_xa_alloc_3(struct xarray *xa, unsigned int base) +static noinline void check_xa_alloc_3(struct kunit *test, struct xarray *xa, unsigned int base) { struct xa_limit limit = XA_LIMIT(1, 0x3fff); u32 next = 0; @@ -1049,8 +1090,8 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit, &next, GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 0x3ffd); - xa_erase_index(xa, 0x3ffd); - xa_erase_index(xa, 1); + xa_erase_index(test, xa, 0x3ffd); + xa_erase_index(test, xa, 1); XA_BUG_ON(xa, !xa_empty(xa)); for (i = 0x3ffe; i < 0x4003; i++) { @@ -1065,8 +1106,8 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) /* Check wrap-around is handled correctly */ if (base != 0) - xa_erase_index(xa, base); - xa_erase_index(xa, base + 1); + xa_erase_index(test, xa, base); + xa_erase_index(test, xa, base + 1); next = UINT_MAX; XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX), xa_limit_32b, &next, GFP_KERNEL) != 0); @@ -1079,7 +1120,7 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) XA_BUG_ON(xa, id != base + 1); xa_for_each(xa, index, entry) - xa_erase_index(xa, index); + xa_erase_index(test, xa, index); XA_BUG_ON(xa, !xa_empty(xa)); } @@ -1087,19 +1128,21 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) static DEFINE_XARRAY_ALLOC(xa0); static DEFINE_XARRAY_ALLOC1(xa1); -static noinline void check_xa_alloc(void) +static noinline void check_xa_alloc(struct kunit *test) { - check_xa_alloc_1(&xa0, 0); - check_xa_alloc_1(&xa1, 1); - check_xa_alloc_2(&xa0, 0); - check_xa_alloc_2(&xa1, 1); - check_xa_alloc_3(&xa0, 0); - check_xa_alloc_3(&xa1, 1); + check_xa_alloc_1(test, &xa0, 0); + check_xa_alloc_1(test, &xa1, 1); + check_xa_alloc_2(test, &xa0, 0); + check_xa_alloc_2(test, &xa1, 1); + check_xa_alloc_3(test, &xa0, 0); + check_xa_alloc_3(test, &xa1, 1); } -static noinline void __check_store_iter(struct xarray *xa, unsigned long start, +static noinline void __check_store_iter(struct kunit *test, unsigned long start, unsigned int order, unsigned int present) { + struct xarray *xa = xa_param(test); + XA_STATE_ORDER(xas, xa, start, order); void *entry; unsigned int count = 0; @@ -1123,50 +1166,54 @@ retry: XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start)); XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != xa_mk_index(start)); - xa_erase_index(xa, start); + xa_erase_index(test, xa, start); } -static noinline void check_store_iter(struct xarray *xa) +static noinline void check_store_iter(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned int i, j; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
20 : 1; for (i = 0; i < max_order; i++) { unsigned int min = 1 << i; unsigned int max = (2 << i) - 1; - __check_store_iter(xa, 0, i, 0); + __check_store_iter(test, 0, i, 0); XA_BUG_ON(xa, !xa_empty(xa)); - __check_store_iter(xa, min, i, 0); + __check_store_iter(test, min, i, 0); XA_BUG_ON(xa, !xa_empty(xa)); xa_store_index(xa, min, GFP_KERNEL); - __check_store_iter(xa, min, i, 1); + __check_store_iter(test, min, i, 1); XA_BUG_ON(xa, !xa_empty(xa)); xa_store_index(xa, max, GFP_KERNEL); - __check_store_iter(xa, min, i, 1); + __check_store_iter(test, min, i, 1); XA_BUG_ON(xa, !xa_empty(xa)); for (j = 0; j < min; j++) xa_store_index(xa, j, GFP_KERNEL); - __check_store_iter(xa, 0, i, min); + __check_store_iter(test, 0, i, min); XA_BUG_ON(xa, !xa_empty(xa)); for (j = 0; j < min; j++) xa_store_index(xa, min + j, GFP_KERNEL); - __check_store_iter(xa, min, i, min); + __check_store_iter(test, min, i, min); XA_BUG_ON(xa, !xa_empty(xa)); } #ifdef CONFIG_XARRAY_MULTI xa_store_index(xa, 63, GFP_KERNEL); xa_store_index(xa, 65, GFP_KERNEL); - __check_store_iter(xa, 64, 2, 1); - xa_erase_index(xa, 63); + __check_store_iter(test, 64, 2, 1); + xa_erase_index(test, xa, 63); #endif XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_multi_find_1(struct xarray *xa, unsigned order) +static noinline void check_multi_find_1(struct kunit *test, unsigned int order) { #ifdef CONFIG_XARRAY_MULTI + struct xarray *xa = xa_param(test); + unsigned long multi = 3 << order; unsigned long next = 4 << order; unsigned long index; @@ -1189,15 +1236,17 @@ static noinline void check_multi_find_1(struct xarray *xa, unsigned order) XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL); XA_BUG_ON(xa, index != next); - xa_erase_index(xa, multi); - xa_erase_index(xa, next); - xa_erase_index(xa, next + 1); + xa_erase_index(test, xa, multi); + xa_erase_index(test, xa, next); + xa_erase_index(test, xa, next + 1); XA_BUG_ON(xa, !xa_empty(xa)); #endif } -static noinline void check_multi_find_2(struct xarray *xa) +static noinline void check_multi_find_2(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
10 : 1; unsigned int i, j; void *entry; @@ -1211,17 +1260,19 @@ static noinline void check_multi_find_2(struct xarray *xa) GFP_KERNEL); rcu_read_lock(); xas_for_each(&xas, entry, ULONG_MAX) { - xa_erase_index(xa, index); + xa_erase_index(test, xa, index); } rcu_read_unlock(); - xa_erase_index(xa, index - 1); + xa_erase_index(test, xa, index - 1); XA_BUG_ON(xa, !xa_empty(xa)); } } } -static noinline void check_multi_find_3(struct xarray *xa) +static noinline void check_multi_find_3(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned int order; for (order = 5; order < order_limit; order++) { @@ -1230,12 +1281,14 @@ static noinline void check_multi_find_3(struct xarray *xa) XA_BUG_ON(xa, !xa_empty(xa)); xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL); XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)); - xa_erase_index(xa, 0); + xa_erase_index(test, xa, 0); } } -static noinline void check_find_1(struct xarray *xa) +static noinline void check_find_1(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned long i, j, k; XA_BUG_ON(xa, !xa_empty(xa)); @@ -1272,18 +1325,20 @@ static noinline void check_find_1(struct xarray *xa) else XA_BUG_ON(xa, entry != NULL); } - xa_erase_index(xa, j); + xa_erase_index(test, xa, j); XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0)); XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); } - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0)); } XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_find_2(struct xarray *xa) +static noinline void check_find_2(struct kunit *test) { + struct xarray *xa = xa_param(test); + void *entry; unsigned long i, j, index; @@ -1303,8 +1358,10 @@ static noinline void check_find_2(struct xarray *xa) xa_destroy(xa); } -static noinline void check_find_3(struct xarray *xa) +static noinline void check_find_3(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); unsigned long i, j, k; void *entry; @@ -1328,8 +1385,10 @@ static noinline void check_find_3(struct xarray *xa) xa_destroy(xa); } -static noinline void check_find_4(struct xarray *xa) +static noinline void check_find_4(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned long index = 0; void *entry; @@ -1341,22 +1400,22 @@ static noinline void check_find_4(struct xarray *xa) entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT); XA_BUG_ON(xa, entry); - xa_erase_index(xa, ULONG_MAX); + xa_erase_index(test, xa, ULONG_MAX); } -static noinline void check_find(struct xarray *xa) +static noinline void check_find(struct kunit *test) { unsigned i; - check_find_1(xa); - check_find_2(xa); - check_find_3(xa); - check_find_4(xa); + check_find_1(test); + check_find_2(test); + check_find_3(test); + check_find_4(test); for (i = 2; i < 10; i++) - check_multi_find_1(xa, i); - check_multi_find_2(xa); - check_multi_find_3(xa); + check_multi_find_1(test, i); + check_multi_find_2(test); + check_multi_find_3(test); } /* See find_swap_entry() in mm/shmem.c */ @@ -1382,8 +1441,10 @@ static noinline unsigned long xa_find_entry(struct xarray *xa, void *item) return entry ? 
xas.xa_index : -1; } -static noinline void check_find_entry(struct xarray *xa) +static noinline void check_find_entry(struct kunit *test) { + struct xarray *xa = xa_param(test); + #ifdef CONFIG_XARRAY_MULTI unsigned int order; unsigned long offset, index; @@ -1410,12 +1471,14 @@ static noinline void check_find_entry(struct xarray *xa) xa_store_index(xa, ULONG_MAX, GFP_KERNEL); XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1); - xa_erase_index(xa, ULONG_MAX); + xa_erase_index(test, xa, ULONG_MAX); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_pause(struct xarray *xa) +static noinline void check_pause(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); void *entry; unsigned int order; @@ -1450,8 +1513,10 @@ static noinline void check_pause(struct xarray *xa) xa_destroy(xa); } -static noinline void check_move_tiny(struct xarray *xa) +static noinline void check_move_tiny(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); @@ -1468,12 +1533,14 @@ static noinline void check_move_tiny(struct xarray *xa) XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0)); XA_BUG_ON(xa, xas_prev(&xas) != NULL); rcu_read_unlock(); - xa_erase_index(xa, 0); + xa_erase_index(test, xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_move_max(struct xarray *xa) +static noinline void check_move_max(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); xa_store_index(xa, ULONG_MAX, GFP_KERNEL); @@ -1489,12 +1556,14 @@ static noinline void check_move_max(struct xarray *xa) XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL); rcu_read_unlock(); - xa_erase_index(xa, ULONG_MAX); + xa_erase_index(test, xa, ULONG_MAX); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_move_small(struct xarray *xa, unsigned long idx) +static noinline void check_move_small(struct kunit *test, unsigned long idx) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); unsigned long i; @@ -1536,13 +1605,15 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx) XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); rcu_read_unlock(); - xa_erase_index(xa, 0); - xa_erase_index(xa, idx); + xa_erase_index(test, xa, 0); + xa_erase_index(test, xa, idx); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_move(struct xarray *xa) +static noinline void check_move(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, (1 << 16) - 1); unsigned long i; @@ -1569,7 +1640,7 @@ static noinline void check_move(struct xarray *xa) rcu_read_unlock(); for (i = (1 << 8); i < (1 << 15); i++) - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); i = xas.xa_index; @@ -1600,17 +1671,17 @@ static noinline void check_move(struct xarray *xa) xa_destroy(xa); - check_move_tiny(xa); - check_move_max(xa); + check_move_tiny(test); + check_move_max(test); for (i = 0; i < 16; i++) - check_move_small(xa, 1UL << i); + check_move_small(test, 1UL << i); for (i = 2; i < 16; i++) - check_move_small(xa, (1UL << i) - 1); + check_move_small(test, (1UL << i) - 1); } -static noinline void xa_store_many_order(struct xarray *xa, +static noinline void xa_store_many_order(struct kunit *test, struct xarray *xa, unsigned long index, unsigned order) { XA_STATE_ORDER(xas, xa, index, order); @@ -1633,30 +1704,34 @@ unlock: XA_BUG_ON(xa, xas_error(&xas)); } -static noinline void check_create_range_1(struct xarray *xa, +static noinline void 
check_create_range_1(struct kunit *test, unsigned long index, unsigned order) { + struct xarray *xa = xa_param(test); + unsigned long i; - xa_store_many_order(xa, index, order); + xa_store_many_order(test, xa, index, order); for (i = index; i < index + (1UL << order); i++) - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_create_range_2(struct xarray *xa, unsigned order) +static noinline void check_create_range_2(struct kunit *test, unsigned int order) { + struct xarray *xa = xa_param(test); + unsigned long i; unsigned long nr = 1UL << order; for (i = 0; i < nr * nr; i += nr) - xa_store_many_order(xa, i, order); + xa_store_many_order(test, xa, i, order); for (i = 0; i < nr * nr; i++) - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_create_range_3(void) +static noinline void check_create_range_3(struct kunit *test) { XA_STATE(xas, NULL, 0); xas_set_err(&xas, -EEXIST); @@ -1664,9 +1739,11 @@ static noinline void check_create_range_3(void) XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST); } -static noinline void check_create_range_4(struct xarray *xa, +static noinline void check_create_range_4(struct kunit *test, unsigned long index, unsigned order) { + struct xarray *xa = xa_param(test); + XA_STATE_ORDER(xas, xa, index, order); unsigned long base = xas.xa_index; unsigned long i = 0; @@ -1692,13 +1769,15 @@ unlock: XA_BUG_ON(xa, xas_error(&xas)); for (i = base; i < base + (1UL << order); i++) - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_create_range_5(struct xarray *xa, +static noinline void check_create_range_5(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE_ORDER(xas, xa, index, order); unsigned int i; @@ -1715,44 +1794,46 @@ static noinline void check_create_range_5(struct xarray *xa, xa_destroy(xa); } -static noinline void check_create_range(struct xarray *xa) +static noinline void check_create_range(struct kunit *test) { unsigned int order; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
12 : 1; for (order = 0; order < max_order; order++) { - check_create_range_1(xa, 0, order); - check_create_range_1(xa, 1U << order, order); - check_create_range_1(xa, 2U << order, order); - check_create_range_1(xa, 3U << order, order); - check_create_range_1(xa, 1U << 24, order); + check_create_range_1(test, 0, order); + check_create_range_1(test, 1U << order, order); + check_create_range_1(test, 2U << order, order); + check_create_range_1(test, 3U << order, order); + check_create_range_1(test, 1U << 24, order); if (order < 10) - check_create_range_2(xa, order); + check_create_range_2(test, order); - check_create_range_4(xa, 0, order); - check_create_range_4(xa, 1U << order, order); - check_create_range_4(xa, 2U << order, order); - check_create_range_4(xa, 3U << order, order); - check_create_range_4(xa, 1U << 24, order); + check_create_range_4(test, 0, order); + check_create_range_4(test, 1U << order, order); + check_create_range_4(test, 2U << order, order); + check_create_range_4(test, 3U << order, order); + check_create_range_4(test, 1U << 24, order); - check_create_range_4(xa, 1, order); - check_create_range_4(xa, (1U << order) + 1, order); - check_create_range_4(xa, (2U << order) + 1, order); - check_create_range_4(xa, (2U << order) - 1, order); - check_create_range_4(xa, (3U << order) + 1, order); - check_create_range_4(xa, (3U << order) - 1, order); - check_create_range_4(xa, (1U << 24) + 1, order); + check_create_range_4(test, 1, order); + check_create_range_4(test, (1U << order) + 1, order); + check_create_range_4(test, (2U << order) + 1, order); + check_create_range_4(test, (2U << order) - 1, order); + check_create_range_4(test, (3U << order) + 1, order); + check_create_range_4(test, (3U << order) - 1, order); + check_create_range_4(test, (1U << 24) + 1, order); - check_create_range_5(xa, 0, order); - check_create_range_5(xa, (1U << order), order); + check_create_range_5(test, 0, order); + check_create_range_5(test, (1U << order), order); } - check_create_range_3(); + check_create_range_3(test); } -static noinline void __check_store_range(struct xarray *xa, unsigned long first, +static noinline void __check_store_range(struct kunit *test, unsigned long first, unsigned long last) { + struct xarray *xa = xa_param(test); + #ifdef CONFIG_XARRAY_MULTI xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL); @@ -1767,26 +1848,28 @@ static noinline void __check_store_range(struct xarray *xa, unsigned long first, XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_store_range(struct xarray *xa) +static noinline void check_store_range(struct kunit *test) { unsigned long i, j; for (i = 0; i < 128; i++) { for (j = i; j < 128; j++) { - __check_store_range(xa, i, j); - __check_store_range(xa, 128 + i, 128 + j); - __check_store_range(xa, 4095 + i, 4095 + j); - __check_store_range(xa, 4096 + i, 4096 + j); - __check_store_range(xa, 123456 + i, 123456 + j); - __check_store_range(xa, (1 << 24) + i, (1 << 24) + j); + __check_store_range(test, i, j); + __check_store_range(test, 128 + i, 128 + j); + __check_store_range(test, 4095 + i, 4095 + j); + __check_store_range(test, 4096 + i, 4096 + j); + __check_store_range(test, 123456 + i, 123456 + j); + __check_store_range(test, (1 << 24) + i, (1 << 24) + j); } } } #ifdef CONFIG_XARRAY_MULTI -static void check_split_1(struct xarray *xa, unsigned long index, +static void check_split_1(struct kunit *test, unsigned long index, unsigned int order, unsigned int new_order) { + struct xarray *xa = xa_param(test); + XA_STATE_ORDER(xas, xa, index, 
new_order); unsigned int i, found; void *entry; @@ -1822,26 +1905,30 @@ static void check_split_1(struct xarray *xa, unsigned long index, xa_destroy(xa); } -static noinline void check_split(struct xarray *xa) +static noinline void check_split(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned int order, new_order; XA_BUG_ON(xa, !xa_empty(xa)); for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) { for (new_order = 0; new_order < order; new_order++) { - check_split_1(xa, 0, order, new_order); - check_split_1(xa, 1UL << order, order, new_order); - check_split_1(xa, 3UL << order, order, new_order); + check_split_1(test, 0, order, new_order); + check_split_1(test, 1UL << order, order, new_order); + check_split_1(test, 3UL << order, order, new_order); } } } #else -static void check_split(struct xarray *xa) { } +static void check_split(struct kunit *test) { } #endif -static void check_align_1(struct xarray *xa, char *name) +static void check_align_1(struct kunit *test, char *name) { + struct xarray *xa = xa_param(test); + int i; unsigned int id; unsigned long index; @@ -1861,8 +1948,10 @@ static void check_align_1(struct xarray *xa, char *name) * We should always be able to store without allocating memory after * reserving a slot. */ -static void check_align_2(struct xarray *xa, char *name) +static void check_align_2(struct kunit *test, char *name) { + struct xarray *xa = xa_param(test); + int i; XA_BUG_ON(xa, !xa_empty(xa)); @@ -1881,15 +1970,15 @@ static void check_align_2(struct xarray *xa, char *name) XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_align(struct xarray *xa) +static noinline void check_align(struct kunit *test) { char name[] = "Motorola 68000"; - check_align_1(xa, name); - check_align_1(xa, name + 1); - check_align_1(xa, name + 2); - check_align_1(xa, name + 3); - check_align_2(xa, name); + check_align_1(test, name); + check_align_1(test, name + 1); + check_align_1(test, name + 2); + check_align_1(test, name + 3); + check_align_2(test, name); } static LIST_HEAD(shadow_nodes); @@ -1905,7 +1994,7 @@ static void test_update_node(struct xa_node *node) } } -static noinline void shadow_remove(struct xarray *xa) +static noinline void shadow_remove(struct kunit *test, struct xarray *xa) { struct xa_node *node; @@ -1919,8 +2008,17 @@ static noinline void shadow_remove(struct xarray *xa) xa_unlock(xa); } -static noinline void check_workingset(struct xarray *xa, unsigned long index) +struct workingset_testcase { + struct xarray *xa; + unsigned long index; +}; + +static noinline void check_workingset(struct kunit *test) { + struct workingset_testcase tc = *(struct workingset_testcase *)test->param_value; + struct xarray *xa = tc.xa; + unsigned long index = tc.index; + XA_STATE(xas, xa, index); xas_set_update(&xas, test_update_node); @@ -1943,7 +2041,7 @@ static noinline void check_workingset(struct xarray *xa, unsigned long index) xas_unlock(&xas); XA_BUG_ON(xa, list_empty(&shadow_nodes)); - shadow_remove(xa); + shadow_remove(test, xa); XA_BUG_ON(xa, !list_empty(&shadow_nodes)); XA_BUG_ON(xa, !xa_empty(xa)); } @@ -1952,9 +2050,11 @@ static noinline void check_workingset(struct xarray *xa, unsigned long index) * Check that the pointer / value / sibling entries are accounted the * way we expect them to be. 
*/ -static noinline void check_account(struct xarray *xa) +static noinline void check_account(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI + struct xarray *xa = xa_param(test); + unsigned int order; for (order = 1; order < 12; order++) { @@ -1981,8 +2081,10 @@ static noinline void check_account(struct xarray *xa) #endif } -static noinline void check_get_order(struct xarray *xa) +static noinline void check_get_order(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; unsigned int order; unsigned long i, j; @@ -2001,8 +2103,10 @@ static noinline void check_get_order(struct xarray *xa) } } -static noinline void check_xas_get_order(struct xarray *xa) +static noinline void check_xas_get_order(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; @@ -2034,8 +2138,10 @@ static noinline void check_xas_get_order(struct xarray *xa) } } -static noinline void check_xas_conflict_get_order(struct xarray *xa) +static noinline void check_xas_conflict_get_order(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); void *entry; @@ -2092,8 +2198,10 @@ static noinline void check_xas_conflict_get_order(struct xarray *xa) } -static noinline void check_destroy(struct xarray *xa) +static noinline void check_destroy(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned long index; XA_BUG_ON(xa, !xa_empty(xa)); @@ -2126,52 +2234,59 @@ static noinline void check_destroy(struct xarray *xa) } static DEFINE_XARRAY(array); +static struct xarray *arrays[] = { &array }; +KUNIT_ARRAY_PARAM(array, arrays, NULL); -static int xarray_checks(void) -{ - check_xa_err(&array); - check_xas_retry(&array); - check_xa_load(&array); - check_xa_mark(&array); - check_xa_shrink(&array); - check_xas_erase(&array); - check_insert(&array); - check_cmpxchg(&array); - check_cmpxchg_order(&array); - check_reserve(&array); - check_reserve(&xa0); - check_multi_store(&array); - check_multi_store_advanced(&array); - check_get_order(&array); - check_xas_get_order(&array); - check_xas_conflict_get_order(&array); - check_xa_alloc(); - check_find(&array); - check_find_entry(&array); - check_pause(&array); - check_account(&array); - check_destroy(&array); - check_move(&array); - check_create_range(&array); - check_store_range(&array); - check_store_iter(&array); - check_align(&xa0); - check_split(&array); +static struct xarray *xa0s[] = { &xa0 }; +KUNIT_ARRAY_PARAM(xa0, xa0s, NULL); - check_workingset(&array, 0); - check_workingset(&array, 64); - check_workingset(&array, 4096); +static struct workingset_testcase workingset_testcases[] = { + { &array, 0 }, + { &array, 64 }, + { &array, 4096 }, +}; +KUNIT_ARRAY_PARAM(workingset, workingset_testcases, NULL); - printk("XArray: %u of %u tests passed\n", tests_passed, tests_run); - return (tests_run == tests_passed) ? 
0 : -EINVAL; -} +static struct kunit_case xarray_cases[] = { + KUNIT_CASE_PARAM(check_xa_err, array_gen_params), + KUNIT_CASE_PARAM(check_xas_retry, array_gen_params), + KUNIT_CASE_PARAM(check_xa_load, array_gen_params), + KUNIT_CASE_PARAM(check_xa_mark, array_gen_params), + KUNIT_CASE_PARAM(check_xa_shrink, array_gen_params), + KUNIT_CASE_PARAM(check_xas_erase, array_gen_params), + KUNIT_CASE_PARAM(check_insert, array_gen_params), + KUNIT_CASE_PARAM(check_cmpxchg, array_gen_params), + KUNIT_CASE_PARAM(check_cmpxchg_order, array_gen_params), + KUNIT_CASE_PARAM(check_reserve, array_gen_params), + KUNIT_CASE_PARAM(check_reserve, xa0_gen_params), + KUNIT_CASE_PARAM(check_multi_store, array_gen_params), + KUNIT_CASE_PARAM(check_multi_store_advanced, array_gen_params), + KUNIT_CASE_PARAM(check_get_order, array_gen_params), + KUNIT_CASE_PARAM(check_xas_get_order, array_gen_params), + KUNIT_CASE_PARAM(check_xas_conflict_get_order, array_gen_params), + KUNIT_CASE(check_xa_alloc), + KUNIT_CASE_PARAM(check_find, array_gen_params), + KUNIT_CASE_PARAM(check_find_entry, array_gen_params), + KUNIT_CASE_PARAM(check_pause, array_gen_params), + KUNIT_CASE_PARAM(check_account, array_gen_params), + KUNIT_CASE_PARAM(check_destroy, array_gen_params), + KUNIT_CASE_PARAM(check_move, array_gen_params), + KUNIT_CASE_PARAM(check_create_range, array_gen_params), + KUNIT_CASE_PARAM(check_store_range, array_gen_params), + KUNIT_CASE_PARAM(check_store_iter, array_gen_params), + KUNIT_CASE_PARAM(check_align, xa0_gen_params), + KUNIT_CASE_PARAM(check_split, array_gen_params), + KUNIT_CASE_PARAM(check_workingset, workingset_gen_params), + {}, +}; -static void xarray_exit(void) -{ -} +static struct kunit_suite xarray_suite = { + .name = "xarray", + .test_cases = xarray_cases, +}; + +kunit_test_suite(xarray_suite); -module_init(xarray_checks); -module_exit(xarray_exit); MODULE_AUTHOR("Matthew Wilcox <willy@infradead.org>"); MODULE_DESCRIPTION("XArray API test module"); MODULE_LICENSE("GPL"); diff --git a/lib/xarray.c b/lib/xarray.c index 32d4bac8c94c..5da8d18899a1 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -435,6 +435,11 @@ static unsigned long max_index(void *entry) return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1; } +static inline void *xa_zero_to_null(void *entry) +{ + return xa_is_zero(entry) ? 
NULL : entry; +} + static void xas_shrink(struct xa_state *xas) { struct xarray *xa = xas->xa; @@ -451,8 +456,8 @@ static void xas_shrink(struct xa_state *xas) break; if (!xa_is_node(entry) && node->shift) break; - if (xa_is_zero(entry) && xa_zero_busy(xa)) - entry = NULL; + if (xa_zero_busy(xa)) + entry = xa_zero_to_null(entry); xas->xa_node = XAS_BOUNDS; RCU_INIT_POINTER(xa->xa_head, entry); @@ -1474,9 +1479,7 @@ void *xa_load(struct xarray *xa, unsigned long index) rcu_read_lock(); do { - entry = xas_load(&xas); - if (xa_is_zero(entry)) - entry = NULL; + entry = xa_zero_to_null(xas_load(&xas)); } while (xas_retry(&xas, entry)); rcu_read_unlock(); @@ -1486,8 +1489,6 @@ EXPORT_SYMBOL(xa_load); static void *xas_result(struct xa_state *xas, void *curr) { - if (xa_is_zero(curr)) - return NULL; if (xas_error(xas)) curr = xas->xa_node; return curr; @@ -1508,7 +1509,7 @@ static void *xas_result(struct xa_state *xas, void *curr) void *__xa_erase(struct xarray *xa, unsigned long index) { XA_STATE(xas, xa, index); - return xas_result(&xas, xas_store(&xas, NULL)); + return xas_result(&xas, xa_zero_to_null(xas_store(&xas, NULL))); } EXPORT_SYMBOL(__xa_erase); @@ -1567,7 +1568,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) xas_clear_mark(&xas, XA_FREE_MARK); } while (__xas_nomem(&xas, gfp)); - return xas_result(&xas, curr); + return xas_result(&xas, xa_zero_to_null(curr)); } EXPORT_SYMBOL(__xa_store); @@ -1600,6 +1601,9 @@ void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) } EXPORT_SYMBOL(xa_store); +static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp); + /** * __xa_cmpxchg() - Store this entry in the XArray. * @xa: XArray. @@ -1618,6 +1622,13 @@ EXPORT_SYMBOL(xa_store); */ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, void *old, void *entry, gfp_t gfp) +{ + return xa_zero_to_null(__xa_cmpxchg_raw(xa, index, old, entry, gfp)); +} +EXPORT_SYMBOL(__xa_cmpxchg); + +static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) { XA_STATE(xas, xa, index); void *curr; @@ -1636,7 +1647,6 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, return xas_result(&xas, curr); } -EXPORT_SYMBOL(__xa_cmpxchg); /** * __xa_insert() - Store this entry in the XArray if no entry is present. @@ -1656,26 +1666,16 @@ EXPORT_SYMBOL(__xa_cmpxchg); */ int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) { - XA_STATE(xas, xa, index); void *curr; + int errno; - if (WARN_ON_ONCE(xa_is_advanced(entry))) - return -EINVAL; if (!entry) entry = XA_ZERO_ENTRY; - - do { - curr = xas_load(&xas); - if (!curr) { - xas_store(&xas, entry); - if (xa_track_free(xa)) - xas_clear_mark(&xas, XA_FREE_MARK); - } else { - xas_set_err(&xas, -EBUSY); - } - } while (__xas_nomem(&xas, gfp)); - - return xas_error(&xas); + curr = __xa_cmpxchg_raw(xa, index, NULL, entry, gfp); + errno = xa_err(curr); + if (errno) + return errno; + return (curr != NULL) ? -EBUSY : 0; } EXPORT_SYMBOL(__xa_insert); diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 820ba3b5cbfc..982bb5ef3233 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1855,7 +1855,7 @@ static int kmemleak_scan_thread(void *arg) * Wait before the first scan to allow the system to fully initialize. 
*/ if (first_run) { - signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000); + signed long timeout = secs_to_jiffies(SECS_FIRST_SCAN); first_run = 0; while (timeout && !kthread_should_stop()) timeout = schedule_timeout_interruptible(timeout); @@ -2241,7 +2241,7 @@ void __init kmemleak_init(void) return; jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE); - jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000); + jiffies_scan_wait = secs_to_jiffies(SECS_SCAN_WAIT); object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE); scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index de47ad999d7b..f4c5d1a8c01f 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -210,7 +210,7 @@ static const u16 mgmt_untrusted_events[] = { MGMT_EV_EXP_FEATURE_CHANGED, }; -#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) +#define CACHE_TIMEOUT secs_to_jiffies(2) #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ "\x00\x00\x00\x00\x00\x00\x00\x00" diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 4cc97f971264..7c6f7c9f7332 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -39,20 +39,15 @@ static const char *const sctp_conntrack_names[] = { [SCTP_CONNTRACK_HEARTBEAT_SENT] = "HEARTBEAT_SENT", }; -#define SECS * HZ -#define MINS * 60 SECS -#define HOURS * 60 MINS -#define DAYS * 24 HOURS - static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { - [SCTP_CONNTRACK_CLOSED] = 10 SECS, - [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, - [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, - [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS, - [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS, - [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS, - [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS, - [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS, + [SCTP_CONNTRACK_CLOSED] = secs_to_jiffies(10), + [SCTP_CONNTRACK_COOKIE_WAIT] = secs_to_jiffies(3), + [SCTP_CONNTRACK_COOKIE_ECHOED] = secs_to_jiffies(3), + [SCTP_CONNTRACK_ESTABLISHED] = secs_to_jiffies(210), + [SCTP_CONNTRACK_SHUTDOWN_SENT] = secs_to_jiffies(3), + [SCTP_CONNTRACK_SHUTDOWN_RECD] = secs_to_jiffies(3), + [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = secs_to_jiffies(3), + [SCTP_CONNTRACK_HEARTBEAT_SENT] = secs_to_jiffies(30), }; #define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 3bb04b05c5ce..bea70eb6f034 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -640,10 +640,8 @@ EXPORT_SYMBOL(wireless_send_event); #ifdef CONFIG_CFG80211_WEXT static void wireless_warn_cfg80211_wext(void) { - char name[sizeof(current->comm)]; - pr_warn_once("warning: `%s' uses wireless extensions which will stop working for Wi-Fi 7 hardware; use nl80211\n", - get_task_comm(name, current)); + current->comm); } #endif diff --git a/samples/livepatch/livepatch-callbacks-busymod.c b/samples/livepatch/livepatch-callbacks-busymod.c index 378e2d40271a..69105596e72e 100644 --- a/samples/livepatch/livepatch-callbacks-busymod.c +++ b/samples/livepatch/livepatch-callbacks-busymod.c @@ -44,8 +44,7 @@ static void busymod_work_func(struct work_struct *work) static int livepatch_callbacks_mod_init(void) { pr_info("%s\n", __func__); - schedule_delayed_work(&work, - msecs_to_jiffies(1000 * 0)); + schedule_delayed_work(&work, 0); return 0; } diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c index 6701641bf12d..f3f153895d6c 100644 --- 
a/samples/livepatch/livepatch-shadow-fix1.c +++ b/samples/livepatch/livepatch-shadow-fix1.c @@ -72,8 +72,7 @@ static struct dummy *livepatch_fix1_dummy_alloc(void) if (!d) return NULL; - d->jiffies_expire = jiffies + - msecs_to_jiffies(1000 * EXPIRE_PERIOD); + d->jiffies_expire = jiffies + secs_to_jiffies(EXPIRE_PERIOD); /* * Patch: save the extra memory location into a SV_LEAK shadow diff --git a/samples/livepatch/livepatch-shadow-mod.c b/samples/livepatch/livepatch-shadow-mod.c index 7e753b0d2fa6..5d83ad5a8118 100644 --- a/samples/livepatch/livepatch-shadow-mod.c +++ b/samples/livepatch/livepatch-shadow-mod.c @@ -101,8 +101,7 @@ static __used noinline struct dummy *dummy_alloc(void) if (!d) return NULL; - d->jiffies_expire = jiffies + - msecs_to_jiffies(1000 * EXPIRE_PERIOD); + d->jiffies_expire = jiffies + secs_to_jiffies(EXPIRE_PERIOD); /* Oops, forgot to save leak! */ leak = kzalloc(sizeof(*leak), GFP_KERNEL); @@ -152,8 +151,7 @@ static void alloc_work_func(struct work_struct *work) list_add(&d->list, &dummy_list); mutex_unlock(&dummy_list_mutex); - schedule_delayed_work(&alloc_dwork, - msecs_to_jiffies(1000 * ALLOC_PERIOD)); + schedule_delayed_work(&alloc_dwork, secs_to_jiffies(ALLOC_PERIOD)); } /* @@ -184,16 +182,13 @@ static void cleanup_work_func(struct work_struct *work) } mutex_unlock(&dummy_list_mutex); - schedule_delayed_work(&cleanup_dwork, - msecs_to_jiffies(1000 * CLEANUP_PERIOD)); + schedule_delayed_work(&cleanup_dwork, secs_to_jiffies(CLEANUP_PERIOD)); } static int livepatch_shadow_mod_init(void) { - schedule_delayed_work(&alloc_dwork, - msecs_to_jiffies(1000 * ALLOC_PERIOD)); - schedule_delayed_work(&cleanup_dwork, - msecs_to_jiffies(1000 * CLEANUP_PERIOD)); + schedule_delayed_work(&alloc_dwork, secs_to_jiffies(ALLOC_PERIOD)); + schedule_delayed_work(&cleanup_dwork, secs_to_jiffies(CLEANUP_PERIOD)); return 0; } diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 9eed3683ad76..9d469c20871f 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -834,16 +834,6 @@ foreach my $entry (@mode_permission_funcs) { $mode_perms_search = "(?:${mode_perms_search})"; our %deprecated_apis = ( - "synchronize_rcu_bh" => "synchronize_rcu", - "synchronize_rcu_bh_expedited" => "synchronize_rcu_expedited", - "call_rcu_bh" => "call_rcu", - "rcu_barrier_bh" => "rcu_barrier", - "synchronize_sched" => "synchronize_rcu", - "synchronize_sched_expedited" => "synchronize_rcu_expedited", - "call_rcu_sched" => "call_rcu", - "rcu_barrier_sched" => "rcu_barrier", - "get_state_synchronize_sched" => "get_state_synchronize_rcu", - "cond_synchronize_sched" => "cond_synchronize_rcu", "kmap" => "kmap_local_page", "kunmap" => "kunmap_local", "kmap_atomic" => "kmap_local_page", @@ -2875,7 +2865,7 @@ sub process { if ($realfile =~ m@^include/asm/@) { ERROR("MODIFIED_INCLUDE_ASM", - "do not modify files in include/asm, change architecture specific files in include/asm-<architecture>\n" . "$here$rawline\n"); + "do not modify files in include/asm, change architecture specific files in arch/<architecture>/include/asm\n" . 
"$here$rawline\n"); } $found_file = 1; } @@ -3237,12 +3227,12 @@ sub process { my ($cid, $ctitle) = git_commit_info($orig_commit, $id, $title); - if ($ctitle ne $title || $tag_case || $tag_space || - $id_length || $id_case || !$title_has_quotes) { + if (defined($cid) && ($ctitle ne $title || $tag_case || $tag_space || $id_length || $id_case || !$title_has_quotes)) { + my $fixed = "Fixes: $cid (\"$ctitle\")"; if (WARN("BAD_FIXES_TAG", - "Please use correct Fixes: style 'Fixes: <12 chars of sha1> (\"\")' - ie: 'Fixes: $cid (\"$ctitle\")'\n" . $herecurr) && + "Please use correct Fixes: style 'Fixes: <12+ chars of sha1> (\"<title line>\")' - ie: '$fixed'\n" . $herecurr) && $fix) { - $fixed[$fixlinenr] = "Fixes: $cid (\"$ctitle\")"; + $fixed[$fixlinenr] = $fixed; } } } @@ -5513,9 +5503,9 @@ sub process { } } -# check for unnecessary parentheses around comparisons in if uses -# when !drivers/staging or command-line uses --strict - if (($realfile !~ m@^(?:drivers/staging/)@ || $check_orig) && +# check for unnecessary parentheses around comparisons +# except in drivers/staging + if (($realfile !~ m@^(?:drivers/staging/)@) && $perl_version_ok && defined($stat) && $stat =~ /(^.\s*if\s*($balanced_parens))/) { my $if_stat = $1; diff --git a/scripts/coccinelle/misc/secs_to_jiffies.cocci b/scripts/coccinelle/misc/secs_to_jiffies.cocci new file mode 100644 index 000000000000..8bbb2884ea5d --- /dev/null +++ b/scripts/coccinelle/misc/secs_to_jiffies.cocci @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only +/// +/// Find usages of: +/// - msecs_to_jiffies(value*1000) +/// - msecs_to_jiffies(value*MSEC_PER_SEC) +/// +// Confidence: High +// Copyright: (C) 2024 Easwar Hariharan, Microsoft +// Keywords: secs, seconds, jiffies +// + +virtual patch + +@depends on patch@ constant C; @@ + +- msecs_to_jiffies(C * 1000) ++ secs_to_jiffies(C) + +@depends on patch@ constant C; @@ + +- msecs_to_jiffies(C * MSEC_PER_SEC) ++ secs_to_jiffies(C) diff --git a/scripts/spelling.txt b/scripts/spelling.txt index 05bd9ca1fbfa..a290db720b0f 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt @@ -222,6 +222,7 @@ autonymous||autonomous auxillary||auxiliary auxilliary||auxiliary avaiable||available +avaialable||available avaible||available availabe||available availabled||available @@ -267,6 +268,7 @@ broadcase||broadcast broadcat||broadcast bufer||buffer bufferred||buffered +bufferur||buffer bufufer||buffer cacluated||calculated caculate||calculate @@ -405,6 +407,7 @@ configutation||configuration congiuration||configuration conider||consider conjuction||conjunction +connction||connection connecetd||connected connectinos||connections connetor||connector @@ -413,6 +416,7 @@ connnections||connections consistancy||consistency consistant||consistent consits||consists +constructred||constructed containes||contains containts||contains contaisn||contains @@ -450,6 +454,7 @@ creationg||creating cryptocraphic||cryptographic cummulative||cumulative cunter||counter +curent||current curently||currently cylic||cyclic dafault||default @@ -461,6 +466,7 @@ decendant||descendant decendants||descendants decompres||decompress decsribed||described +decrese||decrease decription||description detault||default dectected||detected @@ -485,6 +491,7 @@ delare||declare delares||declares delaring||declaring delemiter||delimiter +deley||delay delibrately||deliberately delievered||delivered demodualtor||demodulator @@ -551,6 +558,7 @@ disgest||digest disired||desired dispalying||displaying dissable||disable +dissapeared||disappeared 
diplay||display directon||direction direcly||directly @@ -606,6 +614,7 @@ eigth||eight elementry||elementary eletronic||electronic embeded||embedded +emtpy||empty enabledi||enabled enbale||enable enble||enable @@ -669,6 +678,7 @@ exmaple||example expecially||especially experies||expires explicite||explicit +explicity||explicitly explicitely||explicitly explict||explicit explictely||explicitly @@ -723,10 +733,12 @@ followign||following followings||following follwing||following fonud||found +forcebly||forcibly forseeable||foreseeable forse||force fortan||fortran forwardig||forwarding +forwared||forwarded frambuffer||framebuffer framming||framing framwork||framework @@ -767,6 +779,7 @@ grahpical||graphical granularty||granularity grapic||graphic grranted||granted +grups||groups guage||gauge guarenteed||guaranteed guarentee||guarantee @@ -780,6 +793,7 @@ hardare||hardware harware||hardware hardward||hardware havind||having +heigth||height heirarchically||hierarchically heirarchy||hierarchy heirachy||hierarchy @@ -788,9 +802,11 @@ hearbeat||heartbeat heterogenous||heterogeneous hexdecimal||hexadecimal hybernate||hibernate +hiearchy||hierarchy hierachy||hierarchy hierarchie||hierarchy homogenous||homogeneous +horizental||horizontal howver||however hsould||should hypervior||hypervisor @@ -842,6 +858,7 @@ independed||independent indiate||indicate indicat||indicate inexpect||inexpected +infalte||inflate inferface||interface infinit||infinite infomation||information @@ -861,6 +878,7 @@ initators||initiators initialiazation||initialization initializationg||initialization initializiation||initialization +initializtion||initialization initialze||initialize initialzed||initialized initialzing||initializing @@ -877,6 +895,7 @@ instanciate||instantiate instanciated||instantiated instuments||instruments insufficent||insufficient +intead||instead inteface||interface integreated||integrated integrety||integrity @@ -1081,6 +1100,7 @@ notications||notifications notifcations||notifications notifed||notified notity||notify +notfify||notify nubmer||number numebr||number numer||number @@ -1122,6 +1142,7 @@ orientatied||orientated orientied||oriented orignal||original originial||original +orphanded||orphaned otherise||otherwise ouput||output oustanding||outstanding @@ -1184,9 +1205,11 @@ peroid||period persistance||persistence persistant||persistent phoneticly||phonetically +pipline||pipeline plaform||platform plalform||platform platfoem||platform +platfomr||platform platfrom||platform plattform||platform pleaes||please @@ -1211,6 +1234,7 @@ preceeding||preceding preceed||precede precendence||precedence precission||precision +predicition||prediction preemptable||preemptible prefered||preferred prefferably||preferably @@ -1289,6 +1313,7 @@ querrying||querying queus||queues randomally||randomly raoming||roaming +readyness||readiness reasearcher||researcher reasearchers||researchers reasearch||research @@ -1305,8 +1330,10 @@ recieves||receives recieving||receiving recogniced||recognised recognizeable||recognizable +recompte||recompute recommanded||recommended recyle||recycle +redect||reject redircet||redirect redirectrion||redirection redundacy||redundancy @@ -1314,6 +1341,7 @@ reename||rename refcounf||refcount refence||reference refered||referred +referencce||reference referenace||reference refererence||reference refering||referring @@ -1348,11 +1376,13 @@ replys||replies reponse||response representaion||representation repsonse||response +reqested||requested reqeust||request reqister||register requed||requeued 
requestied||requested requiere||require +requieres||requires requirment||requirement requred||required requried||required @@ -1440,6 +1470,7 @@ sequencial||sequential serivce||service serveral||several servive||service +sesion||session setts||sets settting||setting shapshot||snapshot @@ -1602,11 +1633,13 @@ trys||tries thses||these tiggers||triggers tiggered||triggered +tiggerring||triggering tipically||typically timeing||timing timming||timing timout||timeout tmis||this +tolarance||tolerance toogle||toggle torerable||tolerable torlence||tolerance @@ -1633,6 +1666,7 @@ trasfer||transfer trasmission||transmission trasmitter||transmitter treshold||threshold +trigged||triggered triggerd||triggered trigerred||triggered trigerring||triggering @@ -1648,6 +1682,7 @@ uknown||unknown usccess||success uncommited||uncommitted uncompatible||incompatible +uncomressed||uncompressed unconditionaly||unconditionally undeflow||underflow undelying||underlying @@ -1715,6 +1750,7 @@ utitity||utility utitlty||utility vaid||valid vaild||valid +validationg||validating valide||valid variantions||variations varible||variable @@ -1724,6 +1760,7 @@ verbse||verbose veify||verify verfication||verification veriosn||version +versoin||version verisons||versions verison||version veritical||vertical diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index e1a5e13ea269..1a2d02fee09b 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -76,7 +76,6 @@ static void report_access(const char *access, struct task_struct *target, struct task_struct *agent) { struct access_report_info *info; - char agent_comm[sizeof(agent->comm)]; assert_spin_locked(&target->alloc_lock); /* for target->comm */ @@ -86,8 +85,7 @@ static void report_access(const char *access, struct task_struct *target, */ pr_notice_ratelimited( "ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n", - access, target->comm, target->pid, - get_task_comm(agent_comm, agent), agent->pid); + access, target->comm, target->pid, agent->comm, agent->pid); return; } diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c index ca2c6f5de407..c073b38cd673 100644 --- a/sound/usb/line6/toneport.c +++ b/sound/usb/line6/toneport.c @@ -386,7 +386,7 @@ static int toneport_setup(struct usb_line6_toneport *toneport) toneport_update_led(toneport); schedule_delayed_work(&toneport->line6.startup_work, - msecs_to_jiffies(TONEPORT_PCM_DELAY * 1000)); + secs_to_jiffies(TONEPORT_PCM_DELAY)); return 0; } diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c index 1334214546d7..100ad3dc091a 100644 --- a/tools/accounting/getdelays.c +++ b/tools/accounting/getdelays.c @@ -192,60 +192,77 @@ static int get_family_id(int sd) } #define average_ms(t, c) (t / 1000000ULL / (c ? 
c : 1)) +#define delay_ms(t) (t / 1000000ULL) static void print_delayacct(struct taskstats *t) { - printf("\n\nCPU %15s%15s%15s%15s%15s\n" - " %15llu%15llu%15llu%15llu%15.3fms\n" - "IO %15s%15s%15s\n" - " %15llu%15llu%15.3fms\n" - "SWAP %15s%15s%15s\n" - " %15llu%15llu%15.3fms\n" - "RECLAIM %12s%15s%15s\n" - " %15llu%15llu%15.3fms\n" - "THRASHING%12s%15s%15s\n" - " %15llu%15llu%15.3fms\n" - "COMPACT %12s%15s%15s\n" - " %15llu%15llu%15.3fms\n" - "WPCOPY %12s%15s%15s\n" - " %15llu%15llu%15.3fms\n" - "IRQ %15s%15s%15s\n" - " %15llu%15llu%15.3fms\n", + printf("\n\nCPU %15s%15s%15s%15s%15s%15s\n" + " %15llu%15llu%15llu%15llu%15.3fms%13.6fms\n" + "IO %15s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n" + "SWAP %15s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n" + "RECLAIM %12s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n" + "THRASHING%12s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n" + "COMPACT %12s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n" + "WPCOPY %12s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n" + "IRQ %15s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n", "count", "real total", "virtual total", - "delay total", "delay average", + "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->cpu_count, (unsigned long long)t->cpu_run_real_total, (unsigned long long)t->cpu_run_virtual_total, (unsigned long long)t->cpu_delay_total, average_ms((double)t->cpu_delay_total, t->cpu_count), - "count", "delay total", "delay average", + delay_ms((double)t->cpu_delay_max), + delay_ms((double)t->cpu_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->blkio_count, (unsigned long long)t->blkio_delay_total, average_ms((double)t->blkio_delay_total, t->blkio_count), - "count", "delay total", "delay average", + delay_ms((double)t->blkio_delay_max), + delay_ms((double)t->blkio_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->swapin_count, (unsigned long long)t->swapin_delay_total, average_ms((double)t->swapin_delay_total, t->swapin_count), - "count", "delay total", "delay average", + delay_ms((double)t->swapin_delay_max), + delay_ms((double)t->swapin_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->freepages_count, (unsigned long long)t->freepages_delay_total, average_ms((double)t->freepages_delay_total, t->freepages_count), - "count", "delay total", "delay average", + delay_ms((double)t->freepages_delay_max), + delay_ms((double)t->freepages_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->thrashing_count, (unsigned long long)t->thrashing_delay_total, average_ms((double)t->thrashing_delay_total, t->thrashing_count), - "count", "delay total", "delay average", + delay_ms((double)t->thrashing_delay_max), + delay_ms((double)t->thrashing_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->compact_count, (unsigned long long)t->compact_delay_total, average_ms((double)t->compact_delay_total, t->compact_count), - "count", "delay total", "delay average", + delay_ms((double)t->compact_delay_max), + delay_ms((double)t->compact_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->wpcopy_count, (unsigned long long)t->wpcopy_delay_total, average_ms((double)t->wpcopy_delay_total, t->wpcopy_count), - "count", "delay total", "delay average", + 
delay_ms((double)t->wpcopy_delay_max), + delay_ms((double)t->wpcopy_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->irq_count, (unsigned long long)t->irq_delay_total, - average_ms((double)t->irq_delay_total, t->irq_count)); + average_ms((double)t->irq_delay_total, t->irq_count), + delay_ms((double)t->irq_delay_max), + delay_ms((double)t->irq_delay_min)); } static void task_context_switch_counts(struct taskstats *t) diff --git a/tools/accounting/procacct.c b/tools/accounting/procacct.c index 90c4a37f53d9..e8dee05a6264 100644 --- a/tools/accounting/procacct.c +++ b/tools/accounting/procacct.c @@ -274,12 +274,11 @@ int main(int argc, char *argv[]) int maskset = 0; char *logfile = NULL; int cfd = 0; - int forking = 0; struct msgtemplate msg; - while (!forking) { - c = getopt(argc, argv, "m:vr:"); + while (1) { + c = getopt(argc, argv, "m:vr:w:"); if (c < 0) break; diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c index 9faa686f90e4..e9728e86b4f2 100644 --- a/tools/testing/selftests/pidfd/pidfd_test.c +++ b/tools/testing/selftests/pidfd/pidfd_test.c @@ -497,7 +497,7 @@ static int child_poll_leader_exit_test(void *args) pthread_create(&t2, NULL, test_pidfd_poll_leader_exit_thread, NULL); /* - * glibc exit calls exit_group syscall, so explicity call exit only + * glibc exit calls exit_group syscall, so explicitly call exit only * so that only the group leader exits, leaving the threads alone. */ *child_exit_secs = time(NULL);
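
A note on the KUnit pattern used throughout the test_xarray.c hunks above: every check_*() helper now takes a struct kunit and recovers the xarray under test from test->param_value, which the generators declared by KUNIT_ARRAY_PARAM() fill from the arrays[], xa0s[] and workingset_testcases[] tables. The xa_param() helper itself is defined earlier in the patch, outside the hunks shown here; a minimal sketch consistent with how it is used (and with how check_workingset() dereferences test->param_value directly) would be:

	#include <kunit/test.h>
	#include <linux/xarray.h>

	/*
	 * Sketch only, not the patch's own definition: the generated
	 * *_gen_params() functions hand each element of the parameter
	 * table to the test, so param_value points at a struct xarray *
	 * slot and one dereference yields the array under test.
	 */
	static inline struct xarray *xa_param(struct kunit *test)
	{
		return *(struct xarray * const *)test->param_value;
	}

Registering the same test body against a different backing array, as KUNIT_CASE_PARAM(check_reserve, xa0_gen_params) does, is what replaces the old explicit check_reserve(&xa0) call.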
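KUNIT_ARRAY_PARAM(workingset, workingset_testcases, NULL) declares the workingset_gen_params() function named in the test case table. Simplified from the macro in include/kunit/test.h (the real expansion also fills the description string when a describe callback is supplied), the generator walks the table one element per test invocation:

	/* Simplified sketch of the generated parameter iterator. */
	static const void *workingset_gen_params(const void *prev, char *desc)
	{
		const struct workingset_testcase *next = prev;

		/* First call starts at the table; later calls advance by one. */
		next = next ? next + 1 : workingset_testcases;
		if (next >= workingset_testcases + ARRAY_SIZE(workingset_testcases))
			return NULL;	/* no more parameters: suite moves on */
		return next;		/* becomes test->param_value for this run */
	}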
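On the lib/xarray.c side, the series funnels the long-standing rule that a reserved (zero) entry reads back as NULL through the single xa_zero_to_null() helper, applied in xa_load(), __xa_erase(), __xa_store() and __xa_cmpxchg(), while __xa_insert() is re-expressed over the new __xa_cmpxchg_raw(). The user-visible behaviour is meant to be unchanged; a small illustration of that invariant (not from the patch):

	#include <linux/xarray.h>

	static DEFINE_XARRAY(demo_xa);

	static void zero_entry_demo(void)
	{
		/* Reserving stores XA_ZERO_ENTRY internally... */
		WARN_ON(xa_reserve(&demo_xa, 1, GFP_KERNEL) != 0);

		/* ...but the public load path reports it as NULL... */
		WARN_ON(xa_load(&demo_xa, 1) != NULL);

		/* ...while xa_insert() still sees the slot as occupied. */
		WARN_ON(xa_insert(&demo_xa, 1, xa_mk_value(3), GFP_KERNEL) != -EBUSY);

		xa_release(&demo_xa, 1);
	}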
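The timeout conversions in mm/kmemleak.c, net/bluetooth/mgmt.c, net/netfilter/nf_conntrack_proto_sctp.c, the livepatch samples and sound/usb/line6/toneport.c all follow the shape that the new secs_to_jiffies.cocci script matches mechanically: a seconds value scaled to milliseconds by hand. A before/after sketch, with MY_TIMEOUT as an illustrative constant rather than one from the patch:

	#include <linux/jiffies.h>

	#define MY_TIMEOUT 30	/* seconds; illustrative only */

	/* Before: the unit conversion is spelled out at every call site. */
	static inline unsigned long old_expiry(void)
	{
		return jiffies + msecs_to_jiffies(MY_TIMEOUT * 1000);
	}

	/* After: same jiffies value, with the seconds unit made explicit. */
	static inline unsigned long new_expiry(void)
	{
		return jiffies + secs_to_jiffies(MY_TIMEOUT);
	}

The two cocci rules cover exactly these spellings, C * 1000 and C * MSEC_PER_SEC, for a constant C.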
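In getdelays.c, the taskstats delay fields are nanosecond counts; the new delay_ms() macro only rescales them, and the widened format strings print the new "delay max"/"delay min" columns with %13.6f so sub-millisecond peaks stay visible. A standalone illustration of the arithmetic (the input value is made up):

	#include <stdio.h>

	#define delay_ms(t) (t / 1000000ULL)	/* ns -> ms, as in the patch */

	int main(void)
	{
		unsigned long long delay_max = 1234567;	/* ns, illustrative */

		/* Casting to double first preserves the fractional part. */
		printf("delay max%13.6fms\n", delay_ms((double)delay_max));
		return 0;	/* prints: delay max     1.234567ms */
	}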