mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-15 02:05:33 +00:00

commit 17aa0382f1
Merge branch 'mm-nonmm-stable' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

.mailmap | 1
@@ -410,6 +410,7 @@ Liam Mark <quic_lmark@quicinc.com> <lmark@codeaurora.org>
 Linas Vepstas <linas@austin.ibm.com>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch>
 Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de>
+Linus Lüssing <linus.luessing@c0d3.blue> <ll@simonwunderlich.de>
 <linux-hardening@vger.kernel.org> <kernel-hardening@lists.openwall.com>
 Li Yang <leoyang.li@nxp.com> <leoli@freescale.com>
 Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org>
CREDITS | 2

@@ -4339,7 +4339,7 @@ D: Freescale Highspeed USB device driver
 D: Freescale QE SoC support and Ethernet driver
 S: B-1206 Jingmao Guojigongyu
 S: 16 Baliqiao Nanjie, Beijing 101100
-S: People's Repulic of China
+S: People's Republic of China

 N: Vlad Yasevich
 E: vyasevich@gmail.com
@@ -100,29 +100,29 @@ Get delays, since system boot, for pid 10::
 # ./getdelays -d -p 10
 (output similar to next case)

-Get sum of delays, since system boot, for all pids with tgid 5::
+Get sum and peak of delays, since system boot, for all pids with tgid 242::

-# ./getdelays -d -t 5
+bash-4.4# ./getdelays -d -t 242
 print delayacct stats ON
-TGID 5
+TGID 242


-CPU count real total virtual total delay total delay average
-8 7000000 6872122 3382277 0.423ms
-IO count delay total delay average
-0 0 0.000ms
-SWAP count delay total delay average
-0 0 0.000ms
-RECLAIM count delay total delay average
-0 0 0.000ms
-THRASHING count delay total delay average
-0 0 0.000ms
-COMPACT count delay total delay average
-0 0 0.000ms
-WPCOPY count delay total delay average
-0 0 0.000ms
-IRQ count delay total delay average
-0 0 0.000ms
+CPU count real total virtual total delay total delay average delay max delay min
+39 156000000 156576579 2111069 0.054ms 0.212296ms 0.031307ms
+IO count delay total delay average delay max delay min
+0 0 0.000ms 0.000000ms 0.000000ms
+SWAP count delay total delay average delay max delay min
+0 0 0.000ms 0.000000ms 0.000000ms
+RECLAIM count delay total delay average delay max delay min
+0 0 0.000ms 0.000000ms 0.000000ms
+THRASHING count delay total delay average delay max delay min
+0 0 0.000ms 0.000000ms 0.000000ms
+COMPACT count delay total delay average delay max delay min
+0 0 0.000ms 0.000000ms 0.000000ms
+WPCOPY count delay total delay average delay max delay min
+156 11215873 0.072ms 0.207403ms 0.033913ms
+IRQ count delay total delay average delay max delay min
+0 0 0.000ms 0.000000ms 0.000000ms

 Get IO accounting for pid 1, it works only with -p::

@@ -4,6 +4,8 @@
 Min Heap API
 ============

+:Author: Kuan-Wei Chiu <visitorckw@gmail.com>
+
 Introduction
 ============

@@ -42,8 +42,8 @@ call xa_tag_pointer() to create an entry with a tag, xa_untag_pointer()
 to turn a tagged entry back into an untagged pointer and xa_pointer_tag()
 to retrieve the tag of an entry. Tagged pointers use the same bits that
 are used to distinguish value entries from normal pointers, so you must
-decide whether they want to store value entries or tagged pointers in
-any particular XArray.
+decide whether you want to store value entries or tagged pointers in any
+particular XArray.

 The XArray does not support storing IS_ERR() pointers as some
 conflict with value entries or internal entries.

@@ -52,8 +52,9 @@ An unusual feature of the XArray is the ability to create entries which
 occupy a range of indices. Once stored to, looking up any index in
 the range will return the same entry as looking up any other index in
 the range. Storing to any index will store to all of them. Multi-index
-entries can be explicitly split into smaller entries, or storing ``NULL``
-into any entry will cause the XArray to forget about the range.
+entries can be explicitly split into smaller entries. Unsetting (using
+xa_erase() or xa_store() with ``NULL``) any entry will cause the XArray
+to forget about the range.

 Normal API
 ==========

@@ -63,13 +64,14 @@ for statically allocated XArrays or xa_init() for dynamically
 allocated ones. A freshly-initialised XArray contains a ``NULL``
 pointer at every index.

-You can then set entries using xa_store() and get entries
-using xa_load(). xa_store will overwrite any entry with the
-new entry and return the previous entry stored at that index. You can
-use xa_erase() instead of calling xa_store() with a
-``NULL`` entry. There is no difference between an entry that has never
-been stored to, one that has been erased and one that has most recently
-had ``NULL`` stored to it.
+You can then set entries using xa_store() and get entries using
+xa_load(). xa_store() will overwrite any entry with the new entry and
+return the previous entry stored at that index. You can unset entries
+using xa_erase() or by setting the entry to ``NULL`` using xa_store().
+There is no difference between an entry that has never been stored to
+and one that has been erased with xa_erase(); an entry that has most
+recently had ``NULL`` stored to it is also equivalent except if the
+XArray was initialized with ``XA_FLAGS_ALLOC``.

 You can conditionally replace an entry at an index by using
 xa_cmpxchg(). Like cmpxchg(), it will only succeed if
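To make the reworded XArray paragraphs above easier to follow, here is a minimal C sketch of the calls they describe (the array name demo_xa and the function demo_xarray_usage are made up for illustration; the xa_* calls themselves are the documented XArray API):

#include <linux/xarray.h>
#include <linux/bug.h>

static DEFINE_XARRAY(demo_xa);	/* statically allocated; every index starts as NULL */

static void demo_xarray_usage(void *item, void *replacement)
{
	void *old;

	/* Store @item at index 5; the previous entry (NULL here) is returned. */
	old = xa_store(&demo_xa, 5, item, GFP_KERNEL);

	/* Reading it back returns the same entry. */
	WARN_ON(xa_load(&demo_xa, 5) != item);

	/* Conditional replace: succeeds only if index 5 still holds @item. */
	old = xa_cmpxchg(&demo_xa, 5, item, replacement, GFP_KERNEL);

	/* Unset the entry; storing NULL with xa_store() would do the same. */
	xa_erase(&demo_xa, 5);
}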
@@ -6,7 +6,7 @@ Squashfs 4.0 Filesystem

 Squashfs is a compressed read-only filesystem for Linux.

-It uses zlib, lz4, lzo, or xz compression to compress files, inodes and
+It uses zlib, lz4, lzo, xz or zstd compression to compress files, inodes and
 directories. Inodes in the system are very small and all blocks are packed to
 minimise data overhead. Block sizes greater than 4K are supported up to a
 maximum of 1Mbytes (default block size 128K).

@@ -16,8 +16,8 @@ use (i.e. in cases where a .tar.gz file may be used), and in constrained
 block device/memory systems (e.g. embedded systems) where low overhead is
 needed.

-Mailing list: squashfs-devel@lists.sourceforge.net
-Web site: www.squashfs.org
+Mailing list (kernel code): linux-fsdevel@vger.kernel.org
+Web site: github.com/plougher/squashfs-tools

 1. Filesystem Features
 ----------------------

@@ -58,11 +58,9 @@ inodes have different sizes).

 As squashfs is a read-only filesystem, the mksquashfs program must be used to
 create populated squashfs filesystems. This and other squashfs utilities
-can be obtained from http://www.squashfs.org. Usage instructions can be
-obtained from this site also.
-
-The squashfs-tools development tree is now located on kernel.org
-git://git.kernel.org/pub/scm/fs/squashfs/squashfs-tools.git
+are very likely packaged by your linux distribution (called squashfs-tools).
+The source code can be obtained from github.com/plougher/squashfs-tools.
+Usage instructions can also be obtained from this site.

 2.1 Mount options
 -----------------
MAINTAINERS | 10

@@ -2827,7 +2827,7 @@ ARM/NXP S32G ARCHITECTURE
 R: Chester Lin <chester62515@gmail.com>
 R: Matthias Brugger <mbrugger@suse.com>
 R: Ghennadi Procopciuc <ghennadi.procopciuc@oss.nxp.com>
-L: NXP S32 Linux Team <s32@nxp.com>
+R: NXP S32 Linux Team <s32@nxp.com>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm64/boot/dts/freescale/s32g*.dts*

@@ -16600,8 +16600,8 @@ F: arch/nios2/

 NITRO ENCLAVES (NE)
 M: Alexandru Ciobotaru <alcioa@amazon.com>
+R: The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com>
 L: linux-kernel@vger.kernel.org
-L: The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com>
 S: Supported
 W: https://aws.amazon.com/ec2/nitro/nitro-enclaves/
 F: Documentation/virt/ne_overview.rst

@@ -16612,8 +16612,8 @@ F: samples/nitro_enclaves/

 NITRO SECURE MODULE (NSM)
 M: Alexander Graf <graf@amazon.com>
+R: The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com>
 L: linux-kernel@vger.kernel.org
-L: The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com>
 S: Supported
 W: https://aws.amazon.com/ec2/nitro/nitro-enclaves/
 F: drivers/misc/nsm.c

@@ -18425,8 +18425,8 @@ M: Fabio Estevam <festevam@gmail.com>
 M: Shawn Guo <shawnguo@kernel.org>
 M: Jacky Bai <ping.bai@nxp.com>
 R: Pengutronix Kernel Team <kernel@pengutronix.de>
+R: NXP S32 Linux Team <s32@nxp.com>
 L: linux-gpio@vger.kernel.org
-L: NXP S32 Linux Team <s32@nxp.com>
 S: Maintained
 F: Documentation/devicetree/bindings/pinctrl/fsl,*
 F: Documentation/devicetree/bindings/pinctrl/nxp,s32*

@@ -19561,7 +19561,7 @@ F: drivers/ras/amd/fmpm.c

 RASPBERRY PI PISP BACK END
 M: Jacopo Mondi <jacopo.mondi@ideasonboard.com>
-L: Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com>
+R: Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com>
 L: linux-media@vger.kernel.org
 S: Maintained
 F: Documentation/devicetree/bindings/media/raspberrypi,pispbe.yaml
@@ -10,7 +10,6 @@
 #include <linux/preempt.h>
 #include <asm/fpu.h>
 #include <asm/thread_info.h>
-#include <asm/fpu.h>

 #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
 #define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val));
@@ -200,7 +200,6 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 struct callee_regs *cregs)
 {
 struct disasm_state state;
-char buf[TASK_COMM_LEN];

 /* handle user mode only and only if enabled by sysadmin */
 if (!user_mode(regs) || !unaligned_enabled)

@@ -212,11 +211,11 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 " performance significantly\n. To enable further"
 " logging of such instances, please \n"
 " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
-get_task_comm(buf, current), task_pid_nr(current));
+current->comm, task_pid_nr(current));
 } else {
 /* Add rate limiting if it gets down to it */
 pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
-get_task_comm(buf, current), task_pid_nr(current),
+current->comm, task_pid_nr(current),
 address, regs->ret);

 }
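The two ARC hunks above use the same pattern that recurs throughout this merge: when a message only names the current task, the TASK_COMM_LEN stack buffer and get_task_comm() are dropped in favour of reading current->comm directly. A hedged before/after sketch (the function names and message text are illustrative, not taken from the patch):

#include <linux/sched.h>
#include <linux/printk.h>

/* Old style: snapshot the name into a stack buffer first. */
static void report_unaligned_old(unsigned long addr)
{
	char buf[TASK_COMM_LEN];

	pr_warn("%s(%d): unaligned access at 0x%lx\n",
		get_task_comm(buf, current), task_pid_nr(current), addr);
}

/* New style: print current's own comm directly, no temporary buffer. */
static void report_unaligned_new(unsigned long addr)
{
	pr_warn("%s(%d): unaligned access at 0x%lx\n",
		current->comm, task_pid_nr(current), addr);
}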
@@ -31,10 +31,10 @@
 /*
 * Constants
 */
-#define SHARPSL_CHARGE_ON_TIME_INTERVAL (msecs_to_jiffies(1*60*1000)) /* 1 min */
-#define SHARPSL_CHARGE_FINISH_TIME (msecs_to_jiffies(10*60*1000)) /* 10 min */
-#define SHARPSL_BATCHK_TIME (msecs_to_jiffies(15*1000)) /* 15 sec */
+#define SHARPSL_CHARGE_ON_TIME_INTERVAL (secs_to_jiffies(60))
+#define SHARPSL_CHARGE_FINISH_TIME (secs_to_jiffies(10*60))
+#define SHARPSL_BATCHK_TIME (secs_to_jiffies(15))
 #define SHARPSL_BATCHK_TIME_SUSPEND (60*10) /* 10 min */

 #define SHARPSL_WAIT_CO_TIME 15 /* 15 sec */
 #define SHARPSL_WAIT_DISCHARGE_ON 100 /* 100 msec */
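This hunk, and the many timer conversions below, all apply one mechanical rule: msecs_to_jiffies(N * 1000) (or N * MSEC_PER_SEC) becomes secs_to_jiffies(N). A small sketch of the pattern under that assumption (the timer and constant names are made up):

#include <linux/jiffies.h>
#include <linux/timer.h>

#define DEMO_INTERVAL_SECS	60

static struct timer_list demo_timer;

static void demo_timer_set(void)
{
	/* Old style: spell the interval out in milliseconds. */
	mod_timer(&demo_timer,
		  jiffies + msecs_to_jiffies(DEMO_INTERVAL_SECS * 1000));

	/* New style: the same interval, without the intermediate * 1000. */
	mod_timer(&demo_timer,
		  jiffies + secs_to_jiffies(DEMO_INTERVAL_SECS));
}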
@@ -629,7 +629,6 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
-CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

@@ -586,7 +586,6 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
-CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

@@ -606,7 +606,6 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
-CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

@@ -578,7 +578,6 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
-CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

@@ -588,7 +588,6 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
-CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

@@ -605,7 +605,6 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
-CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

@@ -692,7 +692,6 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
-CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

@@ -578,7 +578,6 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
-CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

@@ -579,7 +579,6 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
-CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

@@ -595,7 +595,6 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
-CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

@@ -575,7 +575,6 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
-CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

@@ -576,7 +576,6 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
-CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m

@@ -451,7 +451,6 @@ CONFIG_TEST_PRINTF=m
 CONFIG_TEST_SCANF=m
 CONFIG_TEST_BITMAP=m
 CONFIG_TEST_UUID=m
-CONFIG_TEST_XARRAY=m
 CONFIG_TEST_MAPLE_TREE=m
 CONFIG_TEST_RHASHTABLE=m
 CONFIG_TEST_IDA=m
@@ -4957,7 +4957,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
 * states are synchronized from L0 to L1. L1 needs to inform L0 about
 * MER=1 only when there are pending external interrupts.
 * In the above if check, MER bit is set if there are pending
-* external interrupts. Hence, explicity mask off MER bit
+* external interrupts. Hence, explicitly mask off MER bit
 * here as otherwise it may generate spurious interrupts in L2 KVM
 * causing an endless loop, which results in L2 guest getting hung.
 */
@@ -544,7 +544,7 @@ static int drc_pmem_query_health(struct papr_scm_priv *p)

 /* Jiffies offset for which the health data is assumed to be same */
 cache_timeout = p->lasthealth_jiffies +
-msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000);
+secs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL);

 /* Fetch new health info is its older than MIN_HEALTH_QUERY_INTERVAL */
 if (time_after(jiffies, cache_timeout))
@@ -166,7 +166,7 @@ static struct timer_list lgr_timer;
 */
 static void lgr_timer_set(void)
 {
-mod_timer(&lgr_timer, jiffies + msecs_to_jiffies(LGR_TIMER_INTERVAL_SECS * MSEC_PER_SEC));
+mod_timer(&lgr_timer, jiffies + secs_to_jiffies(LGR_TIMER_INTERVAL_SECS));
 }

 /*

@@ -662,12 +662,12 @@ static void stp_check_leap(void)
 if (ret < 0)
 pr_err("failed to set leap second flags\n");
 /* arm Timer to clear leap second flags */
-mod_timer(&stp_timer, jiffies + msecs_to_jiffies(14400 * MSEC_PER_SEC));
+mod_timer(&stp_timer, jiffies + secs_to_jiffies(14400));
 } else {
 /* The day the leap second is scheduled for hasn't been reached. Retry
 * in one hour.
 */
-mod_timer(&stp_timer, jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC));
+mod_timer(&stp_timer, jiffies + secs_to_jiffies(3600));
 }
 }

@@ -371,7 +371,7 @@ static void set_topology_timer(void)
 if (atomic_add_unless(&topology_poll, -1, 0))
 mod_timer(&topology_timer, jiffies + msecs_to_jiffies(100));
 else
-mod_timer(&topology_timer, jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC));
+mod_timer(&topology_timer, jiffies + secs_to_jiffies(60));
 }

 void topology_expect_change(void)

@@ -204,7 +204,7 @@ static void cmm_set_timer(void)
 del_timer(&cmm_timer);
 return;
 }
-mod_timer(&cmm_timer, jiffies + msecs_to_jiffies(cmm_timeout_seconds * MSEC_PER_SEC));
+mod_timer(&cmm_timer, jiffies + secs_to_jiffies(cmm_timeout_seconds));
 }

 static void cmm_timer_fn(struct timer_list *unused)
@@ -246,9 +246,8 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus)

 /* VM86_SCREEN_BITMAP had numerous bugs and appears to have no users. */
 if (v.flags & VM86_SCREEN_BITMAP) {
-char comm[TASK_COMM_LEN];
-
-pr_info_once("vm86: '%s' uses VM86_SCREEN_BITMAP, which is no longer supported\n", get_task_comm(comm, current));
+pr_info_once("vm86: '%s' uses VM86_SCREEN_BITMAP, which is no longer supported\n",
+current->comm);
 return -EINVAL;
 }

@@ -199,7 +199,6 @@ out_err:

 int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
 {
-char task_comm[TASK_COMM_LEN];
 int rc = 0, i;

 ctx->hdev = hdev;

@@ -272,7 +271,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx)
 mutex_init(&ctx->ts_reg_lock);

 dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n",
-get_task_comm(task_comm, current), ctx->asid);
+current->comm, ctx->asid);
 }

 return 0;

@@ -817,7 +817,7 @@ static void device_hard_reset_pending(struct work_struct *work)
 }

 queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
-msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
+secs_to_jiffies(HL_PENDING_RESET_PER_SEC));
 }
 }

@@ -362,8 +362,7 @@ static void fixup_device_params_per_asic(struct hl_device *hdev, int timeout)
 * a different default timeout for Gaudi
 */
 if (timeout == HL_DEFAULT_TIMEOUT_LOCKED)
-hdev->timeout_jiffies = msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED *
-MSEC_PER_SEC);
+hdev->timeout_jiffies = secs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED);

 hdev->reset_upon_device_release = 0;
 break;

@@ -1279,13 +1279,10 @@ static long _hl_ioctl(struct hl_fpriv *hpriv, unsigned int cmd, unsigned long ar
 retcode = -EFAULT;

 out_err:
-if (retcode) {
-char task_comm[TASK_COMM_LEN];
-
-dev_dbg_ratelimited(dev,
-"error in ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
-task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
-}
+if (retcode)
+dev_dbg_ratelimited(dev,
+"error in ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
+task_pid_nr(current), current->comm, cmd, nr);

 if (kdata != stack_kdata)
 kfree(kdata);

@@ -1308,11 +1305,9 @@ long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
 if (nr == _IOC_NR(DRM_IOCTL_HL_INFO)) {
 ioctl = &hl_ioctls_control[nr - HL_COMMAND_START];
 } else {
-char task_comm[TASK_COMM_LEN];
-
 dev_dbg_ratelimited(hdev->dev_ctrl,
 "invalid ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n",
-task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr);
+task_pid_nr(current), current->comm, cmd, nr);
 return -ENOTTY;
 }

|
@ -544,7 +544,7 @@ static void print_stats(struct xen_blkif_ring *ring)
|
|||||||
ring->st_rd_req, ring->st_wr_req,
|
ring->st_rd_req, ring->st_wr_req,
|
||||||
ring->st_f_req, ring->st_ds_req,
|
ring->st_f_req, ring->st_ds_req,
|
||||||
ring->persistent_gnt_c, max_pgrants);
|
ring->persistent_gnt_c, max_pgrants);
|
||||||
ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
|
ring->st_print = jiffies + secs_to_jiffies(10);
|
||||||
ring->st_rd_req = 0;
|
ring->st_rd_req = 0;
|
||||||
ring->st_wr_req = 0;
|
ring->st_wr_req = 0;
|
||||||
ring->st_oo_req = 0;
|
ring->st_oo_req = 0;
|
||||||
|
@@ -391,7 +391,6 @@ void intel_display_driver_resume_access(struct drm_i915_private *i915)
 */
 bool intel_display_driver_check_access(struct drm_i915_private *i915)
 {
-char comm[TASK_COMM_LEN];
 char current_task[TASK_COMM_LEN + 16];
 char allowed_task[TASK_COMM_LEN + 16] = "none";

@@ -400,12 +399,11 @@ bool intel_display_driver_check_access(struct drm_i915_private *i915)
 return true;

 snprintf(current_task, sizeof(current_task), "%s[%d]",
-get_task_comm(comm, current),
-task_pid_vnr(current));
+current->comm, task_pid_vnr(current));

 if (i915->display.access.allowed_task)
 snprintf(allowed_task, sizeof(allowed_task), "%s[%d]",
-get_task_comm(comm, i915->display.access.allowed_task),
+i915->display.access.allowed_task->comm,
 task_pid_vnr(i915->display.access.allowed_task));

 drm_dbg_kms(&i915->drm,
@@ -279,7 +279,6 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
 const u64 plength = 0x10000;
 const u64 ioffset = plength;
 const u64 ilength = 0x02000;
-char name[TASK_COMM_LEN];
 int cid, ret;
 u64 size;

@@ -338,8 +337,7 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm,
 chan->userd = &chan->user;
 }

-get_task_comm(name, current);
-snprintf(args.name, sizeof(args.name), "%s[%d]", name, task_pid_nr(current));
+snprintf(args.name, sizeof(args.name), "%s[%d]", current->comm, task_pid_nr(current));

 ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass,
 &args, sizeof(args), &chan->user);

@@ -1159,7 +1159,7 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 {
 struct nouveau_drm *drm = nouveau_drm(dev);
 struct nouveau_cli *cli;
-char name[32], tmpname[TASK_COMM_LEN];
+char name[32];
 int ret;

 /* need to bring up power immediately if opening device */

@@ -1169,10 +1169,9 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 return ret;
 }

-get_task_comm(tmpname, current);
 rcu_read_lock();
 snprintf(name, sizeof(name), "%s[%d]",
-tmpname, pid_nr(rcu_dereference(fpriv->pid)));
+current->comm, pid_nr(rcu_dereference(fpriv->pid)));
 rcu_read_unlock();

 if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) {
@@ -514,7 +514,7 @@ static int wait_for_lmem_ready(struct xe_device *xe)
 drm_dbg(&xe->drm, "Waiting for lmem initialization\n");

 start = jiffies;
-timeout = start + msecs_to_jiffies(60 * 1000); /* 60 sec! */
+timeout = start + secs_to_jiffies(60); /* 60 sec! */

 do {
 if (signal_pending(current))
@@ -92,7 +92,7 @@ struct iowait_work {
 *
 * The lock field is used by waiters to record
 * the seqlock_t that guards the list head.
-* Waiters explicity know that, but the destroy
+* Waiters explicitly know that, but the destroy
 * code that unwaits QPs does not.
 */
 struct iowait {
@@ -72,7 +72,7 @@ struct usnic_ib_create_qp_resp {
 u64 bar_bus_addr;
 u32 bar_len;
 /*
-* WQ, RQ, CQ are explicity specified bc exposing a generic resources inteface
+* WQ, RQ, CQ are explicitly specified bc exposing a generic resources inteface
 * expands the scope of ABI to many files.
 */
 u32 wq_cnt;
@@ -178,7 +178,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
 * received 'update stats' event, we keep a 3 seconds timeout in case,
 * fw_stats_done is not marked yet
 */
-timeout = jiffies + msecs_to_jiffies(3 * 1000);
+timeout = jiffies + secs_to_jiffies(3);

 ath11k_debugfs_fw_stats_reset(ar);

@@ -1810,7 +1810,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
 rfi->cur_idx = cur_idx;
 }
 } else {
-/* explicity window move updating the expected index */
+/* explicitly window move updating the expected index */
 exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];

 brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",
@@ -1044,7 +1044,7 @@ static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
 static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
 {
 timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
-pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
+pacb->refresh_timer.expires = jiffies + secs_to_jiffies(60);
 add_timer(&pacb->refresh_timer);
 }

@@ -966,7 +966,7 @@ static int cxlflash_disk_detach(struct scsi_device *sdev, void *detach)
 *
 * This routine is the release handler for the fops registered with
 * the CXL services on an initial attach for a context. It is called
-* when a close (explicity by the user or as part of a process tear
+* when a close (explicitly by the user or as part of a process tear
 * down) is performed on the adapter file descriptor returned to the
 * user. The user should be aware that explicitly performing a close
 * considered catastrophic and subsequent usage of the superpipe API
@@ -598,7 +598,7 @@ lpfc_config_port_post(phba)
 jiffies + msecs_to_jiffies(1000 * timeout));
 /* Set up heart beat (HB) timer */
 mod_timer(&phba->hb_tmofunc,
-jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
 clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
 clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
 phba->last_completion_time = jiffies;

@@ -1267,7 +1267,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 !test_bit(FC_UNLOADING, &phba->pport->load_flag))
 mod_timer(&phba->hb_tmofunc,
 jiffies +
-msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
 return;
 }

@@ -1555,7 +1555,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
 /* If IOs are completing, no need to issue a MBX_HEARTBEAT */
 spin_lock_irq(&phba->pport->work_port_lock);
 if (time_after(phba->last_completion_time +
-msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+secs_to_jiffies(LPFC_HB_MBOX_INTERVAL),
 jiffies)) {
 spin_unlock_irq(&phba->pport->work_port_lock);
 if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))

@@ -3354,7 +3354,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
 spin_unlock_irqrestore(&phba->hbalock, iflag);
 if (mbx_action == LPFC_MBX_NO_WAIT)
 return;
-timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies;
 spin_lock_irqsave(&phba->hbalock, iflag);
 if (phba->sli.mbox_active) {
 actcmd = phba->sli.mbox_active->u.mb.mbxCommand;

@@ -4924,14 +4924,14 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
 stat = 1;
 goto finished;
 }
-if (time >= msecs_to_jiffies(30 * 1000)) {
+if (time >= secs_to_jiffies(30)) {
 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 "0461 Scanning longer than 30 "
 "seconds. Continuing initialization\n");
 stat = 1;
 goto finished;
 }
-if (time >= msecs_to_jiffies(15 * 1000) &&
+if (time >= secs_to_jiffies(15) &&
 phba->link_state <= LPFC_LINK_DOWN) {
 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 "0465 Link down longer than 15 "

@@ -4945,7 +4945,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
 if (vport->num_disc_nodes || vport->fc_prli_sent)
 goto finished;
 if (!atomic_read(&vport->fc_map_cnt) &&
-time < msecs_to_jiffies(2 * 1000))
+time < secs_to_jiffies(2))
 goto finished;
 if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
 goto finished;

@@ -5179,8 +5179,8 @@ lpfc_vmid_poll(struct timer_list *t)
 lpfc_worker_wake_up(phba);

 /* restart the timer for the next iteration */
-mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
-LPFC_VMID_TIMER));
+mod_timer(&phba->inactive_vmid_poll,
+jiffies + secs_to_jiffies(LPFC_VMID_TIMER));
 }

 /**

@@ -906,7 +906,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 (ndlp->nlp_state >= NLP_STE_ADISC_ISSUE ||
 ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) {
 mod_timer(&ndlp->nlp_delayfunc,
-jiffies + msecs_to_jiffies(1000 * 1));
+jiffies + secs_to_jiffies(1));
 set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
 lpfc_printf_vlog(vport, KERN_INFO,

@@ -1332,7 +1332,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 }

 /* Put ndlp in npr state set plogi timer for 1 sec */
-mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
+mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1));
 set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
 ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;

@@ -1936,7 +1936,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,

 /* Put ndlp in npr state set plogi timer for 1 sec */
 mod_timer(&ndlp->nlp_delayfunc,
-jiffies + msecs_to_jiffies(1000 * 1));
+jiffies + secs_to_jiffies(1));
 set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

@@ -2743,7 +2743,7 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,

 if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) {
 mod_timer(&ndlp->nlp_delayfunc,
-jiffies + msecs_to_jiffies(1000 * 1));
+jiffies + secs_to_jiffies(1));
 set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
 clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
 ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

@@ -2237,7 +2237,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
 * wait. Print a message if a 10 second wait expires and renew the
 * wait. This is unexpected.
 */
-wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
+wait_tmo = secs_to_jiffies(LPFC_NVME_WAIT_TMO);
 while (true) {
 ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
 if (unlikely(!ret)) {

@@ -9012,7 +9012,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)

 /* Start heart beat timer */
 mod_timer(&phba->hb_tmofunc,
-jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
 clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
 clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
 phba->last_completion_time = jiffies;

@@ -13323,7 +13323,7 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
 lpfc_sli_mbox_sys_flush(phba);
 return;
 }
-timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
+timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies;

 /* Disable softirqs, including timers from obtaining phba->hbalock */
 local_bh_disable();

@@ -278,7 +278,7 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
 if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
 mod_timer(&vport->phba->inactive_vmid_poll,
 jiffies +
-msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
+secs_to_jiffies(LPFC_VMID_TIMER));
 vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
 }
 }
@@ -736,7 +736,7 @@ static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
 return -EIO;
 }
 time_remaining = wait_for_completion_timeout(&completion,
-msecs_to_jiffies(60*1000)); // 1 min
+secs_to_jiffies(60)); // 1 min
 if (!time_remaining) {
 kfree(payload.func_specific);
 pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n");
@@ -59,7 +59,7 @@ static int bcm2835_audio_send_msg_locked(struct bcm2835_audio_instance *instance

 if (wait) {
 if (!wait_for_completion_timeout(&instance->msg_avail_comp,
-msecs_to_jiffies(10 * 1000))) {
+secs_to_jiffies(10))) {
 dev_err(instance->dev,
 "vchi message timeout, msg=%d\n", m->type);
 return -ETIMEDOUT;
|
|||||||
|
|
||||||
static int tty_set_serial(struct tty_struct *tty, struct serial_struct *ss)
|
static int tty_set_serial(struct tty_struct *tty, struct serial_struct *ss)
|
||||||
{
|
{
|
||||||
char comm[TASK_COMM_LEN];
|
|
||||||
int flags;
|
int flags;
|
||||||
|
|
||||||
flags = ss->flags & ASYNC_DEPRECATED;
|
flags = ss->flags & ASYNC_DEPRECATED;
|
||||||
|
|
||||||
if (flags)
|
if (flags)
|
||||||
pr_warn_ratelimited("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
|
pr_warn_ratelimited("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n",
|
||||||
__func__, get_task_comm(comm, current), flags);
|
__func__, current->comm, flags);
|
||||||
|
|
||||||
if (!tty->ops->set_serial)
|
if (!tty->ops->set_serial)
|
||||||
return -ENOTTY;
|
return -ENOTTY;
|
||||||
|
@@ -166,7 +166,7 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
 if (IS_ERR(in)) {
 doutc(cl, "Can't lookup inode %llx (err: %ld)\n", realm->ino,
 PTR_ERR(in));
-qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
+qri->timeout = jiffies + secs_to_jiffies(60); /* XXX */
 } else {
 qri->timeout = 0;
 qri->inode = in;
@@ -21,6 +21,8 @@
 * nilfs_palloc_groups_per_desc_block - get the number of groups that a group
 * descriptor block can maintain
 * @inode: inode of metadata file using this allocator
+*
+* Return: Number of groups that a group descriptor block can maintain.
 */
 static inline unsigned long
 nilfs_palloc_groups_per_desc_block(const struct inode *inode)

@@ -32,6 +34,8 @@ nilfs_palloc_groups_per_desc_block(const struct inode *inode)
 /**
 * nilfs_palloc_groups_count - get maximum number of groups
 * @inode: inode of metadata file using this allocator
+*
+* Return: Maximum number of groups.
 */
 static inline unsigned long
 nilfs_palloc_groups_count(const struct inode *inode)

@@ -43,6 +47,8 @@ nilfs_palloc_groups_count(const struct inode *inode)
 * nilfs_palloc_init_blockgroup - initialize private variables for allocator
 * @inode: inode of metadata file using this allocator
 * @entry_size: size of the persistent object
+*
+* Return: 0 on success, or a negative error code on failure.
 */
 int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size)
 {

@@ -78,6 +84,9 @@ int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size)
 * @inode: inode of metadata file using this allocator
 * @nr: serial number of the entry (e.g. inode number)
 * @offset: pointer to store offset number in the group
+*
+* Return: Number of the group that contains the entry with the index
+* specified by @nr.
 */
 static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
 unsigned long *offset)

@@ -93,8 +102,8 @@ static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr,
 * @inode: inode of metadata file using this allocator
 * @group: group number
 *
-* nilfs_palloc_desc_blkoff() returns block offset of the descriptor
-* block which contains a descriptor of the specified group.
+* Return: Index number in the metadata file of the descriptor block of
+* the group specified by @group.
 */
 static unsigned long
 nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)

@@ -111,6 +120,9 @@ nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group)
 *
 * nilfs_palloc_bitmap_blkoff() returns block offset of the bitmap
 * block used to allocate/deallocate entries in the specified group.
+*
+* Return: Index number in the metadata file of the bitmap block of
+* the group specified by @group.
 */
 static unsigned long
 nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)

@@ -125,6 +137,8 @@ nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group)
 * nilfs_palloc_group_desc_nfrees - get the number of free entries in a group
 * @desc: pointer to descriptor structure for the group
 * @lock: spin lock protecting @desc
+*
+* Return: Number of free entries written in the group descriptor @desc.
 */
 static unsigned long
 nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc,

@@ -143,6 +157,9 @@ nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc,
 * @desc: pointer to descriptor structure for the group
 * @lock: spin lock protecting @desc
 * @n: delta to be added
+*
+* Return: Number of free entries after adjusting the group descriptor
+* @desc.
 */
 static u32
 nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc,

@@ -161,6 +178,9 @@ nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc,
 * nilfs_palloc_entry_blkoff - get block offset of an entry block
 * @inode: inode of metadata file using this allocator
 * @nr: serial number of the entry (e.g. inode number)
+*
+* Return: Index number in the metadata file of the block containing
+* the entry specified by @nr.
 */
 static unsigned long
 nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr)

@@ -238,6 +258,12 @@ static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff,
 * @blkoff: block offset
 * @prev: nilfs_bh_assoc struct of the last used buffer
 * @lock: spin lock protecting @prev
+*
+* Return: 0 on success, or one of the following negative error codes on
+* failure:
+* * %-EIO - I/O error (including metadata corruption).
+* * %-ENOENT - Non-existent block.
+* * %-ENOMEM - Insufficient memory available.
 */
 static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff,
 struct nilfs_bh_assoc *prev,

@@ -258,6 +284,8 @@ static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff,
 * @group: group number
 * @create: create flag
 * @bhp: pointer to store the resultant buffer head
+*
+* Return: 0 on success, or a negative error code on failure.
 */
 static int nilfs_palloc_get_desc_block(struct inode *inode,
 unsigned long group,

@@ -277,6 +305,8 @@ static int nilfs_palloc_get_desc_block(struct inode *inode,
 * @group: group number
 * @create: create flag
 * @bhp: pointer to store the resultant buffer head
+*
+* Return: 0 on success, or a negative error code on failure.
 */
 static int nilfs_palloc_get_bitmap_block(struct inode *inode,
 unsigned long group,

@@ -294,6 +324,8 @@ static int nilfs_palloc_get_bitmap_block(struct inode *inode,
 * nilfs_palloc_delete_bitmap_block - delete a bitmap block
 * @inode: inode of metadata file using this allocator
 * @group: group number
+*
+* Return: 0 on success, or a negative error code on failure.
 */
 static int nilfs_palloc_delete_bitmap_block(struct inode *inode,
 unsigned long group)
||||||
@ -312,6 +344,8 @@ static int nilfs_palloc_delete_bitmap_block(struct inode *inode,
|
|||||||
* @nr: serial number of the entry (e.g. inode number)
|
* @nr: serial number of the entry (e.g. inode number)
|
||||||
* @create: create flag
|
* @create: create flag
|
||||||
* @bhp: pointer to store the resultant buffer head
|
* @bhp: pointer to store the resultant buffer head
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or a negative error code on failure.
|
||||||
*/
|
*/
|
||||||
int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
|
int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
|
||||||
int create, struct buffer_head **bhp)
|
int create, struct buffer_head **bhp)
|
||||||
@ -328,6 +362,8 @@ int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr,
|
|||||||
* nilfs_palloc_delete_entry_block - delete an entry block
|
* nilfs_palloc_delete_entry_block - delete an entry block
|
||||||
* @inode: inode of metadata file using this allocator
|
* @inode: inode of metadata file using this allocator
|
||||||
* @nr: serial number of the entry
|
* @nr: serial number of the entry
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or a negative error code on failure.
|
||||||
*/
|
*/
|
||||||
static int nilfs_palloc_delete_entry_block(struct inode *inode, __u64 nr)
|
static int nilfs_palloc_delete_entry_block(struct inode *inode, __u64 nr)
|
||||||
{
|
{
|
||||||
@ -397,6 +433,9 @@ size_t nilfs_palloc_entry_offset(const struct inode *inode, __u64 nr,
|
|||||||
* @bsize: size in bits
|
* @bsize: size in bits
|
||||||
* @lock: spin lock protecting @bitmap
|
* @lock: spin lock protecting @bitmap
|
||||||
* @wrap: whether to wrap around
|
* @wrap: whether to wrap around
|
||||||
|
*
|
||||||
|
* Return: Offset number within the group of the found free entry, or
|
||||||
|
* %-ENOSPC if not found.
|
||||||
*/
|
*/
|
||||||
static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
|
static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
|
||||||
unsigned long target,
|
unsigned long target,
|
||||||
@ -438,6 +477,9 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap,
|
|||||||
* @inode: inode of metadata file using this allocator
|
* @inode: inode of metadata file using this allocator
|
||||||
* @curr: current group number
|
* @curr: current group number
|
||||||
* @max: maximum number of groups
|
* @max: maximum number of groups
|
||||||
|
*
|
||||||
|
* Return: Number of remaining descriptors (= groups) managed by the descriptor
|
||||||
|
* block.
|
||||||
*/
|
*/
|
||||||
static unsigned long
|
static unsigned long
|
||||||
nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
|
nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
|
||||||
@ -453,6 +495,8 @@ nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode,
|
|||||||
* nilfs_palloc_count_desc_blocks - count descriptor blocks number
|
* nilfs_palloc_count_desc_blocks - count descriptor blocks number
|
||||||
* @inode: inode of metadata file using this allocator
|
* @inode: inode of metadata file using this allocator
|
||||||
* @desc_blocks: descriptor blocks number [out]
|
* @desc_blocks: descriptor blocks number [out]
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or a negative error code on failure.
|
||||||
*/
|
*/
|
||||||
static int nilfs_palloc_count_desc_blocks(struct inode *inode,
|
static int nilfs_palloc_count_desc_blocks(struct inode *inode,
|
||||||
unsigned long *desc_blocks)
|
unsigned long *desc_blocks)
|
||||||
@ -473,6 +517,8 @@ static int nilfs_palloc_count_desc_blocks(struct inode *inode,
|
|||||||
* MDT file growing
|
* MDT file growing
|
||||||
* @inode: inode of metadata file using this allocator
|
* @inode: inode of metadata file using this allocator
|
||||||
* @desc_blocks: known current descriptor blocks count
|
* @desc_blocks: known current descriptor blocks count
|
||||||
|
*
|
||||||
|
* Return: true if a group can be added in the metadata file, false if not.
|
||||||
*/
|
*/
|
||||||
static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode,
|
static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode,
|
||||||
unsigned long desc_blocks)
|
unsigned long desc_blocks)
|
||||||
@ -487,6 +533,12 @@ static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode,
|
|||||||
* @inode: inode of metadata file using this allocator
|
* @inode: inode of metadata file using this allocator
|
||||||
* @nused: current number of used entries
|
* @nused: current number of used entries
|
||||||
* @nmaxp: max number of entries [out]
|
* @nmaxp: max number of entries [out]
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
|
* failure:
|
||||||
|
* * %-EIO - I/O error (including metadata corruption).
|
||||||
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
|
* * %-ERANGE - Number of entries in use is out of range.
|
||||||
*/
|
*/
|
||||||
int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
|
int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
|
||||||
{
|
{
|
||||||
@ -518,6 +570,13 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp)
|
|||||||
* @inode: inode of metadata file using this allocator
|
* @inode: inode of metadata file using this allocator
|
||||||
* @req: nilfs_palloc_req structure exchanged for the allocation
|
* @req: nilfs_palloc_req structure exchanged for the allocation
|
||||||
* @wrap: whether to wrap around
|
* @wrap: whether to wrap around
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
|
* failure:
|
||||||
|
* * %-EIO - I/O error (including metadata corruption).
|
||||||
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
|
* * %-ENOSPC - Entries exhausted (No entries available for allocation).
|
||||||
|
* * %-EROFS - Read only filesystem
|
||||||
*/
|
*/
|
||||||
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
|
int nilfs_palloc_prepare_alloc_entry(struct inode *inode,
|
||||||
struct nilfs_palloc_req *req, bool wrap)
|
struct nilfs_palloc_req *req, bool wrap)
|
||||||
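The hunks above only reword documentation, but the new Return: lists now spell out the allocator's failure modes. As a purely illustrative sketch (not part of this commit), a metadata-file caller might consume nilfs_palloc_prepare_alloc_entry() as below; the example_* helper is hypothetical, and the exact signature of nilfs_palloc_commit_alloc_entry() is assumed from the surrounding prepare/commit/abort pattern rather than taken from this diff.

/* Illustrative sketch only -- not part of this diff. */
static int example_alloc_entry(struct inode *mdt_inode, __u64 *out_nr)
{
	struct nilfs_palloc_req req = { .pr_entry_nr = 0 };
	int err;

	/* May fail with -EIO, -ENOMEM, -ENOSPC or -EROFS, as documented above. */
	err = nilfs_palloc_prepare_alloc_entry(mdt_inode, &req, true);
	if (err)
		return err;

	/* Assumed commit helper of the prepare/commit/abort trio. */
	nilfs_palloc_commit_alloc_entry(mdt_inode, &req);
	*out_nr = req.pr_entry_nr;
	return 0;
}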
@@ -710,6 +769,8 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode,
  * nilfs_palloc_prepare_free_entry - prepare to deallocate a persistent object
  * @inode: inode of metadata file using this allocator
  * @req: nilfs_palloc_req structure exchanged for the removal
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 int nilfs_palloc_prepare_free_entry(struct inode *inode,
 struct nilfs_palloc_req *req)

@@ -754,6 +815,8 @@ void nilfs_palloc_abort_free_entry(struct inode *inode,
  * @inode: inode of metadata file using this allocator
  * @entry_nrs: array of entry numbers to be deallocated
  * @nitems: number of entries stored in @entry_nrs
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
 {

@@ -21,6 +21,8 @@
  *
  * The number of entries per group is defined by the number of bits
  * that a bitmap block can maintain.
+ *
+ * Return: Number of entries per group.
  */
 static inline unsigned long
 nilfs_palloc_entries_per_group(const struct inode *inode)

120	fs/nilfs2/bmap.c
@@ -47,17 +47,14 @@ static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap,
  * @ptrp: place to store the value associated to @key
  *
  * Description: nilfs_bmap_lookup_at_level() finds a record whose key
- * matches @key in the block at @level of the bmap.
+ * matches @key in the block at @level of the bmap. The record associated
+ * with @key is stored in the place pointed to by @ptrp.
  *
- * Return Value: On success, 0 is returned and the record associated with @key
- * is stored in the place pointed by @ptrp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - A record associated with @key does not exist.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - A record associated with @key does not exist.
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level,
 __u64 *ptrp)
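As an aside to the reworded Return: list above, a hypothetical caller could branch on the documented codes as sketched below; the helper name and the use of level 1 as the lowest bmap level are assumptions, not taken from this commit.

/* Illustrative sketch only -- not part of this diff. */
static int example_key_is_mapped(struct nilfs_bmap *bmap, __u64 key)
{
	__u64 ptr;
	int err = nilfs_bmap_lookup_at_level(bmap, key, 1, &ptr);

	if (!err)
		return 1;		/* record found; ptr holds the value */
	if (err == -ENOENT)
		return 0;		/* no record associated with key */
	return err;			/* -EIO or -ENOMEM, propagate */
}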
@@ -138,14 +135,11 @@ static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr)
  * Description: nilfs_bmap_insert() inserts the new key-record pair specified
  * by @key and @rec into @bmap.
  *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EEXIST - A record associated with @key already exist.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EEXIST - A record associated with @key already exists.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec)
 {

@@ -193,14 +187,11 @@ static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key)
  * Description: nilfs_bmap_seek_key() seeks a valid key on @bmap
  * starting from @start, and stores it to @keyp if found.
  *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - No valid entry was found
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No valid entry was found.
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp)
 {

@@ -236,14 +227,11 @@ int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp)
  * Description: nilfs_bmap_delete() deletes the key-record pair specified by
  * @key from @bmap.
  *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - A record associated with @key does not exist.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - A record associated with @key does not exist.
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key)
 {

@@ -290,12 +278,10 @@ static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, __u64 key)
  * Description: nilfs_bmap_truncate() removes key-record pairs whose keys are
  * greater than or equal to @key from @bmap.
  *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key)
 {

@@ -330,12 +316,10 @@ void nilfs_bmap_clear(struct nilfs_bmap *bmap)
  * Description: nilfs_bmap_propagate() marks the buffers that directly or
  * indirectly refer to the block specified by @bh dirty.
  *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh)
 {

@@ -362,22 +346,22 @@ void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *bmap,

 /**
  * nilfs_bmap_assign - assign a new block number to a block
  * @bmap: bmap
- * @bh: pointer to buffer head
+ * @bh: place to store a pointer to the buffer head to which a block
+ * address is assigned (in/out)
  * @blocknr: block number
  * @binfo: block information
  *
  * Description: nilfs_bmap_assign() assigns the block number @blocknr to the
- * buffer specified by @bh.
+ * buffer specified by @bh. The block information is stored in the memory
+ * pointed to by @binfo, and the buffer head may be replaced as a block
+ * address is assigned, in which case a pointer to the new buffer head is
+ * stored in the memory pointed to by @bh.
  *
- * Return Value: On success, 0 is returned and the buffer head of a newly
- * create buffer and the block information associated with the buffer are
- * stored in the place pointed by @bh and @binfo, respectively. On error, one
- * of the following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_bmap_assign(struct nilfs_bmap *bmap,
 struct buffer_head **bh,

@@ -402,12 +386,10 @@ int nilfs_bmap_assign(struct nilfs_bmap *bmap,
  * Description: nilfs_bmap_mark() marks the block specified by @key and @level
  * as dirty.
  *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level)
 {

@@ -430,7 +412,7 @@ int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level)
  * Description: nilfs_test_and_clear() is the atomic operation to test and
  * clear the dirty state of @bmap.
  *
- * Return Value: 1 is returned if @bmap is dirty, or 0 if clear.
+ * Return: 1 if @bmap is dirty, or 0 if clear.
  */
 int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap)
 {

@@ -490,10 +472,10 @@ static struct lock_class_key nilfs_bmap_mdt_lock_key;
  *
  * Description: nilfs_bmap_read() initializes the bmap @bmap.
  *
- * Return Value: On success, 0 is returned. On error, the following negative
- * error code is returned.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (corrupted bmap).
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode)
 {
@@ -201,7 +201,8 @@ void nilfs_btnode_delete(struct buffer_head *bh)
  * Note that the current implementation does not support folio sizes larger
  * than the page size.
  *
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
  * * %-EIO - I/O error (metadata corruption).
  * * %-ENOMEM - Insufficient memory available.
  */

@@ -334,7 +334,7 @@ static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node,
  * @inode: host inode of btree
  * @blocknr: block number
  *
- * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
+ * Return: 0 if normal, 1 if the node is broken.
  */
 static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
 size_t size, struct inode *inode,

@@ -366,7 +366,7 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node,
  * @node: btree root node to be examined
  * @inode: host inode of btree
  *
- * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned.
+ * Return: 0 if normal, 1 if the root node is broken.
  */
 static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
 struct inode *inode)

@@ -652,8 +652,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree,
  * @minlevel: start level
  * @nextkey: place to store the next valid key
  *
- * Return Value: If a next key was found, 0 is returned. Otherwise,
- * -ENOENT is returned.
+ * Return: 0 if the next key was found, %-ENOENT if not found.
  */
 static int nilfs_btree_get_next_key(const struct nilfs_bmap *btree,
 const struct nilfs_btree_path *path,
@@ -191,14 +191,11 @@ static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
  * @cnop: place to store the next checkpoint number
  * @bhp: place to store a pointer to buffer_head struct
  *
- * Return Value: On success, it returns 0. On error, the following negative
- * error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EIO - I/O error
- *
- * %-ENOENT - no block exists in the range.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - no block exists in the range.
+ * * %-ENOMEM - Insufficient memory available.
  */
 static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile,
 __u64 start_cno, __u64 end_cno,

@@ -239,7 +236,8 @@ static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
  * stores it to the inode file given by @ifile and the nilfs root object
  * given by @root.
  *
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
  * * %-EINVAL - Invalid checkpoint.
  * * %-ENOMEM - Insufficient memory available.
  * * %-EIO - I/O error (including metadata corruption).

@@ -307,7 +305,8 @@ out_sem:
  * In either case, the buffer of the block containing the checkpoint entry
  * and the cpfile inode are made dirty for inclusion in the write log.
  *
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
  * * %-ENOMEM - Insufficient memory available.
  * * %-EIO - I/O error (including metadata corruption).
  * * %-EROFS - Read only filesystem

@@ -376,7 +375,8 @@ out_sem:
  * cpfile with the data given by the arguments @root, @blkinc, @ctime, and
  * @minor.
  *
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
  * * %-ENOMEM - Insufficient memory available.
  * * %-EIO - I/O error (including metadata corruption).
  */

@@ -447,14 +447,11 @@ error:
  * the period from @start to @end, excluding @end itself. The checkpoints
  * which have been already deleted are ignored.
  *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - invalid checkpoints.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Invalid checkpoints.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
 __u64 start,

@@ -718,7 +715,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
  * number to continue searching.
  *
  * Return: Count of checkpoint info items stored in the output buffer on
- * success, or the following negative error code on failure.
+ * success, or one of the following negative error codes on failure:
  * * %-EINVAL - Invalid checkpoint mode.
  * * %-ENOMEM - Insufficient memory available.
  * * %-EIO - I/O error (including metadata corruption).

@@ -743,7 +740,8 @@ ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
  * @cpfile: checkpoint file inode
  * @cno: checkpoint number to delete
  *
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
  * * %-EBUSY - Checkpoint in use (snapshot specified).
  * * %-EIO - I/O error (including metadata corruption).
  * * %-ENOENT - No valid checkpoint found.

@@ -1011,7 +1009,7 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
  * @cno: checkpoint number
  *
  * Return: 1 if the checkpoint specified by @cno is a snapshot, 0 if not, or
- * the following negative error code on failure.
+ * one of the following negative error codes on failure:
  * * %-EIO - I/O error (including metadata corruption).
  * * %-ENOENT - No such checkpoint.
  * * %-ENOMEM - Insufficient memory available.

@@ -1058,14 +1056,11 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
  * Description: nilfs_change_cpmode() changes the mode of the checkpoint
  * specified by @cno. The mode @mode is NILFS_CHECKPOINT or NILFS_SNAPSHOT.
  *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - No such checkpoint.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - No such checkpoint.
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
 {

@@ -1097,14 +1092,12 @@ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
  * @cpstat: pointer to a structure of checkpoint statistics
  *
  * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
+ * The checkpoint statistics are stored in the location pointed to by @cpstat.
  *
- * Return Value: On success, 0 is returned, and checkpoints information is
- * stored in the place pointed by @cpstat. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
 {

@@ -1135,6 +1128,8 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
  * @cpsize: size of a checkpoint entry
  * @raw_inode: on-disk cpfile inode
  * @inodep: buffer to store the inode
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
 struct nilfs_inode *raw_inode, struct inode **inodep)
@@ -276,7 +276,8 @@ void nilfs_dat_abort_update(struct inode *dat,
  * @dat: DAT file inode
  * @vblocknr: virtual block number
  *
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
  * * %-EINVAL - Invalid DAT entry (internal code).
  * * %-EIO - I/O error (including metadata corruption).
  * * %-ENOMEM - Insufficient memory available.

@@ -302,14 +303,11 @@ int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr)
  * Description: nilfs_dat_freev() frees the virtual block numbers specified by
  * @vblocknrs and @nitems.
  *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - The virtual block number have not been allocated.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - The virtual block number have not been allocated.
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
 {

@@ -325,12 +323,10 @@ int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems)
  * Description: nilfs_dat_move() changes the block number associated with
  * @vblocknr to @blocknr.
  *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
 {

@@ -390,17 +386,14 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
  * @blocknrp: pointer to a block number
  *
  * Description: nilfs_dat_translate() maps the virtual block number @vblocknr
- * to the corresponding block number.
+ * to the corresponding block number. The block number associated with
+ * @vblocknr is stored in the place pointed to by @blocknrp.
  *
- * Return Value: On success, 0 is returned and the block number associated
- * with @vblocknr is stored in the place pointed by @blocknrp. On error, one
- * of the following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - A block number associated with @vblocknr does not exist.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - A block number associated with @vblocknr does not exist.
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
 {

@@ -489,6 +482,8 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz,
  * @entry_size: size of a dat entry
  * @raw_inode: on-disk dat inode
  * @inodep: buffer to store the inode
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 int nilfs_dat_read(struct super_block *sb, size_t entry_size,
 struct nilfs_inode *raw_inode, struct inode **inodep)
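As a hedged illustration of the clarified nilfs_dat_translate() contract above, a caller could distinguish an unmapped virtual block from a real failure roughly as follows; the wrapper name and logging are hypothetical, not part of this commit.

/* Illustrative sketch only -- not part of this diff. */
static int example_vblocknr_to_blocknr(struct inode *dat, __u64 vblocknr,
				       sector_t *blocknrp)
{
	int err = nilfs_dat_translate(dat, vblocknr, blocknrp);

	if (err == -ENOENT)		/* vblocknr has no on-disk block */
		pr_debug("nilfs2 example: vblocknr %llu unmapped\n",
			 (unsigned long long)vblocknr);
	return err;			/* 0, -EIO, -ENOENT or -ENOMEM */
}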
@@ -400,7 +400,7 @@ int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino)
 	return 0;
 }

-void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
 		    struct folio *folio, struct inode *inode)
 {
 	size_t from = offset_in_folio(folio, de);

@@ -410,11 +410,15 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,

 	folio_lock(folio);
 	err = nilfs_prepare_chunk(folio, from, to);
-	BUG_ON(err);
+	if (unlikely(err)) {
+		folio_unlock(folio);
+		return err;
+	}
 	de->inode = cpu_to_le64(inode->i_ino);
 	de->file_type = fs_umode_to_ftype(inode->i_mode);
 	nilfs_commit_chunk(folio, mapping, from, to);
 	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+	return 0;
 }

 /*

@@ -543,7 +547,10 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct folio *folio)
 	from = (char *)pde - kaddr;
 	folio_lock(folio);
 	err = nilfs_prepare_chunk(folio, from, to);
-	BUG_ON(err);
+	if (unlikely(err)) {
+		folio_unlock(folio);
+		goto out;
+	}
 	if (pde)
 		pde->rec_len = nilfs_rec_len_to_disk(to - from);
 	dir->inode = 0;
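The dir.c hunks above make nilfs_set_link() return an int and replace the BUG_ON() on nilfs_prepare_chunk() failure with proper error propagation. A rename-style caller would now need to check that return value, roughly as sketched below; the helper name and the surrounding locking/cleanup are assumed for illustration only.

/* Illustrative sketch only -- not part of this diff. */
static int example_redirect_entry(struct inode *new_dir,
				  struct nilfs_dir_entry *new_de,
				  struct folio *new_folio,
				  struct inode *old_inode)
{
	int err = nilfs_set_link(new_dir, new_de, new_folio, old_inode);

	if (unlikely(err))
		return err;	/* nilfs_prepare_chunk() failed; abort the rename */

	inode_set_ctime_current(old_inode);
	return 0;
}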
@@ -46,14 +46,11 @@
  * specified by @pbn to the GC pagecache with the key @blkoff.
  * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer.
  *
- * Return Value: On success, 0 is returned. On Error, one of the following
- * negative error code is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - The block specified with @pbn does not exist.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - The block specified with @pbn does not exist.
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
 sector_t pbn, __u64 vbn,

@@ -114,12 +111,11 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
  * specified by @vbn to the GC pagecache. @pbn can be supplied by the
  * caller to avoid translation of the disk block address.
  *
- * Return Value: On success, 0 is returned. On Error, one of the following
- * negative error code is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - Invalid virtual block address.
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
 __u64 vbn, struct buffer_head **out_bh)
@@ -38,17 +38,16 @@ static inline struct nilfs_ifile_info *NILFS_IFILE_I(struct inode *ifile)
  * @out_ino: pointer to a variable to store inode number
  * @out_bh: buffer_head contains newly allocated disk inode
  *
- * Return Value: On success, 0 is returned and the newly allocated inode
- * number is stored in the place pointed by @ino, and buffer_head pointer
- * that contains newly allocated disk inode structure is stored in the
- * place pointed by @out_bh
- * On error, one of the following negative error codes is returned.
+ * nilfs_ifile_create_inode() allocates a new inode in the ifile metadata
+ * file and stores the inode number in the variable pointed to by @out_ino,
+ * as well as storing the ifile's buffer with the disk inode in the location
+ * pointed to by @out_bh.
  *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOSPC - No inode left.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No inode left.
  */
 int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
 struct buffer_head **out_bh)

@@ -83,14 +82,11 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino,
  * @ifile: ifile inode
  * @ino: inode number
  *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - The inode number @ino have not been allocated.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOENT - Inode number unallocated.
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
 {

@@ -150,6 +146,8 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino,
  * @ifile: ifile inode
  * @nmaxinodes: current maximum of available inodes count [out]
  * @nfreeinodes: free inodes count [out]
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 int nilfs_ifile_count_free_inodes(struct inode *ifile,
 u64 *nmaxinodes, u64 *nfreeinodes)

@@ -174,7 +172,8 @@ int nilfs_ifile_count_free_inodes(struct inode *ifile,
  * @cno: number of checkpoint entry to read
  * @inode_size: size of an inode
  *
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
  * * %-EINVAL - Invalid checkpoint.
  * * %-ENOMEM - Insufficient memory available.
  * * %-EIO - I/O error (including metadata corruption).
@@ -68,6 +68,8 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n)
  *
  * This function does not issue actual read request of the specified data
  * block. It is done by VFS.
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 int nilfs_get_block(struct inode *inode, sector_t blkoff,
 struct buffer_head *bh_result, int create)

@@ -141,6 +143,8 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
  * address_space_operations.
  * @file: file struct of the file to be read
  * @folio: the folio to be read
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 static int nilfs_read_folio(struct file *file, struct folio *folio)
 {

@@ -598,10 +602,7 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino,
  * or does nothing if the inode already has it. This function allocates
  * an additional inode to maintain page cache of B-tree nodes one-on-one.
  *
- * Return Value: On success, 0 is returned. On errors, one of the following
- * negative error code is returned.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: 0 on success, or %-ENOMEM if memory is insufficient.
  */
 int nilfs_attach_btree_node_cache(struct inode *inode)
 {

@@ -660,11 +661,8 @@ void nilfs_detach_btree_node_cache(struct inode *inode)
  * in one inode and the one for b-tree node pages is set up in the
  * other inode, which is attached to the former inode.
  *
- * Return Value: On success, a pointer to the inode for data pages is
- * returned. On errors, one of the following negative error code is returned
- * in a pointer type.
- *
- * %-ENOMEM - Insufficient memory available.
+ * Return: a pointer to the inode for data pages on success, or %-ENOMEM
+ * if memory is insufficient.
  */
 struct inode *nilfs_iget_for_shadow(struct inode *inode)
 {
@ -33,17 +33,14 @@
|
|||||||
* @dofunc: concrete function of get/set metadata info
|
* @dofunc: concrete function of get/set metadata info
|
||||||
*
|
*
|
||||||
* Description: nilfs_ioctl_wrap_copy() gets/sets metadata info by means of
|
* Description: nilfs_ioctl_wrap_copy() gets/sets metadata info by means of
|
||||||
* calling dofunc() function on the basis of @argv argument.
|
* calling dofunc() function on the basis of @argv argument. If successful,
|
||||||
|
* the requested metadata information is copied to userspace memory.
|
||||||
*
|
*
|
||||||
* Return Value: On success, 0 is returned and requested metadata info
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
* is copied into userspace. On error, one of the following
|
* failure:
|
||||||
* negative error codes is returned.
|
* * %-EFAULT - Failure during execution of requested operation.
|
||||||
*
|
* * %-EINVAL - Invalid arguments from userspace.
|
||||||
* %-EINVAL - Invalid arguments from userspace.
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
*
|
|
||||||
* %-ENOMEM - Insufficient amount of memory available.
|
|
||||||
*
|
|
||||||
* %-EFAULT - Failure during execution of requested operation.
|
|
||||||
*/
|
*/
|
||||||
static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
|
static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs,
|
||||||
struct nilfs_argv *argv, int dir,
|
struct nilfs_argv *argv, int dir,
|
||||||
@ -190,13 +187,10 @@ static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp)
|
|||||||
* given checkpoint between checkpoint and snapshot state. This ioctl
|
* given checkpoint between checkpoint and snapshot state. This ioctl
|
||||||
* is used in chcp and mkcp utilities.
|
* is used in chcp and mkcp utilities.
|
||||||
*
|
*
|
||||||
* Return Value: On success, 0 is returned and mode of a checkpoint is
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
* changed. On error, one of the following negative error codes
|
* failure:
|
||||||
* is returned.
|
* %-EFAULT - Failure during checkpoint mode changing.
|
||||||
*
|
* %-EPERM - Operation not permitted.
|
||||||
* %-EPERM - Operation not permitted.
|
|
||||||
*
|
|
||||||
* %-EFAULT - Failure during checkpoint mode changing.
|
|
||||||
*/
|
*/
|
||||||
static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
|
static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
|
||||||
unsigned int cmd, void __user *argp)
|
unsigned int cmd, void __user *argp)
|
||||||
@@ -244,13 +238,10 @@ out:
  * checkpoint from NILFS2 file system. This ioctl is used in rmcp
  * utility.
  *
- * Return Value: On success, 0 is returned and a checkpoint is
- * removed. On error, one of the following negative error codes
- * is returned.
- *
- * %-EPERM - Operation not permitted.
- *
- * %-EFAULT - Failure during checkpoint removing.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * %-EFAULT - Failure during checkpoint removing.
+ * %-EPERM - Operation not permitted.
  */
 static int
 nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp,
@@ -296,7 +287,7 @@ out:
  * requested checkpoints. The NILFS_IOCTL_GET_CPINFO ioctl is used in
  * lscp utility and by nilfs_cleanerd daemon.
  *
- * Return value: count of nilfs_cpinfo structures in output buffer.
+ * Return: Count of nilfs_cpinfo structures in output buffer.
  */
 static ssize_t
 nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
@@ -320,17 +311,14 @@ nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
  *
  * Description: nilfs_ioctl_get_cpstat() returns information about checkpoints.
  * The NILFS_IOCTL_GET_CPSTAT ioctl is used by lscp, rmcp utilities
- * and by nilfs_cleanerd daemon.
+ * and by nilfs_cleanerd daemon. The checkpoint statistics are copied to
+ * the userspace memory pointed to by @argp.
  *
- * Return Value: On success, 0 is returned, and checkpoints information is
- * copied into userspace pointer @argp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EFAULT - Failure during getting checkpoints statistics.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during getting checkpoints statistics.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
  */
 static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp,
 				  unsigned int cmd, void __user *argp)
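For reference, the checkpoint statistics described above can be read from userspace roughly as follows. This is a sketch, not code from this commit; <linux/nilfs2_api.h> and the mount path /mnt/nilfs are assumptions.

/* Hypothetical lscp-style query of NILFS_IOCTL_GET_CPSTAT. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nilfs2_api.h>

int main(void)
{
	struct nilfs_cpstat cpstat;
	int fd = open("/mnt/nilfs", O_RDONLY);

	if (fd < 0 || ioctl(fd, NILFS_IOCTL_GET_CPSTAT, &cpstat) < 0) {
		perror("NILFS_IOCTL_GET_CPSTAT");
		return 1;
	}
	printf("cno=%llu checkpoints=%llu snapshots=%llu\n",
	       (unsigned long long)cpstat.cs_cno,
	       (unsigned long long)cpstat.cs_ncps,
	       (unsigned long long)cpstat.cs_nsss);
	close(fd);
	return 0;
}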
@@ -363,7 +351,8 @@ static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp,
  * info about requested segments. The NILFS_IOCTL_GET_SUINFO ioctl is used
  * in lssu, nilfs_resize utilities and by nilfs_cleanerd daemon.
  *
- * Return value: count of nilfs_suinfo structures in output buffer.
+ * Return: Count of nilfs_suinfo structures in output buffer on success,
+ * or a negative error code on failure.
  */
 static ssize_t
 nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
@@ -387,17 +376,14 @@ nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
  *
  * Description: nilfs_ioctl_get_sustat() returns segment usage statistics.
  * The NILFS_IOCTL_GET_SUSTAT ioctl is used in lssu, nilfs_resize utilities
- * and by nilfs_cleanerd daemon.
+ * and by nilfs_cleanerd daemon. The requested segment usage information is
+ * copied to the userspace memory pointed to by @argp.
  *
- * Return Value: On success, 0 is returned, and segment usage information is
- * copied into userspace pointer @argp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EFAULT - Failure during getting segment usage statistics.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during getting segment usage statistics.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
  */
 static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp,
 				  unsigned int cmd, void __user *argp)
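The segment usage statistics documented above can be exercised the same way. Again this is an illustrative sketch under the same assumed header and mount path, not part of the commit.

/* Hypothetical lssu-style query of NILFS_IOCTL_GET_SUSTAT. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nilfs2_api.h>

int main(void)
{
	struct nilfs_sustat sustat;
	int fd = open("/mnt/nilfs", O_RDONLY);

	if (fd < 0 || ioctl(fd, NILFS_IOCTL_GET_SUSTAT, &sustat) < 0) {
		perror("NILFS_IOCTL_GET_SUSTAT");
		return 1;
	}
	printf("segments=%llu clean=%llu dirty=%llu\n",
	       (unsigned long long)sustat.ss_nsegs,
	       (unsigned long long)sustat.ss_ncleansegs,
	       (unsigned long long)sustat.ss_ndirtysegs);
	close(fd);
	return 0;
}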
@@ -430,7 +416,8 @@ static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp,
  * on virtual block addresses. The NILFS_IOCTL_GET_VINFO ioctl is used
  * by nilfs_cleanerd daemon.
  *
- * Return value: count of nilfs_vinfo structures in output buffer.
+ * Return: Count of nilfs_vinfo structures in output buffer on success, or
+ * a negative error code on failure.
  */
 static ssize_t
 nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
@@ -457,7 +444,8 @@ nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags,
  * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl
  * is used by nilfs_cleanerd daemon.
  *
- * Return value: count of nilfs_bdescs structures in output buffer.
+ * Return: Count of nilfs_bdescs structures in output buffer on success, or
+ * a negative error code on failure.
  */
 static ssize_t
 nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags,
@@ -494,19 +482,15 @@ nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags,
  *
  * Description: nilfs_ioctl_do_get_bdescs() function returns information
  * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl
- * is used by nilfs_cleanerd daemon.
+ * is used by nilfs_cleanerd daemon. If successful, disk block descriptors
+ * are copied to userspace pointer @argp.
  *
- * Return Value: On success, 0 is returned, and disk block descriptors are
- * copied into userspace pointer @argp. On error, one of the following
- * negative error codes is returned.
- *
- * %-EINVAL - Invalid arguments from userspace.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EFAULT - Failure during getting disk block descriptors.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during getting disk block descriptors.
+ * * %-EINVAL - Invalid arguments from userspace.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
  */
 static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp,
 				  unsigned int cmd, void __user *argp)
@@ -540,16 +524,12 @@ static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp,
  * Description: nilfs_ioctl_move_inode_block() function registers data/node
  * buffer in the GC pagecache and submit read request.
  *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - Requested block doesn't exist.
- *
- * %-EEXIST - Blocks conflict is detected.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EEXIST - Block conflict detected.
+ * * %-EIO - I/O error.
+ * * %-ENOENT - Requested block doesn't exist.
+ * * %-ENOMEM - Insufficient memory available.
  */
 static int nilfs_ioctl_move_inode_block(struct inode *inode,
 					struct nilfs_vdesc *vdesc,
@@ -604,8 +584,8 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode,
  * blocks that garbage collector specified with the array of nilfs_vdesc
  * structures and stores them into page caches of GC inodes.
  *
- * Return Value: Number of processed nilfs_vdesc structures or
- * error code, otherwise.
+ * Return: Number of processed nilfs_vdesc structures on success, or
+ * a negative error code on failure.
  */
 static int nilfs_ioctl_move_blocks(struct super_block *sb,
 				   struct nilfs_argv *argv, void *buf)
@@ -682,14 +662,11 @@ static int nilfs_ioctl_move_blocks(struct super_block *sb,
  * in the period from p_start to p_end, excluding p_end itself. The checkpoints
  * which have been already deleted are ignored.
  *
- * Return Value: Number of processed nilfs_period structures or
- * error code, otherwise.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - invalid checkpoints.
+ * Return: Number of processed nilfs_period structures on success, or one of
+ * the following negative error codes on failure:
+ * * %-EINVAL - invalid checkpoints.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
  */
 static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs,
 					  struct nilfs_argv *argv, void *buf)
@@ -717,14 +694,11 @@ static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs,
  * Description: nilfs_ioctl_free_vblocknrs() function frees
  * the virtual block numbers specified by @buf and @argv->v_nmembs.
  *
- * Return Value: Number of processed virtual block numbers or
- * error code, otherwise.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOENT - The virtual block number have not been allocated.
+ * Return: Number of processed virtual block numbers on success, or one of the
+ * following negative error codes on failure:
+ * * %-EIO - I/O error.
+ * * %-ENOENT - Unallocated virtual block number.
+ * * %-ENOMEM - Insufficient memory available.
  */
 static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs,
 				      struct nilfs_argv *argv, void *buf)
@@ -746,14 +720,11 @@ static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs,
  * Description: nilfs_ioctl_mark_blocks_dirty() function marks
  * metadata file or data blocks as dirty.
  *
- * Return Value: Number of processed block descriptors or
- * error code, otherwise.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EIO - I/O error
- *
- * %-ENOENT - the specified block does not exist (hole block)
+ * Return: Number of processed block descriptors on success, or one of the
+ * following negative error codes on failure:
+ * * %-EIO - I/O error.
+ * * %-ENOENT - Non-existent block (hole block).
+ * * %-ENOMEM - Insufficient memory available.
  */
 static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs,
 					 struct nilfs_argv *argv, void *buf)
@@ -852,7 +823,7 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs,
  * from userspace. The NILFS_IOCTL_CLEAN_SEGMENTS ioctl is used by
  * nilfs_cleanerd daemon.
  *
- * Return Value: On success, 0 is returned or error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
  */
 static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
 				      unsigned int cmd, void __user *argp)
@@ -976,20 +947,14 @@ out:
  * and metadata are written out to the device when it successfully
  * returned.
  *
- * Return Value: On success, 0 is retured. On errors, one of the following
- * negative error code is returned.
- *
- * %-EROFS - Read only filesystem.
- *
- * %-EIO - I/O error
- *
- * %-ENOSPC - No space left on device (only in a panic state).
- *
- * %-ERESTARTSYS - Interrupted.
- *
- * %-ENOMEM - Insufficient memory available.
- *
- * %-EFAULT - Failure during execution of requested operation.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during execution of requested operation.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No space left on device (only in a panic state).
+ * * %-ERESTARTSYS - Interrupted.
+ * * %-EROFS - Read only filesystem.
  */
 static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
 			    unsigned int cmd, void __user *argp)
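The sync ioctl documented above forces log construction and, as described, writes data and metadata out before returning. A minimal userspace trigger might look like the sketch below; it is not part of this commit, and the use of a __u64 out-argument for the resulting checkpoint number is an assumption based on the UAPI definition in <linux/nilfs2_api.h>.

/* Hypothetical caller of NILFS_IOCTL_SYNC on an assumed /mnt/nilfs mount. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/types.h>
#include <linux/nilfs2_api.h>

int main(void)
{
	__u64 cno = 0;	/* receives the checkpoint number (assumed semantics) */
	int fd = open("/mnt/nilfs", O_RDONLY);

	if (fd < 0 || ioctl(fd, NILFS_IOCTL_SYNC, &cno) < 0) {
		perror("NILFS_IOCTL_SYNC");
		return 1;
	}
	printf("constructed checkpoint %llu\n", (unsigned long long)cno);
	close(fd);
	return 0;
}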
@@ -1023,7 +988,7 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
  * @filp: file object
  * @argp: pointer on argument from userspace
  *
- * Return Value: On success, 0 is returned or error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
  */
 static int nilfs_ioctl_resize(struct inode *inode, struct file *filp,
 			      void __user *argp)
@@ -1059,7 +1024,7 @@ out:
  * checks the arguments from userspace and calls nilfs_sufile_trim_fs, which
  * performs the actual trim operation.
  *
- * Return Value: On success, 0 is returned or negative error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
  */
 static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp)
 {
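The trim handler above validates the userspace arguments before calling nilfs_sufile_trim_fs. It is assumed here (the hunk itself does not show it) that this handler sits behind the generic FITRIM ioctl, in which case a discard request from userspace would be issued with struct fstrim_range from <linux/fs.h>, roughly as in this sketch:

/* Hypothetical whole-filesystem discard request; path is made up. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	struct fstrim_range range = {
		.start  = 0,
		.len    = ~0ULL,	/* trim the whole filesystem */
		.minlen = 0,
	};
	int fd = open("/mnt/nilfs", O_RDONLY);

	if (fd < 0 || ioctl(fd, FITRIM, &range) < 0) {
		perror("FITRIM");
		return 1;
	}
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
	close(fd);
	return 0;
}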
@@ -1101,7 +1066,7 @@ static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp)
  * of segments in bytes and upper limit of segments in bytes.
  * The NILFS_IOCTL_SET_ALLOC_RANGE is used by nilfs_resize utility.
  *
- * Return Value: On success, 0 is returned or error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
  */
 static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp)
 {
@@ -1152,17 +1117,15 @@ out:
  * @dofunc: concrete function of getting metadata info
  *
  * Description: nilfs_ioctl_get_info() gets metadata info by means of
- * calling dofunc() function.
+ * calling dofunc() function. The requested metadata information is copied
+ * to userspace memory @argp.
  *
- * Return Value: On success, 0 is returned and requested metadata info
- * is copied into userspace. On error, one of the following
- * negative error codes is returned.
- *
- * %-EINVAL - Invalid arguments from userspace.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EFAULT - Failure during execution of requested operation.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EFAULT - Failure during execution of requested operation.
+ * * %-EINVAL - Invalid arguments from userspace.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
  */
 static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
 				unsigned int cmd, void __user *argp,
@@ -1202,18 +1165,14 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp,
  * encapsulated in nilfs_argv and updates the segment usage info
  * according to the flags in nilfs_suinfo_update.
  *
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EPERM - Not enough permissions
- *
- * %-EFAULT - Error copying input data
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EEXIST - Block conflict detected.
+ * * %-EFAULT - Error copying input data.
+ * * %-EINVAL - Invalid values in input (segment number, flags or nblocks).
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-EPERM - Not enough permissions.
  */
 static int nilfs_ioctl_set_suinfo(struct inode *inode, struct file *filp,
 				  unsigned int cmd, void __user *argp)
@@ -1309,7 +1268,8 @@ static int nilfs_ioctl_get_fslabel(struct super_block *sb, void __user *argp)
  * @filp: file object
  * @argp: pointer to userspace memory that contains the volume name
  *
- * Return: 0 on success, or the following negative error code on failure.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
  * * %-EFAULT - Error copying input data.
  * * %-EINVAL - Label length exceeds record size in superblock.
  * * %-EIO - I/O error.
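The volume-label handlers touched above read and write the label stored in the superblock. Assuming they back the generic FS_IOC_GETFSLABEL/FS_IOC_SETFSLABEL interface from <linux/fs.h> (an assumption, not something this hunk states), a userspace caller could look like the following sketch; the label text and mount path are invented.

/* Hypothetical set-then-read of the filesystem label. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	char label[FSLABEL_MAX] = "scratch-volume";	/* made-up label */
	int fd = open("/mnt/nilfs", O_RDONLY);

	if (fd < 0 || ioctl(fd, FS_IOC_SETFSLABEL, label) < 0) {
		perror("FS_IOC_SETFSLABEL");	/* -EINVAL if the label is too long */
		return 1;
	}
	memset(label, 0, sizeof(label));
	if (ioctl(fd, FS_IOC_GETFSLABEL, label) == 0)
		printf("label is now \"%s\"\n", label);
	close(fd);
	return 0;
}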
@ -226,20 +226,21 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
|
|||||||
* @out_bh: output of a pointer to the buffer_head
|
* @out_bh: output of a pointer to the buffer_head
|
||||||
*
|
*
|
||||||
* nilfs_mdt_get_block() looks up the specified buffer and tries to create
|
* nilfs_mdt_get_block() looks up the specified buffer and tries to create
|
||||||
* a new buffer if @create is not zero. On success, the returned buffer is
|
* a new buffer if @create is not zero. If (and only if) this function
|
||||||
* assured to be either existing or formatted using a buffer lock on success.
|
* succeeds, it stores a pointer to the retrieved buffer head in the location
|
||||||
* @out_bh is substituted only when zero is returned.
|
* pointed to by @out_bh.
|
||||||
*
|
*
|
||||||
* Return Value: On success, it returns 0. On error, the following negative
|
* The retrieved buffer may be either an existing one or a newly allocated one.
|
||||||
* error code is returned.
|
* For a newly created buffer, if the callback function argument @init_block
|
||||||
|
* is non-NULL, the callback will be called with the buffer locked to format
|
||||||
|
* the block.
|
||||||
*
|
*
|
||||||
* %-ENOMEM - Insufficient memory available.
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
*
|
* failure:
|
||||||
* %-EIO - I/O error
|
* * %-EIO - I/O error (including metadata corruption).
|
||||||
*
|
* * %-ENOENT - The specified block does not exist (hole block).
|
||||||
* %-ENOENT - the specified block does not exist (hole block)
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
*
|
* * %-EROFS - Read only filesystem (for create mode).
|
||||||
* %-EROFS - Read only filesystem (for create mode)
|
|
||||||
*/
|
*/
|
||||||
int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
|
int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
|
||||||
void (*init_block)(struct inode *,
|
void (*init_block)(struct inode *,
|
||||||
@ -275,14 +276,11 @@ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
|
|||||||
* @out_bh, and block offset to @blkoff, respectively. @out_bh and
|
* @out_bh, and block offset to @blkoff, respectively. @out_bh and
|
||||||
* @blkoff are substituted only when zero is returned.
|
* @blkoff are substituted only when zero is returned.
|
||||||
*
|
*
|
||||||
* Return Value: On success, it returns 0. On error, the following negative
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
* error code is returned.
|
* failure:
|
||||||
*
|
* * %-EIO - I/O error (including metadata corruption).
|
||||||
* %-ENOMEM - Insufficient memory available.
|
* * %-ENOENT - No block was found in the range.
|
||||||
*
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
* %-EIO - I/O error
|
|
||||||
*
|
|
||||||
* %-ENOENT - no block was found in the range
|
|
||||||
*/
|
*/
|
||||||
int nilfs_mdt_find_block(struct inode *inode, unsigned long start,
|
int nilfs_mdt_find_block(struct inode *inode, unsigned long start,
|
||||||
unsigned long end, unsigned long *blkoff,
|
unsigned long end, unsigned long *blkoff,
|
||||||
@ -321,12 +319,11 @@ out:
|
|||||||
* @inode: inode of the meta data file
|
* @inode: inode of the meta data file
|
||||||
* @block: block offset
|
* @block: block offset
|
||||||
*
|
*
|
||||||
* Return Value: On success, zero is returned.
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
* On error, one of the following negative error code is returned.
|
* failure:
|
||||||
*
|
* * %-EIO - I/O error (including metadata corruption).
|
||||||
* %-ENOMEM - Insufficient memory available.
|
* * %-ENOENT - Non-existent block.
|
||||||
*
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
* %-EIO - I/O error
|
|
||||||
*/
|
*/
|
||||||
int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
|
int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
|
||||||
{
|
{
|
||||||
@ -349,12 +346,10 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
|
|||||||
* nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and
|
* nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and
|
||||||
* tries to release the page including the buffer from a page cache.
|
* tries to release the page including the buffer from a page cache.
|
||||||
*
|
*
|
||||||
* Return Value: On success, 0 is returned. On error, one of the following
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
* negative error code is returned.
|
* failure:
|
||||||
*
|
* * %-EBUSY - Page has an active buffer.
|
||||||
* %-EBUSY - page has an active buffer.
|
* * %-ENOENT - Page cache has no page addressed by the offset.
|
||||||
*
|
|
||||||
* %-ENOENT - page cache has no page addressed by the offset.
|
|
||||||
*/
|
*/
|
||||||
int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
|
int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
|
||||||
{
|
{
|
||||||
@ -524,6 +519,8 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size,
|
|||||||
* nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
|
* nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
|
||||||
* @inode: inode of the metadata file
|
* @inode: inode of the metadata file
|
||||||
* @shadow: shadow mapping
|
* @shadow: shadow mapping
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or a negative error code on failure.
|
||||||
*/
|
*/
|
||||||
int nilfs_mdt_setup_shadow_map(struct inode *inode,
|
int nilfs_mdt_setup_shadow_map(struct inode *inode,
|
||||||
struct nilfs_shadow_map *shadow)
|
struct nilfs_shadow_map *shadow)
|
||||||
@ -545,6 +542,8 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
|
|||||||
/**
|
/**
|
||||||
* nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
|
* nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
|
||||||
* @inode: inode of the metadata file
|
* @inode: inode of the metadata file
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or a negative error code on failure.
|
||||||
*/
|
*/
|
||||||
int nilfs_mdt_save_to_shadow_map(struct inode *inode)
|
int nilfs_mdt_save_to_shadow_map(struct inode *inode)
|
||||||
{
|
{
|
||||||
|
@ -370,6 +370,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
|
|||||||
struct folio *old_folio;
|
struct folio *old_folio;
|
||||||
struct nilfs_dir_entry *old_de;
|
struct nilfs_dir_entry *old_de;
|
||||||
struct nilfs_transaction_info ti;
|
struct nilfs_transaction_info ti;
|
||||||
|
bool old_is_dir = S_ISDIR(old_inode->i_mode);
|
||||||
int err;
|
int err;
|
||||||
|
|
||||||
if (flags & ~RENAME_NOREPLACE)
|
if (flags & ~RENAME_NOREPLACE)
|
||||||
@ -385,7 +386,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (S_ISDIR(old_inode->i_mode)) {
|
if (old_is_dir && old_dir != new_dir) {
|
||||||
err = -EIO;
|
err = -EIO;
|
||||||
dir_de = nilfs_dotdot(old_inode, &dir_folio);
|
dir_de = nilfs_dotdot(old_inode, &dir_folio);
|
||||||
if (!dir_de)
|
if (!dir_de)
|
||||||
@ -397,7 +398,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
|
|||||||
struct nilfs_dir_entry *new_de;
|
struct nilfs_dir_entry *new_de;
|
||||||
|
|
||||||
err = -ENOTEMPTY;
|
err = -ENOTEMPTY;
|
||||||
if (dir_de && !nilfs_empty_dir(new_inode))
|
if (old_is_dir && !nilfs_empty_dir(new_inode))
|
||||||
goto out_dir;
|
goto out_dir;
|
||||||
|
|
||||||
new_de = nilfs_find_entry(new_dir, &new_dentry->d_name,
|
new_de = nilfs_find_entry(new_dir, &new_dentry->d_name,
|
||||||
@ -406,11 +407,13 @@ static int nilfs_rename(struct mnt_idmap *idmap,
|
|||||||
err = PTR_ERR(new_de);
|
err = PTR_ERR(new_de);
|
||||||
goto out_dir;
|
goto out_dir;
|
||||||
}
|
}
|
||||||
nilfs_set_link(new_dir, new_de, new_folio, old_inode);
|
err = nilfs_set_link(new_dir, new_de, new_folio, old_inode);
|
||||||
folio_release_kmap(new_folio, new_de);
|
folio_release_kmap(new_folio, new_de);
|
||||||
|
if (unlikely(err))
|
||||||
|
goto out_dir;
|
||||||
nilfs_mark_inode_dirty(new_dir);
|
nilfs_mark_inode_dirty(new_dir);
|
||||||
inode_set_ctime_current(new_inode);
|
inode_set_ctime_current(new_inode);
|
||||||
if (dir_de)
|
if (old_is_dir)
|
||||||
drop_nlink(new_inode);
|
drop_nlink(new_inode);
|
||||||
drop_nlink(new_inode);
|
drop_nlink(new_inode);
|
||||||
nilfs_mark_inode_dirty(new_inode);
|
nilfs_mark_inode_dirty(new_inode);
|
||||||
@ -418,7 +421,7 @@ static int nilfs_rename(struct mnt_idmap *idmap,
|
|||||||
err = nilfs_add_link(new_dentry, old_inode);
|
err = nilfs_add_link(new_dentry, old_inode);
|
||||||
if (err)
|
if (err)
|
||||||
goto out_dir;
|
goto out_dir;
|
||||||
if (dir_de) {
|
if (old_is_dir) {
|
||||||
inc_nlink(new_dir);
|
inc_nlink(new_dir);
|
||||||
nilfs_mark_inode_dirty(new_dir);
|
nilfs_mark_inode_dirty(new_dir);
|
||||||
}
|
}
|
||||||
@ -430,28 +433,28 @@ static int nilfs_rename(struct mnt_idmap *idmap,
|
|||||||
*/
|
*/
|
||||||
inode_set_ctime_current(old_inode);
|
inode_set_ctime_current(old_inode);
|
||||||
|
|
||||||
nilfs_delete_entry(old_de, old_folio);
|
err = nilfs_delete_entry(old_de, old_folio);
|
||||||
|
if (likely(!err)) {
|
||||||
if (dir_de) {
|
if (old_is_dir) {
|
||||||
nilfs_set_link(old_inode, dir_de, dir_folio, new_dir);
|
if (old_dir != new_dir)
|
||||||
folio_release_kmap(dir_folio, dir_de);
|
err = nilfs_set_link(old_inode, dir_de,
|
||||||
drop_nlink(old_dir);
|
dir_folio, new_dir);
|
||||||
|
drop_nlink(old_dir);
|
||||||
|
}
|
||||||
|
nilfs_mark_inode_dirty(old_dir);
|
||||||
}
|
}
|
||||||
folio_release_kmap(old_folio, old_de);
|
|
||||||
|
|
||||||
nilfs_mark_inode_dirty(old_dir);
|
|
||||||
nilfs_mark_inode_dirty(old_inode);
|
nilfs_mark_inode_dirty(old_inode);
|
||||||
|
|
||||||
err = nilfs_transaction_commit(old_dir->i_sb);
|
|
||||||
return err;
|
|
||||||
|
|
||||||
out_dir:
|
out_dir:
|
||||||
if (dir_de)
|
if (dir_de)
|
||||||
folio_release_kmap(dir_folio, dir_de);
|
folio_release_kmap(dir_folio, dir_de);
|
||||||
out_old:
|
out_old:
|
||||||
folio_release_kmap(old_folio, old_de);
|
folio_release_kmap(old_folio, old_de);
|
||||||
out:
|
out:
|
||||||
nilfs_transaction_abort(old_dir->i_sb);
|
if (likely(!err))
|
||||||
|
err = nilfs_transaction_commit(old_dir->i_sb);
|
||||||
|
else
|
||||||
|
nilfs_transaction_abort(old_dir->i_sb);
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -261,8 +261,8 @@ struct nilfs_dir_entry *nilfs_find_entry(struct inode *, const struct qstr *,
|
|||||||
int nilfs_delete_entry(struct nilfs_dir_entry *, struct folio *);
|
int nilfs_delete_entry(struct nilfs_dir_entry *, struct folio *);
|
||||||
int nilfs_empty_dir(struct inode *);
|
int nilfs_empty_dir(struct inode *);
|
||||||
struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct folio **);
|
struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct folio **);
|
||||||
void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
|
int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
|
||||||
struct folio *, struct inode *);
|
struct folio *folio, struct inode *inode);
|
||||||
|
|
||||||
/* file.c */
|
/* file.c */
|
||||||
extern int nilfs_sync_file(struct file *, loff_t, loff_t, int);
|
extern int nilfs_sync_file(struct file *, loff_t, loff_t, int);
|
||||||
|
@ -135,8 +135,7 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
|
|||||||
* nilfs_folio_buffers_clean - Check if a folio has dirty buffers or not.
|
* nilfs_folio_buffers_clean - Check if a folio has dirty buffers or not.
|
||||||
* @folio: Folio to be checked.
|
* @folio: Folio to be checked.
|
||||||
*
|
*
|
||||||
* nilfs_folio_buffers_clean() returns false if the folio has dirty buffers.
|
* Return: false if the folio has dirty buffers, true otherwise.
|
||||||
* Otherwise, it returns true.
|
|
||||||
*/
|
*/
|
||||||
bool nilfs_folio_buffers_clean(struct folio *folio)
|
bool nilfs_folio_buffers_clean(struct folio *folio)
|
||||||
{
|
{
|
||||||
@ -392,6 +391,11 @@ void nilfs_clear_dirty_pages(struct address_space *mapping)
|
|||||||
/**
|
/**
|
||||||
* nilfs_clear_folio_dirty - discard dirty folio
|
* nilfs_clear_folio_dirty - discard dirty folio
|
||||||
* @folio: dirty folio that will be discarded
|
* @folio: dirty folio that will be discarded
|
||||||
|
*
|
||||||
|
* nilfs_clear_folio_dirty() clears working states including dirty state for
|
||||||
|
* the folio and its buffers. If the folio has buffers, clear only if it is
|
||||||
|
* confirmed that none of the buffer heads are busy (none have valid
|
||||||
|
* references and none are locked).
|
||||||
*/
|
*/
|
||||||
void nilfs_clear_folio_dirty(struct folio *folio)
|
void nilfs_clear_folio_dirty(struct folio *folio)
|
||||||
{
|
{
|
||||||
@ -399,10 +403,6 @@ void nilfs_clear_folio_dirty(struct folio *folio)
|
|||||||
|
|
||||||
BUG_ON(!folio_test_locked(folio));
|
BUG_ON(!folio_test_locked(folio));
|
||||||
|
|
||||||
folio_clear_uptodate(folio);
|
|
||||||
folio_clear_mappedtodisk(folio);
|
|
||||||
folio_clear_checked(folio);
|
|
||||||
|
|
||||||
head = folio_buffers(folio);
|
head = folio_buffers(folio);
|
||||||
if (head) {
|
if (head) {
|
||||||
const unsigned long clear_bits =
|
const unsigned long clear_bits =
|
||||||
@ -410,6 +410,25 @@ void nilfs_clear_folio_dirty(struct folio *folio)
|
|||||||
BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
|
BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) |
|
||||||
BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
|
BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) |
|
||||||
BIT(BH_Delay));
|
BIT(BH_Delay));
|
||||||
|
bool busy, invalidated = false;
|
||||||
|
|
||||||
|
recheck_buffers:
|
||||||
|
busy = false;
|
||||||
|
bh = head;
|
||||||
|
do {
|
||||||
|
if (atomic_read(&bh->b_count) | buffer_locked(bh)) {
|
||||||
|
busy = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
} while (bh = bh->b_this_page, bh != head);
|
||||||
|
|
||||||
|
if (busy) {
|
||||||
|
if (invalidated)
|
||||||
|
return;
|
||||||
|
invalidate_bh_lrus();
|
||||||
|
invalidated = true;
|
||||||
|
goto recheck_buffers;
|
||||||
|
}
|
||||||
|
|
||||||
bh = head;
|
bh = head;
|
||||||
do {
|
do {
|
||||||
@ -419,6 +438,9 @@ void nilfs_clear_folio_dirty(struct folio *folio)
|
|||||||
} while (bh = bh->b_this_page, bh != head);
|
} while (bh = bh->b_this_page, bh != head);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
folio_clear_uptodate(folio);
|
||||||
|
folio_clear_mappedtodisk(folio);
|
||||||
|
folio_clear_checked(folio);
|
||||||
__nilfs_clear_folio_dirty(folio);
|
__nilfs_clear_folio_dirty(folio);
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -477,8 +499,9 @@ void __nilfs_clear_folio_dirty(struct folio *folio)
|
|||||||
* This function searches an extent of buffers marked "delayed" which
|
* This function searches an extent of buffers marked "delayed" which
|
||||||
* starts from a block offset equal to or larger than @start_blk. If
|
* starts from a block offset equal to or larger than @start_blk. If
|
||||||
* such an extent was found, this will store the start offset in
|
* such an extent was found, this will store the start offset in
|
||||||
* @blkoff and return its length in blocks. Otherwise, zero is
|
* @blkoff and return its length in blocks.
|
||||||
* returned.
|
*
|
||||||
|
* Return: Length in blocks of found extent, 0 otherwise.
|
||||||
*/
|
*/
|
||||||
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
|
unsigned long nilfs_find_uncommitted_extent(struct inode *inode,
|
||||||
sector_t start_blk,
|
sector_t start_blk,
|
||||||
|
@ -88,6 +88,8 @@ static int nilfs_warn_segment_error(struct super_block *sb, int err)
|
|||||||
* @check_bytes: number of bytes to be checked
|
* @check_bytes: number of bytes to be checked
|
||||||
* @start: DBN of start block
|
* @start: DBN of start block
|
||||||
* @nblock: number of blocks to be checked
|
* @nblock: number of blocks to be checked
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or %-EIO if an I/O error occurs.
|
||||||
*/
|
*/
|
||||||
static int nilfs_compute_checksum(struct the_nilfs *nilfs,
|
static int nilfs_compute_checksum(struct the_nilfs *nilfs,
|
||||||
struct buffer_head *bhs, u32 *sum,
|
struct buffer_head *bhs, u32 *sum,
|
||||||
@ -126,6 +128,11 @@ static int nilfs_compute_checksum(struct the_nilfs *nilfs,
|
|||||||
* @sr_block: disk block number of the super root block
|
* @sr_block: disk block number of the super root block
|
||||||
* @pbh: address of a buffer_head pointer to return super root buffer
|
* @pbh: address of a buffer_head pointer to return super root buffer
|
||||||
* @check: CRC check flag
|
* @check: CRC check flag
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
|
* failure:
|
||||||
|
* * %-EINVAL - Super root block corrupted.
|
||||||
|
* * %-EIO - I/O error.
|
||||||
*/
|
*/
|
||||||
int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
|
int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
|
||||||
struct buffer_head **pbh, int check)
|
struct buffer_head **pbh, int check)
|
||||||
@ -176,6 +183,8 @@ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
|
|||||||
* @nilfs: nilfs object
|
* @nilfs: nilfs object
|
||||||
* @start_blocknr: start block number of the log
|
* @start_blocknr: start block number of the log
|
||||||
* @sum: pointer to return segment summary structure
|
* @sum: pointer to return segment summary structure
|
||||||
|
*
|
||||||
|
* Return: Buffer head pointer, or NULL if an I/O error occurs.
|
||||||
*/
|
*/
|
||||||
static struct buffer_head *
|
static struct buffer_head *
|
||||||
nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
|
nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
|
||||||
@ -195,6 +204,13 @@ nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
|
|||||||
* @seg_seq: sequence number of segment
|
* @seg_seq: sequence number of segment
|
||||||
* @bh_sum: buffer head of summary block
|
* @bh_sum: buffer head of summary block
|
||||||
* @sum: segment summary struct
|
* @sum: segment summary struct
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or one of the following internal codes on failure:
|
||||||
|
* * %NILFS_SEG_FAIL_MAGIC - Magic number mismatch.
|
||||||
|
* * %NILFS_SEG_FAIL_SEQ - Sequence number mismatch.
|
||||||
|
* * %NIFLS_SEG_FAIL_CONSISTENCY - Block count out of range.
|
||||||
|
* * %NILFS_SEG_FAIL_IO - I/O error.
|
||||||
|
* * %NILFS_SEG_FAIL_CHECKSUM_FULL - Full log checksum verification failed.
|
||||||
*/
|
*/
|
||||||
static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq,
|
static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq,
|
||||||
struct buffer_head *bh_sum,
|
struct buffer_head *bh_sum,
|
||||||
@ -238,6 +254,9 @@ out:
|
|||||||
* @pbh: the current buffer head on summary blocks [in, out]
|
* @pbh: the current buffer head on summary blocks [in, out]
|
||||||
* @offset: the current byte offset on summary blocks [in, out]
|
* @offset: the current byte offset on summary blocks [in, out]
|
||||||
* @bytes: byte size of the item to be read
|
* @bytes: byte size of the item to be read
|
||||||
|
*
|
||||||
|
* Return: Kernel space address of current segment summary entry, or
|
||||||
|
* NULL if an I/O error occurs.
|
||||||
*/
|
*/
|
||||||
static void *nilfs_read_summary_info(struct the_nilfs *nilfs,
|
static void *nilfs_read_summary_info(struct the_nilfs *nilfs,
|
||||||
struct buffer_head **pbh,
|
struct buffer_head **pbh,
|
||||||
@ -300,6 +319,11 @@ static void nilfs_skip_summary_info(struct the_nilfs *nilfs,
|
|||||||
* @start_blocknr: start block number of the log
|
* @start_blocknr: start block number of the log
|
||||||
* @sum: log summary information
|
* @sum: log summary information
|
||||||
* @head: list head to add nilfs_recovery_block struct
|
* @head: list head to add nilfs_recovery_block struct
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
|
* failure:
|
||||||
|
* * %-EIO - I/O error.
|
||||||
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
*/
|
*/
|
||||||
static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
|
static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
|
||||||
struct nilfs_segment_summary *sum,
|
struct nilfs_segment_summary *sum,
|
||||||
@ -571,6 +595,12 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
|
|||||||
* @sb: super block instance
|
* @sb: super block instance
|
||||||
* @root: NILFS root instance
|
* @root: NILFS root instance
|
||||||
* @ri: pointer to a nilfs_recovery_info
|
* @ri: pointer to a nilfs_recovery_info
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
|
* failure:
|
||||||
|
* * %-EINVAL - Log format error.
|
||||||
|
* * %-EIO - I/O error.
|
||||||
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
*/
|
*/
|
||||||
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
|
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
|
||||||
struct super_block *sb,
|
struct super_block *sb,
|
||||||
@ -754,18 +784,13 @@ static void nilfs_abort_roll_forward(struct the_nilfs *nilfs)
|
|||||||
* @sb: super block instance
|
* @sb: super block instance
|
||||||
* @ri: pointer to a nilfs_recovery_info struct to store search results.
|
* @ri: pointer to a nilfs_recovery_info struct to store search results.
|
||||||
*
|
*
|
||||||
* Return Value: On success, 0 is returned. On error, one of the following
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
* negative error code is returned.
|
* failure:
|
||||||
*
|
* * %-EINVAL - Inconsistent filesystem state.
|
||||||
* %-EINVAL - Inconsistent filesystem state.
|
* * %-EIO - I/O error.
|
||||||
*
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
* %-EIO - I/O error
|
* * %-ENOSPC - No space left on device (only in a panic state).
|
||||||
*
|
* * %-ERESTARTSYS - Interrupted.
|
||||||
* %-ENOSPC - No space left on device (only in a panic state).
|
|
||||||
*
|
|
||||||
* %-ERESTARTSYS - Interrupted.
|
|
||||||
*
|
|
||||||
* %-ENOMEM - Insufficient memory available.
|
|
||||||
*/
|
*/
|
||||||
int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
|
int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
|
||||||
struct super_block *sb,
|
struct super_block *sb,
|
||||||
@ -830,14 +855,11 @@ failed:
|
|||||||
* segment pointed by the superblock. It sets up struct the_nilfs through
|
* segment pointed by the superblock. It sets up struct the_nilfs through
|
||||||
* this search. It fills nilfs_recovery_info (ri) required for recovery.
|
* this search. It fills nilfs_recovery_info (ri) required for recovery.
|
||||||
*
|
*
|
||||||
* Return Value: On success, 0 is returned. On error, one of the following
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
* negative error code is returned.
|
* failure:
|
||||||
*
|
* * %-EINVAL - No valid segment found.
|
||||||
* %-EINVAL - No valid segment found
|
* * %-EIO - I/O error.
|
||||||
*
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
* %-EIO - I/O error
|
|
||||||
*
|
|
||||||
* %-ENOMEM - Insufficient memory available.
|
|
||||||
*/
|
*/
|
||||||
int nilfs_search_super_root(struct the_nilfs *nilfs,
|
int nilfs_search_super_root(struct the_nilfs *nilfs,
|
||||||
struct nilfs_recovery_info *ri)
|
struct nilfs_recovery_info *ri)
|
||||||
|
@ -406,12 +406,7 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
|
|||||||
* @segbuf: buffer storing a log to be written
|
* @segbuf: buffer storing a log to be written
|
||||||
* @nilfs: nilfs object
|
* @nilfs: nilfs object
|
||||||
*
|
*
|
||||||
* Return Value: On Success, 0 is returned. On Error, one of the following
|
* Return: Always 0.
|
||||||
* negative error code is returned.
|
|
||||||
*
|
|
||||||
* %-EIO - I/O error
|
|
||||||
*
|
|
||||||
* %-ENOMEM - Insufficient memory available.
|
|
||||||
*/
|
*/
|
||||||
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
|
static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
|
||||||
struct the_nilfs *nilfs)
|
struct the_nilfs *nilfs)
|
||||||
@ -452,10 +447,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf,
|
|||||||
* nilfs_segbuf_wait - wait for completion of requested BIOs
|
* nilfs_segbuf_wait - wait for completion of requested BIOs
|
||||||
* @segbuf: segment buffer
|
* @segbuf: segment buffer
|
||||||
*
|
*
|
||||||
* Return Value: On Success, 0 is returned. On Error, one of the following
|
* Return: 0 on success, or %-EIO if I/O error is detected.
|
||||||
* negative error code is returned.
|
|
||||||
*
|
|
||||||
* %-EIO - I/O error
|
|
||||||
*/
|
*/
|
||||||
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
|
static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf)
|
||||||
{
|
{
|
||||||
|
@ -191,12 +191,10 @@ static int nilfs_prepare_segment_lock(struct super_block *sb,
|
|||||||
* When @vacancy_check flag is set, this function will check the amount of
|
* When @vacancy_check flag is set, this function will check the amount of
|
||||||
* free space, and will wait for the GC to reclaim disk space if low capacity.
|
* free space, and will wait for the GC to reclaim disk space if low capacity.
|
||||||
*
|
*
|
||||||
* Return Value: On success, 0 is returned. On error, one of the following
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
* negative error code is returned.
|
* failure:
|
||||||
*
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
* %-ENOMEM - Insufficient memory available.
|
* * %-ENOSPC - No space left on device (if checking free space).
|
||||||
*
|
|
||||||
* %-ENOSPC - No space left on device
|
|
||||||
*/
|
*/
|
||||||
int nilfs_transaction_begin(struct super_block *sb,
|
int nilfs_transaction_begin(struct super_block *sb,
|
||||||
struct nilfs_transaction_info *ti,
|
struct nilfs_transaction_info *ti,
|
||||||
@ -252,6 +250,8 @@ int nilfs_transaction_begin(struct super_block *sb,
|
|||||||
* nilfs_transaction_commit() sets a timer to start the segment
|
* nilfs_transaction_commit() sets a timer to start the segment
|
||||||
* constructor. If a sync flag is set, it starts construction
|
* constructor. If a sync flag is set, it starts construction
|
||||||
* directly.
|
* directly.
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or a negative error code on failure.
|
||||||
*/
|
*/
|
||||||
int nilfs_transaction_commit(struct super_block *sb)
|
int nilfs_transaction_commit(struct super_block *sb)
|
||||||
{
|
{
|
||||||
@ -407,6 +407,8 @@ static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci,
|
|||||||
/**
|
/**
|
||||||
* nilfs_segctor_reset_segment_buffer - reset the current segment buffer
|
* nilfs_segctor_reset_segment_buffer - reset the current segment buffer
|
||||||
* @sci: nilfs_sc_info
|
* @sci: nilfs_sc_info
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or a negative error code on failure.
|
||||||
*/
|
*/
|
||||||
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
|
static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci)
|
||||||
{
|
{
|
||||||
@ -734,7 +736,6 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
|
|||||||
if (!head)
|
if (!head)
|
||||||
head = create_empty_buffers(folio,
|
head = create_empty_buffers(folio,
|
||||||
i_blocksize(inode), 0);
|
i_blocksize(inode), 0);
|
||||||
folio_unlock(folio);
|
|
||||||
|
|
||||||
bh = head;
|
bh = head;
|
||||||
do {
|
do {
|
||||||
@ -744,11 +745,14 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode,
|
|||||||
list_add_tail(&bh->b_assoc_buffers, listp);
|
list_add_tail(&bh->b_assoc_buffers, listp);
|
||||||
ndirties++;
|
ndirties++;
|
||||||
if (unlikely(ndirties >= nlimit)) {
|
if (unlikely(ndirties >= nlimit)) {
|
||||||
|
folio_unlock(folio);
|
||||||
folio_batch_release(&fbatch);
|
folio_batch_release(&fbatch);
|
||||||
cond_resched();
|
cond_resched();
|
||||||
return ndirties;
|
return ndirties;
|
||||||
}
|
}
|
||||||
} while (bh = bh->b_this_page, bh != head);
|
} while (bh = bh->b_this_page, bh != head);
|
||||||
|
|
||||||
|
folio_unlock(folio);
|
||||||
}
|
}
|
||||||
folio_batch_release(&fbatch);
|
folio_batch_release(&fbatch);
|
||||||
cond_resched();
|
cond_resched();
|
||||||
@ -1118,7 +1122,8 @@ static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci,
|
|||||||
* a super root block containing this sufile change is complete, and it can
|
* a super root block containing this sufile change is complete, and it can
|
||||||
* be canceled with nilfs_sufile_cancel_freev() until then.
|
* be canceled with nilfs_sufile_cancel_freev() until then.
|
||||||
*
|
*
|
||||||
* Return: 0 on success, or the following negative error code on failure.
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
|
* failure:
|
||||||
* * %-EINVAL - Invalid segment number.
|
* * %-EINVAL - Invalid segment number.
|
||||||
* * %-EIO - I/O error (including metadata corruption).
|
* * %-EIO - I/O error (including metadata corruption).
|
||||||
* * %-ENOMEM - Insufficient memory available.
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
@ -1315,6 +1320,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode)
|
|||||||
* nilfs_segctor_begin_construction - setup segment buffer to make a new log
|
* nilfs_segctor_begin_construction - setup segment buffer to make a new log
|
||||||
* @sci: nilfs_sc_info
|
* @sci: nilfs_sc_info
|
||||||
* @nilfs: nilfs object
|
* @nilfs: nilfs object
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or a negative error code on failure.
|
||||||
*/
|
*/
|
||||||
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
|
static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci,
|
||||||
struct the_nilfs *nilfs)
|
struct the_nilfs *nilfs)
|
||||||
@ -2312,18 +2319,13 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force)
|
|||||||
* nilfs_construct_segment - construct a logical segment
|
* nilfs_construct_segment - construct a logical segment
|
||||||
* @sb: super block
|
* @sb: super block
|
||||||
*
|
*
|
||||||
* Return Value: On success, 0 is returned. On errors, one of the following
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
* negative error code is returned.
|
* failure:
|
||||||
*
|
* * %-EIO - I/O error (including metadata corruption).
|
||||||
* %-EROFS - Read only filesystem.
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
*
|
* * %-ENOSPC - No space left on device (only in a panic state).
|
||||||
* %-EIO - I/O error
|
* * %-ERESTARTSYS - Interrupted.
|
||||||
*
|
* * %-EROFS - Read only filesystem.
|
||||||
* %-ENOSPC - No space left on device (only in a panic state).
|
|
||||||
*
|
|
||||||
* %-ERESTARTSYS - Interrupted.
|
|
||||||
*
|
|
||||||
* %-ENOMEM - Insufficient memory available.
|
|
||||||
*/
|
*/
|
||||||
int nilfs_construct_segment(struct super_block *sb)
|
int nilfs_construct_segment(struct super_block *sb)
|
||||||
{
|
{
|
||||||
@ -2347,18 +2349,13 @@ int nilfs_construct_segment(struct super_block *sb)
|
|||||||
* @start: start byte offset
|
* @start: start byte offset
|
||||||
* @end: end byte offset (inclusive)
|
* @end: end byte offset (inclusive)
|
||||||
*
|
*
|
||||||
* Return Value: On success, 0 is returned. On errors, one of the following
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
* negative error code is returned.
|
* failure:
|
||||||
*
|
* * %-EIO - I/O error (including metadata corruption).
|
||||||
* %-EROFS - Read only filesystem.
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
*
|
* * %-ENOSPC - No space left on device (only in a panic state).
|
||||||
* %-EIO - I/O error
|
* * %-ERESTARTSYS - Interrupted.
|
||||||
*
|
* * %-EROFS - Read only filesystem.
|
||||||
* %-ENOSPC - No space left on device (only in a panic state).
|
|
||||||
*
|
|
||||||
* %-ERESTARTSYS - Interrupted.
|
|
||||||
*
|
|
||||||
* %-ENOMEM - Insufficient memory available.
|
|
||||||
*/
|
*/
|
||||||
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
|
int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode,
|
||||||
loff_t start, loff_t end)
|
loff_t start, loff_t end)
|
||||||
@ -2464,6 +2461,8 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err)
|
|||||||
* nilfs_segctor_construct - form logs and write them to disk
|
* nilfs_segctor_construct - form logs and write them to disk
|
||||||
* @sci: segment constructor object
|
* @sci: segment constructor object
|
||||||
* @mode: mode of log forming
|
* @mode: mode of log forming
|
||||||
|
*
|
||||||
|
* Return: 0 on success, or a negative error code on failure.
|
||||||
*/
|
*/
|
||||||
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
|
static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode)
|
||||||
{
|
{
|
||||||
@ -2836,7 +2835,8 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
|
|||||||
* This allocates a log writer object, initializes it, and starts the
|
* This allocates a log writer object, initializes it, and starts the
|
||||||
* log writer.
|
* log writer.
|
||||||
*
|
*
|
||||||
* Return: 0 on success, or the following negative error code on failure.
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
|
* failure:
|
||||||
* * %-EINTR - Log writer thread creation failed due to interruption.
|
* * %-EINTR - Log writer thread creation failed due to interruption.
|
||||||
* * %-ENOMEM - Insufficient memory available.
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
*/
|
*/
|
||||||
|
@ -133,6 +133,8 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
|
|||||||
/**
|
/**
|
||||||
* nilfs_sufile_get_ncleansegs - return the number of clean segments
|
* nilfs_sufile_get_ncleansegs - return the number of clean segments
|
||||||
* @sufile: inode of segment usage file
|
* @sufile: inode of segment usage file
|
||||||
|
*
|
||||||
|
* Return: Number of clean segments.
|
||||||
*/
|
*/
|
||||||
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
|
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
|
||||||
{
|
{
|
||||||
@ -155,17 +157,13 @@ unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
|
|||||||
* of successfully modified segments from the head is stored in the
|
* of successfully modified segments from the head is stored in the
|
||||||
* place @ndone points to.
|
* place @ndone points to.
|
||||||
*
|
*
|
||||||
* Return Value: On success, zero is returned. On error, one of the
|
* Return: 0 on success, or one of the following negative error codes on
|
||||||
* following negative error codes is returned.
|
* failure:
|
||||||
*
|
* * %-EINVAL - Invalid segment usage number
|
||||||
* %-EIO - I/O error.
|
* * %-EIO - I/O error (including metadata corruption).
|
||||||
*
|
* * %-ENOENT - Given segment usage is in hole block (may be returned if
|
||||||
* %-ENOMEM - Insufficient amount of memory available.
|
* @create is zero)
|
||||||
*
|
* * %-ENOMEM - Insufficient memory available.
|
||||||
* %-ENOENT - Given segment usage is in hole block (may be returned if
|
|
||||||
* @create is zero)
|
|
||||||
*
|
|
||||||
* %-EINVAL - Invalid segment usage number
|
|
||||||
*/
|
*/
|
||||||
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
|
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
|
||||||
int create, size_t *ndone,
|
int create, size_t *ndone,
|
||||||
@ -272,10 +270,7 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
|
|||||||
* @start: minimum segment number of allocatable region (inclusive)
|
* @start: minimum segment number of allocatable region (inclusive)
|
||||||
* @end: maximum segment number of allocatable region (inclusive)
|
* @end: maximum segment number of allocatable region (inclusive)
|
||||||
*
|
*
|
||||||
* Return Value: On success, 0 is returned. On error, one of the
|
* Return: 0 on success, or %-ERANGE if segment range is invalid.
|
||||||
* following negative error codes is returned.
|
|
||||||
*
|
|
||||||
* %-ERANGE - invalid segment region
|
|
||||||
*/
|
*/
|
||||||
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
|
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
|
||||||
{
|
{
|
||||||
@@ -300,17 +295,14 @@ int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
  * @sufile: inode of segment usage file
  * @segnump: pointer to segment number
  *
- * Description: nilfs_sufile_alloc() allocates a clean segment.
+ * Description: nilfs_sufile_alloc() allocates a clean segment, and stores
+ * its segment number in the place pointed to by @segnump.
  *
- * Return Value: On success, 0 is returned and the segment number of the
- * allocated segment is stored in the place pointed by @segnump. On error, one
- * of the following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOSPC - No clean segment left.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - No clean segment left.
  */
 int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 {
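The reworded comment also spells out that @segnump is only valid when 0 is returned. A hedged sketch of how a caller might consume this contract; the helper function is hypothetical, and only the nilfs_sufile_alloc() signature and return codes come from the hunk above:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/printk.h>
#include <linux/types.h>

int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump);	/* normally from fs/nilfs2/sufile.h */

/* Hypothetical helper: pick a fresh segment or report why none was found. */
static int example_pick_clean_segment(struct inode *sufile, __u64 *out)
{
	__u64 segnum;
	int err = nilfs_sufile_alloc(sufile, &segnum);

	if (err) {
		if (err == -ENOSPC)
			pr_warn("no clean segment left\n");
		return err;	/* -EIO and -ENOMEM propagate unchanged */
	}
	*out = segnum;		/* valid only when 0 was returned */
	return 0;
}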
@@ -510,6 +502,8 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
  * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
  * @sufile: inode of segment usage file
  * @segnum: segment number
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
 {
@@ -569,6 +563,8 @@ out_sem:
  * @segnum: segment number
  * @nblocks: number of live blocks in the segment
  * @modtime: modification time (option)
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
 				   unsigned long nblocks, time64_t modtime)
@@ -610,16 +606,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
  * @sufile: inode of segment usage file
  * @sustat: pointer to a structure of segment usage statistics
  *
- * Description: nilfs_sufile_get_stat() returns information about segment
- * usage.
+ * Description: nilfs_sufile_get_stat() retrieves segment usage statistics
+ * and stores them in the location pointed to by @sustat.
  *
- * Return Value: On success, 0 is returned, and segment usage information is
- * stored in the place pointed by @sustat. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
  */
 int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
 {
@@ -683,16 +676,12 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
  * @start: start segment number (inclusive)
  * @end: end segment number (inclusive)
  *
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid number of segments specified
- *
- * %-EBUSY - Dirty or active segments are present in the range
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EBUSY - Dirty or active segments are present in the range.
+ * * %-EINVAL - Invalid number of segments specified.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
  */
 static int nilfs_sufile_truncate_range(struct inode *sufile,
 				       __u64 start, __u64 end)
@@ -787,16 +776,12 @@ out:
  * @sufile: inode of segment usage file
  * @newnsegs: new number of segments
  *
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-ENOSPC - Enough free space is not left for shrinking
- *
- * %-EBUSY - Dirty or active segments exist in the region to be truncated
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EBUSY - Dirty or active segments exist in the region to be truncated.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-ENOSPC - Enough free space is not left for shrinking.
  */
 int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
 {
@@ -865,7 +850,7 @@ out:
  * @nsi: size of suinfo array
  *
  * Return: Count of segment usage info items stored in the output buffer on
- * success, or the following negative error code on failure.
+ * success, or one of the following negative error codes on failure:
  * * %-EIO - I/O error (including metadata corruption).
  * * %-ENOMEM - Insufficient memory available.
  */
@@ -939,14 +924,11 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
  * segment usage accordingly. Only the fields indicated by the sup_flags
  * are updated.
  *
- * Return Value: On success, 0 is returned. On error, one of the
- * following negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Invalid values in input (segment number, flags or nblocks).
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
  */
 ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
 				unsigned int supsz, size_t nsup)
@@ -1073,7 +1055,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
  * and start+len is rounded down. For each clean segment blkdev_issue_discard
  * function is invoked.
  *
- * Return Value: On success, 0 is returned or negative error code, otherwise.
+ * Return: 0 on success, or a negative error code on failure.
  */
 int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
 {
@@ -1219,6 +1201,8 @@ out_sem:
  * @susize: size of a segment usage entry
  * @raw_inode: on-disk sufile inode
  * @inodep: buffer to store the inode
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 int nilfs_sufile_read(struct super_block *sb, size_t susize,
 		      struct nilfs_inode *raw_inode, struct inode **inodep)
@@ -58,6 +58,8 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range);
  * nilfs_sufile_scrap - make a segment garbage
  * @sufile: inode of segment usage file
  * @segnum: segment number to be freed
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum)
 {
@@ -68,6 +70,8 @@ static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum)
  * nilfs_sufile_free - free segment
  * @sufile: inode of segment usage file
  * @segnum: segment number to be freed
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
 {
@@ -80,6 +84,8 @@ static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum)
  * @segnumv: array of segment numbers
  * @nsegs: size of @segnumv array
  * @ndone: place to store the number of freed segments
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv,
 				     size_t nsegs, size_t *ndone)
@@ -95,8 +101,7 @@ static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv,
  * @nsegs: size of @segnumv array
  * @ndone: place to store the number of cancelled segments
  *
- * Return Value: On success, 0 is returned. On error, a negative error codes
- * is returned.
+ * Return: 0 on success, or a negative error code on failure.
  */
 static inline int nilfs_sufile_cancel_freev(struct inode *sufile,
 					    __u64 *segnumv, size_t nsegs,
@@ -114,14 +119,11 @@ static inline int nilfs_sufile_cancel_freev(struct inode *sufile,
  * Description: nilfs_sufile_set_error() marks the segment specified by
  * @segnum as erroneous. The error segment will never be used again.
  *
- * Return Value: On success, 0 is returned. On error, one of the following
- * negative error codes is returned.
- *
- * %-EIO - I/O error.
- *
- * %-ENOMEM - Insufficient amount of memory available.
- *
- * %-EINVAL - Invalid segment usage number.
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - Invalid segment usage number.
+ * * %-EIO - I/O error (including metadata corruption).
+ * * %-ENOMEM - Insufficient memory available.
  */
 static inline int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
 {
@@ -309,6 +309,8 @@ int nilfs_commit_super(struct super_block *sb, int flag)
  * This function restores state flags in the on-disk super block.
  * This will set "clean" flag (i.e. NILFS_VALID_FS) unless the
  * filesystem was not clean previously.
+ *
+ * Return: 0 on success, %-EIO if I/O error or superblock is corrupted.
  */
 int nilfs_cleanup_super(struct super_block *sb)
 {
@@ -339,6 +341,8 @@ int nilfs_cleanup_super(struct super_block *sb)
  * nilfs_move_2nd_super - relocate secondary super block
  * @sb: super block instance
  * @sb2off: new offset of the secondary super block (in bytes)
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off)
 {
@@ -420,6 +424,8 @@ out:
  * nilfs_resize_fs - resize the filesystem
  * @sb: super block instance
  * @newsize: new size of the filesystem (in bytes)
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 int nilfs_resize_fs(struct super_block *sb, __u64 newsize)
 {
@@ -987,7 +993,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
  * nilfs_tree_is_busy() - try to shrink dentries of a checkpoint
  * @root_dentry: root dentry of the tree to be shrunk
  *
- * This function returns true if the tree was in-use.
+ * Return: true if the tree was in-use, false otherwise.
  */
 static bool nilfs_tree_is_busy(struct dentry *root_dentry)
 {
@@ -1033,6 +1039,8 @@ int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno)
  *
  * This function is called exclusively by nilfs->ns_mount_mutex.
  * So, the recovery process is protected from other simultaneous mounts.
+ *
+ * Return: 0 on success, or a negative error code on failure.
  */
 static int
 nilfs_fill_super(struct super_block *sb, struct fs_context *fc)
@@ -49,8 +49,8 @@ void nilfs_set_last_segment(struct the_nilfs *nilfs,
  * alloc_nilfs - allocate a nilfs object
  * @sb: super block instance
  *
- * Return Value: On success, pointer to the_nilfs is returned.
- * On error, NULL is returned.
+ * Return: a pointer to the allocated nilfs object on success, or NULL on
+ * failure.
  */
 struct the_nilfs *alloc_nilfs(struct super_block *sb)
 {
@@ -165,6 +165,9 @@ static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri)
  * containing a super root from a given super block, and initializes
  * relevant information on the nilfs object preparatory for log
  * scanning and recovery.
+ *
+ * Return: 0 on success, or %-EINVAL if current segment number is out
+ * of range.
  */
 static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
 				  struct nilfs_super_block *sbp)
@@ -200,8 +203,7 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs,
  * exponent information written in @sbp and stores it in @blocksize,
  * or aborts with an error message if it's too large.
  *
- * Return Value: On success, 0 is returned. If the block size is too
- * large, -EINVAL is returned.
+ * Return: 0 on success, or %-EINVAL if the block size is too large.
  */
 static int nilfs_get_blocksize(struct super_block *sb,
 			       struct nilfs_super_block *sbp, int *blocksize)
@@ -226,6 +228,13 @@ static int nilfs_get_blocksize(struct super_block *sb,
  * load_nilfs() searches and load the latest super root,
  * attaches the last segment, and does recovery if needed.
  * The caller must call this exclusively for simultaneous mounts.
+ *
+ * Return: 0 on success, or one of the following negative error codes on
+ * failure:
+ * * %-EINVAL - No valid segment found.
+ * * %-EIO - I/O error.
+ * * %-ENOMEM - Insufficient memory available.
+ * * %-EROFS - Read only device or RO compat mode (if recovery is required)
  */
 int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
 {
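The documented %-EROFS case corresponds to a mount where log recovery would be needed but writing is not allowed (read-only device or RO compat mode). A generic, hedged sketch of that kind of check, not the nilfs2 implementation:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/types.h>

/* Illustrative only: refuse to proceed when recovery is needed but writes are impossible. */
static int example_check_recovery_allowed(struct super_block *sb,
					  bool recovery_needed)
{
	if (recovery_needed && sb_rdonly(sb))
		return -EROFS;	/* caller would suggest remounting read-write */
	return 0;
}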
@@ -395,6 +404,8 @@ static unsigned long long nilfs_max_size(unsigned int blkbits)
  * nilfs_nrsvsegs - calculate the number of reserved segments
  * @nilfs: nilfs object
  * @nsegs: total number of segments
+ *
+ * Return: Number of reserved segments.
  */
 unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
 {
@@ -406,6 +417,8 @@ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs)
 /**
  * nilfs_max_segment_count - calculate the maximum number of segments
  * @nilfs: nilfs object
+ *
+ * Return: Maximum number of segments
  */
 static u64 nilfs_max_segment_count(struct the_nilfs *nilfs)
 {
@@ -538,7 +551,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp)
  * area, or if the parameters themselves are not normal, it is
  * determined to be invalid.
  *
- * Return Value: true if invalid, false if valid.
+ * Return: true if invalid, false if valid.
  */
 static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset)
 {
@@ -684,8 +697,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs,
  * reading the super block, getting disk layout information, initializing
  * shared fields in the_nilfs).
  *
- * Return Value: On success, 0 is returned. On error, a negative error
- * code is returned.
+ * Return: 0 on success, or a negative error code on failure.
  */
 int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb)
 {
157	fs/ocfs2/alloc.c
@@ -566,7 +566,7 @@ static void ocfs2_adjust_rightmost_records(handle_t *handle,
 					    struct ocfs2_path *path,
 					    struct ocfs2_extent_rec *insert_rec);
 /*
- * Reset the actual path elements so that we can re-use the structure
+ * Reset the actual path elements so that we can reuse the structure
  * to build another path. Generally, this involves freeing the buffer
  * heads.
  */
@@ -1182,7 +1182,7 @@ static int ocfs2_add_branch(handle_t *handle,
 
 	/*
 	 * If there is a gap before the root end and the real end
-	 * of the righmost leaf block, we need to remove the gap
+	 * of the rightmost leaf block, we need to remove the gap
 	 * between new_cpos and root_end first so that the tree
 	 * is consistent after we add a new branch(it will start
 	 * from new_cpos).
@@ -1238,7 +1238,7 @@ static int ocfs2_add_branch(handle_t *handle,
 
 	/* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
 	 * linked with the rest of the tree.
-	 * conversly, new_eb_bhs[0] is the new bottommost leaf.
+	 * conversely, new_eb_bhs[0] is the new bottommost leaf.
 	 *
 	 * when we leave the loop, new_last_eb_blk will point to the
 	 * newest leaf, and next_blkno will point to the topmost extent
@@ -3712,7 +3712,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
 		 * update split_index here.
 		 *
 		 * When the split_index is zero, we need to merge it to the
-		 * prevoius extent block. It is more efficient and easier
+		 * previous extent block. It is more efficient and easier
 		 * if we do merge_right first and merge_left later.
 		 */
 		ret = ocfs2_merge_rec_right(path, handle, et, split_rec,
@@ -4517,7 +4517,7 @@ static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et,
 }
 
 /*
- * This should only be called against the righmost leaf extent list.
+ * This should only be called against the rightmost leaf extent list.
  *
  * ocfs2_figure_appending_type() will figure out whether we'll have to
  * insert at the tail of the rightmost leaf.
@@ -6154,6 +6154,9 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
 	int status;
 	struct inode *inode = NULL;
 	struct buffer_head *bh = NULL;
+	struct ocfs2_dinode *di;
+	struct ocfs2_truncate_log *tl;
+	unsigned int tl_count;
 
 	inode = ocfs2_get_system_file_inode(osb,
 					    TRUNCATE_LOG_SYSTEM_INODE,
@@ -6171,6 +6174,18 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
 		goto bail;
 	}
 
+	di = (struct ocfs2_dinode *)bh->b_data;
+	tl = &di->id2.i_dealloc;
+	tl_count = le16_to_cpu(tl->tl_count);
+	if (unlikely(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) ||
+		     tl_count == 0)) {
+		status = -EFSCORRUPTED;
+		iput(inode);
+		brelse(bh);
+		mlog_errno(status);
+		goto bail;
+	}
+
 	*tl_inode = inode;
 	*tl_bh = bh;
 bail:
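The added block validates an on-disk count against the maximum number of records that actually fit in the inode before the value is ever used as an array bound. The same defensive pattern, reduced to a generic hedged sketch (the names and the helper are made up; only the shape of the check mirrors the hunk above, and the -EFSCORRUPTED include location is assumed):

#include <linux/errno.h>
#include <linux/fs.h>	/* assumed home of EFSCORRUPTED */
#include <linux/types.h>

/*
 * Generic sketch: reject a count read from disk if it is zero or larger
 * than what the containing structure can hold, so later loops over
 * "count" entries cannot run past the buffer.
 */
static int example_validate_disk_count(u16 count, u16 max_records)
{
	if (count == 0 || count > max_records)
		return -EFSCORRUPTED;
	return 0;
}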
@ -6808,27 +6823,27 @@ static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
|
void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle,
|
||||||
unsigned int from, unsigned int to,
|
size_t from, size_t to, struct folio *folio, int zero,
|
||||||
struct page *page, int zero, u64 *phys)
|
u64 *phys)
|
||||||
{
|
{
|
||||||
int ret, partial = 0;
|
int ret, partial = 0;
|
||||||
loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from;
|
loff_t start_byte = folio_pos(folio) + from;
|
||||||
loff_t length = to - from;
|
loff_t length = to - from;
|
||||||
|
|
||||||
ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
|
ret = ocfs2_map_folio_blocks(folio, phys, inode, from, to, 0);
|
||||||
if (ret)
|
if (ret)
|
||||||
mlog_errno(ret);
|
mlog_errno(ret);
|
||||||
|
|
||||||
if (zero)
|
if (zero)
|
||||||
zero_user_segment(page, from, to);
|
folio_zero_segment(folio, from, to);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Need to set the buffers we zero'd into uptodate
|
* Need to set the buffers we zero'd into uptodate
|
||||||
* here if they aren't - ocfs2_map_page_blocks()
|
* here if they aren't - ocfs2_map_page_blocks()
|
||||||
* might've skipped some
|
* might've skipped some
|
||||||
*/
|
*/
|
||||||
ret = walk_page_buffers(handle, page_buffers(page),
|
ret = walk_page_buffers(handle, folio_buffers(folio),
|
||||||
from, to, &partial,
|
from, to, &partial,
|
||||||
ocfs2_zero_func);
|
ocfs2_zero_func);
|
||||||
if (ret < 0)
|
if (ret < 0)
|
||||||
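This hunk is part of the mechanical struct page to struct folio conversion: page_offset() becomes folio_pos(), zero_user_segment() becomes folio_zero_segment(), SetPageUptodate() and flush_dcache_page() become folio_mark_uptodate() and flush_dcache_folio(), and page_buffers() becomes folio_buffers(). A condensed sketch of the same substitutions on a toy helper (not ocfs2 code; assumes a locked folio):

#include <linux/cacheflush.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Toy helper: zero [from, to) of a locked folio and mark it up to date. */
static void example_zero_folio_range(struct folio *folio, size_t from, size_t to)
{
	loff_t start_byte = folio_pos(folio) + from;	/* was: page_offset(page) + from */

	folio_zero_segment(folio, from, to);		/* was: zero_user_segment() */
	folio_mark_uptodate(folio);			/* was: SetPageUptodate() */
	flush_dcache_folio(folio);			/* was: flush_dcache_page() */
	(void)start_byte;	/* a real caller would log or journal this byte range */
}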
@ -6841,92 +6856,88 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (!partial)
|
if (!partial)
|
||||||
SetPageUptodate(page);
|
folio_mark_uptodate(folio);
|
||||||
|
|
||||||
flush_dcache_page(page);
|
flush_dcache_folio(folio);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
|
static void ocfs2_zero_cluster_folios(struct inode *inode, loff_t start,
|
||||||
loff_t end, struct page **pages,
|
loff_t end, struct folio **folios, int numfolios,
|
||||||
int numpages, u64 phys, handle_t *handle)
|
u64 phys, handle_t *handle)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
struct page *page;
|
|
||||||
unsigned int from, to = PAGE_SIZE;
|
|
||||||
struct super_block *sb = inode->i_sb;
|
struct super_block *sb = inode->i_sb;
|
||||||
|
|
||||||
BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
|
BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
|
||||||
|
|
||||||
if (numpages == 0)
|
if (numfolios == 0)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
to = PAGE_SIZE;
|
for (i = 0; i < numfolios; i++) {
|
||||||
for(i = 0; i < numpages; i++) {
|
struct folio *folio = folios[i];
|
||||||
page = pages[i];
|
size_t to = folio_size(folio);
|
||||||
|
size_t from = offset_in_folio(folio, start);
|
||||||
|
|
||||||
from = start & (PAGE_SIZE - 1);
|
if (to > end - folio_pos(folio))
|
||||||
if ((end >> PAGE_SHIFT) == page->index)
|
to = end - folio_pos(folio);
|
||||||
to = end & (PAGE_SIZE - 1);
|
|
||||||
|
|
||||||
BUG_ON(from > PAGE_SIZE);
|
ocfs2_map_and_dirty_folio(inode, handle, from, to, folio, 1,
|
||||||
BUG_ON(to > PAGE_SIZE);
|
&phys);
|
||||||
|
|
||||||
ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
|
start = folio_next_index(folio) << PAGE_SHIFT;
|
||||||
&phys);
|
|
||||||
|
|
||||||
start = (page->index + 1) << PAGE_SHIFT;
|
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
if (pages)
|
if (folios)
|
||||||
ocfs2_unlock_and_free_pages(pages, numpages);
|
ocfs2_unlock_and_free_folios(folios, numfolios);
|
||||||
}
|
}
|
||||||
|
|
||||||
int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
|
static int ocfs2_grab_folios(struct inode *inode, loff_t start, loff_t end,
|
||||||
struct page **pages, int *num)
|
struct folio **folios, int *num)
|
||||||
{
|
{
|
||||||
int numpages, ret = 0;
|
int numfolios, ret = 0;
|
||||||
struct address_space *mapping = inode->i_mapping;
|
struct address_space *mapping = inode->i_mapping;
|
||||||
unsigned long index;
|
unsigned long index;
|
||||||
loff_t last_page_bytes;
|
loff_t last_page_bytes;
|
||||||
|
|
||||||
BUG_ON(start > end);
|
BUG_ON(start > end);
|
||||||
|
|
||||||
numpages = 0;
|
numfolios = 0;
|
||||||
last_page_bytes = PAGE_ALIGN(end);
|
last_page_bytes = PAGE_ALIGN(end);
|
||||||
index = start >> PAGE_SHIFT;
|
index = start >> PAGE_SHIFT;
|
||||||
do {
|
do {
|
||||||
pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
|
folios[numfolios] = __filemap_get_folio(mapping, index,
|
||||||
if (!pages[numpages]) {
|
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
|
||||||
ret = -ENOMEM;
|
if (IS_ERR(folios[numfolios])) {
|
||||||
|
ret = PTR_ERR(folios[numfolios]);
|
||||||
mlog_errno(ret);
|
mlog_errno(ret);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
numpages++;
|
index = folio_next_index(folios[numfolios]);
|
||||||
index++;
|
numfolios++;
|
||||||
} while (index < (last_page_bytes >> PAGE_SHIFT));
|
} while (index < (last_page_bytes >> PAGE_SHIFT));
|
||||||
|
|
||||||
out:
|
out:
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
if (pages)
|
if (folios)
|
||||||
ocfs2_unlock_and_free_pages(pages, numpages);
|
ocfs2_unlock_and_free_folios(folios, numfolios);
|
||||||
numpages = 0;
|
numfolios = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
*num = numpages;
|
*num = numfolios;
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
|
static int ocfs2_grab_eof_folios(struct inode *inode, loff_t start, loff_t end,
|
||||||
struct page **pages, int *num)
|
struct folio **folios, int *num)
|
||||||
{
|
{
|
||||||
struct super_block *sb = inode->i_sb;
|
struct super_block *sb = inode->i_sb;
|
||||||
|
|
||||||
BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
|
BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
|
||||||
(end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
|
(end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
|
||||||
|
|
||||||
return ocfs2_grab_pages(inode, start, end, pages, num);
|
return ocfs2_grab_folios(inode, start, end, folios, num);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
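In the hunk above, find_or_create_page() is replaced by __filemap_get_folio() with FGP_LOCK | FGP_ACCESSED | FGP_CREAT, which returns an ERR_PTR() on failure rather than NULL, and folio_next_index() advances the loop past however many pages the folio covers. A hedged sketch of that lookup-and-advance loop in isolation (not the ocfs2 function; the caller is assumed to size @folios appropriately and to unlock and release them later):

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>

static int example_grab_folio_range(struct address_space *mapping,
				    pgoff_t index, pgoff_t end_index,
				    struct folio **folios, int *num)
{
	int n = 0;

	while (index < end_index) {
		struct folio *folio = __filemap_get_folio(mapping, index,
				FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);

		if (IS_ERR(folio)) {
			*num = n;
			return PTR_ERR(folio);	/* e.g. -ENOMEM */
		}
		folios[n++] = folio;
		index = folio_next_index(folio);	/* skip the whole folio */
	}
	*num = n;
	return 0;
}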
@ -6940,8 +6951,8 @@ static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
|
|||||||
int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
|
int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
|
||||||
u64 range_start, u64 range_end)
|
u64 range_start, u64 range_end)
|
||||||
{
|
{
|
||||||
int ret = 0, numpages;
|
int ret = 0, numfolios;
|
||||||
struct page **pages = NULL;
|
struct folio **folios = NULL;
|
||||||
u64 phys;
|
u64 phys;
|
||||||
unsigned int ext_flags;
|
unsigned int ext_flags;
|
||||||
struct super_block *sb = inode->i_sb;
|
struct super_block *sb = inode->i_sb;
|
||||||
@ -6954,17 +6965,17 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
|
|||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Avoid zeroing pages fully beyond current i_size. It is pointless as
|
* Avoid zeroing folios fully beyond current i_size. It is pointless as
|
||||||
* underlying blocks of those pages should be already zeroed out and
|
* underlying blocks of those folios should be already zeroed out and
|
||||||
* page writeback will skip them anyway.
|
* page writeback will skip them anyway.
|
||||||
*/
|
*/
|
||||||
range_end = min_t(u64, range_end, i_size_read(inode));
|
range_end = min_t(u64, range_end, i_size_read(inode));
|
||||||
if (range_start >= range_end)
|
if (range_start >= range_end)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
pages = kcalloc(ocfs2_pages_per_cluster(sb),
|
folios = kcalloc(ocfs2_pages_per_cluster(sb),
|
||||||
sizeof(struct page *), GFP_NOFS);
|
sizeof(struct folio *), GFP_NOFS);
|
||||||
if (pages == NULL) {
|
if (folios == NULL) {
|
||||||
ret = -ENOMEM;
|
ret = -ENOMEM;
|
||||||
mlog_errno(ret);
|
mlog_errno(ret);
|
||||||
goto out;
|
goto out;
|
||||||
@ -6985,18 +6996,18 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
|
|||||||
if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN)
|
if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN)
|
||||||
goto out;
|
goto out;
|
||||||
|
|
||||||
ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages,
|
ret = ocfs2_grab_eof_folios(inode, range_start, range_end, folios,
|
||||||
&numpages);
|
&numfolios);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
mlog_errno(ret);
|
mlog_errno(ret);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
ocfs2_zero_cluster_pages(inode, range_start, range_end, pages,
|
ocfs2_zero_cluster_folios(inode, range_start, range_end, folios,
|
||||||
numpages, phys, handle);
|
numfolios, phys, handle);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Initiate writeout of the pages we zero'd here. We don't
|
* Initiate writeout of the folios we zero'd here. We don't
|
||||||
* wait on them - the truncate_inode_pages() call later will
|
* wait on them - the truncate_inode_pages() call later will
|
||||||
* do that for us.
|
* do that for us.
|
||||||
*/
|
*/
|
||||||
@ -7006,7 +7017,7 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
|
|||||||
mlog_errno(ret);
|
mlog_errno(ret);
|
||||||
|
|
||||||
out:
|
out:
|
||||||
kfree(pages);
|
kfree(folios);
|
||||||
|
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
@ -7059,7 +7070,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
|
|||||||
int ocfs2_convert_inline_data_to_extents(struct inode *inode,
|
int ocfs2_convert_inline_data_to_extents(struct inode *inode,
|
||||||
struct buffer_head *di_bh)
|
struct buffer_head *di_bh)
|
||||||
{
|
{
|
||||||
int ret, has_data, num_pages = 0;
|
int ret, has_data, num_folios = 0;
|
||||||
int need_free = 0;
|
int need_free = 0;
|
||||||
u32 bit_off, num;
|
u32 bit_off, num;
|
||||||
handle_t *handle;
|
handle_t *handle;
|
||||||
@ -7068,7 +7079,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
|
|||||||
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
|
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
|
||||||
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
|
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
|
||||||
struct ocfs2_alloc_context *data_ac = NULL;
|
struct ocfs2_alloc_context *data_ac = NULL;
|
||||||
struct page *page = NULL;
|
struct folio *folio = NULL;
|
||||||
struct ocfs2_extent_tree et;
|
struct ocfs2_extent_tree et;
|
||||||
int did_quota = 0;
|
int did_quota = 0;
|
||||||
|
|
||||||
@ -7119,12 +7130,12 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
|
|||||||
|
|
||||||
/*
|
/*
|
||||||
* Save two copies, one for insert, and one that can
|
* Save two copies, one for insert, and one that can
|
||||||
* be changed by ocfs2_map_and_dirty_page() below.
|
* be changed by ocfs2_map_and_dirty_folio() below.
|
||||||
*/
|
*/
|
||||||
block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
|
block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
|
||||||
|
|
||||||
ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
|
ret = ocfs2_grab_eof_folios(inode, 0, page_end, &folio,
|
||||||
&num_pages);
|
&num_folios);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
mlog_errno(ret);
|
mlog_errno(ret);
|
||||||
need_free = 1;
|
need_free = 1;
|
||||||
@ -7135,15 +7146,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
|
|||||||
* This should populate the 1st page for us and mark
|
* This should populate the 1st page for us and mark
|
||||||
* it up to date.
|
* it up to date.
|
||||||
*/
|
*/
|
||||||
ret = ocfs2_read_inline_data(inode, page, di_bh);
|
ret = ocfs2_read_inline_data(inode, folio, di_bh);
|
||||||
if (ret) {
|
if (ret) {
|
||||||
mlog_errno(ret);
|
mlog_errno(ret);
|
||||||
need_free = 1;
|
need_free = 1;
|
||||||
goto out_unlock;
|
goto out_unlock;
|
||||||
}
|
}
|
||||||
|
|
||||||
ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
|
ocfs2_map_and_dirty_folio(inode, handle, 0, page_end, folio, 0,
|
||||||
&phys);
|
&phys);
|
||||||
}
|
}
|
||||||
|
|
||||||
spin_lock(&oi->ip_lock);
|
spin_lock(&oi->ip_lock);
|
||||||
@ -7174,8 +7185,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
|
|||||||
}
|
}
|
||||||
|
|
||||||
out_unlock:
|
out_unlock:
|
||||||
if (page)
|
if (folio)
|
||||||
ocfs2_unlock_and_free_pages(&page, num_pages);
|
ocfs2_unlock_and_free_folios(&folio, num_folios);
|
||||||
|
|
||||||
out_commit:
|
out_commit:
|
||||||
if (ret < 0 && did_quota)
|
if (ret < 0 && did_quota)
|
||||||
|
@ -254,11 +254,9 @@ static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec)
|
|||||||
return !rec->e_leaf_clusters;
|
return !rec->e_leaf_clusters;
|
||||||
}
|
}
|
||||||
|
|
||||||
int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
|
void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle,
|
||||||
struct page **pages, int *num);
|
size_t from, size_t to, struct folio *folio, int zero,
|
||||||
void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
|
u64 *phys);
|
||||||
unsigned int from, unsigned int to,
|
|
||||||
struct page *page, int zero, u64 *phys);
|
|
||||||
/*
|
/*
|
||||||
* Structures which describe a path through a btree, and functions to
|
* Structures which describe a path through a btree, and functions to
|
||||||
* manipulate them.
|
* manipulate them.
|
||||||
|
337	fs/ocfs2/aops.c
@ -215,10 +215,9 @@ bail:
|
|||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
int ocfs2_read_inline_data(struct inode *inode, struct page *page,
|
int ocfs2_read_inline_data(struct inode *inode, struct folio *folio,
|
||||||
struct buffer_head *di_bh)
|
struct buffer_head *di_bh)
|
||||||
{
|
{
|
||||||
void *kaddr;
|
|
||||||
loff_t size;
|
loff_t size;
|
||||||
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
|
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
|
||||||
|
|
||||||
@ -230,7 +229,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
|
|||||||
|
|
||||||
size = i_size_read(inode);
|
size = i_size_read(inode);
|
||||||
|
|
||||||
if (size > PAGE_SIZE ||
|
if (size > folio_size(folio) ||
|
||||||
size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
|
size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
|
||||||
ocfs2_error(inode->i_sb,
|
ocfs2_error(inode->i_sb,
|
||||||
"Inode %llu has with inline data has bad size: %Lu\n",
|
"Inode %llu has with inline data has bad size: %Lu\n",
|
||||||
@ -239,25 +238,18 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
|
|||||||
return -EROFS;
|
return -EROFS;
|
||||||
}
|
}
|
||||||
|
|
||||||
kaddr = kmap_atomic(page);
|
folio_fill_tail(folio, 0, di->id2.i_data.id_data, size);
|
||||||
if (size)
|
folio_mark_uptodate(folio);
|
||||||
memcpy(kaddr, di->id2.i_data.id_data, size);
|
|
||||||
/* Clear the remaining part of the page */
|
|
||||||
memset(kaddr + size, 0, PAGE_SIZE - size);
|
|
||||||
flush_dcache_page(page);
|
|
||||||
kunmap_atomic(kaddr);
|
|
||||||
|
|
||||||
SetPageUptodate(page);
|
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
|
static int ocfs2_readpage_inline(struct inode *inode, struct folio *folio)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
struct buffer_head *di_bh = NULL;
|
struct buffer_head *di_bh = NULL;
|
||||||
|
|
||||||
BUG_ON(!PageLocked(page));
|
BUG_ON(!folio_test_locked(folio));
|
||||||
BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
|
BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
|
||||||
|
|
||||||
ret = ocfs2_read_inode_block(inode, &di_bh);
|
ret = ocfs2_read_inode_block(inode, &di_bh);
|
||||||
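In the inline-data hunk above, the open-coded kmap_atomic()/memcpy()/memset()/flush_dcache_page()/kunmap_atomic() sequence collapses into folio_fill_tail(), which copies the inline bytes and zeroes the rest of the folio in one call. A minimal hedged sketch of the same pattern, assuming the folio_fill_tail() helper used in that hunk and a payload no larger than the folio (generic, not the ocfs2 function):

#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Copy an inline payload into a folio, zero the tail, and mark it up to date. */
static void example_read_inline(struct folio *folio, const void *data, size_t size)
{
	folio_fill_tail(folio, 0, data, size);	/* copy + zero the remainder */
	folio_mark_uptodate(folio);
}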
@ -266,9 +258,9 @@ static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
|
|||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
|
|
||||||
ret = ocfs2_read_inline_data(inode, page, di_bh);
|
ret = ocfs2_read_inline_data(inode, folio, di_bh);
|
||||||
out:
|
out:
|
||||||
unlock_page(page);
|
folio_unlock(folio);
|
||||||
|
|
||||||
brelse(di_bh);
|
brelse(di_bh);
|
||||||
return ret;
|
return ret;
|
||||||
@ -283,7 +275,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
|
|||||||
|
|
||||||
trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index);
|
trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index);
|
||||||
|
|
||||||
ret = ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page);
|
ret = ocfs2_inode_lock_with_folio(inode, NULL, 0, folio);
|
||||||
if (ret != 0) {
|
if (ret != 0) {
|
||||||
if (ret == AOP_TRUNCATED_PAGE)
|
if (ret == AOP_TRUNCATED_PAGE)
|
||||||
unlock = 0;
|
unlock = 0;
|
||||||
@ -305,7 +297,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
|
|||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* i_size might have just been updated as we grabed the meta lock. We
|
* i_size might have just been updated as we grabbed the meta lock. We
|
||||||
* might now be discovering a truncate that hit on another node.
|
* might now be discovering a truncate that hit on another node.
|
||||||
* block_read_full_folio->get_block freaks out if it is asked to read
|
* block_read_full_folio->get_block freaks out if it is asked to read
|
||||||
* beyond the end of a file, so we check here. Callers
|
* beyond the end of a file, so we check here. Callers
|
||||||
@ -322,7 +314,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
|
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
|
||||||
ret = ocfs2_readpage_inline(inode, &folio->page);
|
ret = ocfs2_readpage_inline(inode, folio);
|
||||||
else
|
else
|
||||||
ret = block_read_full_folio(folio, ocfs2_get_block);
|
ret = block_read_full_folio(folio, ocfs2_get_block);
|
||||||
unlock = 0;
|
unlock = 0;
|
||||||
@ -534,7 +526,7 @@ static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
|
|||||||
*
|
*
|
||||||
* from == to == 0 is code for "zero the entire cluster region"
|
* from == to == 0 is code for "zero the entire cluster region"
|
||||||
*/
|
*/
|
||||||
static void ocfs2_clear_page_regions(struct page *page,
|
static void ocfs2_clear_folio_regions(struct folio *folio,
|
||||||
struct ocfs2_super *osb, u32 cpos,
|
struct ocfs2_super *osb, u32 cpos,
|
||||||
unsigned from, unsigned to)
|
unsigned from, unsigned to)
|
||||||
{
|
{
|
||||||
@ -543,7 +535,7 @@ static void ocfs2_clear_page_regions(struct page *page,
|
|||||||
|
|
||||||
ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
|
ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
|
||||||
|
|
||||||
kaddr = kmap_atomic(page);
|
kaddr = kmap_local_folio(folio, 0);
|
||||||
|
|
||||||
if (from || to) {
|
if (from || to) {
|
||||||
if (from > cluster_start)
|
if (from > cluster_start)
|
||||||
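Here kmap_atomic()/kunmap_atomic() are replaced with kmap_local_folio()/kunmap_local(), which keep the temporary mapping local to the calling context without disabling preemption the way the atomic variants did. A hedged sketch of the idiom (generic helper, not ocfs2 code; assumes the [start, end) range stays within the first page of the folio):

#include <linux/highmem.h>
#include <linux/string.h>

/* Zero [start, end) of a folio through a temporary local kernel mapping. */
static void example_zero_through_kmap(struct folio *folio,
				      size_t start, size_t end)
{
	char *kaddr = kmap_local_folio(folio, 0);

	memset(kaddr + start, 0, end - start);
	kunmap_local(kaddr);
}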
@ -554,13 +546,13 @@ static void ocfs2_clear_page_regions(struct page *page,
|
|||||||
memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
|
memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
|
||||||
}
|
}
|
||||||
|
|
||||||
kunmap_atomic(kaddr);
|
kunmap_local(kaddr);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Nonsparse file systems fully allocate before we get to the write
|
* Nonsparse file systems fully allocate before we get to the write
|
||||||
* code. This prevents ocfs2_write() from tagging the write as an
|
* code. This prevents ocfs2_write() from tagging the write as an
|
||||||
* allocating one, which means ocfs2_map_page_blocks() might try to
|
* allocating one, which means ocfs2_map_folio_blocks() might try to
|
||||||
* read-in the blocks at the tail of our file. Avoid reading them by
|
* read-in the blocks at the tail of our file. Avoid reading them by
|
||||||
* testing i_size against each block offset.
|
* testing i_size against each block offset.
|
||||||
*/
|
*/
|
||||||
@ -585,11 +577,10 @@ static int ocfs2_should_read_blk(struct inode *inode, struct folio *folio,
|
|||||||
*
|
*
|
||||||
* This will also skip zeroing, which is handled externally.
|
* This will also skip zeroing, which is handled externally.
|
||||||
*/
|
*/
|
||||||
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
|
int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno,
|
||||||
struct inode *inode, unsigned int from,
|
struct inode *inode, unsigned int from,
|
||||||
unsigned int to, int new)
|
unsigned int to, int new)
|
||||||
{
|
{
|
||||||
struct folio *folio = page_folio(page);
|
|
||||||
int ret = 0;
|
int ret = 0;
|
||||||
struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
|
struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
|
||||||
unsigned int block_end, block_start;
|
unsigned int block_end, block_start;
|
||||||
@ -729,24 +720,24 @@ struct ocfs2_write_ctxt {
|
|||||||
unsigned int w_large_pages;
|
unsigned int w_large_pages;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Pages involved in this write.
|
* Folios involved in this write.
|
||||||
*
|
*
|
||||||
* w_target_page is the page being written to by the user.
|
* w_target_folio is the folio being written to by the user.
|
||||||
*
|
*
|
||||||
* w_pages is an array of pages which always contains
|
* w_folios is an array of folios which always contains
|
||||||
* w_target_page, and in the case of an allocating write with
|
* w_target_folio, and in the case of an allocating write with
|
||||||
* page_size < cluster size, it will contain zero'd and mapped
|
* page_size < cluster size, it will contain zero'd and mapped
|
||||||
* pages adjacent to w_target_page which need to be written
|
* pages adjacent to w_target_folio which need to be written
|
||||||
* out in so that future reads from that region will get
|
* out in so that future reads from that region will get
|
||||||
* zero's.
|
* zero's.
|
||||||
*/
|
*/
|
||||||
unsigned int w_num_pages;
|
unsigned int w_num_folios;
|
||||||
struct page *w_pages[OCFS2_MAX_CTXT_PAGES];
|
struct folio *w_folios[OCFS2_MAX_CTXT_PAGES];
|
||||||
struct page *w_target_page;
|
struct folio *w_target_folio;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* w_target_locked is used for page_mkwrite path indicating no unlocking
|
* w_target_locked is used for page_mkwrite path indicating no unlocking
|
||||||
* against w_target_page in ocfs2_write_end_nolock.
|
* against w_target_folio in ocfs2_write_end_nolock.
|
||||||
*/
|
*/
|
||||||
unsigned int w_target_locked:1;
|
unsigned int w_target_locked:1;
|
||||||
|
|
||||||
@ -771,40 +762,40 @@ struct ocfs2_write_ctxt {
|
|||||||
unsigned int w_unwritten_count;
|
unsigned int w_unwritten_count;
|
||||||
};
|
};
|
||||||
|
|
||||||
void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
|
void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for(i = 0; i < num_pages; i++) {
|
for(i = 0; i < num_folios; i++) {
|
||||||
if (pages[i]) {
|
if (!folios[i])
|
||||||
unlock_page(pages[i]);
|
continue;
|
||||||
mark_page_accessed(pages[i]);
|
folio_unlock(folios[i]);
|
||||||
put_page(pages[i]);
|
folio_mark_accessed(folios[i]);
|
||||||
}
|
folio_put(folios[i]);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
|
static void ocfs2_unlock_folios(struct ocfs2_write_ctxt *wc)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* w_target_locked is only set to true in the page_mkwrite() case.
|
* w_target_locked is only set to true in the page_mkwrite() case.
|
||||||
* The intent is to allow us to lock the target page from write_begin()
|
* The intent is to allow us to lock the target page from write_begin()
|
||||||
* to write_end(). The caller must hold a ref on w_target_page.
|
* to write_end(). The caller must hold a ref on w_target_folio.
|
||||||
*/
|
*/
|
||||||
if (wc->w_target_locked) {
|
if (wc->w_target_locked) {
|
||||||
BUG_ON(!wc->w_target_page);
|
BUG_ON(!wc->w_target_folio);
|
||||||
for (i = 0; i < wc->w_num_pages; i++) {
|
for (i = 0; i < wc->w_num_folios; i++) {
|
||||||
if (wc->w_target_page == wc->w_pages[i]) {
|
if (wc->w_target_folio == wc->w_folios[i]) {
|
||||||
wc->w_pages[i] = NULL;
|
wc->w_folios[i] = NULL;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
mark_page_accessed(wc->w_target_page);
|
folio_mark_accessed(wc->w_target_folio);
|
||||||
put_page(wc->w_target_page);
|
folio_put(wc->w_target_folio);
|
||||||
}
|
}
|
||||||
ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
|
ocfs2_unlock_and_free_folios(wc->w_folios, wc->w_num_folios);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void ocfs2_free_unwritten_list(struct inode *inode,
|
static void ocfs2_free_unwritten_list(struct inode *inode,
|
||||||
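The unlock-and-release loop above now works on folios: folio_unlock(), folio_mark_accessed() and folio_put() replace the page calls, and an early continue replaces the nested if. A small hedged sketch of that release idiom (illustrative names, same shape as the converted loop):

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>	/* folio_mark_accessed() */

/* Release an array of locked folios obtained from the page cache. */
static void example_release_folios(struct folio **folios, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (!folios[i])
			continue;
		folio_unlock(folios[i]);
		folio_mark_accessed(folios[i]);
		folio_put(folios[i]);
	}
}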
@ -826,7 +817,7 @@ static void ocfs2_free_write_ctxt(struct inode *inode,
|
|||||||
struct ocfs2_write_ctxt *wc)
|
struct ocfs2_write_ctxt *wc)
|
||||||
{
|
{
|
||||||
ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list);
|
ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list);
|
||||||
ocfs2_unlock_pages(wc);
|
ocfs2_unlock_folios(wc);
|
||||||
brelse(wc->w_di_bh);
|
brelse(wc->w_di_bh);
|
||||||
kfree(wc);
|
kfree(wc);
|
||||||
}
|
}
|
||||||
@ -869,29 +860,30 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
|
|||||||
* and dirty so they'll be written out (in order to prevent uninitialised
|
* and dirty so they'll be written out (in order to prevent uninitialised
|
||||||
* block data from leaking). And clear the new bit.
|
* block data from leaking). And clear the new bit.
|
||||||
*/
|
*/
|
||||||
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
|
static void ocfs2_zero_new_buffers(struct folio *folio, size_t from, size_t to)
|
||||||
{
|
{
|
||||||
unsigned int block_start, block_end;
|
unsigned int block_start, block_end;
|
||||||
struct buffer_head *head, *bh;
|
struct buffer_head *head, *bh;
|
||||||
|
|
||||||
BUG_ON(!PageLocked(page));
|
BUG_ON(!folio_test_locked(folio));
|
||||||
if (!page_has_buffers(page))
|
head = folio_buffers(folio);
|
||||||
|
if (!head)
|
||||||
return;
|
return;
|
||||||
|
|
||||||
bh = head = page_buffers(page);
|
bh = head;
|
||||||
block_start = 0;
|
block_start = 0;
|
||||||
do {
|
do {
|
||||||
block_end = block_start + bh->b_size;
|
block_end = block_start + bh->b_size;
|
||||||
|
|
||||||
if (buffer_new(bh)) {
|
if (buffer_new(bh)) {
|
||||||
if (block_end > from && block_start < to) {
|
if (block_end > from && block_start < to) {
|
||||||
if (!PageUptodate(page)) {
|
if (!folio_test_uptodate(folio)) {
|
||||||
unsigned start, end;
|
unsigned start, end;
|
||||||
|
|
||||||
start = max(from, block_start);
|
start = max(from, block_start);
|
||||||
end = min(to, block_end);
|
end = min(to, block_end);
|
||||||
|
|
||||||
zero_user_segment(page, start, end);
|
folio_zero_segment(folio, start, end);
|
||||||
 			set_buffer_uptodate(bh);
 	}
 
@@ -916,29 +908,26 @@ static void ocfs2_write_failure(struct inode *inode,
 	int i;
 	unsigned from = user_pos & (PAGE_SIZE - 1),
 		to = user_pos + user_len;
-	struct page *tmppage;
 
-	if (wc->w_target_page)
-		ocfs2_zero_new_buffers(wc->w_target_page, from, to);
+	if (wc->w_target_folio)
+		ocfs2_zero_new_buffers(wc->w_target_folio, from, to);
 
-	for(i = 0; i < wc->w_num_pages; i++) {
-		tmppage = wc->w_pages[i];
+	for (i = 0; i < wc->w_num_folios; i++) {
+		struct folio *folio = wc->w_folios[i];
 
-		if (tmppage && page_has_buffers(tmppage)) {
+		if (folio && folio_buffers(folio)) {
 			if (ocfs2_should_order_data(inode))
 				ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
 							   user_pos, user_len);
 
-			block_commit_write(tmppage, from, to);
+			block_commit_write(&folio->page, from, to);
 		}
 	}
 }
 
-static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
-					struct ocfs2_write_ctxt *wc,
-					struct page *page, u32 cpos,
-					loff_t user_pos, unsigned user_len,
-					int new)
+static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno,
+		struct ocfs2_write_ctxt *wc, struct folio *folio, u32 cpos,
+		loff_t user_pos, unsigned user_len, int new)
 {
 	int ret;
 	unsigned int map_from = 0, map_to = 0;
@@ -951,20 +940,19 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
 	/* treat the write as new if the a hole/lseek spanned across
 	 * the page boundary.
 	 */
-	new = new | ((i_size_read(inode) <= page_offset(page)) &&
-		     (page_offset(page) <= user_pos));
+	new = new | ((i_size_read(inode) <= folio_pos(folio)) &&
+		     (folio_pos(folio) <= user_pos));
 
-	if (page == wc->w_target_page) {
+	if (folio == wc->w_target_folio) {
 		map_from = user_pos & (PAGE_SIZE - 1);
 		map_to = map_from + user_len;
 
 		if (new)
-			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
-						    cluster_start, cluster_end,
-						    new);
+			ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
+						     cluster_start, cluster_end, new);
 		else
-			ret = ocfs2_map_page_blocks(page, p_blkno, inode,
-						    map_from, map_to, new);
+			ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
+						     map_from, map_to, new);
 		if (ret) {
 			mlog_errno(ret);
 			goto out;
@@ -978,7 +966,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
 		}
 	} else {
 		/*
-		 * If we haven't allocated the new page yet, we
+		 * If we haven't allocated the new folio yet, we
 		 * shouldn't be writing it out without copying user
 		 * data. This is likely a math error from the caller.
 		 */
@@ -987,8 +975,8 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
 		map_from = cluster_start;
 		map_to = cluster_end;
 
-		ret = ocfs2_map_page_blocks(page, p_blkno, inode,
-					    cluster_start, cluster_end, new);
+		ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
+					     cluster_start, cluster_end, new);
 		if (ret) {
 			mlog_errno(ret);
 			goto out;
@@ -996,20 +984,20 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
 	}
 
 	/*
-	 * Parts of newly allocated pages need to be zero'd.
+	 * Parts of newly allocated folios need to be zero'd.
 	 *
 	 * Above, we have also rewritten 'to' and 'from' - as far as
 	 * the rest of the function is concerned, the entire cluster
-	 * range inside of a page needs to be written.
+	 * range inside of a folio needs to be written.
 	 *
-	 * We can skip this if the page is up to date - it's already
+	 * We can skip this if the folio is uptodate - it's already
 	 * been zero'd from being read in as a hole.
 	 */
-	if (new && !PageUptodate(page))
-		ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
-					 cpos, user_data_from, user_data_to);
+	if (new && !folio_test_uptodate(folio))
+		ocfs2_clear_folio_regions(folio, OCFS2_SB(inode->i_sb),
+					  cpos, user_data_from, user_data_to);
 
-	flush_dcache_page(page);
+	flush_dcache_folio(folio);
 
 out:
 	return ret;
@@ -1018,11 +1006,9 @@ out:
 /*
  * This function will only grab one clusters worth of pages.
  */
-static int ocfs2_grab_pages_for_write(struct address_space *mapping,
-				      struct ocfs2_write_ctxt *wc,
-				      u32 cpos, loff_t user_pos,
-				      unsigned user_len, int new,
-				      struct page *mmap_page)
+static int ocfs2_grab_folios_for_write(struct address_space *mapping,
+		struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos,
+		unsigned user_len, int new, struct folio *mmap_folio)
 {
 	int ret = 0, i;
 	unsigned long start, target_index, end_index, index;
@@ -1039,7 +1025,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
 	 * last page of the write.
 	 */
 	if (new) {
-		wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
+		wc->w_num_folios = ocfs2_pages_per_cluster(inode->i_sb);
 		start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
 		/*
 		 * We need the index *past* the last page we could possibly
@@ -1049,15 +1035,15 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
 		last_byte = max(user_pos + user_len, i_size_read(inode));
 		BUG_ON(last_byte < 1);
 		end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
-		if ((start + wc->w_num_pages) > end_index)
-			wc->w_num_pages = end_index - start;
+		if ((start + wc->w_num_folios) > end_index)
+			wc->w_num_folios = end_index - start;
 	} else {
-		wc->w_num_pages = 1;
+		wc->w_num_folios = 1;
 		start = target_index;
 	}
 	end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
 
-	for(i = 0; i < wc->w_num_pages; i++) {
+	for(i = 0; i < wc->w_num_folios; i++) {
 		index = start + i;
 
 		if (index >= target_index && index <= end_index &&
@@ -1067,37 +1053,38 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
 			 * and wants us to directly use the page
 			 * passed in.
 			 */
-			lock_page(mmap_page);
+			folio_lock(mmap_folio);
 
 			/* Exit and let the caller retry */
-			if (mmap_page->mapping != mapping) {
-				WARN_ON(mmap_page->mapping);
-				unlock_page(mmap_page);
+			if (mmap_folio->mapping != mapping) {
+				WARN_ON(mmap_folio->mapping);
+				folio_unlock(mmap_folio);
 				ret = -EAGAIN;
 				goto out;
 			}
 
-			get_page(mmap_page);
-			wc->w_pages[i] = mmap_page;
+			folio_get(mmap_folio);
+			wc->w_folios[i] = mmap_folio;
 			wc->w_target_locked = true;
 		} else if (index >= target_index && index <= end_index &&
 			   wc->w_type == OCFS2_WRITE_DIRECT) {
 			/* Direct write has no mapping page. */
-			wc->w_pages[i] = NULL;
+			wc->w_folios[i] = NULL;
 			continue;
 		} else {
-			wc->w_pages[i] = find_or_create_page(mapping, index,
-							     GFP_NOFS);
-			if (!wc->w_pages[i]) {
-				ret = -ENOMEM;
+			wc->w_folios[i] = __filemap_get_folio(mapping, index,
+					FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+					GFP_NOFS);
+			if (IS_ERR(wc->w_folios[i])) {
+				ret = PTR_ERR(wc->w_folios[i]);
 				mlog_errno(ret);
 				goto out;
 			}
 		}
-		wait_for_stable_page(wc->w_pages[i]);
+		folio_wait_stable(wc->w_folios[i]);
 
 		if (index == target_index)
-			wc->w_target_page = wc->w_pages[i];
+			wc->w_target_folio = wc->w_folios[i];
 	}
 out:
 	if (ret)
@@ -1181,19 +1168,18 @@ static int ocfs2_write_cluster(struct address_space *mapping,
 	if (!should_zero)
 		p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);
 
-	for(i = 0; i < wc->w_num_pages; i++) {
+	for (i = 0; i < wc->w_num_folios; i++) {
 		int tmpret;
 
 		/* This is the direct io target page. */
-		if (wc->w_pages[i] == NULL) {
+		if (wc->w_folios[i] == NULL) {
 			p_blkno += (1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits));
 			continue;
 		}
 
-		tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
-						      wc->w_pages[i], cpos,
-						      user_pos, user_len,
-						      should_zero);
+		tmpret = ocfs2_prepare_folio_for_write(inode, &p_blkno, wc,
+				wc->w_folios[i], cpos, user_pos, user_len,
+				should_zero);
 		if (tmpret) {
 			mlog_errno(tmpret);
 			if (ret == 0)
@@ -1472,7 +1458,7 @@ static int ocfs2_write_begin_inline(struct address_space *mapping,
 {
 	int ret;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
-	struct page *page;
+	struct folio *folio;
 	handle_t *handle;
 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
 
@@ -1483,19 +1469,21 @@ static int ocfs2_write_begin_inline(struct address_space *mapping,
 		goto out;
 	}
 
-	page = find_or_create_page(mapping, 0, GFP_NOFS);
-	if (!page) {
+	folio = __filemap_get_folio(mapping, 0,
+			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
+	if (IS_ERR(folio)) {
 		ocfs2_commit_trans(osb, handle);
-		ret = -ENOMEM;
+		ret = PTR_ERR(folio);
 		mlog_errno(ret);
 		goto out;
 	}
 	/*
-	 * If we don't set w_num_pages then this page won't get unlocked
+	 * If we don't set w_num_folios then this folio won't get unlocked
 	 * and freed on cleanup of the write context.
 	 */
-	wc->w_pages[0] = wc->w_target_page = page;
-	wc->w_num_pages = 1;
+	wc->w_target_folio = folio;
+	wc->w_folios[0] = folio;
+	wc->w_num_folios = 1;
 
 	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
 				      OCFS2_JOURNAL_ACCESS_WRITE);
@@ -1509,8 +1497,8 @@ static int ocfs2_write_begin_inline(struct address_space *mapping,
 	if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
 		ocfs2_set_inode_data_inline(inode, di);
 
-	if (!PageUptodate(page)) {
-		ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
+	if (!folio_test_uptodate(folio)) {
+		ret = ocfs2_read_inline_data(inode, folio, wc->w_di_bh);
 		if (ret) {
 			ocfs2_commit_trans(osb, handle);
 
@@ -1533,9 +1521,8 @@ int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
 }
 
 static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
-					  struct inode *inode, loff_t pos,
-					  unsigned len, struct page *mmap_page,
-					  struct ocfs2_write_ctxt *wc)
+		struct inode *inode, loff_t pos, size_t len,
+		struct folio *mmap_folio, struct ocfs2_write_ctxt *wc)
 {
 	int ret, written = 0;
 	loff_t end = pos + len;
@@ -1550,7 +1537,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
 	 * Handle inodes which already have inline data 1st.
 	 */
 	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
-		if (mmap_page == NULL &&
+		if (mmap_folio == NULL &&
 		    ocfs2_size_fits_inline_data(wc->w_di_bh, end))
 			goto do_inline_write;
 
@@ -1574,7 +1561,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
 	 * Check whether the write can fit.
 	 */
 	di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
-	if (mmap_page ||
+	if (mmap_folio ||
 	    end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
 		return 0;
 
@@ -1641,9 +1628,9 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
 }
 
 int ocfs2_write_begin_nolock(struct address_space *mapping,
 		loff_t pos, unsigned len, ocfs2_write_type_t type,
 		struct folio **foliop, void **fsdata,
-		struct buffer_head *di_bh, struct page *mmap_page)
+		struct buffer_head *di_bh, struct folio *mmap_folio)
 {
 	int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
 	unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
@@ -1666,7 +1653,7 @@ try_again:
 
 	if (ocfs2_supports_inline_data(osb)) {
 		ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
-						     mmap_page, wc);
+						     mmap_folio, wc);
 		if (ret == 1) {
 			ret = 0;
 			goto success;
@@ -1718,7 +1705,7 @@ try_again:
 			(unsigned long long)OCFS2_I(inode)->ip_blkno,
 			(long long)i_size_read(inode),
 			le32_to_cpu(di->i_clusters),
-			pos, len, type, mmap_page,
+			pos, len, type, mmap_folio,
 			clusters_to_alloc, extents_to_split);
 
 	/*
@@ -1789,21 +1776,21 @@ try_again:
 	}
 
 	/*
-	 * Fill our page array first. That way we've grabbed enough so
+	 * Fill our folio array first. That way we've grabbed enough so
 	 * that we can zero and flush if we error after adding the
 	 * extent.
 	 */
-	ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
-					 cluster_of_pages, mmap_page);
+	ret = ocfs2_grab_folios_for_write(mapping, wc, wc->w_cpos, pos, len,
+					  cluster_of_pages, mmap_folio);
 	if (ret) {
 		/*
-		 * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
-		 * the target page. In this case, we exit with no error and no target
-		 * page. This will trigger the caller, page_mkwrite(), to re-try
-		 * the operation.
+		 * ocfs2_grab_folios_for_write() returns -EAGAIN if it
+		 * could not lock the target folio. In this case, we exit
+		 * with no error and no target folio. This will trigger
+		 * the caller, page_mkwrite(), to re-try the operation.
 		 */
 		if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) {
-			BUG_ON(wc->w_target_page);
+			BUG_ON(wc->w_target_folio);
 			ret = 0;
 			goto out_quota;
 		}
@@ -1826,7 +1813,7 @@ try_again:
 
 success:
 	if (foliop)
-		*foliop = page_folio(wc->w_target_page);
+		*foliop = wc->w_target_folio;
 	*fsdata = wc;
 	return 0;
 out_quota:
@@ -1845,7 +1832,7 @@ out:
 	 * to VM code.
 	 */
 	if (wc->w_target_locked)
-		unlock_page(mmap_page);
+		folio_unlock(mmap_folio);
 
 	ocfs2_free_write_ctxt(inode, wc);
 
@@ -1924,18 +1911,15 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
 				   struct ocfs2_dinode *di,
 				   struct ocfs2_write_ctxt *wc)
 {
-	void *kaddr;
 
 	if (unlikely(*copied < len)) {
-		if (!PageUptodate(wc->w_target_page)) {
+		if (!folio_test_uptodate(wc->w_target_folio)) {
 			*copied = 0;
 			return;
 		}
 	}
 
-	kaddr = kmap_atomic(wc->w_target_page);
-	memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
-	kunmap_atomic(kaddr);
+	memcpy_from_folio(di->id2.i_data.id_data + pos, wc->w_target_folio,
+			  pos, *copied);
 
 	trace_ocfs2_write_end_inline(
 		(unsigned long long)OCFS2_I(inode)->ip_blkno,
@@ -1944,17 +1928,16 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
 		le16_to_cpu(di->i_dyn_features));
 }
 
-int ocfs2_write_end_nolock(struct address_space *mapping,
-			   loff_t pos, unsigned len, unsigned copied, void *fsdata)
+int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos,
+			   unsigned len, unsigned copied, void *fsdata)
 {
 	int i, ret;
-	unsigned from, to, start = pos & (PAGE_SIZE - 1);
+	size_t from, to, start = pos & (PAGE_SIZE - 1);
 	struct inode *inode = mapping->host;
 	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
 	struct ocfs2_write_ctxt *wc = fsdata;
 	struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
 	handle_t *handle = wc->w_handle;
-	struct page *tmppage;
 
 	BUG_ON(!list_empty(&wc->w_unwritten_list));
 
@@ -1973,44 +1956,44 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
 		goto out_write_size;
 	}
 
-	if (unlikely(copied < len) && wc->w_target_page) {
+	if (unlikely(copied < len) && wc->w_target_folio) {
 		loff_t new_isize;
 
-		if (!PageUptodate(wc->w_target_page))
+		if (!folio_test_uptodate(wc->w_target_folio))
 			copied = 0;
 
 		new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
-		if (new_isize > page_offset(wc->w_target_page))
-			ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
+		if (new_isize > folio_pos(wc->w_target_folio))
+			ocfs2_zero_new_buffers(wc->w_target_folio, start+copied,
 					       start+len);
 		else {
 			/*
-			 * When page is fully beyond new isize (data copy
-			 * failed), do not bother zeroing the page. Invalidate
+			 * When folio is fully beyond new isize (data copy
+			 * failed), do not bother zeroing the folio. Invalidate
 			 * it instead so that writeback does not get confused
 			 * put page & buffer dirty bits into inconsistent
 			 * state.
 			 */
-			block_invalidate_folio(page_folio(wc->w_target_page),
-					       0, PAGE_SIZE);
+			block_invalidate_folio(wc->w_target_folio, 0,
+					       folio_size(wc->w_target_folio));
 		}
 	}
-	if (wc->w_target_page)
-		flush_dcache_page(wc->w_target_page);
+	if (wc->w_target_folio)
+		flush_dcache_folio(wc->w_target_folio);
 
-	for(i = 0; i < wc->w_num_pages; i++) {
-		tmppage = wc->w_pages[i];
+	for (i = 0; i < wc->w_num_folios; i++) {
+		struct folio *folio = wc->w_folios[i];
 
-		/* This is the direct io target page. */
-		if (tmppage == NULL)
+		/* This is the direct io target folio */
+		if (folio == NULL)
 			continue;
 
-		if (tmppage == wc->w_target_page) {
+		if (folio == wc->w_target_folio) {
 			from = wc->w_target_from;
 			to = wc->w_target_to;
 
-			BUG_ON(from > PAGE_SIZE ||
-			       to > PAGE_SIZE ||
+			BUG_ON(from > folio_size(folio) ||
+			       to > folio_size(folio) ||
 			       to < from);
 		} else {
 			/*
@@ -2019,19 +2002,17 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
 			 * to flush their entire range.
 			 */
 			from = 0;
-			to = PAGE_SIZE;
+			to = folio_size(folio);
 		}
 
-		if (page_has_buffers(tmppage)) {
+		if (folio_buffers(folio)) {
 			if (handle && ocfs2_should_order_data(inode)) {
-				loff_t start_byte =
-					((loff_t)tmppage->index << PAGE_SHIFT) +
-					from;
+				loff_t start_byte = folio_pos(folio) + from;
 				loff_t length = to - from;
 				ocfs2_jbd2_inode_add_write(handle, inode,
 							   start_byte, length);
 			}
-			block_commit_write(tmppage, from, to);
+			block_commit_write(&folio->page, from, to);
 		}
 	}
 
@@ -2060,7 +2041,7 @@ out:
 	 * this lock and will ask for the page lock when flushing the data.
 	 * put it here to preserve the unlock order.
 	 */
-	ocfs2_unlock_pages(wc);
+	ocfs2_unlock_folios(wc);
 
 	if (handle)
 		ocfs2_commit_trans(osb, handle);
 
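The hunks above replace find_or_create_page() with __filemap_get_folio(), which returns an ERR_PTR() on failure instead of NULL. A minimal sketch of that lookup pattern, outside ocfs2 and with a hypothetical example_get_locked_folio() helper:

/*
 * Sketch only, not part of the patch: create-or-find a locked folio and
 * propagate PTR_ERR() instead of assuming -ENOMEM.
 */
#include <linux/pagemap.h>
#include <linux/err.h>

static struct folio *example_get_locked_folio(struct address_space *mapping,
					      pgoff_t index)
{
	struct folio *folio;

	/* FGP_LOCK | FGP_CREAT: allocate the folio if absent, return it locked */
	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				    GFP_NOFS);
	if (IS_ERR(folio))
		return folio;	/* caller tests IS_ERR() and uses PTR_ERR() */

	/* wait until writeback no longer needs the folio contents stable */
	folio_wait_stable(folio);
	return folio;
}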
@@ -8,16 +8,11 @@
 
 #include <linux/fs.h>
 
-handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
-				      struct page *page,
-				      unsigned from,
-				      unsigned to);
-
-int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
+int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno,
 			  struct inode *inode, unsigned int from,
 			  unsigned int to, int new);
 
-void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages);
+void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios);
 
 int walk_page_buffers(	handle_t *handle,
 			struct buffer_head *head,
@@ -37,11 +32,11 @@ typedef enum {
 } ocfs2_write_type_t;
 
 int ocfs2_write_begin_nolock(struct address_space *mapping,
 		loff_t pos, unsigned len, ocfs2_write_type_t type,
 		struct folio **foliop, void **fsdata,
-		struct buffer_head *di_bh, struct page *mmap_page);
+		struct buffer_head *di_bh, struct folio *mmap_folio);
 
-int ocfs2_read_inline_data(struct inode *inode, struct page *page,
+int ocfs2_read_inline_data(struct inode *inode, struct folio *folio,
 			   struct buffer_head *di_bh);
 int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size);
 
@@ -3,6 +3,7 @@
  * Copyright (C) 2004, 2005 Oracle.  All rights reserved.
  */
 
+#include "linux/kstrtox.h"
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/jiffies.h>
@@ -1020,7 +1021,7 @@ fire_callbacks:
 	if (list_empty(&slot->ds_live_item))
 		goto out;
 
-	/* live nodes only go dead after enough consequtive missed
+	/* live nodes only go dead after enough consecutive missed
 	 * samples..  reset the missed counter whenever we see
 	 * activity */
 	if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) {
@@ -1535,10 +1536,11 @@ static int o2hb_read_block_input(struct o2hb_region *reg,
 {
 	unsigned long bytes;
 	char *p = (char *)page;
+	int ret;
 
-	bytes = simple_strtoul(p, &p, 0);
-	if (!p || (*p && (*p != '\n')))
-		return -EINVAL;
+	ret = kstrtoul(p, 0, &bytes);
+	if (ret)
+		return ret;
 
 	/* Heartbeat and fs min / max block sizes are the same. */
 	if (bytes > 4096 || bytes < 512)
@@ -1622,13 +1624,14 @@ static ssize_t o2hb_region_blocks_store(struct config_item *item,
 	struct o2hb_region *reg = to_o2hb_region(item);
 	unsigned long tmp;
 	char *p = (char *)page;
+	int ret;
 
 	if (reg->hr_bdev_file)
 		return -EINVAL;
 
-	tmp = simple_strtoul(p, &p, 0);
-	if (!p || (*p && (*p != '\n')))
-		return -EINVAL;
+	ret = kstrtoul(p, 0, &tmp);
+	if (ret)
+		return ret;
 
 	if (tmp > O2NM_MAX_NODES || tmp == 0)
 		return -ERANGE;
@@ -1776,8 +1779,8 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
 	if (o2nm_this_node() == O2NM_MAX_NODES)
 		return -EINVAL;
 
-	fd = simple_strtol(p, &p, 0);
-	if (!p || (*p && (*p != '\n')))
+	ret = kstrtol(p, 0, &fd);
+	if (ret < 0)
 		return -EINVAL;
 
 	if (fd < 0 || fd >= INT_MAX)
@@ -2136,10 +2139,11 @@ static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *ite
 {
 	unsigned long tmp;
 	char *p = (char *)page;
+	int ret;
 
-	tmp = simple_strtoul(p, &p, 10);
-	if (!p || (*p && (*p != '\n')))
-		return -EINVAL;
+	ret = kstrtoul(p, 10, &tmp);
+	if (ret)
+		return ret;
 
 	/* this will validate ranges for us. */
 	o2hb_dead_threshold_set((unsigned int) tmp);
 
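The store handlers above switch from simple_strtoul() to kstrtoul(). A hedged, self-contained sketch of the same parsing shape; example_store() is illustrative, not kernel code:

/*
 * Sketch of the kstrtoul() pattern: it rejects trailing garbage itself
 * (a single trailing newline is accepted), so the old
 * "!p || (*p && *p != '\n')" check is no longer needed.
 */
#include <linux/kstrtox.h>
#include <linux/errno.h>

static ssize_t example_store(const char *page, size_t count)
{
	unsigned long val;
	int ret;

	ret = kstrtoul(page, 0, &val);	/* base 0 accepts 10, 0x10, 010 */
	if (ret)
		return ret;

	if (val == 0 || val > 4096)	/* range checking stays with the caller */
		return -ERANGE;

	return count;
}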
@@ -29,7 +29,7 @@
  * just calling printk() so that this can eventually make its way through
  * relayfs along with the debugging messages.  Everything else gets KERN_DEBUG.
  * The inline tests and macro dance give GCC the opportunity to quite cleverly
- * only emit the appropriage printk() when the caller passes in a constant
+ * only emit the appropriate printk() when the caller passes in a constant
  * mask, as is almost always the case.
  *
  * All this bitmask nonsense is managed from the files under
 
@@ -23,7 +23,7 @@
  * race between when we see a node start heartbeating and when we connect
  * to it.
  *
- * So nodes that are in this transtion put a hold on the quorum decision
+ * So nodes that are in this transition put a hold on the quorum decision
  * with a counter.  As they fall out of this transition they drop the count
  * and if they're the last, they fire off the decision.
  */
@@ -189,7 +189,7 @@ static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
 }
 
 /* as a node comes up we delay the quorum decision until we know the fate of
- * the connection.  the hold will be droped in conn_up or hb_down.  it might be
+ * the connection.  the hold will be dropped in conn_up or hb_down.  it might be
  * perpetuated by con_err until hb_down.  if we already have a conn, we might
  * be dropping a hold that conn_up got. */
 void o2quo_hb_up(u8 node)
@@ -256,7 +256,7 @@ void o2quo_hb_still_up(u8 node)
 }
 
 /* This is analogous to hb_up.  as a node's connection comes up we delay the
- * quorum decision until we see it heartbeating.  the hold will be droped in
+ * quorum decision until we see it heartbeating.  the hold will be dropped in
 * hb_up or hb_down.  it might be perpetuated by con_err until hb_down.  if
 * it's already heartbeating we might be dropping a hold that conn_up got.
 * */
 
@@ -5,13 +5,13 @@
 *
 * ----
 *
- * Callers for this were originally written against a very simple synchronus
+ * Callers for this were originally written against a very simple synchronous
 * API.  This implementation reflects those simple callers.  Some day I'm sure
 * we'll need to move to a more robust posting/callback mechanism.
 *
 * Transmit calls pass in kernel virtual addresses and block copying this into
 * the socket's tx buffers via a usual blocking sendmsg.  They'll block waiting
- * for a failed socket to timeout.  TX callers can also pass in a poniter to an
+ * for a failed socket to timeout.  TX callers can also pass in a pointer to an
 * 'int' which gets filled with an errno off the wire in response to the
 * message they send.
 *
@@ -101,7 +101,7 @@ static struct socket *o2net_listen_sock;
 * o2net_wq.  teardown detaches the callbacks before destroying the workqueue.
 * quorum work is queued as sock containers are shutdown.. stop_listening
 * tears down all the node's sock containers, preventing future shutdowns
- * and queued quroum work, before canceling delayed quorum work and
+ * and queued quorum work, before canceling delayed quorum work and
 * destroying the work queue.
 */
 static struct workqueue_struct *o2net_wq;
@@ -1419,7 +1419,7 @@ out:
 	return ret;
 }
 
-/* this work func is triggerd by data ready.  it reads until it can read no
+/* this work func is triggered by data ready.  it reads until it can read no
 * more.  it interprets 0, eof, as fatal.  if data_ready hits while we're doing
 * our work the work struct will be marked and we'll be called again. */
 static void o2net_rx_until_empty(struct work_struct *work)
 
@@ -118,7 +118,7 @@ struct dlm_lockstatus {
 #define LKM_VALBLK       0x00000100  /* lock value block request */
 #define LKM_NOQUEUE      0x00000200  /* non blocking request */
 #define LKM_CONVERT      0x00000400  /* conversion request */
-#define LKM_NODLCKWT     0x00000800  /* this lock wont deadlock (U) */
+#define LKM_NODLCKWT     0x00000800  /* this lock won't deadlock (U) */
 #define LKM_UNLOCK       0x00001000  /* deallocate this lock */
 #define LKM_CANCEL       0x00002000  /* cancel conversion request */
 #define LKM_DEQALL       0x00004000  /* remove all locks held by proc (U) */
 
@@ -207,7 +207,7 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
 * 1) all recovery threads cluster wide will work on recovering
 *    ONE node at a time
 * 2) negotiate who will take over all the locks for the dead node.
- *    thats right... ALL the locks.
+ *    that's right... ALL the locks.
 * 3) once a new master is chosen, everyone scans all locks
 *    and moves aside those mastered by the dead guy
 * 4) each of these locks should be locked until recovery is done
@@ -1469,7 +1469,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
 	 * The first one is handled at the end of this function. The
 	 * other two are handled in the worker thread after locks have
 	 * been attached. Yes, we don't wait for purge time to match
-	 * kref_init. The lockres will still have atleast one ref
+	 * kref_init. The lockres will still have at least one ref
 	 * added because it is in the hash __dlm_insert_lockres() */
 	extra_refs++;
 
@@ -1735,7 +1735,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
 			spin_unlock(&res->spinlock);
 		}
 	} else {
-		/* put.. incase we are not the master */
+		/* put.. in case we are not the master */
 		spin_unlock(&res->spinlock);
 		dlm_lockres_put(res);
 	}
 
@@ -20,6 +20,7 @@
 
 #include <linux/module.h>
 #include <linux/fs.h>
+#include <linux/fs_context.h>
 #include <linux/pagemap.h>
 #include <linux/types.h>
 #include <linux/slab.h>
@@ -506,9 +507,7 @@ bail:
 	return status;
 }
 
-static int dlmfs_fill_super(struct super_block * sb,
-			    void * data,
-			    int silent)
+static int dlmfs_fill_super(struct super_block *sb, struct fs_context *fc)
 {
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 	sb->s_blocksize = PAGE_SIZE;
@@ -556,17 +555,27 @@ static const struct inode_operations dlmfs_file_inode_operations = {
 	.setattr	= dlmfs_file_setattr,
 };
 
-static struct dentry *dlmfs_mount(struct file_system_type *fs_type,
-	int flags, const char *dev_name, void *data)
+static int dlmfs_get_tree(struct fs_context *fc)
 {
-	return mount_nodev(fs_type, flags, data, dlmfs_fill_super);
+	return get_tree_nodev(fc, dlmfs_fill_super);
+}
+
+static const struct fs_context_operations dlmfs_context_ops = {
+	.get_tree	= dlmfs_get_tree,
+};
+
+static int dlmfs_init_fs_context(struct fs_context *fc)
+{
+	fc->ops = &dlmfs_context_ops;
+
+	return 0;
 }
 
 static struct file_system_type dlmfs_fs_type = {
 	.owner		= THIS_MODULE,
 	.name		= "ocfs2_dlmfs",
-	.mount		= dlmfs_mount,
 	.kill_sb	= kill_litter_super,
+	.init_fs_context = dlmfs_init_fs_context,
 };
 MODULE_ALIAS_FS("ocfs2_dlmfs");
 
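For context, the new-style mount API that dlmfs adopts above generally takes this shape: mount(2) now goes through init_fs_context(), and get_tree_nodev() calls the fill_super callback with the fs_context. "examplefs" and all of its callbacks are hypothetical names, not part of the patch:

/* Sketch of a minimal no-device filesystem using the fs_context API. */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/fs_context.h>

static int examplefs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	return 0;	/* a real fill_super would also set s_op and s_root */
}

static int examplefs_get_tree(struct fs_context *fc)
{
	/* "nodev": no backing block device, one superblock per mount */
	return get_tree_nodev(fc, examplefs_fill_super);
}

static const struct fs_context_operations examplefs_context_ops = {
	.get_tree	= examplefs_get_tree,
};

static int examplefs_init_fs_context(struct fs_context *fc)
{
	fc->ops = &examplefs_context_ops;
	return 0;
}

static struct file_system_type examplefs_fs_type = {
	.owner		 = THIS_MODULE,
	.name		 = "examplefs",
	.kill_sb	 = kill_litter_super,
	.init_fs_context = examplefs_init_fs_context,
};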
@@ -794,7 +794,7 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
 
 /*
 * Keep a list of processes who have interest in a lockres.
- * Note: this is now only uesed for check recursive cluster locking.
+ * Note: this is now only used for check recursive cluster locking.
 */
 static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
				    struct ocfs2_lock_holder *oh)
@@ -2529,30 +2529,28 @@ bail:
 
 /*
 * This is working around a lock inversion between tasks acquiring DLM
- * locks while holding a page lock and the downconvert thread which
- * blocks dlm lock acquiry while acquiring page locks.
+ * locks while holding a folio lock and the downconvert thread which
+ * blocks dlm lock acquiry while acquiring folio locks.
 *
- * ** These _with_page variantes are only intended to be called from aop
- * methods that hold page locks and return a very specific *positive* error
+ * ** These _with_folio variants are only intended to be called from aop
+ * methods that hold folio locks and return a very specific *positive* error
 * code that aop methods pass up to the VFS -- test for errors with != 0. **
 *
 * The DLM is called such that it returns -EAGAIN if it would have
 * blocked waiting for the downconvert thread.  In that case we unlock
- * our page so the downconvert thread can make progress.  Once we've
+ * our folio so the downconvert thread can make progress.  Once we've
 * done this we have to return AOP_TRUNCATED_PAGE so the aop method
 * that called us can bubble that back up into the VFS who will then
 * immediately retry the aop call.
 */
-int ocfs2_inode_lock_with_page(struct inode *inode,
-			      struct buffer_head **ret_bh,
-			      int ex,
-			      struct page *page)
+int ocfs2_inode_lock_with_folio(struct inode *inode,
		struct buffer_head **ret_bh, int ex, struct folio *folio)
 {
 	int ret;
 
 	ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
 	if (ret == -EAGAIN) {
-		unlock_page(page);
+		folio_unlock(folio);
 		/*
 		 * If we can't get inode lock immediately, we should not return
 		 * directly here, since this will lead to a softlockup problem.
@@ -2630,7 +2628,7 @@ void ocfs2_inode_unlock(struct inode *inode,
 }
 
 /*
- * This _tracker variantes are introduced to deal with the recursive cluster
+ * This _tracker variants are introduced to deal with the recursive cluster
 * locking issue. The idea is to keep track of a lock holder on the stack of
 * the current process. If there's a lock holder on the stack, we know the
 * task context is already protected by cluster locking. Currently, they're
@@ -2735,7 +2733,7 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
 	struct ocfs2_lock_res *lockres;
 
 	lockres = &OCFS2_I(inode)->ip_inode_lockres;
-	/* had_lock means that the currect process already takes the cluster
+	/* had_lock means that the current process already takes the cluster
 	 * lock previously.
 	 * If had_lock is 1, we have nothing to do here.
 	 * If had_lock is 0, we will release the lock.
@@ -3802,9 +3800,9 @@ recheck:
 	 * set when the ast is received for an upconvert just before the
 	 * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
 	 * on the heels of the ast, we want to delay the downconvert just
-	 * enough to allow the up requestor to do its task. Because this
+	 * enough to allow the up requester to do its task. Because this
 	 * lock is in the blocked queue, the lock will be downconverted
-	 * as soon as the requestor is done with the lock.
+	 * as soon as the requester is done with the lock.
 	 */
 	if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
 		goto leave_requeue;
 
@@ -137,10 +137,8 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
 				 int ex,
 				 int arg_flags,
 				 int subclass);
-int ocfs2_inode_lock_with_page(struct inode *inode,
-			      struct buffer_head **ret_bh,
-			      int ex,
-			      struct page *page);
+int ocfs2_inode_lock_with_folio(struct inode *inode,
+		struct buffer_head **ret_bh, int ex, struct folio *folio);
 /* Variants without special locking class or flags */
 #define ocfs2_inode_lock_full(i, r, e, f)\
	ocfs2_inode_lock_full_nested(i, r, e, f, OI_LS_NORMAL)
 
@@ -435,6 +435,16 @@ static int ocfs2_get_clusters_nocache(struct inode *inode,
 		}
 	}
 
+	if (le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count)) {
+		ocfs2_error(inode->i_sb,
+			    "Inode %lu has an invalid extent (next_free_rec %u, count %u)\n",
+			    inode->i_ino,
+			    le16_to_cpu(el->l_next_free_rec),
+			    le16_to_cpu(el->l_count));
+		ret = -EROFS;
+		goto out;
+	}
+
 	i = ocfs2_search_extent_list(el, v_cluster);
 	if (i == -1) {
 		/*
 
@@ -782,11 +782,11 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
 		goto out_commit_trans;
 	}
 
-	/* Get the offsets within the page that we want to zero */
-	zero_from = abs_from & (PAGE_SIZE - 1);
-	zero_to = abs_to & (PAGE_SIZE - 1);
+	/* Get the offsets within the folio that we want to zero */
+	zero_from = offset_in_folio(folio, abs_from);
+	zero_to = offset_in_folio(folio, abs_to);
 	if (!zero_to)
-		zero_to = PAGE_SIZE;
+		zero_to = folio_size(folio);
 
 	trace_ocfs2_write_zero_page(
 		(unsigned long long)OCFS2_I(inode)->ip_blkno,
 
@@ -200,6 +200,20 @@ bail:
 	return inode;
 }
 
+static int ocfs2_dinode_has_extents(struct ocfs2_dinode *di)
+{
+	/* inodes flagged with other stuff in id2 */
+	if (di->i_flags & (OCFS2_SUPER_BLOCK_FL | OCFS2_LOCAL_ALLOC_FL |
+			   OCFS2_CHAIN_FL | OCFS2_DEALLOC_FL))
+		return 0;
+	/* i_flags doesn't indicate when id2 is a fast symlink */
+	if (S_ISLNK(di->i_mode) && di->i_size && di->i_clusters == 0)
+		return 0;
+	if (di->i_dyn_features & OCFS2_INLINE_DATA_FL)
+		return 0;
+
+	return 1;
+}
+
 /*
 * here's how inodes get read from disk:
@@ -1122,7 +1136,7 @@ static void ocfs2_clear_inode(struct inode *inode)
 
 	dquot_drop(inode);
 
-	/* To preven remote deletes we hold open lock before, now it
+	/* To prevent remote deletes we hold open lock before, now it
 	 * is time to unlock PR and EX open locks. */
 	ocfs2_open_unlock(inode);
 
@@ -1437,7 +1451,7 @@ static int ocfs2_filecheck_validate_inode_block(struct super_block *sb,
 	 * Call ocfs2_validate_meta_ecc() first since it has ecc repair
 	 * function, but we should not return error immediately when ecc
 	 * validation fails, because the reason is quite likely the invalid
-	 * inode number inputed.
+	 * inode number inputted.
 	 */
 	rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check);
 	if (rc) {
@@ -1547,6 +1561,16 @@ static int ocfs2_filecheck_repair_inode_block(struct super_block *sb,
 			      le32_to_cpu(di->i_fs_generation));
 	}
 
+	if (ocfs2_dinode_has_extents(di) &&
+	    le16_to_cpu(di->id2.i_list.l_next_free_rec) > le16_to_cpu(di->id2.i_list.l_count)) {
+		di->id2.i_list.l_next_free_rec = di->id2.i_list.l_count;
+		changed = 1;
+		mlog(ML_ERROR,
+		     "Filecheck: reset dinode #%llu: l_next_free_rec to %u\n",
+		     (unsigned long long)bh->b_blocknr,
+		     le16_to_cpu(di->id2.i_list.l_next_free_rec));
+	}
+
 	if (changed || ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check)) {
 		ocfs2_compute_meta_ecc(sb, bh->b_data, &di->i_check);
 		mark_buffer_dirty(bh);
 
@@ -796,7 +796,7 @@ bail:
 /*
 * OCFS2_IOC_INFO handles an array of requests passed from userspace.
 *
- * ocfs2_info_handle() recevies a large info aggregation, grab and
+ * ocfs2_info_handle() receives a large info aggregation, grab and
 * validate the request count from header, then break it into small
 * pieces, later specific handlers can handle them one by one.
 *
 
@@ -1956,7 +1956,7 @@ bail:
 
 /*
 * Scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some
- * randomness to the timeout to minimize multple nodes firing the timer at the
+ * randomness to the timeout to minimize multiple nodes firing the timer at the
 * same time.
 */
 static inline unsigned long ocfs2_orphan_scan_timeout(void)
 
@@ -44,13 +44,13 @@ static vm_fault_t ocfs2_fault(struct vm_fault *vmf)
 }
 
 static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
-			struct buffer_head *di_bh, struct page *page)
+			struct buffer_head *di_bh, struct folio *folio)
 {
 	int err;
 	vm_fault_t ret = VM_FAULT_NOPAGE;
 	struct inode *inode = file_inode(file);
 	struct address_space *mapping = inode->i_mapping;
-	loff_t pos = page_offset(page);
+	loff_t pos = folio_pos(folio);
 	unsigned int len = PAGE_SIZE;
 	pgoff_t last_index;
 	struct folio *locked_folio = NULL;
@@ -72,9 +72,9 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
 	 *
 	 * Let VM retry with these cases.
 	 */
-	if ((page->mapping != inode->i_mapping) ||
-	    (!PageUptodate(page)) ||
-	    (page_offset(page) >= size))
+	if ((folio->mapping != inode->i_mapping) ||
+	    !folio_test_uptodate(folio) ||
+	    (pos >= size))
 		goto out;
 
 	/*
@@ -87,11 +87,11 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
 	 * worry about ocfs2_write_begin() skipping some buffer reads
 	 * because the "write" would invalidate their data.
 	 */
-	if (page->index == last_index)
+	if (folio->index == last_index)
 		len = ((size - 1) & ~PAGE_MASK) + 1;
 
 	err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
-				       &locked_folio, &fsdata, di_bh, page);
+				       &locked_folio, &fsdata, di_bh, folio);
 	if (err) {
 		if (err != -ENOSPC)
 			mlog_errno(err);
@@ -112,7 +112,7 @@ out:
 
 static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
 {
-	struct page *page = vmf->page;
+	struct folio *folio = page_folio(vmf->page);
 	struct inode *inode = file_inode(vmf->vma->vm_file);
 	struct buffer_head *di_bh = NULL;
 	sigset_t oldset;
@@ -141,7 +141,7 @@ static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
 	 */
 	down_write(&OCFS2_I(inode)->ip_alloc_sem);
 
-	ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);
+	ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, folio);
 
 	up_write(&OCFS2_I(inode)->ip_alloc_sem);
 
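The mmap conversion above follows the usual ->page_mkwrite() folio pattern: derive the folio from vmf->page once, then do all validity checks in folio units. A sketch for a generic filesystem; example_page_mkwrite() is illustrative only, not ocfs2 code:

/* Sketch: minimal folio-based page_mkwrite validity checks. */
#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);
	loff_t size = i_size_read(inode);

	folio_lock(folio);
	/* raced with truncate or invalidation? let the VM retry the fault */
	if (folio->mapping != inode->i_mapping ||
	    !folio_test_uptodate(folio) ||
	    folio_pos(folio) >= size) {
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;
	}

	/* ... a real handler would dirty buffers / start a transaction here ... */
	return VM_FAULT_LOCKED;	/* folio stays locked on success */
}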
@@ -492,7 +492,7 @@ static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
 	bg = (struct ocfs2_group_desc *)gd_bh->b_data;
 
 	/*
-	 * moving goal is not allowd to start with a group desc blok(#0 blk)
+	 * moving goal is not allowed to start with a group desc blok(#0 blk)
 	 * let's compromise to the latter cluster.
 	 */
 	if (range->me_goal == le64_to_cpu(bg->bg_blkno))
@@ -658,7 +658,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
 
 	/*
 	 * probe the victim cluster group to find a proper
-	 * region to fit wanted movement, it even will perfrom
+	 * region to fit wanted movement, it even will perform
 	 * a best-effort attempt by compromising to a threshold
 	 * around the goal.
 	 */
@@ -920,7 +920,7 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
 	}
 
 	/*
-	 * rememer ip_xattr_sem also needs to be held if necessary
+	 * remember ip_xattr_sem also needs to be held if necessary
 	 */
 	down_write(&OCFS2_I(inode)->ip_alloc_sem);
 
@@ -1022,7 +1022,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
 	context->range = &range;
 
 	/*
-	 * ok, the default theshold for the defragmentation
+	 * ok, the default threshold for the defragmentation
 	 * is 1M, since our maximum clustersize was 1M also.
 	 * any thought?
 	 */
 
@@ -508,7 +508,6 @@ static int __ocfs2_mknod_locked(struct inode *dir,
 				struct inode *inode,
 				dev_t dev,
 				struct buffer_head **new_fe_bh,
-				struct buffer_head *parent_fe_bh,
 				handle_t *handle,
 				struct ocfs2_alloc_context *inode_ac,
 				u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit)
@@ -641,8 +640,8 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb,
 	}
 
 	return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh,
-				    parent_fe_bh, handle, inode_ac,
-				    fe_blkno, suballoc_loc, suballoc_bit);
+				    handle, inode_ac, fe_blkno,
+				    suballoc_loc, suballoc_bit);
 }
 
 static int ocfs2_mkdir(struct mnt_idmap *idmap,
@@ -2576,7 +2575,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
 	clear_nlink(inode);
 	/* do the real work now. */
 	status = __ocfs2_mknod_locked(dir, inode,
-				      0, &new_di_bh, parent_di_bh, handle,
+				      0, &new_di_bh, handle,
 				      inode_ac, di_blkno, suballoc_loc,
 				      suballoc_bit);
 	if (status < 0) {
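The namei hunks above remove an argument that __ocfs2_mknod_locked() never read (the parent directory's buffer head) and adjust both call sites to match. A minimal sketch of that kind of cleanup, using purely hypothetical names rather than ocfs2's own:

#include <linux/fs.h>
#include <linux/types.h>

/* Before: example_init_inode(inode, parent_bh, blkno). The parent buffer
 * head was accepted but never used, so the parameter is dropped here. */
static int example_init_inode(struct inode *inode, u64 blkno)
{
	/* ... initialise the freshly allocated on-disk inode at blkno ... */
	return 0;
}

/* ... and each caller simply stops passing the dead argument. */
static int example_create(struct inode *dir, struct inode *inode, u64 blkno)
{
	return example_init_inode(inode, blkno);
}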
@@ -132,7 +132,7 @@
  * well as the name of the cluster being joined.
  * mount.ocfs2 must pass in a matching stack name.
  *
- * If not set, the classic stack will be used. This is compatbile with
+ * If not set, the classic stack will be used. This is compatible with
  * all older versions.
  */
 #define OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK	0x0080
@@ -143,7 +143,7 @@
 /* Support for extended attributes */
 #define OCFS2_FEATURE_INCOMPAT_XATTR		0x0200
 
-/* Support for indexed directores */
+/* Support for indexed directories */
 #define OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS	0x0400
 
 /* Metadata checksum and error correction */
@@ -156,7 +156,7 @@
 #define OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG	0x2000
 
 /*
- * Incompat bit to indicate useable clusterinfo with stackflags for all
+ * Incompat bit to indicate usable clusterinfo with stackflags for all
  * cluster stacks (userspace adnd o2cb). If this bit is set,
 * INCOMPAT_USERSPACE_STACK becomes superfluous and thus should not be set.
 */
@@ -1083,7 +1083,7 @@ struct ocfs2_xattr_block {
 		struct ocfs2_xattr_header xb_header; /* xattr header if this
 							block contains xattr */
 		struct ocfs2_xattr_tree_root xb_root;/* xattr tree root if this
-							block cotains xattr
+							block contains xattr
 							tree. */
 	} xb_attrs;
 };
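The feature-flag hunks above touch on-disk incompat bits, which are plain bit masks tested against the feature word stored in the superblock. A generic illustration of how such a bit is consumed follows; the helper name and the bare __le32 parameter are made up for the example, and only the OCFS2_FEATURE_INCOMPAT_XATTR value comes from the header above:

#include <linux/types.h>
#include <asm/byteorder.h>

#define OCFS2_FEATURE_INCOMPAT_XATTR	0x0200	/* value as defined in the hunk above */

/* True when the extended-attribute incompat bit is set in the little-endian
 * feature word read from the on-disk superblock. */
static inline bool example_supports_xattr(__le32 feature_incompat)
{
	return le32_to_cpu(feature_incompat) & OCFS2_FEATURE_INCOMPAT_XATTR;
}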
@@ -215,7 +215,7 @@ struct ocfs2_move_extents {
 						       movement less likely
 						       to fail, may make fs
 						       even more fragmented */
-#define OCFS2_MOVE_EXT_FL_COMPLETE	(0x00000004)	/* Move or defragmenation
+#define OCFS2_MOVE_EXT_FL_COMPLETE	(0x00000004)	/* Move or defragmentation
 							   completely gets done.
 							   */
 
Some files were not shown because too many files have changed in this diff.