NFS Client Updates for Linux 6.3


Merge tag 'nfs-for-6.3-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client updates from Anna Schumaker:
 "New Features:

   - Convert the read and write paths to use folios

  Bugfixes and Cleanups:

   - Fix tracepoint state manager flag printing

   - Fix disabling swap files

   - Fix NFSv4 client identifier sysfs path in the documentation

   - Don't clear NFS_CAP_COPY if server returns NFS4ERR_OFFLOAD_DENIED

   - Treat GETDEVICEINFO errors as a layout failure

   - Replace kmap_atomic() calls with kmap_local_page()

   - Constify sunrpc sysfs kobj_type structures"

* tag 'nfs-for-6.3-1' of git://git.linux-nfs.org/projects/anna/linux-nfs: (25 commits)
  fs/nfs: Replace kmap_atomic() with kmap_local_page() in dir.c
  pNFS/filelayout: treat GETDEVICEINFO errors as layout failure
  Documentation: Fix sysfs path for the NFSv4 client identifier
  nfs42: do not fail with EIO if ssc returns NFS4ERR_OFFLOAD_DENIED
  NFS: fix disabling of swap
  SUNRPC: make kobj_type structures constant
  nfs4trace: fix state manager flag printing
  NFS: Remove unnecessary check in nfs_read_folio()
  NFS: Improve tracing of nfs_wb_folio()
  NFS: Enable tracing of nfs_invalidate_folio() and nfs_launder_folio()
  NFS: fix up nfs_release_folio() to try to release the page
  NFS: Clean up O_DIRECT request allocation
  NFS: Fix up nfs_vm_page_mkwrite() for folios
  NFS: Convert nfs_write_begin/end to use folios
  NFS: Remove unused function nfs_wb_page()
  NFS: Convert buffered writes to use folios
  NFS: Convert the function nfs_wb_page() to use folios
  NFS: Convert buffered reads to use folios
  NFS: Add a helper nfs_wb_folio()
  NFS: Convert the remaining pagelist helper functions to support folios
  ...
Linus Torvalds 2023-02-22 14:47:20 -08:00
commit d8ca6dbb8d
20 changed files with 698 additions and 434 deletions
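
Most of the diff below is the read/write path folio conversion. As quick orientation (an editorial summary drawn from the hunks that follow, not part of the commit itself), the recurring one-for-one substitutions are:

/*
 * Rough page-to-folio correspondence used throughout this series:
 *
 *   PageUptodate(page)           ->  folio_test_uptodate(folio)
 *   SetPageUptodate(page)        ->  folio_mark_uptodate(folio)
 *   lock_page()/unlock_page()    ->  folio_lock()/folio_unlock()
 *   get_page()/put_page()        ->  folio_get()/folio_put()
 *   zero_user_segment(page, ...) ->  folio_zero_segment(folio, ...)
 *   nfs_page_length(page)        ->  nfs_folio_length(folio)
 *   nfs_updatepage(...)          ->  nfs_update_folio(...)
 *   nfs_wb_page(inode, page)     ->  nfs_wb_folio(inode, folio)
 */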


@@ -152,7 +152,7 @@ string:
       via the kernel command line, or when the "nfs" module is
       loaded.

-  /sys/fs/nfs/client/net/identifier
+  /sys/fs/nfs/net/nfs_client/identifier
       This virtual file, available since Linux 5.3, is local to the
       network namespace in which it is accessed and so can provide
       distinction between network namespaces (containers) when the
@@ -164,7 +164,7 @@ then that uniquifier can be used. For example, a uniquifier might
 be formed at boot using the container's internal identifier:

   sha256sum /etc/machine-id | awk '{print $1}' \\
-    > /sys/fs/nfs/client/net/identifier
+    > /sys/fs/nfs/net/nfs_client/identifier

 Security considerations
 -----------------------


@@ -203,14 +203,14 @@ static void nfs_readdir_page_init_array(struct page *page, u64 last_cookie,
 {
     struct nfs_cache_array *array;

-    array = kmap_atomic(page);
+    array = kmap_local_page(page);
     array->change_attr = change_attr;
     array->last_cookie = last_cookie;
     array->size = 0;
     array->page_full = 0;
     array->page_is_eof = 0;
     array->cookies_are_ordered = 1;
-    kunmap_atomic(array);
+    kunmap_local(array);
 }

 /*
@@ -221,11 +221,11 @@ static void nfs_readdir_clear_array(struct page *page)
     struct nfs_cache_array *array;
     unsigned int i;

-    array = kmap_atomic(page);
+    array = kmap_local_page(page);
     for (i = 0; i < array->size; i++)
         kfree(array->array[i].name);
     array->size = 0;
-    kunmap_atomic(array);
+    kunmap_local(array);
 }

 static void nfs_readdir_free_folio(struct folio *folio)
@@ -371,14 +371,14 @@ static pgoff_t nfs_readdir_page_cookie_hash(u64 cookie)
 static bool nfs_readdir_page_validate(struct page *page, u64 last_cookie,
                                       u64 change_attr)
 {
-    struct nfs_cache_array *array = kmap_atomic(page);
+    struct nfs_cache_array *array = kmap_local_page(page);
     int ret = true;

     if (array->change_attr != change_attr)
         ret = false;
     if (nfs_readdir_array_index_cookie(array) != last_cookie)
         ret = false;
-    kunmap_atomic(array);
+    kunmap_local(array);
     return ret;
 }

@@ -418,9 +418,9 @@ static u64 nfs_readdir_page_last_cookie(struct page *page)
     struct nfs_cache_array *array;
     u64 ret;

-    array = kmap_atomic(page);
+    array = kmap_local_page(page);
     ret = array->last_cookie;
-    kunmap_atomic(array);
+    kunmap_local(array);
     return ret;
 }

@@ -429,9 +429,9 @@ static bool nfs_readdir_page_needs_filling(struct page *page)
     struct nfs_cache_array *array;
     bool ret;

-    array = kmap_atomic(page);
+    array = kmap_local_page(page);
     ret = !nfs_readdir_array_is_full(array);
-    kunmap_atomic(array);
+    kunmap_local(array);
     return ret;
 }

@@ -439,9 +439,9 @@ static void nfs_readdir_page_set_eof(struct page *page)
 {
     struct nfs_cache_array *array;

-    array = kmap_atomic(page);
+    array = kmap_local_page(page);
     nfs_readdir_array_set_eof(array);
-    kunmap_atomic(array);
+    kunmap_local(array);
 }

 static struct page *nfs_readdir_page_get_next(struct address_space *mapping,
@@ -568,14 +568,14 @@ static int nfs_readdir_search_array(struct nfs_readdir_descriptor *desc)
     struct nfs_cache_array *array;
     int status;

-    array = kmap_atomic(desc->page);
+    array = kmap_local_page(desc->page);
     if (desc->dir_cookie == 0)
         status = nfs_readdir_search_for_pos(array, desc);
     else
         status = nfs_readdir_search_for_cookie(array, desc);
-    kunmap_atomic(array);
+    kunmap_local(array);
     return status;
 }


@@ -343,14 +343,12 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
             struct nfs_page *req;
             unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
             /* XXX do we need to do the eof zeroing found in async_filler? */
-            req = nfs_create_request(dreq->ctx, pagevec[i],
-                                     pgbase, req_len);
+            req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
+                                            pgbase, pos, req_len);
             if (IS_ERR(req)) {
                 result = PTR_ERR(req);
                 break;
             }
-            req->wb_index = pos >> PAGE_SHIFT;
-            req->wb_offset = pos & ~PAGE_MASK;
             if (!nfs_pageio_add_request(&desc, req)) {
                 result = desc.pg_error;
                 nfs_release_request(req);
@@ -802,8 +800,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
             struct nfs_page *req;
             unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

-            req = nfs_create_request(dreq->ctx, pagevec[i],
-                                     pgbase, req_len);
+            req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
+                                            pgbase, pos, req_len);
             if (IS_ERR(req)) {
                 result = PTR_ERR(req);
                 break;
@@ -816,8 +814,6 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
             }
             nfs_lock_request(req);
-            req->wb_index = pos >> PAGE_SHIFT;
-            req->wb_offset = pos & ~PAGE_MASK;
             if (!nfs_pageio_add_request(&desc, req)) {
                 result = desc.pg_error;
                 nfs_unlock_and_release_request(req);


@@ -277,27 +277,28 @@ EXPORT_SYMBOL_GPL(nfs_file_fsync);
  * and that the new data won't completely replace the old data in
  * that range of the file.
  */
-static bool nfs_full_page_write(struct page *page, loff_t pos, unsigned int len)
+static bool nfs_folio_is_full_write(struct folio *folio, loff_t pos,
+                                    unsigned int len)
 {
-    unsigned int pglen = nfs_page_length(page);
-    unsigned int offset = pos & (PAGE_SIZE - 1);
+    unsigned int pglen = nfs_folio_length(folio);
+    unsigned int offset = offset_in_folio(folio, pos);
     unsigned int end = offset + len;

     return !pglen || (end >= pglen && !offset);
 }

-static bool nfs_want_read_modify_write(struct file *file, struct page *page,
+static bool nfs_want_read_modify_write(struct file *file, struct folio *folio,
                                        loff_t pos, unsigned int len)
 {
     /*
      * Up-to-date pages, those with ongoing or full-page write
      * don't need read/modify/write
      */
-    if (PageUptodate(page) || PagePrivate(page) ||
-        nfs_full_page_write(page, pos, len))
+    if (folio_test_uptodate(folio) || folio_test_private(folio) ||
+        nfs_folio_is_full_write(folio, pos, len))
         return false;

-    if (pnfs_ld_read_whole_page(file->f_mapping->host))
+    if (pnfs_ld_read_whole_page(file_inode(file)))
         return true;
     /* Open for reading too? */
     if (file->f_mode & FMODE_READ)
@@ -305,6 +306,15 @@ static bool nfs_want_read_modify_write(struct file *file, struct page *page,
     return false;
 }

+static struct folio *
+nfs_folio_grab_cache_write_begin(struct address_space *mapping, pgoff_t index)
+{
+    unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
+
+    return __filemap_get_folio(mapping, index, fgp_flags,
+                               mapping_gfp_mask(mapping));
+}
+
 /*
  * This does the "real" work of the write. We must allocate and lock the
  * page to be sent back to the generic routine, which then copies the
@@ -314,32 +324,31 @@ static bool nfs_want_read_modify_write(struct file *file, struct page *page,
  * increment the page use counts until he is done with the page.
  */
 static int nfs_write_begin(struct file *file, struct address_space *mapping,
-                           loff_t pos, unsigned len,
-                           struct page **pagep, void **fsdata)
+                           loff_t pos, unsigned len, struct page **pagep,
+                           void **fsdata)
 {
-    int ret;
-    pgoff_t index = pos >> PAGE_SHIFT;
-    struct page *page;
+    struct folio *folio;
     int once_thru = 0;
+    int ret;

     dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n",
         file, mapping->host->i_ino, len, (long long) pos);

 start:
-    page = grab_cache_page_write_begin(mapping, index);
-    if (!page)
+    folio = nfs_folio_grab_cache_write_begin(mapping, pos >> PAGE_SHIFT);
+    if (!folio)
         return -ENOMEM;
-    *pagep = page;
+    *pagep = &folio->page;

-    ret = nfs_flush_incompatible(file, page);
+    ret = nfs_flush_incompatible(file, folio);
     if (ret) {
-        unlock_page(page);
-        put_page(page);
+        folio_unlock(folio);
+        folio_put(folio);
     } else if (!once_thru &&
-               nfs_want_read_modify_write(file, page, pos, len)) {
+               nfs_want_read_modify_write(file, folio, pos, len)) {
         once_thru = 1;
-        ret = nfs_read_folio(file, page_folio(page));
-        put_page(page);
+        ret = nfs_read_folio(file, folio);
+        folio_put(folio);
         if (!ret)
             goto start;
     }
@@ -347,11 +356,12 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping,
 }

 static int nfs_write_end(struct file *file, struct address_space *mapping,
                          loff_t pos, unsigned len, unsigned copied,
                          struct page *page, void *fsdata)
 {
-    unsigned offset = pos & (PAGE_SIZE - 1);
     struct nfs_open_context *ctx = nfs_file_open_context(file);
+    struct folio *folio = page_folio(page);
+    unsigned offset = offset_in_folio(folio, pos);
     int status;

     dfprintk(PAGECACHE, "NFS: write_end(%pD2(%lu), %u@%lld)\n",
@@ -361,26 +371,26 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
      * Zero any uninitialised parts of the page, and then mark the page
      * as up to date if it turns out that we're extending the file.
      */
-    if (!PageUptodate(page)) {
-        unsigned pglen = nfs_page_length(page);
+    if (!folio_test_uptodate(folio)) {
+        size_t fsize = folio_size(folio);
+        unsigned pglen = nfs_folio_length(folio);
         unsigned end = offset + copied;

         if (pglen == 0) {
-            zero_user_segments(page, 0, offset,
-                    end, PAGE_SIZE);
-            SetPageUptodate(page);
+            folio_zero_segments(folio, 0, offset, end, fsize);
+            folio_mark_uptodate(folio);
         } else if (end >= pglen) {
-            zero_user_segment(page, end, PAGE_SIZE);
+            folio_zero_segment(folio, end, fsize);
             if (offset == 0)
-                SetPageUptodate(page);
+                folio_mark_uptodate(folio);
         } else
-            zero_user_segment(page, pglen, PAGE_SIZE);
+            folio_zero_segment(folio, pglen, fsize);
     }

-    status = nfs_updatepage(file, page, offset, copied);
-    unlock_page(page);
-    put_page(page);
+    status = nfs_update_folio(file, folio, offset, copied);
+    folio_unlock(folio);
+    folio_put(folio);

     if (status < 0)
         return status;
@@ -402,14 +412,16 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
 static void nfs_invalidate_folio(struct folio *folio, size_t offset,
                 size_t length)
 {
+    struct inode *inode = folio_file_mapping(folio)->host;
+
     dfprintk(PAGECACHE, "NFS: invalidate_folio(%lu, %zu, %zu)\n",
          folio->index, offset, length);

     if (offset != 0 || length < folio_size(folio))
         return;
     /* Cancel any unstarted writes on this page */
-    nfs_wb_folio_cancel(folio->mapping->host, folio);
+    nfs_wb_folio_cancel(inode, folio);
     folio_wait_fscache(folio);
+    trace_nfs_invalidate_folio(inode, folio);
 }

 /*
@@ -423,8 +435,13 @@ static bool nfs_release_folio(struct folio *folio, gfp_t gfp)
     dfprintk(PAGECACHE, "NFS: release_folio(%p)\n", folio);

     /* If the private flag is set, then the folio is not freeable */
-    if (folio_test_private(folio))
-        return false;
+    if (folio_test_private(folio)) {
+        if ((current_gfp_context(gfp) & GFP_KERNEL) != GFP_KERNEL ||
+            current_is_kswapd())
+            return false;
+        if (nfs_wb_folio(folio_file_mapping(folio)->host, folio) < 0)
+            return false;
+    }
     return nfs_fscache_release_folio(folio, gfp);
 }

@@ -465,12 +482,15 @@ static void nfs_check_dirty_writeback(struct folio *folio,
 static int nfs_launder_folio(struct folio *folio)
 {
     struct inode *inode = folio->mapping->host;
+    int ret;

     dfprintk(PAGECACHE, "NFS: launder_folio(%ld, %llu)\n",
         inode->i_ino, folio_pos(folio));

     folio_wait_fscache(folio);
-    return nfs_wb_page(inode, &folio->page);
+    ret = nfs_wb_folio(inode, folio);
+    trace_nfs_launder_folio_done(inode, folio, ret);
+    return ret;
 }

 static int nfs_swap_activate(struct swap_info_struct *sis, struct file *file,
@@ -547,22 +567,22 @@ const struct address_space_operations nfs_file_aops = {
  */
 static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
 {
-    struct page *page = vmf->page;
     struct file *filp = vmf->vma->vm_file;
     struct inode *inode = file_inode(filp);
     unsigned pagelen;
     vm_fault_t ret = VM_FAULT_NOPAGE;
     struct address_space *mapping;
+    struct folio *folio = page_folio(vmf->page);

     dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n",
          filp, filp->f_mapping->host->i_ino,
-         (long long)page_offset(page));
+         (long long)folio_file_pos(folio));

     sb_start_pagefault(inode->i_sb);

     /* make sure the cache has finished storing the page */
-    if (PageFsCache(page) &&
-        wait_on_page_fscache_killable(vmf->page) < 0) {
+    if (folio_test_fscache(folio) &&
+        folio_wait_fscache_killable(folio) < 0) {
         ret = VM_FAULT_RETRY;
         goto out;
     }
@@ -571,25 +591,25 @@ static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
             nfs_wait_bit_killable,
             TASK_KILLABLE|TASK_FREEZABLE_UNSAFE);

-    lock_page(page);
-    mapping = page_file_mapping(page);
+    folio_lock(folio);
+    mapping = folio_file_mapping(folio);
     if (mapping != inode->i_mapping)
         goto out_unlock;

-    wait_on_page_writeback(page);
+    folio_wait_writeback(folio);

-    pagelen = nfs_page_length(page);
+    pagelen = nfs_folio_length(folio);
     if (pagelen == 0)
         goto out_unlock;

     ret = VM_FAULT_LOCKED;
-    if (nfs_flush_incompatible(filp, page) == 0 &&
-        nfs_updatepage(filp, page, 0, pagelen) == 0)
+    if (nfs_flush_incompatible(filp, folio) == 0 &&
+        nfs_update_folio(filp, folio, 0, pagelen) == 0)
         goto out;

     ret = VM_FAULT_SIGBUS;
 out_unlock:
-    unlock_page(page);
+    folio_unlock(folio);
 out:
     sb_end_pagefault(inode->i_sb);
     return ret;


@@ -862,6 +862,8 @@ fl_pnfs_update_layout(struct inode *ino,

     status = filelayout_check_deviceid(lo, fl, gfp_flags);
     if (status) {
+        pnfs_error_mark_layout_for_return(ino, lseg);
+        pnfs_set_lo_fail(lseg);
         pnfs_put_lseg(lseg);
         lseg = NULL;
     }


@@ -760,17 +760,18 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
  * Record the page as unstable (an extra writeback period) and mark its
  * inode as dirty.
  */
-static inline
-void nfs_mark_page_unstable(struct page *page, struct nfs_commit_info *cinfo)
+static inline void nfs_folio_mark_unstable(struct folio *folio,
+                                           struct nfs_commit_info *cinfo)
 {
-    if (!cinfo->dreq) {
-        struct inode *inode = page_file_mapping(page)->host;
+    if (folio && !cinfo->dreq) {
+        struct inode *inode = folio_file_mapping(folio)->host;
+        long nr = folio_nr_pages(folio);

         /* This page is really still in write-back - just that the
          * writeback is happening on the server now.
          */
-        inc_node_page_state(page, NR_WRITEBACK);
-        inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
+        node_stat_mod_folio(folio, NR_WRITEBACK, nr);
+        wb_stat_mod(&inode_to_bdi(inode)->wb, WB_WRITEBACK, nr);
         __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
     }
 }
@@ -794,6 +795,24 @@ unsigned int nfs_page_length(struct page *page)
     return 0;
 }

+/*
+ * Determine the number of bytes of data the page contains
+ */
+static inline size_t nfs_folio_length(struct folio *folio)
+{
+    loff_t i_size = i_size_read(folio_file_mapping(folio)->host);
+
+    if (i_size > 0) {
+        pgoff_t index = folio_index(folio) >> folio_order(folio);
+        pgoff_t end_index = (i_size - 1) >> folio_shift(folio);
+        if (index < end_index)
+            return folio_size(folio);
+        if (index == end_index)
+            return offset_in_folio(folio, i_size - 1) + 1;
+    }
+    return 0;
+}
+
 /*
  * Convert a umode to a dirent->d_type
  */
@@ -807,11 +826,10 @@ unsigned char nfs_umode_to_dtype(umode_t mode)
  * Determine the number of pages in an array of length 'len' and
  * with a base offset of 'base'
  */
-static inline
-unsigned int nfs_page_array_len(unsigned int base, size_t len)
+static inline unsigned int nfs_page_array_len(unsigned int base, size_t len)
 {
-    return ((unsigned long)len + (unsigned long)base +
-        PAGE_SIZE - 1) >> PAGE_SHIFT;
+    return ((unsigned long)len + (unsigned long)base + PAGE_SIZE - 1) >>
+           PAGE_SHIFT;
 }

 /*
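
The new nfs_folio_length() above generalizes the byte math of nfs_page_length() to multi-page folios. A standalone arithmetic check of that logic (userspace C; the 4 KiB page and order-2, i.e. 16 KiB, folio are assumptions for the example, and plain shifts stand in for the folio helpers):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define FOLIO_ORDER 2
#define FOLIO_SHIFT (PAGE_SHIFT + FOLIO_ORDER)
#define FOLIO_SIZE  (1UL << FOLIO_SHIFT)

/* Bytes of file data held by a folio that starts at byte offset folio_start. */
static size_t folio_data_length(size_t folio_start, long long i_size)
{
    if (i_size <= 0)
        return 0;
    size_t index = folio_start >> FOLIO_SHIFT;
    size_t end_index = (size_t)(i_size - 1) >> FOLIO_SHIFT;

    if (index < end_index)
        return FOLIO_SIZE;                                    /* wholly below EOF */
    if (index == end_index)
        return ((size_t)(i_size - 1) & (FOLIO_SIZE - 1)) + 1; /* EOF inside folio */
    return 0;                                                 /* wholly past EOF */
}

int main(void)
{
    assert(folio_data_length(0, 10000) == 10000);      /* partially filled */
    assert(folio_data_length(0, 16384) == FOLIO_SIZE); /* exactly full */
    assert(folio_data_length(16384, 10000) == 0);      /* beyond EOF */
    puts("ok");
    return 0;
}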


@@ -460,7 +460,8 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
         if (err >= 0)
             break;

-        if (err == -ENOTSUPP &&
+        if ((err == -ENOTSUPP ||
+             err == -NFS4ERR_OFFLOAD_DENIED) &&
             nfs42_files_from_same_server(src, dst)) {
             err = -EOPNOTSUPP;
             break;


@@ -10604,7 +10604,9 @@ static void nfs4_disable_swap(struct inode *inode)
     /* The state manager thread will now exit once it is
      * woken.
      */
-    wake_up_var(&NFS_SERVER(inode)->nfs_client->cl_state);
+    struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
+
+    nfs4_schedule_state_manager(clp);
 }

 static const struct inode_operations nfs4_dir_inode_operations = {


@@ -292,32 +292,34 @@ TRACE_DEFINE_ENUM(NFS4CLNT_MOVED);
 TRACE_DEFINE_ENUM(NFS4CLNT_LEASE_MOVED);
 TRACE_DEFINE_ENUM(NFS4CLNT_DELEGATION_EXPIRED);
 TRACE_DEFINE_ENUM(NFS4CLNT_RUN_MANAGER);
+TRACE_DEFINE_ENUM(NFS4CLNT_MANAGER_AVAILABLE);
 TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_RUNNING);
 TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_READ);
 TRACE_DEFINE_ENUM(NFS4CLNT_RECALL_ANY_LAYOUT_RW);
+TRACE_DEFINE_ENUM(NFS4CLNT_DELEGRETURN_DELAYED);

 #define show_nfs4_clp_state(state) \
     __print_flags(state, "|", \
-        { NFS4CLNT_MANAGER_RUNNING,    "MANAGER_RUNNING" }, \
-        { NFS4CLNT_CHECK_LEASE,        "CHECK_LEASE" }, \
-        { NFS4CLNT_LEASE_EXPIRED,      "LEASE_EXPIRED" }, \
-        { NFS4CLNT_RECLAIM_REBOOT,     "RECLAIM_REBOOT" }, \
-        { NFS4CLNT_RECLAIM_NOGRACE,    "RECLAIM_NOGRACE" }, \
-        { NFS4CLNT_DELEGRETURN,        "DELEGRETURN" }, \
-        { NFS4CLNT_SESSION_RESET,      "SESSION_RESET" }, \
-        { NFS4CLNT_LEASE_CONFIRM,      "LEASE_CONFIRM" }, \
-        { NFS4CLNT_SERVER_SCOPE_MISMATCH, \
-                "SERVER_SCOPE_MISMATCH" }, \
-        { NFS4CLNT_PURGE_STATE,        "PURGE_STATE" }, \
-        { NFS4CLNT_BIND_CONN_TO_SESSION, \
-                "BIND_CONN_TO_SESSION" }, \
-        { NFS4CLNT_MOVED,              "MOVED" }, \
-        { NFS4CLNT_LEASE_MOVED,        "LEASE_MOVED" }, \
-        { NFS4CLNT_DELEGATION_EXPIRED, "DELEGATION_EXPIRED" }, \
-        { NFS4CLNT_RUN_MANAGER,        "RUN_MANAGER" }, \
-        { NFS4CLNT_RECALL_RUNNING,     "RECALL_RUNNING" }, \
-        { NFS4CLNT_RECALL_ANY_LAYOUT_READ, "RECALL_ANY_LAYOUT_READ" }, \
-        { NFS4CLNT_RECALL_ANY_LAYOUT_RW, "RECALL_ANY_LAYOUT_RW" })
+        { BIT(NFS4CLNT_MANAGER_RUNNING), "MANAGER_RUNNING" }, \
+        { BIT(NFS4CLNT_CHECK_LEASE), "CHECK_LEASE" }, \
+        { BIT(NFS4CLNT_LEASE_EXPIRED), "LEASE_EXPIRED" }, \
+        { BIT(NFS4CLNT_RECLAIM_REBOOT), "RECLAIM_REBOOT" }, \
+        { BIT(NFS4CLNT_RECLAIM_NOGRACE), "RECLAIM_NOGRACE" }, \
+        { BIT(NFS4CLNT_DELEGRETURN), "DELEGRETURN" }, \
+        { BIT(NFS4CLNT_SESSION_RESET), "SESSION_RESET" }, \
+        { BIT(NFS4CLNT_LEASE_CONFIRM), "LEASE_CONFIRM" }, \
+        { BIT(NFS4CLNT_SERVER_SCOPE_MISMATCH), "SERVER_SCOPE_MISMATCH" }, \
+        { BIT(NFS4CLNT_PURGE_STATE), "PURGE_STATE" }, \
+        { BIT(NFS4CLNT_BIND_CONN_TO_SESSION), "BIND_CONN_TO_SESSION" }, \
+        { BIT(NFS4CLNT_MOVED), "MOVED" }, \
+        { BIT(NFS4CLNT_LEASE_MOVED), "LEASE_MOVED" }, \
+        { BIT(NFS4CLNT_DELEGATION_EXPIRED), "DELEGATION_EXPIRED" }, \
+        { BIT(NFS4CLNT_RUN_MANAGER), "RUN_MANAGER" }, \
+        { BIT(NFS4CLNT_MANAGER_AVAILABLE), "MANAGER_AVAILABLE" }, \
+        { BIT(NFS4CLNT_RECALL_RUNNING), "RECALL_RUNNING" }, \
+        { BIT(NFS4CLNT_RECALL_ANY_LAYOUT_READ), "RECALL_ANY_LAYOUT_READ" }, \
+        { BIT(NFS4CLNT_RECALL_ANY_LAYOUT_RW), "RECALL_ANY_LAYOUT_RW" }, \
+        { BIT(NFS4CLNT_DELEGRETURN_DELAYED), "DELERETURN_DELAYED" })

 TRACE_EVENT(nfs4_state_mgr,
         TP_PROTO(
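
The flag-printing fix above is easy to misread: the NFS4CLNT_* constants are bit numbers (as used with set_bit()/test_bit()), while __print_flags() matches its table entries against the raw state word, which stores masks. A userspace illustration of the failure mode (the constants and helper here are simplified stand-ins, not the kernel's tracing machinery):

#include <stdio.h>

#define BIT(n) (1UL << (n))
#define NFS4CLNT_MANAGER_RUNNING 0
#define NFS4CLNT_CHECK_LEASE     1
#define NFS4CLNT_LEASE_EXPIRED   2

static void print_if_set(unsigned long state, unsigned long mask,
                         const char *name)
{
    if (state & mask)
        printf("%s\n", name);
}

int main(void)
{
    unsigned long state = BIT(NFS4CLNT_LEASE_EXPIRED); /* state word = 0x4 */

    /* Broken: testing against the bit number 2 (0b010) misses 0b100. */
    print_if_set(state, NFS4CLNT_LEASE_EXPIRED, "old table: LEASE_EXPIRED");
    /* Fixed: testing against BIT(2) (0b100) matches the flag actually set. */
    print_if_set(state, BIT(NFS4CLNT_LEASE_EXPIRED), "new table: LEASE_EXPIRED");
    return 0;
}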


@@ -152,8 +152,6 @@ DEFINE_NFS_INODE_EVENT(nfs_getattr_enter);
 DEFINE_NFS_INODE_EVENT_DONE(nfs_getattr_exit);
 DEFINE_NFS_INODE_EVENT(nfs_setattr_enter);
 DEFINE_NFS_INODE_EVENT_DONE(nfs_setattr_exit);
-DEFINE_NFS_INODE_EVENT(nfs_writeback_page_enter);
-DEFINE_NFS_INODE_EVENT_DONE(nfs_writeback_page_exit);
 DEFINE_NFS_INODE_EVENT(nfs_writeback_inode_enter);
 DEFINE_NFS_INODE_EVENT_DONE(nfs_writeback_inode_exit);
 DEFINE_NFS_INODE_EVENT(nfs_fsync_enter);
@@ -933,13 +931,13 @@ TRACE_EVENT(nfs_sillyrename_unlink,
         )
 );

-TRACE_EVENT(nfs_aop_readpage,
+DECLARE_EVENT_CLASS(nfs_folio_event,
         TP_PROTO(
             const struct inode *inode,
-            struct page *page
+            struct folio *folio
         ),

-        TP_ARGS(inode, page),
+        TP_ARGS(inode, folio),

         TP_STRUCT__entry(
             __field(dev_t, dev)
@@ -947,6 +945,7 @@ TRACE_EVENT(nfs_aop_readpage,
             __field(u64, fileid)
             __field(u64, version)
             __field(loff_t, offset)
+            __field(u32, count)
         ),

         TP_fast_assign(
@@ -956,26 +955,36 @@ TRACE_EVENT(nfs_aop_readpage,
             __entry->fileid = nfsi->fileid;
             __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
             __entry->version = inode_peek_iversion_raw(inode);
-            __entry->offset = page_index(page) << PAGE_SHIFT;
+            __entry->offset = folio_file_pos(folio);
+            __entry->count = nfs_folio_length(folio);
         ),

         TP_printk(
-            "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu offset=%lld",
+            "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu "
+            "offset=%lld count=%u",
             MAJOR(__entry->dev), MINOR(__entry->dev),
             (unsigned long long)__entry->fileid,
             __entry->fhandle, __entry->version,
-            __entry->offset
+            __entry->offset, __entry->count
         )
 );

-TRACE_EVENT(nfs_aop_readpage_done,
+#define DEFINE_NFS_FOLIO_EVENT(name) \
+    DEFINE_EVENT(nfs_folio_event, name, \
+            TP_PROTO( \
+                const struct inode *inode, \
+                struct folio *folio \
+            ), \
+            TP_ARGS(inode, folio))
+
+DECLARE_EVENT_CLASS(nfs_folio_event_done,
         TP_PROTO(
             const struct inode *inode,
-            struct page *page,
+            struct folio *folio,
             int ret
         ),

-        TP_ARGS(inode, page, ret),
+        TP_ARGS(inode, folio, ret),

         TP_STRUCT__entry(
             __field(dev_t, dev)
@@ -984,6 +993,7 @@ TRACE_EVENT(nfs_aop_readpage_done,
             __field(u64, fileid)
             __field(u64, version)
             __field(loff_t, offset)
+            __field(u32, count)
         ),

         TP_fast_assign(
@@ -993,19 +1003,39 @@ TRACE_EVENT(nfs_aop_readpage_done,
             __entry->fileid = nfsi->fileid;
             __entry->fhandle = nfs_fhandle_hash(&nfsi->fh);
             __entry->version = inode_peek_iversion_raw(inode);
-            __entry->offset = page_index(page) << PAGE_SHIFT;
+            __entry->offset = folio_file_pos(folio);
+            __entry->count = nfs_folio_length(folio);
             __entry->ret = ret;
         ),

         TP_printk(
-            "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu offset=%lld ret=%d",
+            "fileid=%02x:%02x:%llu fhandle=0x%08x version=%llu "
+            "offset=%lld count=%u ret=%d",
             MAJOR(__entry->dev), MINOR(__entry->dev),
             (unsigned long long)__entry->fileid,
             __entry->fhandle, __entry->version,
-            __entry->offset, __entry->ret
+            __entry->offset, __entry->count, __entry->ret
         )
 );

+#define DEFINE_NFS_FOLIO_EVENT_DONE(name) \
+    DEFINE_EVENT(nfs_folio_event_done, name, \
+            TP_PROTO( \
+                const struct inode *inode, \
+                struct folio *folio, \
+                int ret \
+            ), \
+            TP_ARGS(inode, folio, ret))
+
+DEFINE_NFS_FOLIO_EVENT(nfs_aop_readpage);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_aop_readpage_done);
+
+DEFINE_NFS_FOLIO_EVENT(nfs_writeback_folio);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_writeback_folio_done);
+
+DEFINE_NFS_FOLIO_EVENT(nfs_invalidate_folio);
+DEFINE_NFS_FOLIO_EVENT_DONE(nfs_launder_folio_done);
+
 TRACE_EVENT(nfs_aop_readahead,
         TP_PROTO(
             const struct inode *inode,


@@ -32,6 +32,42 @@
 static struct kmem_cache *nfs_page_cachep;
 static const struct rpc_call_ops nfs_pgio_common_ops;

+struct nfs_page_iter_page {
+    const struct nfs_page *req;
+    size_t count;
+};
+
+static void nfs_page_iter_page_init(struct nfs_page_iter_page *i,
+                                    const struct nfs_page *req)
+{
+    i->req = req;
+    i->count = 0;
+}
+
+static void nfs_page_iter_page_advance(struct nfs_page_iter_page *i, size_t sz)
+{
+    const struct nfs_page *req = i->req;
+    size_t tmp = i->count + sz;
+
+    i->count = (tmp < req->wb_bytes) ? tmp : req->wb_bytes;
+}
+
+static struct page *nfs_page_iter_page_get(struct nfs_page_iter_page *i)
+{
+    const struct nfs_page *req = i->req;
+    struct page *page;
+
+    if (i->count != req->wb_bytes) {
+        size_t base = i->count + req->wb_pgbase;
+        size_t len = PAGE_SIZE - offset_in_page(base);
+
+        page = nfs_page_to_page(req, base);
+        nfs_page_iter_page_advance(i, len);
+        return page;
+    }
+    return NULL;
+}
+
 static struct nfs_pgio_mirror *
 nfs_pgio_get_mirror(struct nfs_pageio_descriptor *desc, u32 idx)
 {
@@ -391,7 +427,7 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
      * has extra ref from the write/commit path to handle handoff
      * between write and commit lists. */
     if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
-        inode = page_file_mapping(req->wb_page)->host;
+        inode = nfs_page_to_inode(req);
         set_bit(PG_INODE_REF, &req->wb_flags);
         kref_get(&req->wb_kref);
         atomic_long_inc(&NFS_I(inode)->nrequests);
@@ -431,10 +467,9 @@ nfs_page_group_destroy(struct kref *kref)
     nfs_release_request(head);
 }

-static struct nfs_page *
-__nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
-             unsigned int pgbase, unsigned int offset,
-             unsigned int count)
+static struct nfs_page *nfs_page_create(struct nfs_lock_context *l_ctx,
+                                        unsigned int pgbase, pgoff_t index,
+                                        unsigned int offset, unsigned int count)
 {
     struct nfs_page *req;
     struct nfs_open_context *ctx = l_ctx->open_context;
@@ -453,42 +488,90 @@ __nfs_create_request(struct nfs_lock_context *l_ctx, struct page *page,
     /* Initialize the request struct. Initially, we assume a
      * long write-back delay. This will be adjusted in
      * update_nfs_request below if the region is not locked. */
-    req->wb_page    = page;
-    if (page) {
-        req->wb_index = page_index(page);
-        get_page(page);
-    }
-    req->wb_offset  = offset;
-    req->wb_pgbase  = pgbase;
-    req->wb_bytes   = count;
+    req->wb_pgbase = pgbase;
+    req->wb_index = index;
+    req->wb_offset = offset;
+    req->wb_bytes = count;
     kref_init(&req->wb_kref);
     req->wb_nio = 0;
     return req;
 }

+static void nfs_page_assign_folio(struct nfs_page *req, struct folio *folio)
+{
+    if (folio != NULL) {
+        req->wb_folio = folio;
+        folio_get(folio);
+        set_bit(PG_FOLIO, &req->wb_flags);
+    }
+}
+
+static void nfs_page_assign_page(struct nfs_page *req, struct page *page)
+{
+    if (page != NULL) {
+        req->wb_page = page;
+        get_page(page);
+    }
+}
+
 /**
- * nfs_create_request - Create an NFS read/write request.
+ * nfs_page_create_from_page - Create an NFS read/write request.
  * @ctx: open context to use
  * @page: page to write
- * @offset: starting offset within the page for the write
+ * @pgbase: starting offset within the page for the write
+ * @offset: file offset for the write
  * @count: number of bytes to read/write
  *
  * The page must be locked by the caller. This makes sure we never
  * create two different requests for the same page.
  * User should ensure it is safe to sleep in this function.
  */
-struct nfs_page *
-nfs_create_request(struct nfs_open_context *ctx, struct page *page,
-           unsigned int offset, unsigned int count)
+struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
+                                           struct page *page,
+                                           unsigned int pgbase, loff_t offset,
+                                           unsigned int count)
 {
     struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
     struct nfs_page *ret;

     if (IS_ERR(l_ctx))
         return ERR_CAST(l_ctx);
-    ret = __nfs_create_request(l_ctx, page, offset, offset, count);
-    if (!IS_ERR(ret))
+    ret = nfs_page_create(l_ctx, pgbase, offset >> PAGE_SHIFT,
+                          offset_in_page(offset), count);
+    if (!IS_ERR(ret)) {
+        nfs_page_assign_page(ret, page);
         nfs_page_group_init(ret, NULL);
+    }
+    nfs_put_lock_context(l_ctx);
+    return ret;
+}
+
+/**
+ * nfs_page_create_from_folio - Create an NFS read/write request.
+ * @ctx: open context to use
+ * @folio: folio to write
+ * @offset: starting offset within the folio for the write
+ * @count: number of bytes to read/write
+ *
+ * The page must be locked by the caller. This makes sure we never
+ * create two different requests for the same page.
+ * User should ensure it is safe to sleep in this function.
+ */
+struct nfs_page *nfs_page_create_from_folio(struct nfs_open_context *ctx,
+                                            struct folio *folio,
+                                            unsigned int offset,
+                                            unsigned int count)
+{
+    struct nfs_lock_context *l_ctx = nfs_get_lock_context(ctx);
+    struct nfs_page *ret;
+
+    if (IS_ERR(l_ctx))
+        return ERR_CAST(l_ctx);
+    ret = nfs_page_create(l_ctx, offset, folio_index(folio), offset, count);
+    if (!IS_ERR(ret)) {
+        nfs_page_assign_folio(ret, folio);
+        nfs_page_group_init(ret, NULL);
+    }
     nfs_put_lock_context(l_ctx);
     return ret;
 }
@@ -501,10 +584,16 @@ nfs_create_subreq(struct nfs_page *req,
 {
     struct nfs_page *last;
     struct nfs_page *ret;
+    struct folio *folio = nfs_page_to_folio(req);
+    struct page *page = nfs_page_to_page(req, pgbase);

-    ret = __nfs_create_request(req->wb_lock_context, req->wb_page,
-            pgbase, offset, count);
+    ret = nfs_page_create(req->wb_lock_context, pgbase, req->wb_index,
+                          offset, count);
     if (!IS_ERR(ret)) {
+        if (folio)
+            nfs_page_assign_folio(ret, folio);
+        else
+            nfs_page_assign_page(ret, page);
         /* find the last request */
         for (last = req->wb_head;
              last->wb_this_page != req->wb_head;
@@ -512,7 +601,6 @@ nfs_create_subreq(struct nfs_page *req,
             ;

         nfs_lock_request(ret);
-        ret->wb_index = req->wb_index;
         nfs_page_group_init(ret, last);
         ret->wb_nio = req->wb_nio;
     }
@@ -551,11 +639,16 @@ void nfs_unlock_and_release_request(struct nfs_page *req)
  */
 static void nfs_clear_request(struct nfs_page *req)
 {
+    struct folio *folio = nfs_page_to_folio(req);
     struct page *page = req->wb_page;
     struct nfs_lock_context *l_ctx = req->wb_lock_context;
     struct nfs_open_context *ctx;

-    if (page != NULL) {
+    if (folio != NULL) {
+        folio_put(folio);
+        req->wb_folio = NULL;
+        clear_bit(PG_FOLIO, &req->wb_flags);
+    } else if (page != NULL) {
         put_page(page);
         req->wb_page = NULL;
     }
@@ -693,13 +786,14 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
 /**
  * nfs_pgio_rpcsetup - Set up arguments for a pageio call
  * @hdr: The pageio hdr
+ * @pgbase: base
  * @count: Number of bytes to read
  * @how: How to commit data (writes only)
  * @cinfo: Commit information for the call (writes only)
  */
-static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
-                  unsigned int count,
-                  int how, struct nfs_commit_info *cinfo)
+static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr, unsigned int pgbase,
+                  unsigned int count, int how,
+                  struct nfs_commit_info *cinfo)
 {
     struct nfs_page *req = hdr->req;
@@ -710,7 +804,7 @@ static void nfs_pgio_rpcsetup(struct nfs_pgio_header *hdr,
     hdr->args.offset = req_offset(req);
     /* pnfs_set_layoutcommit needs this */
     hdr->mds_offset = hdr->args.offset;
-    hdr->args.pgbase = req->wb_pgbase;
+    hdr->args.pgbase = pgbase;
     hdr->args.pages = hdr->page_array.pagevec;
     hdr->args.count = count;
     hdr->args.context = get_nfs_open_context(nfs_req_openctx(req));
@@ -896,9 +990,10 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
     struct nfs_commit_info cinfo;
     struct nfs_page_array *pg_array = &hdr->page_array;
     unsigned int pagecount, pageused;
+    unsigned int pg_base = offset_in_page(mirror->pg_base);
     gfp_t gfp_flags = nfs_io_gfp_mask();

-    pagecount = nfs_page_array_len(mirror->pg_base, mirror->pg_count);
+    pagecount = nfs_page_array_len(pg_base, mirror->pg_count);
     pg_array->npages = pagecount;

     if (pagecount <= ARRAY_SIZE(pg_array->page_array))
@@ -918,16 +1013,26 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
     last_page = NULL;
     pageused = 0;
     while (!list_empty(head)) {
+        struct nfs_page_iter_page i;
+        struct page *page;
+
         req = nfs_list_entry(head->next);
         nfs_list_move_request(req, &hdr->pages);

-        if (!last_page || last_page != req->wb_page) {
-            pageused++;
-            if (pageused > pagecount)
-                break;
-            *pages++ = last_page = req->wb_page;
+        if (req->wb_pgbase == 0)
+            last_page = NULL;
+
+        nfs_page_iter_page_init(&i, req);
+        while ((page = nfs_page_iter_page_get(&i)) != NULL) {
+            if (last_page != page) {
+                pageused++;
+                if (pageused > pagecount)
+                    goto full;
+                *pages++ = last_page = page;
+            }
         }
     }
+full:
     if (WARN_ON_ONCE(pageused != pagecount)) {
         nfs_pgio_error(hdr);
         desc->pg_error = -EINVAL;
@@ -939,7 +1044,8 @@ int nfs_generic_pgio(struct nfs_pageio_descriptor *desc,
         desc->pg_ioflags &= ~FLUSH_COND_STABLE;

     /* Set up the argument struct */
-    nfs_pgio_rpcsetup(hdr, mirror->pg_count, desc->pg_ioflags, &cinfo);
+    nfs_pgio_rpcsetup(hdr, pg_base, mirror->pg_count, desc->pg_ioflags,
+                      &cinfo);
     desc->pg_rpc_callops = &nfs_pgio_common_ops;
     return 0;
 }
@@ -1035,6 +1141,24 @@ static bool nfs_match_lock_context(const struct nfs_lock_context *l1,
     return l1->lockowner == l2->lockowner;
 }

+static bool nfs_page_is_contiguous(const struct nfs_page *prev,
+                                   const struct nfs_page *req)
+{
+    size_t prev_end = prev->wb_pgbase + prev->wb_bytes;
+
+    if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
+        return false;
+    if (req->wb_pgbase == 0)
+        return prev_end == nfs_page_max_length(prev);
+    if (req->wb_pgbase == prev_end) {
+        struct folio *folio = nfs_page_to_folio(req);
+
+        if (folio)
+            return folio == nfs_page_to_folio(prev);
+        return req->wb_page == prev->wb_page;
+    }
+    return false;
+}
+
 /**
  * nfs_coalesce_size - test two requests for compatibility
  * @prev: pointer to nfs_page
@@ -1063,16 +1187,8 @@ static unsigned int nfs_coalesce_size(struct nfs_page *prev,
             !nfs_match_lock_context(req->wb_lock_context,
                         prev->wb_lock_context))
             return 0;
-        if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
+        if (!nfs_page_is_contiguous(prev, req))
             return 0;
-        if (req->wb_page == prev->wb_page) {
-            if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes)
-                return 0;
-        } else {
-            if (req->wb_pgbase != 0 ||
-                prev->wb_pgbase + prev->wb_bytes != PAGE_SIZE)
-                return 0;
-        }
     }
     return pgio->pg_ops->pg_test(pgio, prev, req);
 }
@@ -1412,16 +1528,21 @@ void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index)
 {
     struct nfs_pgio_mirror *mirror;
     struct nfs_page *prev;
+    struct folio *folio;
     u32 midx;

     for (midx = 0; midx < desc->pg_mirror_count; midx++) {
         mirror = nfs_pgio_get_mirror(desc, midx);
         if (!list_empty(&mirror->pg_list)) {
             prev = nfs_list_entry(mirror->pg_list.prev);
-            if (index != prev->wb_index + 1) {
-                nfs_pageio_complete(desc);
-                break;
-            }
+            folio = nfs_page_to_folio(prev);
+            if (folio) {
+                if (index == folio_next_index(folio))
+                    continue;
+            } else if (index == prev->wb_index + 1)
+                continue;
+            nfs_pageio_complete(desc);
+            break;
         }
     }
 }
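
The nfs_page_iter_page helpers added at the top of pagelist.c are what let nfs_generic_pgio() keep filling a flat page array when a single request covers several pages of a folio. A userspace sketch of the same walk (plain arithmetic stands in for nfs_page_to_page(); the sizes are invented for the example):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
    unsigned long wb_pgbase = 1024, wb_bytes = 9000; /* spans 3 pages */
    unsigned long count = 0;                         /* iterator state */

    while (count < wb_bytes) {
        unsigned long base = count + wb_pgbase;
        unsigned long len = PAGE_SIZE - (base & (PAGE_SIZE - 1));

        if (len > wb_bytes - count)
            len = wb_bytes - count; /* the kernel clamps in _advance() */
        printf("page %lu: offset %lu, %lu bytes\n",
               base / PAGE_SIZE, base & (PAGE_SIZE - 1), len);
        count += len;
    }
    return 0;
}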


@@ -511,7 +511,7 @@ pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)

     spin_lock(&inode->i_lock);
     pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
-    pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
+    pnfs_mark_matching_lsegs_return(lo, &head, &range, 0);
     spin_unlock(&inode->i_lock);
     pnfs_free_lseg_list(&head);
     dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,


@@ -193,7 +193,7 @@ struct pnfs_commit_ops {
     void (*recover_commit_reqs) (struct list_head *list,
                      struct nfs_commit_info *cinfo);
     struct nfs_page * (*search_commit_reqs)(struct nfs_commit_info *cinfo,
-                        struct page *page);
+                        struct folio *folio);
 };

 struct pnfs_layout_hdr {
@@ -395,7 +395,7 @@ void pnfs_generic_rw_release(void *data);
 void pnfs_generic_recover_commit_reqs(struct list_head *dst,
                       struct nfs_commit_info *cinfo);
 struct nfs_page *pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo,
-                         struct page *page);
+                         struct folio *folio);
 int pnfs_generic_commit_pagelist(struct inode *inode,
                  struct list_head *mds_pages,
                  int how,
@@ -557,13 +557,13 @@ pnfs_recover_commit_reqs(struct list_head *head, struct nfs_commit_info *cinfo)

 static inline struct nfs_page *
 pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo,
-            struct page *page)
+            struct folio *folio)
 {
     struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;

     if (!fl_cinfo->ops || !fl_cinfo->ops->search_commit_reqs)
         return NULL;
-    return fl_cinfo->ops->search_commit_reqs(cinfo, page);
+    return fl_cinfo->ops->search_commit_reqs(cinfo, folio);
 }

 /* Should the pNFS client commit and return the layout upon a setattr */
@@ -864,7 +864,7 @@ pnfs_recover_commit_reqs(struct list_head *head, struct nfs_commit_info *cinfo)

 static inline struct nfs_page *
 pnfs_search_commit_reqs(struct inode *inode, struct nfs_commit_info *cinfo,
-            struct page *page)
+            struct folio *folio)
 {
     return NULL;
 }


@@ -353,7 +353,7 @@ EXPORT_SYMBOL_GPL(pnfs_generic_recover_commit_reqs);

 static struct nfs_page *
 pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets,
-                   unsigned int nbuckets, struct page *page)
+                   unsigned int nbuckets, struct folio *folio)
 {
     struct nfs_page *req;
     struct pnfs_commit_bucket *b;
@@ -363,11 +363,11 @@ pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets,
      * request is found */
     for (i = 0, b = buckets; i < nbuckets; i++, b++) {
         list_for_each_entry(req, &b->written, wb_list) {
-            if (req->wb_page == page)
+            if (nfs_page_to_folio(req) == folio)
                 return req->wb_head;
         }
         list_for_each_entry(req, &b->committing, wb_list) {
-            if (req->wb_page == page)
+            if (nfs_page_to_folio(req) == folio)
                 return req->wb_head;
         }
     }
@@ -375,14 +375,14 @@ pnfs_bucket_search_commit_reqs(struct pnfs_commit_bucket *buckets,
 }

 /* pnfs_generic_search_commit_reqs - Search lists in @cinfo for the head request
- * for @page
+ * for @folio
  * @cinfo - commit info for current inode
- * @page - page to search for matching head request
+ * @folio - page to search for matching head request
  *
  * Return: the head request if one is found, otherwise %NULL.
  */
-struct nfs_page *
-pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page)
+struct nfs_page *pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo,
+                                                 struct folio *folio)
 {
     struct pnfs_ds_commit_info *fl_cinfo = cinfo->ds;
     struct pnfs_commit_array *array;
@@ -390,7 +390,7 @@ pnfs_generic_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page

     list_for_each_entry(array, &fl_cinfo->commits, cinfo_list) {
         req = pnfs_bucket_search_commit_reqs(array->buckets,
-                             array->nbuckets, page);
+                             array->nbuckets, folio);
         if (req)
             return req;
     }
@@ -1180,7 +1180,7 @@ pnfs_layout_mark_request_commit(struct nfs_page *req,
     nfs_request_add_commit_list_locked(req, list, cinfo);
     mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
-    nfs_mark_page_unstable(req->wb_page, cinfo);
+    nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo);
     return;
 out_resched:
     mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);


@@ -49,12 +49,11 @@ static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
     kmem_cache_free(nfs_rdata_cachep, rhdr);
 }

-static
-int nfs_return_empty_page(struct page *page)
+static int nfs_return_empty_folio(struct folio *folio)
 {
-    zero_user(page, 0, PAGE_SIZE);
-    SetPageUptodate(page);
-    unlock_page(page);
+    folio_zero_segment(folio, 0, folio_size(folio));
+    folio_mark_uptodate(folio);
+    folio_unlock(folio);
     return 0;
 }

@@ -111,18 +110,18 @@ EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
 static void nfs_readpage_release(struct nfs_page *req, int error)
 {
     struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
-    struct page *page = req->wb_page;
+    struct folio *folio = nfs_page_to_folio(req);

     dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
         (unsigned long long)NFS_FILEID(inode), req->wb_bytes,
         (long long)req_offset(req));

     if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
-        SetPageError(page);
+        folio_set_error(folio);
     if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
-        if (PageUptodate(page))
-            nfs_fscache_write_page(inode, page);
-        unlock_page(page);
+        if (folio_test_uptodate(folio))
+            nfs_fscache_write_page(inode, &folio->page);
+        folio_unlock(folio);
     }
     nfs_release_request(req);
 }
@@ -135,7 +134,7 @@ struct nfs_readdesc {
 static void nfs_page_group_set_uptodate(struct nfs_page *req)
 {
     if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
-        SetPageUptodate(req->wb_page);
+        folio_mark_uptodate(nfs_page_to_folio(req));
 }

 static void nfs_read_completion(struct nfs_pgio_header *hdr)
@@ -147,7 +146,7 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
         goto out;
     while (!list_empty(&hdr->pages)) {
         struct nfs_page *req = nfs_list_entry(hdr->pages.next);
-        struct page *page = req->wb_page;
+        struct folio *folio = nfs_page_to_folio(req);
         unsigned long start = req->wb_pgbase;
         unsigned long end = req->wb_pgbase + req->wb_bytes;
@@ -157,14 +156,14 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
             if (bytes > hdr->good_bytes) {
                 /* nothing in this request was good, so zero
                  * the full extent of the request */
-                zero_user_segment(page, start, end);
+                folio_zero_segment(folio, start, end);
             } else if (hdr->good_bytes - bytes < req->wb_bytes) {
                 /* part of this request has good bytes, but
                  * not all. zero the bad bytes */
                 start += hdr->good_bytes - bytes;
                 WARN_ON(start < req->wb_pgbase);
-                zero_user_segment(page, start, end);
+                folio_zero_segment(folio, start, end);
             }
         }
         error = 0;
@@ -281,33 +280,34 @@ static void nfs_readpage_result(struct rpc_task *task,
         nfs_readpage_retry(task, hdr);
 }

-static int
-readpage_async_filler(struct nfs_readdesc *desc, struct page *page)
+static int readpage_async_filler(struct nfs_readdesc *desc, struct folio *folio)
 {
-    struct inode *inode = page_file_mapping(page)->host;
-    unsigned int rsize = NFS_SERVER(inode)->rsize;
+    struct inode *inode = folio_file_mapping(folio)->host;
+    struct nfs_server *server = NFS_SERVER(inode);
+    size_t fsize = folio_size(folio);
+    unsigned int rsize = server->rsize;
     struct nfs_page *new;
     unsigned int len, aligned_len;
     int error;

-    len = nfs_page_length(page);
+    len = nfs_folio_length(folio);
     if (len == 0)
-        return nfs_return_empty_page(page);
+        return nfs_return_empty_folio(folio);

-    aligned_len = min_t(unsigned int, ALIGN(len, rsize), PAGE_SIZE);
+    aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);

-    if (!IS_SYNC(page->mapping->host)) {
-        error = nfs_fscache_read_page(page->mapping->host, page);
+    if (!IS_SYNC(inode)) {
+        error = nfs_fscache_read_page(inode, &folio->page);
         if (error == 0)
             goto out_unlock;
     }

-    new = nfs_create_request(desc->ctx, page, 0, aligned_len);
+    new = nfs_page_create_from_folio(desc->ctx, folio, 0, aligned_len);
     if (IS_ERR(new))
         goto out_error;

-    if (len < PAGE_SIZE)
-        zero_user_segment(page, len, PAGE_SIZE);
+    if (len < fsize)
+        folio_zero_segment(folio, len, fsize);
     if (!nfs_pageio_add_request(&desc->pgio, new)) {
         nfs_list_remove_request(new);
         error = desc->pgio.pg_error;
@@ -318,7 +318,7 @@ readpage_async_filler(struct nfs_readdesc *desc, struct page *page)
 out_error:
     error = PTR_ERR(new);
 out_unlock:
-    unlock_page(page);
+    folio_unlock(folio);
 out:
     return error;
 }
@@ -331,61 +331,54 @@ readpage_async_filler(struct nfs_readdesc *desc, struct page *page)
  */
 int nfs_read_folio(struct file *file, struct folio *folio)
 {
-    struct page *page = &folio->page;
     struct nfs_readdesc desc;
-    struct inode *inode = page_file_mapping(page)->host;
+    struct inode *inode = file_inode(file);
     int ret;

-    trace_nfs_aop_readpage(inode, page);
+    trace_nfs_aop_readpage(inode, folio);
     nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);

     /*
      * Try to flush any pending writes to the file..
      *
-     * NOTE! Because we own the page lock, there cannot
+     * NOTE! Because we own the folio lock, there cannot
      * be any new pending writes generated at this point
-     * for this page (other pages can be written to).
+     * for this folio (other folios can be written to).
      */
-    ret = nfs_wb_page(inode, page);
+    ret = nfs_wb_folio(inode, folio);
     if (ret)
         goto out_unlock;
-    if (PageUptodate(page))
+    if (folio_test_uptodate(folio))
         goto out_unlock;

     ret = -ESTALE;
     if (NFS_STALE(inode))
         goto out_unlock;

-    if (file == NULL) {
-        ret = -EBADF;
-        desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
-        if (desc.ctx == NULL)
-            goto out_unlock;
-    } else
-        desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
+    desc.ctx = get_nfs_open_context(nfs_file_open_context(file));

     xchg(&desc.ctx->error, 0);
     nfs_pageio_init_read(&desc.pgio, inode, false,
                  &nfs_async_read_completion_ops);

-    ret = readpage_async_filler(&desc, page);
+    ret = readpage_async_filler(&desc, folio);
     if (ret)
         goto out;

     nfs_pageio_complete_read(&desc.pgio);
     ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
     if (!ret) {
-        ret = wait_on_page_locked_killable(page);
-        if (!PageUptodate(page) && !ret)
+        ret = folio_wait_locked_killable(folio);
+        if (!folio_test_uptodate(folio) && !ret)
             ret = xchg(&desc.ctx->error, 0);
     }
 out:
     put_nfs_open_context(desc.ctx);
-    trace_nfs_aop_readpage_done(inode, page, ret);
+    trace_nfs_aop_readpage_done(inode, folio, ret);
     return ret;
 out_unlock:
-    unlock_page(page);
-    trace_nfs_aop_readpage_done(inode, page, ret);
+    folio_unlock(folio);
+    trace_nfs_aop_readpage_done(inode, folio, ret);
     return ret;
 }
@@ -395,7 +388,7 @@ void nfs_readahead(struct readahead_control *ractl)
     struct file *file = ractl->file;
     struct nfs_readdesc desc;
     struct inode *inode = ractl->mapping->host;
-    struct page *page;
+    struct folio *folio;
     int ret;

     trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
@ -416,9 +409,8 @@ void nfs_readahead(struct readahead_control *ractl)
nfs_pageio_init_read(&desc.pgio, inode, false, nfs_pageio_init_read(&desc.pgio, inode, false,
&nfs_async_read_completion_ops); &nfs_async_read_completion_ops);
while ((page = readahead_page(ractl)) != NULL) { while ((folio = readahead_folio(ractl)) != NULL) {
ret = readpage_async_filler(&desc, page); ret = readpage_async_filler(&desc, folio);
put_page(page);
if (ret) if (ret)
break; break;
} }
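
An aside on the sizing logic above: readpage_async_filler() now computes the read length against the folio's own size rather than a fixed PAGE_SIZE. The userspace model below reproduces that arithmetic with made-up numbers; ALIGN_UP, min_uint and all of the values are stand-ins for the kernel's ALIGN()/min_t() and live inode state, not kernel APIs.

/*
 * Model of the folio read sizing: the valid length is capped at EOF,
 * rounded up to the server's rsize, but never past the folio itself.
 */
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

static unsigned int min_uint(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long long i_size = 10000;   /* file size in bytes */
	unsigned long long folio_pos = 8192; /* folio's byte offset in the file */
	unsigned int fsize = 16384;          /* folio_size(): a 4-page folio */
	unsigned int rsize = 4096;           /* server read block size */

	/* like nfs_folio_length(): bytes of the folio that lie below EOF */
	unsigned int len = 0;
	if (i_size > folio_pos) {
		unsigned long long rest = i_size - folio_pos;
		len = rest < fsize ? (unsigned int)rest : fsize;
	}

	/* round the read up to rsize, but stay inside the folio */
	unsigned int aligned_len = min_uint(ALIGN_UP(len, rsize), fsize);

	printf("len=%u aligned_len=%u (tail %u..%u is zeroed)\n",
	       len, aligned_len, len, fsize);
	return 0;
}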

View File

@@ -64,7 +64,7 @@ static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
 				 struct inode *inode);
 static struct nfs_page *
 nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
-						struct page *page);
+						struct folio *folio);

 static struct kmem_cache *nfs_wdata_cachep;
 static mempool_t *nfs_wdata_mempool;
@@ -171,31 +171,28 @@ nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
 	return 0;
 }

-static struct nfs_page *
-nfs_page_private_request(struct page *page)
+static struct nfs_page *nfs_folio_private_request(struct folio *folio)
 {
-	if (!PagePrivate(page))
-		return NULL;
-	return (struct nfs_page *)page_private(page);
+	return folio_get_private(folio);
 }

-/*
- * nfs_page_find_head_request_locked - find head request associated with @page
+/**
+ * nfs_folio_find_private_request - find head request associated with a folio
+ * @folio: pointer to folio
  *
  * must be called while holding the inode lock.
  *
  * returns matching head request with reference held, or NULL if not found.
  */
-static struct nfs_page *
-nfs_page_find_private_request(struct page *page)
+static struct nfs_page *nfs_folio_find_private_request(struct folio *folio)
 {
-	struct address_space *mapping = page_file_mapping(page);
+	struct address_space *mapping = folio_file_mapping(folio);
 	struct nfs_page *req;

-	if (!PagePrivate(page))
+	if (!folio_test_private(folio))
 		return NULL;
 	spin_lock(&mapping->private_lock);
-	req = nfs_page_private_request(page);
+	req = nfs_folio_private_request(folio);
 	if (req) {
 		WARN_ON_ONCE(req->wb_head != req);
 		kref_get(&req->wb_kref);
@@ -204,18 +201,17 @@ nfs_page_find_private_request(struct page *page)
 	return req;
 }

-static struct nfs_page *
-nfs_page_find_swap_request(struct page *page)
+static struct nfs_page *nfs_folio_find_swap_request(struct folio *folio)
 {
-	struct inode *inode = page_file_mapping(page)->host;
+	struct inode *inode = folio_file_mapping(folio)->host;
 	struct nfs_inode *nfsi = NFS_I(inode);
 	struct nfs_page *req = NULL;
-	if (!PageSwapCache(page))
+	if (!folio_test_swapcache(folio))
 		return NULL;
 	mutex_lock(&nfsi->commit_mutex);
-	if (PageSwapCache(page)) {
+	if (folio_test_swapcache(folio)) {
 		req = nfs_page_search_commits_for_head_request_locked(nfsi,
-								      page);
+								      folio);
 		if (req) {
 			WARN_ON_ONCE(req->wb_head != req);
 			kref_get(&req->wb_kref);
@@ -225,29 +221,30 @@ nfs_page_find_swap_request(struct page *page)
 	return req;
 }

-/*
- * nfs_page_find_head_request - find head request associated with @page
+/**
+ * nfs_folio_find_head_request - find head request associated with a folio
+ * @folio: pointer to folio
  *
  * returns matching head request with reference held, or NULL if not found.
  */
-static struct nfs_page *nfs_page_find_head_request(struct page *page)
+static struct nfs_page *nfs_folio_find_head_request(struct folio *folio)
 {
 	struct nfs_page *req;

-	req = nfs_page_find_private_request(page);
+	req = nfs_folio_find_private_request(folio);
 	if (!req)
-		req = nfs_page_find_swap_request(page);
+		req = nfs_folio_find_swap_request(folio);
 	return req;
 }

-static struct nfs_page *nfs_find_and_lock_page_request(struct page *page)
+static struct nfs_page *nfs_folio_find_and_lock_request(struct folio *folio)
 {
-	struct inode *inode = page_file_mapping(page)->host;
+	struct inode *inode = folio_file_mapping(folio)->host;
 	struct nfs_page *req, *head;
 	int ret;

 	for (;;) {
-		req = nfs_page_find_head_request(page);
+		req = nfs_folio_find_head_request(folio);
 		if (!req)
 			return req;
 		head = nfs_page_group_lock_head(req);
@@ -261,9 +258,9 @@ static struct nfs_page *nfs_find_and_lock_page_request(struct page *page)
 			return ERR_PTR(ret);
 		}
 		/* Ensure that nobody removed the request before we locked it */
-		if (head == nfs_page_private_request(page))
+		if (head == nfs_folio_private_request(folio))
 			break;
-		if (PageSwapCache(page))
+		if (folio_test_swapcache(folio))
 			break;
 		nfs_unlock_and_release_request(head);
 	}
@@ -271,18 +268,19 @@ static struct nfs_page *nfs_find_and_lock_page_request(struct page *page)
 }

 /* Adjust the file length if we're writing beyond the end */
-static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
+static void nfs_grow_file(struct folio *folio, unsigned int offset,
+			  unsigned int count)
 {
-	struct inode *inode = page_file_mapping(page)->host;
+	struct inode *inode = folio_file_mapping(folio)->host;
 	loff_t end, i_size;
 	pgoff_t end_index;

 	spin_lock(&inode->i_lock);
 	i_size = i_size_read(inode);
-	end_index = (i_size - 1) >> PAGE_SHIFT;
-	if (i_size > 0 && page_index(page) < end_index)
+	end_index = ((i_size - 1) >> folio_shift(folio)) << folio_order(folio);
+	if (i_size > 0 && folio_index(folio) < end_index)
 		goto out;
-	end = page_file_offset(page) + ((loff_t)offset+count);
+	end = folio_file_pos(folio) + (loff_t)offset + (loff_t)count;
 	if (i_size >= end)
 		goto out;
 	trace_nfs_size_grow(inode, end);
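
The new end_index computation in nfs_grow_file() is easy to misread: shifting down by folio_shift() finds which folio holds the last byte of the file, and shifting back up by folio_order() turns that folio number into a page index. A quick userspace check of the arithmetic, with illustrative constants only:

#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;	/* 4096-byte pages */
	unsigned int order = 2;		/* order-2 folio = 4 pages = 16K */
	unsigned int folio_shift = page_shift + order;
	unsigned long long i_size = 40000; /* EOF lands inside the 3rd folio */

	/* end_index = ((i_size - 1) >> folio_shift(folio)) << folio_order(folio) */
	unsigned long long end_index = ((i_size - 1) >> folio_shift) << order;

	/* a folio whose index is below this can never extend the file */
	printf("end_index = %llu\n", end_index); /* (39999 >> 14) << 2 = 8 */
	return 0;
}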
@@ -308,11 +306,11 @@ static void nfs_set_pageerror(struct address_space *mapping)
 	spin_unlock(&inode->i_lock);
 }

-static void nfs_mapping_set_error(struct page *page, int error)
+static void nfs_mapping_set_error(struct folio *folio, int error)
 {
-	struct address_space *mapping = page_file_mapping(page);
+	struct address_space *mapping = folio_file_mapping(folio);

-	SetPageError(page);
+	folio_set_error(folio);
 	filemap_set_wb_err(mapping, error);
 	if (mapping->host)
 		errseq_set(&mapping->host->i_sb->s_wb_err,
@@ -359,9 +357,9 @@ nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
  */
 static bool nfs_page_group_covers_page(struct nfs_page *req)
 {
+	unsigned int len = nfs_folio_length(nfs_page_to_folio(req));
 	struct nfs_page *tmp;
 	unsigned int pos = 0;
-	unsigned int len = nfs_page_length(req->wb_page);

 	nfs_page_group_lock(req);
@@ -381,11 +379,13 @@ static bool nfs_page_group_covers_page(struct nfs_page *req)
  */
 static void nfs_mark_uptodate(struct nfs_page *req)
 {
-	if (PageUptodate(req->wb_page))
+	struct folio *folio = nfs_page_to_folio(req);
+
+	if (folio_test_uptodate(folio))
 		return;
 	if (!nfs_page_group_covers_page(req))
 		return;
-	SetPageUptodate(req->wb_page);
+	folio_mark_uptodate(folio);
 }

 static int wb_priority(struct writeback_control *wbc)
@@ -407,35 +407,34 @@ int nfs_congestion_kb;
 #define NFS_CONGESTION_OFF_THRESH	\
 	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

-static void nfs_set_page_writeback(struct page *page)
+static void nfs_folio_set_writeback(struct folio *folio)
 {
-	struct inode *inode = page_file_mapping(page)->host;
-	struct nfs_server *nfss = NFS_SERVER(inode);
-	int ret = test_set_page_writeback(page);
-
-	WARN_ON_ONCE(ret != 0);
+	struct nfs_server *nfss = NFS_SERVER(folio_file_mapping(folio)->host);

-	if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH)
+	folio_start_writeback(folio);
+	if (atomic_long_inc_return(&nfss->writeback) >
+	    NFS_CONGESTION_ON_THRESH)
 		nfss->write_congested = 1;
 }

-static void nfs_end_page_writeback(struct nfs_page *req)
+static void nfs_folio_end_writeback(struct folio *folio)
 {
-	struct inode *inode = page_file_mapping(req->wb_page)->host;
-	struct nfs_server *nfss = NFS_SERVER(inode);
-	bool is_done;
+	struct nfs_server *nfss = NFS_SERVER(folio_file_mapping(folio)->host);

-	is_done = nfs_page_group_sync_on_bit(req, PG_WB_END);
-	nfs_unlock_request(req);
-	if (!is_done)
-		return;
-
-	end_page_writeback(req->wb_page);
-	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
+	folio_end_writeback(folio);
+	if (atomic_long_dec_return(&nfss->writeback) <
+	    NFS_CONGESTION_OFF_THRESH)
 		nfss->write_congested = 0;
 }

+static void nfs_page_end_writeback(struct nfs_page *req)
+{
+	if (nfs_page_group_sync_on_bit(req, PG_WB_END)) {
+		nfs_unlock_request(req);
+		nfs_folio_end_writeback(nfs_page_to_folio(req));
+	} else
+		nfs_unlock_request(req);
+}
+
 /*
  * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
  *
@@ -550,7 +549,7 @@ nfs_join_page_group(struct nfs_page *head, struct inode *inode)

 /*
  * nfs_lock_and_join_requests - join all subreqs to the head req
- * @page: the page used to lookup the "page group" of nfs_page structures
+ * @folio: the folio used to lookup the "page group" of nfs_page structures
  *
  * This function joins all sub requests to the head request by first
  * locking all requests in the group, cancelling any pending operations
@@ -560,13 +559,12 @@ nfs_join_page_group(struct nfs_page *head, struct inode *inode)
  *
  * Returns a locked, referenced pointer to the head request - which after
  * this call is guaranteed to be the only request associated with the page.
- * Returns NULL if no requests are found for @page, or a ERR_PTR if an
+ * Returns NULL if no requests are found for @folio, or a ERR_PTR if an
  * error was encountered.
  */
-static struct nfs_page *
-nfs_lock_and_join_requests(struct page *page)
+static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
 {
-	struct inode *inode = page_file_mapping(page)->host;
+	struct inode *inode = folio_file_mapping(folio)->host;
 	struct nfs_page *head;
 	int ret;
@@ -575,7 +573,7 @@ nfs_lock_and_join_requests(struct page *page)
 	 * reference to the whole page group - the group will not be destroyed
 	 * until the head reference is released.
 	 */
-	head = nfs_find_and_lock_page_request(page);
+	head = nfs_folio_find_and_lock_request(folio);
 	if (IS_ERR_OR_NULL(head))
 		return head;
@@ -593,11 +591,10 @@ nfs_lock_and_join_requests(struct page *page)

 static void nfs_write_error(struct nfs_page *req, int error)
 {
-	trace_nfs_write_error(page_file_mapping(req->wb_page)->host, req,
-			      error);
-	nfs_mapping_set_error(req->wb_page, error);
+	trace_nfs_write_error(nfs_page_to_inode(req), req, error);
+	nfs_mapping_set_error(nfs_page_to_folio(req), error);
 	nfs_inode_remove_request(req);
-	nfs_end_page_writeback(req);
+	nfs_page_end_writeback(req);
 	nfs_release_request(req);
 }

@@ -605,21 +602,21 @@ static void nfs_write_error(struct nfs_page *req, int error)
  * Find an associated nfs write request, and prepare to flush it out
  * May return an error if the user signalled nfs_wait_on_request().
  */
-static int nfs_page_async_flush(struct page *page,
+static int nfs_page_async_flush(struct folio *folio,
 				struct writeback_control *wbc,
 				struct nfs_pageio_descriptor *pgio)
 {
 	struct nfs_page *req;
 	int ret = 0;

-	req = nfs_lock_and_join_requests(page);
+	req = nfs_lock_and_join_requests(folio);
 	if (!req)
 		goto out;
 	ret = PTR_ERR(req);
 	if (IS_ERR(req))
 		goto out;

-	nfs_set_page_writeback(page);
+	nfs_folio_set_writeback(folio);
 	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

 	/* If there is a fatal error that covers this write, just exit */
@@ -637,12 +634,12 @@ static int nfs_page_async_flush(struct page *page,
 			goto out_launder;
 		if (wbc->sync_mode == WB_SYNC_NONE)
 			ret = AOP_WRITEPAGE_ACTIVATE;
-		redirty_page_for_writepage(wbc, page);
+		folio_redirty_for_writepage(wbc, folio);
 		nfs_redirty_request(req);
 		pgio->pg_error = 0;
 	} else
-		nfs_add_stats(page_file_mapping(page)->host,
+		nfs_add_stats(folio_file_mapping(folio)->host,
 			      NFSIOS_WRITEPAGES, 1);
 out:
 	return ret;
 out_launder:
@@ -650,21 +647,21 @@ static int nfs_page_async_flush(struct page *page,
 	return 0;
 }

-static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
+static int nfs_do_writepage(struct folio *folio, struct writeback_control *wbc,
 			    struct nfs_pageio_descriptor *pgio)
 {
-	nfs_pageio_cond_complete(pgio, page_index(page));
-	return nfs_page_async_flush(page, wbc, pgio);
+	nfs_pageio_cond_complete(pgio, folio_index(folio));
+	return nfs_page_async_flush(folio, wbc, pgio);
 }

 /*
  * Write an mmapped page to the server.
  */
-static int nfs_writepage_locked(struct page *page,
+static int nfs_writepage_locked(struct folio *folio,
 				struct writeback_control *wbc)
 {
 	struct nfs_pageio_descriptor pgio;
-	struct inode *inode = page_file_mapping(page)->host;
+	struct inode *inode = folio_file_mapping(folio)->host;
 	int err;

 	if (wbc->sync_mode == WB_SYNC_NONE &&
@@ -672,9 +669,9 @@ static int nfs_writepage_locked(struct page *page,
 		return AOP_WRITEPAGE_ACTIVATE;
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
-	nfs_pageio_init_write(&pgio, inode, 0,
-			      false, &nfs_async_write_completion_ops);
-	err = nfs_do_writepage(page, wbc, &pgio);
+	nfs_pageio_init_write(&pgio, inode, 0, false,
+			      &nfs_async_write_completion_ops);
+	err = nfs_do_writepage(folio, wbc, &pgio);
 	pgio.pg_error = 0;
 	nfs_pageio_complete(&pgio);
 	return err;
@@ -682,19 +679,22 @@ static int nfs_writepage_locked(struct page *page,

 int nfs_writepage(struct page *page, struct writeback_control *wbc)
 {
+	struct folio *folio = page_folio(page);
 	int ret;

-	ret = nfs_writepage_locked(page, wbc);
+	ret = nfs_writepage_locked(folio, wbc);
 	if (ret != AOP_WRITEPAGE_ACTIVATE)
 		unlock_page(page);
 	return ret;
 }

-static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
+static int nfs_writepages_callback(struct page *page,
+				   struct writeback_control *wbc, void *data)
 {
+	struct folio *folio = page_folio(page);
 	int ret;

-	ret = nfs_do_writepage(page, wbc, data);
+	ret = nfs_do_writepage(folio, wbc, data);
 	if (ret != AOP_WRITEPAGE_ACTIVATE)
 		unlock_page(page);
 	return ret;
@@ -750,10 +750,11 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
 /*
  * Insert a write request into an inode
  */
-static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
+static void nfs_inode_add_request(struct nfs_page *req)
 {
-	struct address_space *mapping = page_file_mapping(req->wb_page);
-	struct nfs_inode *nfsi = NFS_I(inode);
+	struct folio *folio = nfs_page_to_folio(req);
+	struct address_space *mapping = folio_file_mapping(folio);
+	struct nfs_inode *nfsi = NFS_I(mapping->host);

 	WARN_ON_ONCE(req->wb_this_page != req);
@@ -765,10 +766,10 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
 	 * with invalidate/truncate.
 	 */
 	spin_lock(&mapping->private_lock);
-	if (likely(!PageSwapCache(req->wb_page))) {
+	if (likely(!folio_test_swapcache(folio))) {
 		set_bit(PG_MAPPED, &req->wb_flags);
-		SetPagePrivate(req->wb_page);
-		set_page_private(req->wb_page, (unsigned long)req);
+		folio_set_private(folio);
+		folio->private = req;
 	}
 	spin_unlock(&mapping->private_lock);
 	atomic_long_inc(&nfsi->nrequests);
@@ -785,47 +786,43 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
  */
 static void nfs_inode_remove_request(struct nfs_page *req)
 {
-	struct address_space *mapping = page_file_mapping(req->wb_page);
-	struct inode *inode = mapping->host;
-	struct nfs_inode *nfsi = NFS_I(inode);
-	struct nfs_page *head;
-
 	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
-		head = req->wb_head;
+		struct folio *folio = nfs_page_to_folio(req->wb_head);
+		struct address_space *mapping = folio_file_mapping(folio);

 		spin_lock(&mapping->private_lock);
-		if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
-			set_page_private(head->wb_page, 0);
-			ClearPagePrivate(head->wb_page);
-			clear_bit(PG_MAPPED, &head->wb_flags);
+		if (likely(folio && !folio_test_swapcache(folio))) {
+			folio->private = NULL;
+			folio_clear_private(folio);
+			clear_bit(PG_MAPPED, &req->wb_head->wb_flags);
 		}
 		spin_unlock(&mapping->private_lock);
 	}

 	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
 		nfs_release_request(req);
-		atomic_long_dec(&nfsi->nrequests);
+		atomic_long_dec(&NFS_I(nfs_page_to_inode(req))->nrequests);
 	}
 }

-static void
-nfs_mark_request_dirty(struct nfs_page *req)
+static void nfs_mark_request_dirty(struct nfs_page *req)
 {
-	if (req->wb_page)
-		__set_page_dirty_nobuffers(req->wb_page);
+	struct folio *folio = nfs_page_to_folio(req);
+
+	if (folio)
+		filemap_dirty_folio(folio_mapping(folio), folio);
 }

 /*
  * nfs_page_search_commits_for_head_request_locked
  *
- * Search through commit lists on @inode for the head request for @page.
+ * Search through commit lists on @inode for the head request for @folio.
  * Must be called while holding the inode (which is cinfo) lock.
  *
  * Returns the head request if found, or NULL if not found.
  */
 static struct nfs_page *
 nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
-						struct page *page)
+						struct folio *folio)
 {
 	struct nfs_page *freq, *t;
 	struct nfs_commit_info cinfo;
@@ -834,13 +831,13 @@ nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
 	nfs_init_cinfo_from_inode(&cinfo, inode);

 	/* search through pnfs commit lists */
-	freq = pnfs_search_commit_reqs(inode, &cinfo, page);
+	freq = pnfs_search_commit_reqs(inode, &cinfo, folio);
 	if (freq)
 		return freq->wb_head;

 	/* Linearly search the commit list for the correct request */
 	list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
-		if (freq->wb_page == page)
+		if (nfs_page_to_folio(freq) == folio)
 			return freq->wb_head;
 	}

@@ -888,8 +885,7 @@ nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
 	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
 	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
 	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
-	if (req->wb_page)
-		nfs_mark_page_unstable(req->wb_page, cinfo);
+	nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo);
 }
 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

@@ -948,12 +944,15 @@ nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
 		nfs_request_add_commit_list(req, cinfo);
 }

-static void
-nfs_clear_page_commit(struct page *page)
+static void nfs_folio_clear_commit(struct folio *folio)
 {
-	dec_node_page_state(page, NR_WRITEBACK);
-	dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
-		    WB_WRITEBACK);
+	if (folio) {
+		long nr = folio_nr_pages(folio);
+
+		node_stat_mod_folio(folio, NR_WRITEBACK, -nr);
+		wb_stat_mod(&inode_to_bdi(folio_file_mapping(folio)->host)->wb,
+			    WB_WRITEBACK, -nr);
+	}
 }

 /* Called holding the request lock on @req */
@@ -971,7 +970,7 @@ nfs_clear_request_commit(struct nfs_page *req)
 			nfs_request_remove_commit_list(req, &cinfo);
 		}
 		mutex_unlock(&NFS_I(inode)->commit_mutex);
-		nfs_clear_page_commit(req->wb_page);
+		nfs_folio_clear_commit(nfs_page_to_folio(req));
 	}
 }

@@ -1003,7 +1002,8 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
 		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
 		    (hdr->good_bytes < bytes)) {
 			trace_nfs_comp_error(hdr->inode, req, hdr->error);
-			nfs_mapping_set_error(req->wb_page, hdr->error);
+			nfs_mapping_set_error(nfs_page_to_folio(req),
+					      hdr->error);
 			goto remove_req;
 		}
 		if (nfs_write_need_commit(hdr)) {
@@ -1017,7 +1017,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
 remove_req:
 		nfs_inode_remove_request(req);
 next:
-		nfs_end_page_writeback(req);
+		nfs_page_end_writeback(req);
 		nfs_release_request(req);
 	}
 out:
@@ -1093,10 +1093,9 @@ nfs_scan_commit(struct inode *inode, struct list_head *dst,
  * If the attempt fails, then the existing request is flushed out
  * to disk.
  */
-static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
-						  struct page *page,
-						  unsigned int offset,
-						  unsigned int bytes)
+static struct nfs_page *nfs_try_to_update_request(struct folio *folio,
+						  unsigned int offset,
+						  unsigned int bytes)
 {
 	struct nfs_page *req;
 	unsigned int rqend;
@@ -1105,7 +1104,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
 	end = offset + bytes;

-	req = nfs_lock_and_join_requests(page);
+	req = nfs_lock_and_join_requests(folio);
 	if (IS_ERR_OR_NULL(req))
 		return req;
@@ -1138,7 +1137,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
 	 */
 	nfs_mark_request_dirty(req);
 	nfs_unlock_and_release_request(req);
-	error = nfs_wb_page(inode, page);
+	error = nfs_wb_folio(folio_file_mapping(folio)->host, folio);
 	return (error < 0) ? ERR_PTR(error) : NULL;
 }

@@ -1149,40 +1148,42 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
  * if we have to add a new request. Also assumes that the caller has
  * already called nfs_flush_incompatible() if necessary.
  */
-static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
-		struct page *page, unsigned int offset, unsigned int bytes)
+static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
+						struct folio *folio,
+						unsigned int offset,
+						unsigned int bytes)
 {
-	struct inode *inode = page_file_mapping(page)->host;
-	struct nfs_page	*req;
+	struct nfs_page *req;

-	req = nfs_try_to_update_request(inode, page, offset, bytes);
+	req = nfs_try_to_update_request(folio, offset, bytes);
 	if (req != NULL)
 		goto out;
-	req = nfs_create_request(ctx, page, offset, bytes);
+	req = nfs_page_create_from_folio(ctx, folio, offset, bytes);
 	if (IS_ERR(req))
 		goto out;
-	nfs_inode_add_request(inode, req);
+	nfs_inode_add_request(req);
 out:
 	return req;
 }

-static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
-		unsigned int offset, unsigned int count)
+static int nfs_writepage_setup(struct nfs_open_context *ctx,
+			       struct folio *folio, unsigned int offset,
+			       unsigned int count)
 {
 	struct nfs_page	*req;

-	req = nfs_setup_write_request(ctx, page, offset, count);
+	req = nfs_setup_write_request(ctx, folio, offset, count);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 	/* Update file length */
-	nfs_grow_file(page, offset, count);
+	nfs_grow_file(folio, offset, count);
 	nfs_mark_uptodate(req);
 	nfs_mark_request_dirty(req);
 	nfs_unlock_and_release_request(req);
 	return 0;
 }

-int nfs_flush_incompatible(struct file *file, struct page *page)
+int nfs_flush_incompatible(struct file *file, struct folio *folio)
 {
 	struct nfs_open_context *ctx = nfs_file_open_context(file);
 	struct nfs_lock_context *l_ctx;
@@ -1198,12 +1199,12 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 	 * dropped page.
 	 */
 	do {
-		req = nfs_page_find_head_request(page);
+		req = nfs_folio_find_head_request(folio);
 		if (req == NULL)
 			return 0;
 		l_ctx = req->wb_lock_context;
-		do_flush = req->wb_page != page ||
+		do_flush = nfs_page_to_folio(req) != folio ||
 			   !nfs_match_open_context(nfs_req_openctx(req), ctx);
 		if (l_ctx && flctx &&
 		    !(list_empty_careful(&flctx->flc_posix) &&
 		      list_empty_careful(&flctx->flc_flock))) {
@@ -1212,7 +1213,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 		nfs_release_request(req);
 		if (!do_flush)
 			return 0;
-		status = nfs_wb_page(page_file_mapping(page)->host, page);
+		status = nfs_wb_folio(folio_file_mapping(folio)->host, folio);
 	} while (status == 0);
 	return status;
 }
@@ -1284,9 +1285,9 @@ bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
  * the PageUptodate() flag. In this case, we will need to turn off
  * write optimisations that depend on the page contents being correct.
  */
-static bool nfs_write_pageuptodate(struct page *page, struct inode *inode,
-				   unsigned int pagelen)
+static bool nfs_folio_write_uptodate(struct folio *folio, unsigned int pagelen)
 {
+	struct inode *inode = folio_file_mapping(folio)->host;
 	struct nfs_inode *nfsi = NFS_I(inode);

 	if (nfs_have_delegated_attributes(inode))
@@ -1300,7 +1301,7 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode,
 out:
 	if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0)
 		return false;
-	return PageUptodate(page) != 0;
+	return folio_test_uptodate(folio) != 0;
 }

 static bool
@@ -1318,16 +1319,17 @@ is_whole_file_wrlock(struct file_lock *fl)
  * If the file is opened for synchronous writes then we can just skip the rest
  * of the checks.
  */
-static int nfs_can_extend_write(struct file *file, struct page *page,
-				struct inode *inode, unsigned int pagelen)
+static int nfs_can_extend_write(struct file *file, struct folio *folio,
+				unsigned int pagelen)
 {
-	int ret;
+	struct inode *inode = file_inode(file);
 	struct file_lock_context *flctx = locks_inode_context(inode);
 	struct file_lock *fl;
+	int ret;

 	if (file->f_flags & O_DSYNC)
 		return 0;
-	if (!nfs_write_pageuptodate(page, inode, pagelen))
+	if (!nfs_folio_write_uptodate(folio, pagelen))
 		return 0;
 	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
 		return 1;
@@ -1359,33 +1361,33 @@ static int nfs_can_extend_write(struct file *file, struct page *page,
  * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
  * things with a page scheduled for an RPC call (e.g. invalidate it).
  */
-int nfs_updatepage(struct file *file, struct page *page,
-		unsigned int offset, unsigned int count)
+int nfs_update_folio(struct file *file, struct folio *folio,
+		     unsigned int offset, unsigned int count)
 {
 	struct nfs_open_context *ctx = nfs_file_open_context(file);
-	struct address_space *mapping = page_file_mapping(page);
+	struct address_space *mapping = folio_file_mapping(folio);
 	struct inode *inode = mapping->host;
-	unsigned int pagelen = nfs_page_length(page);
+	unsigned int pagelen = nfs_folio_length(folio);
 	int status = 0;

 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

-	dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
-		file, count, (long long)(page_file_offset(page) + offset));
+	dprintk("NFS: nfs_update_folio(%pD2 %d@%lld)\n", file, count,
+		(long long)(folio_file_pos(folio) + offset));

 	if (!count)
 		goto out;

-	if (nfs_can_extend_write(file, page, inode, pagelen)) {
+	if (nfs_can_extend_write(file, folio, pagelen)) {
 		count = max(count + offset, pagelen);
 		offset = 0;
 	}

-	status = nfs_writepage_setup(ctx, page, offset, count);
+	status = nfs_writepage_setup(ctx, folio, offset, count);
 	if (status < 0)
 		nfs_set_pageerror(mapping);
 out:
-	dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
+	dprintk("NFS: nfs_update_folio returns %d (isize %lld)\n",
 		status, (long long)i_size_read(inode));
 	return status;
 }
@@ -1421,13 +1423,13 @@ static void nfs_initiate_write(struct nfs_pgio_header *hdr,
  */
 static void nfs_redirty_request(struct nfs_page *req)
 {
-	struct nfs_inode *nfsi = NFS_I(page_file_mapping(req->wb_page)->host);
+	struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));

 	/* Bump the transmission count */
 	req->wb_nio++;
 	nfs_mark_request_dirty(req);
 	atomic_long_inc(&nfsi->redirtied_pages);
-	nfs_end_page_writeback(req);
+	nfs_page_end_writeback(req);
 	nfs_release_request(req);
 }

@@ -1785,18 +1787,18 @@ void nfs_retry_commit(struct list_head *page_list,
 		req = nfs_list_entry(page_list->next);
 		nfs_list_remove_request(req);
 		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
-		if (!cinfo->dreq)
-			nfs_clear_page_commit(req->wb_page);
+		nfs_folio_clear_commit(nfs_page_to_folio(req));
 		nfs_unlock_and_release_request(req);
 	}
 }
 EXPORT_SYMBOL_GPL(nfs_retry_commit);

-static void
-nfs_commit_resched_write(struct nfs_commit_info *cinfo,
-		struct nfs_page *req)
+static void nfs_commit_resched_write(struct nfs_commit_info *cinfo,
+				     struct nfs_page *req)
 {
-	__set_page_dirty_nobuffers(req->wb_page);
+	struct folio *folio = nfs_page_to_folio(req);
+
+	filemap_dirty_folio(folio_mapping(folio), folio);
 }

 /*
@@ -1847,12 +1849,13 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
 	int status = data->task.tk_status;
 	struct nfs_commit_info cinfo;
 	struct nfs_server *nfss;
+	struct folio *folio;

 	while (!list_empty(&data->pages)) {
 		req = nfs_list_entry(data->pages.next);
 		nfs_list_remove_request(req);
-		if (req->wb_page)
-			nfs_clear_page_commit(req->wb_page);
+		folio = nfs_page_to_folio(req);
+		nfs_folio_clear_commit(folio);

 		dprintk("NFS: commit (%s/%llu %d@%lld)",
 			nfs_req_openctx(req)->dentry->d_sb->s_id,
@@ -1860,10 +1863,10 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
 			req->wb_bytes,
 			(long long)req_offset(req));
 		if (status < 0) {
-			if (req->wb_page) {
+			if (folio) {
 				trace_nfs_commit_error(data->inode, req,
 						       status);
-				nfs_mapping_set_error(req->wb_page, status);
+				nfs_mapping_set_error(folio, status);
 				nfs_inode_remove_request(req);
 			}
 			dprintk_cont(", error = %d\n", status);
@@ -1874,7 +1877,7 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
 		 * returned by the server against all stored verfs. */
 		if (nfs_write_match_verf(verf, req)) {
 			/* We have a match */
-			if (req->wb_page)
+			if (folio)
 				nfs_inode_remove_request(req);
 			dprintk_cont(" OK\n");
 			goto next;
@@ -2055,7 +2058,7 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)

 	/* blocking call to cancel all requests and join to a single (head)
 	 * request */
-	req = nfs_lock_and_join_requests(&folio->page);
+	req = nfs_lock_and_join_requests(folio);

 	if (IS_ERR(req)) {
 		ret = PTR_ERR(req);
@@ -2071,13 +2074,18 @@ int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
 	return ret;
 }

-/*
- * Write back all requests on one page - we do this before reading it.
+/**
+ * nfs_wb_folio - Write back all requests on one folio
+ * @inode: pointer to inode
+ * @folio: pointer to folio
+ *
+ * Assumes that the folio has been locked by the caller, and will
+ * not unlock it.
  */
-int nfs_wb_page(struct inode *inode, struct page *page)
+int nfs_wb_folio(struct inode *inode, struct folio *folio)
 {
-	loff_t range_start = page_file_offset(page);
-	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
+	loff_t range_start = folio_file_pos(folio);
+	loff_t range_end = range_start + (loff_t)folio_size(folio) - 1;
 	struct writeback_control wbc = {
 		.sync_mode = WB_SYNC_ALL,
 		.nr_to_write = 0,
@@ -2086,25 +2094,25 @@ int nfs_wb_page(struct inode *inode, struct page *page)
 	};
 	int ret;

-	trace_nfs_writeback_page_enter(inode);
+	trace_nfs_writeback_folio(inode, folio);

 	for (;;) {
-		wait_on_page_writeback(page);
-		if (clear_page_dirty_for_io(page)) {
-			ret = nfs_writepage_locked(page, &wbc);
+		folio_wait_writeback(folio);
+		if (folio_clear_dirty_for_io(folio)) {
+			ret = nfs_writepage_locked(folio, &wbc);
 			if (ret < 0)
 				goto out_error;
 			continue;
 		}
 		ret = 0;
-		if (!PagePrivate(page))
+		if (!folio_test_private(folio))
 			break;
 		ret = nfs_commit_inode(inode, FLUSH_SYNC);
 		if (ret < 0)
 			goto out_error;
 	}
 out_error:
-	trace_nfs_writeback_page_exit(inode, ret);
+	trace_nfs_writeback_folio_done(inode, folio, ret);
 	return ret;
 }
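
The rewritten nfs_wb_folio() above loops until the folio is neither dirty nor carries a private nfs_page request. The sketch below models only that control flow in userspace; every function in it is a stub standing in for the kernel call of a similar name, so the point is the loop shape, not the APIs.

#include <stdbool.h>
#include <stdio.h>

static int dirty = 1, private_req = 1;

static bool folio_clear_dirty_for_io(void) { bool d = dirty; dirty = 0; return d; }
static int writepage_locked(void) { printf("WRITE rpc\n"); private_req = 1; return 0; }
static bool folio_test_private(void) { return private_req; }
static int commit_inode(void) { printf("COMMIT rpc\n"); private_req = 0; return 0; }

int main(void)
{
	int ret;

	for (;;) {
		/* folio_wait_writeback() would block here */
		if (folio_clear_dirty_for_io()) {
			ret = writepage_locked();
			if (ret < 0)
				break;
			continue;	/* re-check: the write may leave state behind */
		}
		ret = 0;
		if (!folio_test_private())
			break;		/* no request left: data is stable */
		ret = commit_inode();
		if (ret < 0)
			break;
	}
	printf("done, ret=%d\n", ret);
	return 0;
}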

View File

@@ -569,8 +569,9 @@ extern void nfs_complete_unlink(struct dentry *dentry, struct inode *);
 extern int  nfs_congestion_kb;
 extern int  nfs_writepage(struct page *page, struct writeback_control *wbc);
 extern int  nfs_writepages(struct address_space *, struct writeback_control *);
-extern int  nfs_flush_incompatible(struct file *file, struct page *page);
-extern int  nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
+extern int  nfs_flush_incompatible(struct file *file, struct folio *folio);
+extern int  nfs_update_folio(struct file *file, struct folio *folio,
+			     unsigned int offset, unsigned int count);

 /*
  * Try to write back everything synchronously (but check the
@@ -578,7 +579,7 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
  */
 extern int nfs_sync_inode(struct inode *inode);
 extern int nfs_wb_all(struct inode *inode);
-extern int nfs_wb_page(struct inode *inode, struct page *page);
+extern int nfs_wb_folio(struct inode *inode, struct folio *folio);
 int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio);
 extern int nfs_commit_inode(struct inode *, int);
 extern struct nfs_commit_data *nfs_commitdata_alloc(void);

View File

@@ -25,6 +25,7 @@
 enum {
 	PG_BUSY = 0,		/* nfs_{un}lock_request */
 	PG_MAPPED,		/* page private set for buffered io */
+	PG_FOLIO,		/* Tracking a folio (unset for O_DIRECT) */
 	PG_CLEAN,		/* write succeeded */
 	PG_COMMIT_TO_DS,	/* used by pnfs layouts */
 	PG_INODE_REF,		/* extra ref held by inode when in writeback */
@@ -41,7 +42,10 @@ enum {
 struct nfs_inode;
 struct nfs_page {
 	struct list_head	wb_list;	/* Defines state of page: */
-	struct page		*wb_page;	/* page to read in/write out */
+	union {
+		struct page	*wb_page;	/* page to read in/write out */
+		struct folio	*wb_folio;
+	};
 	struct nfs_lock_context	*wb_lock_context;	/* lock context info */
 	pgoff_t			wb_index;	/* Offset >> PAGE_SHIFT */
 	unsigned int		wb_offset,	/* Offset & ~PAGE_MASK */
@@ -117,10 +121,15 @@ struct nfs_pageio_descriptor {

 #define NFS_WBACK_BUSY(req)	(test_bit(PG_BUSY,&(req)->wb_flags))

-extern struct nfs_page *nfs_create_request(struct nfs_open_context *ctx,
-					   struct page *page,
-					   unsigned int offset,
-					   unsigned int count);
+extern struct nfs_page *nfs_page_create_from_page(struct nfs_open_context *ctx,
+						  struct page *page,
+						  unsigned int pgbase,
+						  loff_t offset,
+						  unsigned int count);
+extern struct nfs_page *nfs_page_create_from_folio(struct nfs_open_context *ctx,
+						   struct folio *folio,
+						   unsigned int offset,
+						   unsigned int count);
 extern void nfs_release_request(struct nfs_page *);
@@ -153,6 +162,66 @@ extern int nfs_page_set_headlock(struct nfs_page *req);
 extern void nfs_page_clear_headlock(struct nfs_page *req);
 extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);

+/**
+ * nfs_page_to_folio - Retrieve a struct folio for the request
+ * @req: pointer to a struct nfs_page
+ *
+ * If a folio was assigned to @req, then return it, otherwise return NULL.
+ */
+static inline struct folio *nfs_page_to_folio(const struct nfs_page *req)
+{
+	if (test_bit(PG_FOLIO, &req->wb_flags))
+		return req->wb_folio;
+	return NULL;
+}
+
+/**
+ * nfs_page_to_page - Retrieve a struct page for the request
+ * @req: pointer to a struct nfs_page
+ * @pgbase: folio byte offset
+ *
+ * Return the page containing the byte that is at offset @pgbase relative
+ * to the start of the folio.
+ * Note: The request starts at offset @req->wb_pgbase.
+ */
+static inline struct page *nfs_page_to_page(const struct nfs_page *req,
+					    size_t pgbase)
+{
+	struct folio *folio = nfs_page_to_folio(req);
+
+	if (folio == NULL)
+		return req->wb_page;
+	return folio_page(folio, pgbase >> PAGE_SHIFT);
+}
+
+/**
+ * nfs_page_to_inode - Retrieve an inode for the request
+ * @req: pointer to a struct nfs_page
+ */
+static inline struct inode *nfs_page_to_inode(const struct nfs_page *req)
+{
+	struct folio *folio = nfs_page_to_folio(req);
+
+	if (folio == NULL)
+		return page_file_mapping(req->wb_page)->host;
+	return folio_file_mapping(folio)->host;
+}
+
+/**
+ * nfs_page_max_length - Retrieve the maximum possible length for a request
+ * @req: pointer to a struct nfs_page
+ *
+ * Returns the maximum possible length of a request
+ */
+static inline size_t nfs_page_max_length(const struct nfs_page *req)
+{
+	struct folio *folio = nfs_page_to_folio(req);
+
+	if (folio == NULL)
+		return PAGE_SIZE;
+	return folio_size(folio);
+}
+
 /*
  * Lock the page of an asynchronous request
  */
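
The helpers above hinge on the wb_page/wb_folio union being discriminated by the PG_FOLIO flag bit. A compact userspace model of the same pattern, with stand-in types (nothing here is a kernel API), shows why O_DIRECT requests (bare pages) and buffered requests (folios) can share one struct:

#include <stdio.h>

struct page { int id; };
struct folio { struct page first_page; unsigned int order; };

enum { PG_FOLIO = 1u << 0 };

struct nfs_page_model {
	unsigned int wb_flags;
	union {
		struct page *wb_page;
		struct folio *wb_folio;
	};
};

/* branch on the flag, never on the pointer itself */
static struct folio *req_to_folio(const struct nfs_page_model *req)
{
	return (req->wb_flags & PG_FOLIO) ? req->wb_folio : NULL;
}

static size_t req_max_length(const struct nfs_page_model *req, size_t page_size)
{
	struct folio *folio = req_to_folio(req);

	return folio ? page_size << folio->order : page_size;
}

int main(void)
{
	struct folio f = { .first_page = { .id = 1 }, .order = 2 };
	struct page p = { .id = 2 };
	struct nfs_page_model buffered = { .wb_flags = PG_FOLIO, .wb_folio = &f };
	struct nfs_page_model direct = { .wb_flags = 0, .wb_page = &p };

	printf("buffered max len: %zu\n", req_max_length(&buffered, 4096)); /* 16384 */
	printf("O_DIRECT max len: %zu\n", req_max_length(&direct, 4096));   /* 4096 */
	return 0;
}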

View File

@@ -3350,6 +3350,8 @@ rpc_clnt_swap_deactivate_callback(struct rpc_clnt *clnt,
 void
 rpc_clnt_swap_deactivate(struct rpc_clnt *clnt)
 {
+	while (clnt != clnt->cl_parent)
+		clnt = clnt->cl_parent;
 	if (atomic_dec_if_positive(&clnt->cl_swapper) == 0)
 		rpc_clnt_iterate_for_each_xprt(clnt,
 				rpc_clnt_swap_deactivate_callback, NULL);
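
The two added lines are the swap-disable fix: the walk implies the cl_swapper count is expected on the topmost parent client, whose cl_parent points to itself, so decrementing on a child clone would never release it. A userspace model of just the walk, with illustrative structures only:

#include <stdio.h>

struct clnt_model {
	struct clnt_model *cl_parent;	/* self-referencing at the root */
	int cl_swapper;
};

static void swap_deactivate(struct clnt_model *clnt)
{
	while (clnt != clnt->cl_parent)	/* climb to the root client */
		clnt = clnt->cl_parent;
	if (clnt->cl_swapper > 0 && --clnt->cl_swapper == 0)
		printf("root quiesced: swap state torn down\n");
}

int main(void)
{
	struct clnt_model root = { .cl_parent = &root, .cl_swapper = 1 };
	struct clnt_model child = { .cl_parent = &root };

	swap_deactivate(&child);	/* decrements root, not child */
	return 0;
}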

View File

@@ -36,7 +36,7 @@ rpc_sysfs_object_child_ns_type(const struct kobject *kobj)
 	return &net_ns_type_operations;
 }

-static struct kobj_type rpc_sysfs_object_type = {
+static const struct kobj_type rpc_sysfs_object_type = {
 	.release = rpc_sysfs_object_release,
 	.sysfs_ops = &kobj_sysfs_ops,
 	.child_ns_type = rpc_sysfs_object_child_ns_type,
@@ -427,20 +427,20 @@ static struct attribute *rpc_sysfs_xprt_switch_attrs[] = {
 };
 ATTRIBUTE_GROUPS(rpc_sysfs_xprt_switch);

-static struct kobj_type rpc_sysfs_client_type = {
+static const struct kobj_type rpc_sysfs_client_type = {
 	.release = rpc_sysfs_client_release,
 	.sysfs_ops = &kobj_sysfs_ops,
 	.namespace = rpc_sysfs_client_namespace,
 };

-static struct kobj_type rpc_sysfs_xprt_switch_type = {
+static const struct kobj_type rpc_sysfs_xprt_switch_type = {
 	.release = rpc_sysfs_xprt_switch_release,
 	.default_groups = rpc_sysfs_xprt_switch_groups,
 	.sysfs_ops = &kobj_sysfs_ops,
 	.namespace = rpc_sysfs_xprt_switch_namespace,
 };

-static struct kobj_type rpc_sysfs_xprt_type = {
+static const struct kobj_type rpc_sysfs_xprt_type = {
 	.release = rpc_sysfs_xprt_release,
 	.default_groups = rpc_sysfs_xprt_groups,
 	.sysfs_ops = &kobj_sysfs_ops,
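
Constifying these kobj_type tables works because the kobject core accepts const struct kobj_type pointers; the payoff is that a table of function pointers moves into read-only memory and cannot be retargeted at runtime. The plain-C analogue below shows the effect; the struct is a mock, not the kernel's kobj_type:

#include <stdio.h>

struct ktype_model {
	void (*release)(void);
};

static void obj_release(void)
{
	printf("released\n");
}

/* lands in .rodata; writing through a cast is undefined and typically faults */
static const struct ktype_model example_type = {
	.release = obj_release,
};

int main(void)
{
	example_type.release();
	return 0;
}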