Revert "nfs: don't reuse partially completed requests in nfs_lock_and_join_requests"
This reverts commit b571cfcb9d.

This patch appears to assume that if one request is complete, then the others will complete too before unlocking. That is not a valid assumption, since other requests could hit a non-fatal error or a short write that would cause them not to complete.

Reported-by: Igor Raits <igor@gooddata.com>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=219508
Fixes: b571cfcb9d ("nfs: don't reuse partially completed requests in nfs_lock_and_join_requests")
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
commit 66f9dac907
parent 8f52caf9d2
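Before the diff: the first hunk below restores two small helpers. As a rough orientation only, here is a minimal, self-contained sketch of their control flow. The toy_* types and names are made up for this illustration and stand in for the real struct nfs_page / wb_flags machinery; only the shape of the logic mirrors the restored nfs_page_set_inode_ref()/nfs_cancel_remove_inode() helpers, i.e. cancel a pending removal on the request and re-take an inode reference instead of assuming the whole page group is about to complete and go away.

/*
 * Hedged toy sketch, not kernel code.  Compiling and running it just
 * shows the removal flag being cleared and the reference counts bumped.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_inode {
        long nrequests;             /* stand-in for NFS_I(inode)->nrequests */
};

struct toy_req {
        bool pg_remove;             /* stand-in for PG_REMOVE in wb_flags */
        bool pg_inode_ref;          /* stand-in for PG_INODE_REF in wb_flags */
        long refcount;              /* stand-in for wb_kref */
};

static void toy_page_set_inode_ref(struct toy_req *req, struct toy_inode *inode)
{
        if (!req->pg_inode_ref) {   /* like test_and_set_bit(PG_INODE_REF, ...) */
                req->pg_inode_ref = true;
                req->refcount++;    /* like kref_get(&req->wb_kref) */
                inode->nrequests++; /* like atomic_long_inc(&...->nrequests) */
        }
}

/* Cancel a pending removal so the request can still be joined and reused. */
static int toy_cancel_remove_inode(struct toy_req *req, struct toy_inode *inode)
{
        if (!req->pg_remove)        /* like test_bit(PG_REMOVE, ...) */
                return 0;
        /* the real helper takes nfs_page_group_lock(req) around this ... */
        if (req->pg_remove) {       /* like test_and_clear_bit(PG_REMOVE, ...) */
                req->pg_remove = false;
                toy_page_set_inode_ref(req, inode);
        }
        /* ... and releases it with nfs_page_group_unlock(req) */
        return 0;
}

int main(void)
{
        struct toy_inode inode = { .nrequests = 0 };
        struct toy_req head = { .pg_remove = true, .refcount = 1 };

        toy_cancel_remove_inode(&head, &inode);
        printf("PG_REMOVE cleared: %d, refcount: %ld, nrequests: %ld\n",
               !head.pg_remove, head.refcount, inode.nrequests);
        return 0;
}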
fs/nfs/write.c
@@ -144,6 +144,31 @@ static void nfs_io_completion_put(struct nfs_io_completion *ioc)
                 kref_put(&ioc->refcount, nfs_io_completion_release);
 }
 
+static void
+nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
+{
+        if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) {
+                kref_get(&req->wb_kref);
+                atomic_long_inc(&NFS_I(inode)->nrequests);
+        }
+}
+
+static int
+nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
+{
+        int ret;
+
+        if (!test_bit(PG_REMOVE, &req->wb_flags))
+                return 0;
+        ret = nfs_page_group_lock(req);
+        if (ret)
+                return ret;
+        if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
+                nfs_page_set_inode_ref(req, inode);
+        nfs_page_group_unlock(req);
+        return 0;
+}
+
 /**
  * nfs_folio_find_head_request - find head request associated with a folio
  * @folio: pointer to folio
@@ -540,7 +565,6 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
         struct inode *inode = folio->mapping->host;
         struct nfs_page *head, *subreq;
         struct nfs_commit_info cinfo;
-        bool removed;
         int ret;
 
         /*
@@ -565,18 +589,18 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
                 goto retry;
         }
 
-        ret = nfs_page_group_lock(head);
+        ret = nfs_cancel_remove_inode(head, inode);
         if (ret < 0)
                 goto out_unlock;
 
-        removed = test_bit(PG_REMOVE, &head->wb_flags);
+        ret = nfs_page_group_lock(head);
+        if (ret < 0)
+                goto out_unlock;
 
         /* lock each request in the page group */
         for (subreq = head->wb_this_page;
              subreq != head;
              subreq = subreq->wb_this_page) {
-                if (test_bit(PG_REMOVE, &subreq->wb_flags))
-                        removed = true;
                 ret = nfs_page_group_lock_subreq(head, subreq);
                 if (ret < 0)
                         goto out_unlock;
@@ -584,21 +608,6 @@ static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
 
         nfs_page_group_unlock(head);
 
-        /*
-         * If PG_REMOVE is set on any request, I/O on that request has
-         * completed, but some requests were still under I/O at the time
-         * we locked the head request.
-         *
-         * In that case the above wait for all requests means that all I/O
-         * has now finished, and we can restart from a clean slate. Let the
-         * old requests go away and start from scratch instead.
-         */
-        if (removed) {
-                nfs_unroll_locks(head, head);
-                nfs_unlock_and_release_request(head);
-                goto retry;
-        }
-
         nfs_init_cinfo_from_inode(&cinfo, inode);
         nfs_join_page_group(head, &cinfo, inode);
         return head;