netfs: Drop the was_async arg from netfs_read_subreq_terminated()
Drop the was_async argument from netfs_read_subreq_terminated().  Almost
every caller is in process context and passes false.  Some filesystems
delegate the call to a workqueue to avoid doing the work in their network
message queue parsing thread.

The only exception is netfs_cache_read_terminated(), which handles
completion in the cache - which is usually a callback from the backing
filesystem in softirq context, though it can be from process context if an
error occurred.  In this case, delegate to a workqueue.

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Link: https://lore.kernel.org/r/CAHk-=wiVC5Cgyz6QKXFu6fTaA6h4CjexDR-OV9kL6Vo5x9v8=A@mail.gmail.com/
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241108173236.1382366-12-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
parent b0aa43841d
commit 966396e9f6
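To make the new calling convention concrete, here is a minimal sketch (not code from the patch) of a cache-completion callback before and after this change. my_cache_read_done() and its signature are hypothetical stand-ins for a cache termination callback; netfs_read_subreq_terminated(), schedule_work() and netfs_read_subreq_termination_worker() are the real symbols touched by the diff. The sketch assumes subreq->work has been initialised to run netfs_read_subreq_termination_worker().

#include <linux/netfs.h>
#include <linux/workqueue.h>

/*
 * Hypothetical cache-completion callback: it may be invoked from softirq
 * context by the backing filesystem, or from process context on error.
 */
static void my_cache_read_done(void *priv, ssize_t transferred_or_error,
			       bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;

	if (transferred_or_error >= 0)
		subreq->transferred += transferred_or_error;
	else
		subreq->error = transferred_or_error;

	/* Before this patch: pass the context flag down and let the helper
	 * decide whether to punt to a workqueue:
	 *
	 *	netfs_read_subreq_terminated(subreq, was_async);
	 *
	 * After this patch: netfs_read_subreq_terminated() may sleep, so
	 * always defer to the subrequest's work item, which (by assumption
	 * here) runs netfs_read_subreq_termination_worker() and thereby
	 * calls netfs_read_subreq_terminated(subreq) in process context.
	 */
	schedule_work(&subreq->work);
}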
@@ -84,7 +84,7 @@ static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
 		subreq->transferred += total;
 
 	subreq->error = err;
-	netfs_read_subreq_terminated(subreq, false);
+	netfs_read_subreq_terminated(subreq);
 }
 
 /**
@@ -247,7 +247,7 @@ static void afs_fetch_data_notify(struct afs_operation *op)
 		if (req->pos + req->actual_len >= req->file_size)
 			__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
 		subreq->error = error;
-		netfs_read_subreq_terminated(subreq, false);
+		netfs_read_subreq_terminated(subreq);
 		req->subreq = NULL;
 	} else if (req->done) {
 		req->done(req);
@@ -304,7 +304,7 @@ int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
 	if (IS_ERR(op)) {
 		if (req->subreq) {
 			req->subreq->error = PTR_ERR(op);
-			netfs_read_subreq_terminated(req->subreq, false);
+			netfs_read_subreq_terminated(req->subreq);
 		}
 		return PTR_ERR(op);
 	}
@@ -325,7 +325,7 @@ static void afs_read_worker(struct work_struct *work)
 	fsreq = afs_alloc_read(GFP_NOFS);
 	if (!fsreq) {
 		subreq->error = -ENOMEM;
-		return netfs_read_subreq_terminated(subreq, false);
+		return netfs_read_subreq_terminated(subreq);
 	}
 
 	fsreq->subreq = subreq;
@@ -352,7 +352,7 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
 		ret = afs_extract_data(call, true);
 		if (req->subreq) {
 			req->subreq->transferred += count_before - call->iov_len;
-			netfs_read_subreq_progress(req->subreq, false);
+			netfs_read_subreq_progress(req->subreq);
 		}
 		if (ret < 0)
 			return ret;
@@ -398,7 +398,7 @@ static int yfs_deliver_fs_fetch_data64(struct afs_call *call)
 		ret = afs_extract_data(call, true);
 		if (req->subreq) {
 			req->subreq->transferred += count_before - call->iov_len;
-			netfs_read_subreq_progress(req->subreq, false);
+			netfs_read_subreq_progress(req->subreq);
 		}
 		if (ret < 0)
 			return ret;
@@ -255,7 +255,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
 	}
 	subreq->error = err;
 	trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
-	netfs_read_subreq_terminated(subreq, false);
+	netfs_read_subreq_terminated(subreq);
 	iput(req->r_inode);
 	ceph_dec_osd_stopping_blocker(fsc->mdsc);
 }
@@ -317,7 +317,7 @@ static bool ceph_netfs_issue_op_inline(struct netfs_io_subrequest *subreq)
 out:
 	subreq->error = err;
 	trace_netfs_sreq(subreq, netfs_sreq_trace_io_progress);
-	netfs_read_subreq_terminated(subreq, false);
+	netfs_read_subreq_terminated(subreq);
 	return true;
 }
 
@@ -431,7 +431,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
 	ceph_osdc_put_request(req);
 	if (err) {
 		subreq->error = err;
-		netfs_read_subreq_terminated(subreq, false);
+		netfs_read_subreq_terminated(subreq);
 	}
 	doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
 }
@@ -154,7 +154,7 @@ static void netfs_cache_read_terminated(void *priv, ssize_t transferred_or_error, bool was_async)
 	} else {
 		subreq->error = transferred_or_error;
 	}
-	netfs_read_subreq_terminated(subreq, was_async);
+	schedule_work(&subreq->work);
 }
 
 /*
@@ -261,7 +261,7 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 			slice = netfs_prepare_read_iterator(subreq);
 			__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
 			subreq->error = 0;
-			netfs_read_subreq_terminated(subreq, false);
+			netfs_read_subreq_terminated(subreq);
 			goto done;
 		}
 
@@ -283,7 +283,7 @@ static void netfs_read_to_pagecache(struct netfs_io_request *rreq)
 	} while (size > 0);
 
 	if (atomic_dec_and_test(&rreq->nr_outstanding))
-		netfs_rreq_terminated(rreq, false);
+		netfs_rreq_terminated(rreq);
 
 	/* Defer error return as we may need to wait for outstanding I/O. */
 	cmpxchg(&rreq->error, 0, ret);
@@ -100,7 +100,7 @@ static int netfs_dispatch_unbuffered_reads(struct netfs_io_request *rreq)
 	} while (size > 0);
 
 	if (atomic_dec_and_test(&rreq->nr_outstanding))
-		netfs_rreq_terminated(rreq, false);
+		netfs_rreq_terminated(rreq);
 	return ret;
 }
 
@@ -85,7 +85,7 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
  * read_collect.c
  */
 void netfs_read_termination_worker(struct work_struct *work);
-void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async);
+void netfs_rreq_terminated(struct netfs_io_request *rreq);
 
 /*
  * read_pgpriv2.c
@@ -56,7 +56,7 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 	    origin == NETFS_READ_GAPS ||
 	    origin == NETFS_READ_FOR_WRITE ||
 	    origin == NETFS_DIO_READ)
-		INIT_WORK(&rreq->work, netfs_read_termination_worker);
+		INIT_WORK(&rreq->work, NULL);
 	else
 		INIT_WORK(&rreq->work, netfs_write_collection_worker);
 
@@ -85,7 +85,7 @@ static void netfs_unlock_read_folio(struct netfs_io_subrequest *subreq,
  * Unlock any folios that are now completely read. Returns true if the
  * subrequest is removed from the list.
  */
-static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was_async)
+static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq)
 {
 	struct netfs_io_subrequest *prev, *next;
 	struct netfs_io_request *rreq = subreq->rreq;
@@ -228,8 +228,7 @@ static bool netfs_consume_read_data(struct netfs_io_subrequest *subreq, bool was_async)
 	subreq->curr_folioq_slot = slot;
 	if (folioq && folioq_folio(folioq, slot))
 		subreq->curr_folio_order = folioq->orders[slot];
-	if (!was_async)
-		cond_resched();
+	cond_resched();
 	goto next_folio;
 }
 
@@ -365,7 +364,7 @@ static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
  * Note that we're in normal kernel thread context at this point, possibly
  * running on a workqueue.
  */
-static void netfs_rreq_assess(struct netfs_io_request *rreq)
+void netfs_rreq_terminated(struct netfs_io_request *rreq)
 {
 	trace_netfs_rreq(rreq, netfs_rreq_trace_assess);
 
@@ -392,56 +391,29 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq)
 		netfs_pgpriv2_write_to_the_cache(rreq);
 }
 
-void netfs_read_termination_worker(struct work_struct *work)
-{
-	struct netfs_io_request *rreq =
-		container_of(work, struct netfs_io_request, work);
-	netfs_see_request(rreq, netfs_rreq_trace_see_work);
-	netfs_rreq_assess(rreq);
-	netfs_put_request(rreq, false, netfs_rreq_trace_put_work_complete);
-}
-
-/*
- * Handle the completion of all outstanding I/O operations on a read request.
- * We inherit a ref from the caller.
- */
-void netfs_rreq_terminated(struct netfs_io_request *rreq, bool was_async)
-{
-	if (!was_async)
-		return netfs_rreq_assess(rreq);
-	if (!work_pending(&rreq->work)) {
-		netfs_get_request(rreq, netfs_rreq_trace_get_work);
-		if (!queue_work(system_unbound_wq, &rreq->work))
-			netfs_put_request(rreq, was_async, netfs_rreq_trace_put_work_nq);
-	}
-}
-
 /**
  * netfs_read_subreq_progress - Note progress of a read operation.
  * @subreq: The read request that has terminated.
- * @was_async: True if we're in an asynchronous context.
  *
  * This tells the read side of netfs lib that a contributory I/O operation has
  * made some progress and that it may be possible to unlock some folios.
  *
  * Before calling, the filesystem should update subreq->transferred to track
  * the amount of data copied into the output buffer.
- *
- * If @was_async is true, the caller might be running in softirq or interrupt
- * context and we can't sleep.
  */
-void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq,
-				bool was_async)
+void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq)
 {
 	struct netfs_io_request *rreq = subreq->rreq;
 
+	might_sleep();
+
 	trace_netfs_sreq(subreq, netfs_sreq_trace_progress);
 
 	if (subreq->transferred > subreq->consumed &&
 	    (rreq->origin == NETFS_READAHEAD ||
 	     rreq->origin == NETFS_READPAGE ||
 	     rreq->origin == NETFS_READ_FOR_WRITE)) {
-		netfs_consume_read_data(subreq, was_async);
+		netfs_consume_read_data(subreq);
 		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
 	}
 }
@@ -450,7 +422,6 @@ EXPORT_SYMBOL(netfs_read_subreq_progress);
 /**
  * netfs_read_subreq_terminated - Note the termination of an I/O operation.
  * @subreq: The I/O request that has terminated.
- * @was_async: True if we're in an asynchronous context.
  *
  * This tells the read helper that a contributory I/O operation has terminated,
  * one way or another, and that it should integrate the results.
@@ -464,7 +435,7 @@ EXPORT_SYMBOL(netfs_read_subreq_progress);
  * Before calling, the filesystem should update subreq->transferred to track
  * the amount of data copied into the output buffer.
  */
-void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq, bool was_async)
+void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq)
 {
 	struct netfs_io_request *rreq = subreq->rreq;
 
@@ -498,7 +469,7 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq, bool was_async)
 	    (rreq->origin == NETFS_READAHEAD ||
 	     rreq->origin == NETFS_READPAGE ||
 	     rreq->origin == NETFS_READ_FOR_WRITE)) {
-		netfs_consume_read_data(subreq, was_async);
+		netfs_consume_read_data(subreq);
 		__clear_bit(NETFS_SREQ_NO_PROGRESS, &subreq->flags);
 	}
 	rreq->transferred += subreq->transferred;
@@ -540,9 +511,9 @@ void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq, bool was_async)
 	}
 
 	if (atomic_dec_and_test(&rreq->nr_outstanding))
-		netfs_rreq_terminated(rreq, was_async);
+		netfs_rreq_terminated(rreq);
 
-	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
+	netfs_put_subrequest(subreq, false, netfs_sreq_trace_put_terminated);
 }
 EXPORT_SYMBOL(netfs_read_subreq_terminated);
 
@@ -558,6 +529,6 @@ void netfs_read_subreq_termination_worker(struct work_struct *work)
 	struct netfs_io_subrequest *subreq =
 		container_of(work, struct netfs_io_subrequest, work);
 
-	netfs_read_subreq_terminated(subreq, false);
+	netfs_read_subreq_terminated(subreq);
 }
 EXPORT_SYMBOL(netfs_read_subreq_termination_worker);
@@ -232,7 +232,7 @@ void netfs_retry_reads(struct netfs_io_request *rreq)
 	netfs_retry_read_subrequests(rreq);
 
 	if (atomic_dec_and_test(&rreq->nr_outstanding))
-		netfs_rreq_terminated(rreq, false);
+		netfs_rreq_terminated(rreq);
 }
 
 /*
@@ -309,7 +309,7 @@ static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
 	netfs = nfs_netfs_alloc(sreq);
 	if (!netfs) {
 		sreq->error = -ENOMEM;
-		return netfs_read_subreq_terminated(sreq, false);
+		return netfs_read_subreq_terminated(sreq);
 	}
 
 	pgio.pg_netfs = netfs; /* used in completion */
@@ -75,7 +75,7 @@ static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
 	netfs->sreq->transferred = min_t(s64, netfs->sreq->len,
 					 atomic64_read(&netfs->transferred));
 	netfs->sreq->error = netfs->error;
-	netfs_read_subreq_terminated(netfs->sreq, false);
+	netfs_read_subreq_terminated(netfs->sreq);
 	kfree(netfs);
 }
 static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
@@ -228,7 +228,7 @@ static void cifs_issue_read(struct netfs_io_subrequest *subreq)
 
 failed:
 	subreq->error = rc;
-	netfs_read_subreq_terminated(subreq, false);
+	netfs_read_subreq_terminated(subreq);
 }
 
 /*
@@ -428,8 +428,8 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp);
 vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
 
 /* (Sub)request management API. */
-void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq, bool was_async);
-void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq, bool was_async);
+void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq);
+void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq);
 void netfs_read_subreq_termination_worker(struct work_struct *work);
 void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
 			  enum netfs_sreq_ref_trace what);