Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
b4f239c91f
Change the way netfslib collects read results to do all the collection for a particular read request using a single work item that walks along the subrequest queue as subrequests make progress or complete, unlocking folios progressively rather than doing the unlock in parallel as parallel requests come in.

The code is remodelled to be more like the write-side code, though only using a single stream. This makes it more directly comparable and thus easier to duplicate fixes between the two sides.

This has a number of advantages:

 (1) It's simpler. There doesn't need to be a complex donation mechanism to handle mismatches between the size and alignment of subrequests and folios. The collector unlocks folios as the subrequests covering each complete.

 (2) It should cause less scheduler overhead as there's a single work item in play unlocking pages in parallel when a read gets split up into a lot of subrequests instead of one per subrequest.

     Whilst the parallelism is nice in theory, in practice, the vast majority of loads are sequential reads of the whole file, so committing a bunch of threads to unlocking folios out of order doesn't help in those cases.

 (3) It should make it easier to implement content decryption. A folio cannot be decrypted until all the requests that contribute to it have completed - and, again, most loads are sequential, so, most of the time, we want to begin decryption sequentially (though it's great if the decryption can happen in parallel).

There is a disadvantage in that we're losing the ability to decrypt and unlock things on an as-things-arrive basis, which may affect some applications.

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241108173236.1382366-29-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
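To illustrate the collection model described above, here is a minimal, self-contained sketch (not the netfslib code; the names sub_range and collect_completed are hypothetical) of a single collector walking a subrequest queue in order and only releasing folios up to the lowest contiguously completed file offset:

/* Illustrative sketch only, not the netfslib implementation. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sub_range {                      /* hypothetical stand-in for a subrequest */
        unsigned long long start;       /* file offset covered by this subrequest */
        unsigned long long len;
        bool done;                      /* set when the I/O for this range completes */
};

/*
 * Walk the queue front-to-back and return the offset up to which the read is
 * contiguously complete; folios wholly below this point could be unlocked.
 */
static unsigned long long collect_completed(const struct sub_range *q, size_t n)
{
        unsigned long long cleared_to = n ? q[0].start : 0;
        size_t i;

        for (i = 0; i < n; i++) {
                if (!q[i].done)
                        break;          /* stop at the first incomplete subrequest */
                cleared_to = q[i].start + q[i].len;
        }
        return cleared_to;
}

int main(void)
{
        struct sub_range queue[] = {
                { .start = 0,      .len = 65536, .done = true  },
                { .start = 65536,  .len = 65536, .done = false },
                { .start = 131072, .len = 65536, .done = true  },
        };

        /*
         * Only the first 64KiB is contiguously complete, so only folios in
         * that range would be unlocked; the third range must wait for the
         * second even though its own I/O has finished.
         */
        printf("can unlock folios up to offset %llu\n",
               collect_completed(queue, sizeof(queue) / sizeof(queue[0])));
        return 0;
}

In netfslib proper this walk is driven by a single work item per request, which is what the INIT_WORK(&rreq->work, netfs_read_collection_worker) call in the file below sets up for the read origins.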
235 lines
6.5 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/* Object lifetime handling and tracing.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/delay.h>
#include "internal.h"

/*
 * Allocate an I/O request and initialise it.
 */
struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
                                             struct file *file,
                                             loff_t start, size_t len,
                                             enum netfs_io_origin origin)
{
        static atomic_t debug_ids;
        struct inode *inode = file ? file_inode(file) : mapping->host;
        struct netfs_inode *ctx = netfs_inode(inode);
        struct netfs_io_request *rreq;
        mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
        struct kmem_cache *cache = mempool->pool_data;
        int ret;

        for (;;) {
                rreq = mempool_alloc(mempool, GFP_KERNEL);
                if (rreq)
                        break;
                msleep(10);
        }

        memset(rreq, 0, kmem_cache_size(cache));
        rreq->start = start;
        rreq->len = len;
        rreq->origin = origin;
        rreq->netfs_ops = ctx->ops;
        rreq->mapping = mapping;
        rreq->inode = inode;
        rreq->i_size = i_size_read(inode);
        rreq->debug_id = atomic_inc_return(&debug_ids);
        rreq->wsize = INT_MAX;
        rreq->io_streams[0].sreq_max_len = ULONG_MAX;
        rreq->io_streams[0].sreq_max_segs = 0;
        spin_lock_init(&rreq->lock);
        INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
        INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
        init_waitqueue_head(&rreq->waitq);
        refcount_set(&rreq->ref, 1);

        if (origin == NETFS_READAHEAD ||
            origin == NETFS_READPAGE ||
            origin == NETFS_READ_GAPS ||
            origin == NETFS_READ_SINGLE ||
            origin == NETFS_READ_FOR_WRITE ||
            origin == NETFS_DIO_READ) {
                INIT_WORK(&rreq->work, netfs_read_collection_worker);
                rreq->io_streams[0].avail = true;
        } else {
                INIT_WORK(&rreq->work, netfs_write_collection_worker);
        }

        __set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
        if (file && file->f_flags & O_NONBLOCK)
                __set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
        if (rreq->netfs_ops->init_request) {
                ret = rreq->netfs_ops->init_request(rreq, file);
                if (ret < 0) {
                        mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
                        return ERR_PTR(ret);
                }
        }

        atomic_inc(&ctx->io_count);
        trace_netfs_rreq_ref(rreq->debug_id, 1, netfs_rreq_trace_new);
        netfs_proc_add_rreq(rreq);
        netfs_stat(&netfs_n_rh_rreq);
        return rreq;
}

void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what)
{
        int r;

        __refcount_inc(&rreq->ref, &r);
        trace_netfs_rreq_ref(rreq->debug_id, r + 1, what);
}

/*
 * Unhook and release all the subrequests remaining on each of a request's
 * I/O streams.
 */
void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
{
        struct netfs_io_subrequest *subreq;
        struct netfs_io_stream *stream;
        int s;

        for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
                stream = &rreq->io_streams[s];
                while (!list_empty(&stream->subrequests)) {
                        subreq = list_first_entry(&stream->subrequests,
                                                  struct netfs_io_subrequest, rreq_link);
                        list_del(&subreq->rreq_link);
                        netfs_put_subrequest(subreq, was_async,
                                             netfs_sreq_trace_put_clear);
                }
        }
}

static void netfs_free_request_rcu(struct rcu_head *rcu)
{
        struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu);

        mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
        netfs_stat_d(&netfs_n_rh_rreq);
}

/*
 * Release the resources held by a request, deferring the final freeing of the
 * request structure itself to an RCU callback.
 */
static void netfs_free_request(struct work_struct *work)
{
        struct netfs_io_request *rreq =
                container_of(work, struct netfs_io_request, work);
        struct netfs_inode *ictx = netfs_inode(rreq->inode);
        unsigned int i;

        trace_netfs_rreq(rreq, netfs_rreq_trace_free);
        netfs_proc_del_rreq(rreq);
        netfs_clear_subrequests(rreq, false);
        if (rreq->netfs_ops->free_request)
                rreq->netfs_ops->free_request(rreq);
        if (rreq->cache_resources.ops)
                rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
        if (rreq->direct_bv) {
                for (i = 0; i < rreq->direct_bv_count; i++) {
                        if (rreq->direct_bv[i].bv_page) {
                                if (rreq->direct_bv_unpin)
                                        unpin_user_page(rreq->direct_bv[i].bv_page);
                        }
                }
                kvfree(rreq->direct_bv);
        }
        rolling_buffer_clear(&rreq->buffer);

        if (atomic_dec_and_test(&ictx->io_count))
                wake_up_var(&ictx->io_count);
        call_rcu(&rreq->rcu, netfs_free_request_rcu);
}

/*
 * Drop a reference on a request and, if that was the last reference, free it,
 * punting the work to a workqueue if called from an async context.
 */
void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
                       enum netfs_rreq_ref_trace what)
{
        unsigned int debug_id;
        bool dead;
        int r;

        if (rreq) {
                debug_id = rreq->debug_id;
                dead = __refcount_dec_and_test(&rreq->ref, &r);
                trace_netfs_rreq_ref(debug_id, r - 1, what);
                if (dead) {
                        if (was_async) {
                                rreq->work.func = netfs_free_request;
                                if (!queue_work(system_unbound_wq, &rreq->work))
                                        WARN_ON(1);
                        } else {
                                netfs_free_request(&rreq->work);
                        }
                }
        }
}

/*
 * Allocate and partially initialise an I/O subrequest structure.
 */
struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
{
        struct netfs_io_subrequest *subreq;
        mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool;
        struct kmem_cache *cache = mempool->pool_data;

        for (;;) {
                subreq = mempool_alloc(rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool,
                                       GFP_KERNEL);
                if (subreq)
                        break;
                msleep(10);
        }

        memset(subreq, 0, kmem_cache_size(cache));
        INIT_WORK(&subreq->work, NULL);
        INIT_LIST_HEAD(&subreq->rreq_link);
        refcount_set(&subreq->ref, 2);
        subreq->rreq = rreq;
        subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
        netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
        netfs_stat(&netfs_n_rh_sreq);
        return subreq;
}

void netfs_get_subrequest(struct netfs_io_subrequest *subreq,
                          enum netfs_sreq_ref_trace what)
{
        int r;

        __refcount_inc(&subreq->ref, &r);
        trace_netfs_sreq_ref(subreq->rreq->debug_id, subreq->debug_index, r + 1,
                             what);
}

static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
                                  bool was_async)
{
        struct netfs_io_request *rreq = subreq->rreq;

        trace_netfs_sreq(subreq, netfs_sreq_trace_free);
        if (rreq->netfs_ops->free_subrequest)
                rreq->netfs_ops->free_subrequest(subreq);
        mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
        netfs_stat_d(&netfs_n_rh_sreq);
        netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
}

/*
 * Drop a reference on a subrequest and, when the last reference goes, free it,
 * also dropping the ref it holds on its parent request.
 */
void netfs_put_subrequest(struct netfs_io_subrequest *subreq, bool was_async,
                          enum netfs_sreq_ref_trace what)
{
        unsigned int debug_index = subreq->debug_index;
        unsigned int debug_id = subreq->rreq->debug_id;
        bool dead;
        int r;

        dead = __refcount_dec_and_test(&subreq->ref, &r);
        trace_netfs_sreq_ref(debug_id, debug_index, r - 1, what);
        if (dead)
                netfs_free_subrequest(subreq, was_async);
}