mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-28 16:52:18 +00:00
b4f239c91f
Change the way netfslib collects read results to do all the collection for
a particular read request using a single work item that walks along the
subrequest queue as subrequests make progress or complete, unlocking folios
progressively rather than doing the unlock in parallel as parallel requests
come in.

The code is remodelled to be more like the write-side code, though only
using a single stream. This makes it more directly comparable and thus
easier to duplicate fixes between the two sides.

This has a number of advantages:

 (1) It's simpler. There doesn't need to be a complex donation mechanism
     to handle mismatches between the size and alignment of subrequests and
     folios. The collector unlocks folios as the subrequests covering each
     folio complete.

 (2) It should cause less scheduler overhead as there's a single work item
     in play unlocking pages in parallel when a read gets split up into a
     lot of subrequests instead of one per subrequest.

     Whilst the parallelism is nice in theory, in practice, the vast
     majority of loads are sequential reads of the whole file, so
     committing a bunch of threads to unlocking folios out of order doesn't
     help in those cases.

 (3) It should make it easier to implement content decryption. A folio
     cannot be decrypted until all the requests that contribute to it have
     completed - and, again, most loads are sequential and so, most of the
     time, we want to begin decryption sequentially (though it's great if
     the decryption can happen in parallel).

There is a disadvantage in that we're losing the ability to decrypt and
unlock things on an as-things-arrive basis, which may affect some
applications.

Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20241108173236.1382366-29-dhowells@redhat.com
cc: Jeff Layton <jlayton@kernel.org>
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
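For illustration, the collection model described above can be sketched in
plain C. The following is a minimal, self-contained simulation of the idea
(one collector walking an ordered subrequest queue and unlocking folios
progressively), not the netfslib implementation; all names, types and sizes
here are hypothetical:

#include <stdbool.h>
#include <stdio.h>

#define FOLIO_SIZE 0x1000ULL

struct subreq {
	unsigned long long start, len;	/* byte range covered */
	bool done;			/* has the I/O completed? */
};

/* Unlock (here: just print) every folio lying wholly below collected_to. */
static void unlock_folios_to(unsigned long long *folio_pos,
			     unsigned long long collected_to)
{
	while (*folio_pos + FOLIO_SIZE <= collected_to) {
		printf("unlock folio @%05llx\n", *folio_pos);
		*folio_pos += FOLIO_SIZE;
	}
}

/* The single collector: walk the queue in submission order and stop at the
 * first incomplete subrequest - folios beyond that point may still receive
 * data, so they must stay locked for now. */
static void collect(const struct subreq *q, int n)
{
	unsigned long long folio_pos = 0;

	for (int i = 0; i < n; i++) {
		if (!q[i].done)
			break;
		unlock_folios_to(&folio_pos, q[i].start + q[i].len);
	}
}

int main(void)
{
	/* Subrequest sizes needn't align with folios: no donation mechanism
	 * is needed because the collector only tracks how far the contiguous
	 * completed region extends. */
	const struct subreq q[] = {
		{ 0x0000, 0x1800, true },	/* folio 0 done, folio 1 partial */
		{ 0x1800, 0x1800, true },	/* completes folios 1 and 2 */
		{ 0x3000, 0x1000, false },	/* still in flight */
	};

	collect(q, 3);	/* unlocks the folios at 0x0000, 0x1000 and 0x2000 */
	return 0;
}

Built with any C compiler (e.g. cc -std=c99), this prints the three folio
unlocks in order and stops at the subrequest that is still in flight.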
172 lines
4.3 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/* Miscellaneous bits for the netfs support library.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include "internal.h"
#define CREATE_TRACE_POINTS
#include <trace/events/netfs.h>

MODULE_DESCRIPTION("Network fs support");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");

EXPORT_TRACEPOINT_SYMBOL(netfs_sreq);

unsigned netfs_debug;
module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
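
/* Note: because of the module_param_named() above, the mask is exposed as
 * "debug", not "netfs_debug": it can be set at load time (e.g. "modprobe
 * netfs debug=1", or "netfs.debug=1" on the kernel command line when built
 * in) and, since S_IWUSR is set, changed at runtime by root via
 * /sys/module/netfs/parameters/debug. */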

static struct kmem_cache *netfs_request_slab;
static struct kmem_cache *netfs_subrequest_slab;
mempool_t netfs_request_pool;
mempool_t netfs_subrequest_pool;

#ifdef CONFIG_PROC_FS
LIST_HEAD(netfs_io_requests);
DEFINE_SPINLOCK(netfs_proc_lock);

static const char *netfs_origins[nr__netfs_io_origin] = {
	[NETFS_READAHEAD]		= "RA",
	[NETFS_READPAGE]		= "RP",
	[NETFS_READ_GAPS]		= "RG",
	[NETFS_READ_SINGLE]		= "R1",
	[NETFS_READ_FOR_WRITE]		= "RW",
	[NETFS_DIO_READ]		= "DR",
	[NETFS_WRITEBACK]		= "WB",
	[NETFS_WRITEBACK_SINGLE]	= "W1",
	[NETFS_WRITETHROUGH]		= "WT",
	[NETFS_UNBUFFERED_WRITE]	= "UW",
	[NETFS_DIO_WRITE]		= "DW",
	[NETFS_PGPRIV2_COPY_TO_CACHE]	= "2C",
};
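
/* Each abbreviation above is what appears in the OR (origin) column of
 * /proc/fs/netfs/requests, generated below. */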

/*
 * Generate a list of I/O requests in /proc/fs/netfs/requests
 */
static int netfs_requests_seq_show(struct seq_file *m, void *v)
{
	struct netfs_io_request *rreq;

	if (v == &netfs_io_requests) {
		seq_puts(m,
			 "REQUEST  OR REF FL ERR  OPS COVERAGE\n"
			 "======== == === == ==== === =========\n"
			 );
		return 0;
	}

	rreq = list_entry(v, struct netfs_io_request, proc_link);
	seq_printf(m,
		   "%08x %s %3d %2lx %4ld %3d @%04llx %llx/%llx",
		   rreq->debug_id,
		   netfs_origins[rreq->origin],
		   refcount_read(&rreq->ref),
		   rreq->flags,
		   rreq->error,
		   0,
		   rreq->start, rreq->submitted, rreq->len);
	seq_putc(m, '\n');
	return 0;
}
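
/*
 * For illustration, a line of output from the above might look like this
 * (all values hypothetical; note that the OPS column is currently emitted
 * as a literal 0 by the seq_printf() above):
 *
 *	REQUEST  OR REF FL ERR  OPS COVERAGE
 *	======== == === == ==== === =========
 *	00000003 RA   2  0    0   0 @0000 0/40000
 *
 * Read it with "cat /proc/fs/netfs/requests" while I/O is in flight.
 */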

static void *netfs_requests_seq_start(struct seq_file *m, loff_t *_pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return seq_list_start_head(&netfs_io_requests, *_pos);
}

static void *netfs_requests_seq_next(struct seq_file *m, void *v, loff_t *_pos)
{
	return seq_list_next(v, &netfs_io_requests, _pos);
}

static void netfs_requests_seq_stop(struct seq_file *m, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}

static const struct seq_operations netfs_requests_seq_ops = {
	.start	= netfs_requests_seq_start,
	.next	= netfs_requests_seq_next,
	.stop	= netfs_requests_seq_stop,
	.show	= netfs_requests_seq_show,
};
#endif /* CONFIG_PROC_FS */

static int __init netfs_init(void)
{
	int ret = -ENOMEM;

	netfs_request_slab = kmem_cache_create("netfs_request",
					       sizeof(struct netfs_io_request), 0,
					       SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
					       NULL);
	if (!netfs_request_slab)
		goto error_req;

	if (mempool_init_slab_pool(&netfs_request_pool, 100, netfs_request_slab) < 0)
		goto error_reqpool;

	netfs_subrequest_slab = kmem_cache_create("netfs_subrequest",
						  sizeof(struct netfs_io_subrequest) + 16, 0,
						  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
						  NULL);
	if (!netfs_subrequest_slab)
		goto error_subreq;

	if (mempool_init_slab_pool(&netfs_subrequest_pool, 100, netfs_subrequest_slab) < 0)
		goto error_subreqpool;
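
	/* Each mempool above keeps a reserve (100 objects here, preallocated
	 * from the corresponding slab cache) so that request and subrequest
	 * allocation can always make forward progress under memory
	 * pressure. */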

	if (!proc_mkdir("fs/netfs", NULL))
		goto error_proc;
	if (!proc_create_seq("fs/netfs/requests", S_IFREG | 0444, NULL,
			     &netfs_requests_seq_ops))
		goto error_procfile;
#ifdef CONFIG_FSCACHE_STATS
	if (!proc_create_single("fs/netfs/stats", S_IFREG | 0444, NULL,
				netfs_stats_show))
		goto error_procfile;
#endif

	ret = fscache_init();
	if (ret < 0)
		goto error_fscache;
	return 0;

error_fscache:
error_procfile:
	remove_proc_subtree("fs/netfs", NULL);
error_proc:
	mempool_exit(&netfs_subrequest_pool);
error_subreqpool:
	kmem_cache_destroy(netfs_subrequest_slab);
error_subreq:
	mempool_exit(&netfs_request_pool);
error_reqpool:
	kmem_cache_destroy(netfs_request_slab);
error_req:
	return ret;
}
fs_initcall(netfs_init);
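
/* fs_initcall() runs netfs_init() at the filesystem initcall level during
 * boot when netfs is built in; when built as a module, it is equivalent to
 * module_init() and runs at load time.  netfs_exit() below tears everything
 * down in the reverse order of construction. */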

static void __exit netfs_exit(void)
{
	fscache_exit();
	remove_proc_subtree("fs/netfs", NULL);
	mempool_exit(&netfs_subrequest_pool);
	kmem_cache_destroy(netfs_subrequest_slab);
	mempool_exit(&netfs_request_pool);
	kmem_cache_destroy(netfs_request_slab);
}
module_exit(netfs_exit);