mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-18 03:06:43 +00:00
2ff1e97587
When dirty data is being written to the cache, setting/waiting on/clearing the fscache flag is always done in tandem with setting/waiting on/clearing the writeback flag. The netfslib buffered write routines wait on and set both flags and the write request cleanup clears both flags, so the fscache flag is almost superfluous. The reason it isn't superfluous is because the fscache flag is also used to indicate that data just read from the server is being written to the cache. The flag is used to prevent a race involving overlapping direct-I/O writes to the cache. Change this to indicate that a page is in need of being copied to the cache by placing a magic value in folio->private and marking the folios dirty. Then when the writeback code sees a folio marked in this way, it only writes it to the cache and not to the server. If a folio that has this magic value set is modified, the value is just replaced and the folio will then be uploaded too. With this, PG_fscache is no longer required by the netfslib core, 9p and afs. Ceph and nfs, however, still need to use the old PG_fscache-based tracking. To deal with this, a flag, NETFS_ICTX_USE_PGPRIV2, now has to be set on the flags in the netfs_inode struct for those filesystems. This re-enables the use of PG_fscache in that inode. 9p and afs use the netfslib write helpers so get switched over; cifs, for the moment, does page-by-page manual access to the cache, so doesn't use PG_fscache and is unaffected. 
Signed-off-by: David Howells <dhowells@redhat.com> Reviewed-by: Jeff Layton <jlayton@kernel.org> cc: Matthew Wilcox (Oracle) <willy@infradead.org> cc: Eric Van Hensbergen <ericvh@kernel.org> cc: Latchesar Ionkov <lucho@ionkov.net> cc: Dominique Martinet <asmadeus@codewreck.org> cc: Christian Schoenebeck <linux_oss@crudebyte.com> cc: Marc Dionne <marc.dionne@auristor.com> cc: Ilya Dryomov <idryomov@gmail.com> cc: Xiubo Li <xiubli@redhat.com> cc: Steve French <sfrench@samba.org> cc: Paulo Alcantara <pc@manguebit.com> cc: Ronnie Sahlberg <ronniesahlberg@gmail.com> cc: Shyam Prasad N <sprasad@microsoft.com> cc: Tom Talpey <tom@talpey.com> cc: Bharath SM <bharathsm@microsoft.com> cc: Trond Myklebust <trond.myklebust@hammerspace.com> cc: Anna Schumaker <anna@kernel.org> cc: netfs@lists.linux.dev cc: v9fs@lists.linux.dev cc: linux-afs@lists.infradead.org cc: ceph-devel@vger.kernel.org cc: linux-cifs@vger.kernel.org cc: linux-nfs@vger.kernel.org cc: linux-fsdevel@vger.kernel.org cc: linux-mm@kvack.org
131 lines
3.1 KiB
C
131 lines
3.1 KiB
C
// SPDX-License-Identifier: GPL-2.0-or-later
/* Miscellaneous bits for the netfs support library.
 *
 * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
|
#include <linux/module.h>
|
|
#include <linux/export.h>
|
|
#include <linux/proc_fs.h>
|
|
#include <linux/seq_file.h>
|
|
#include "internal.h"
|
|
#define CREATE_TRACE_POINTS
|
|
#include <trace/events/netfs.h>
|
|
|
|
MODULE_DESCRIPTION("Network fs support");
|
|
MODULE_AUTHOR("Red Hat, Inc.");
|
|
MODULE_LICENSE("GPL");
|
|
|
|
EXPORT_TRACEPOINT_SYMBOL(netfs_sreq);
|
|
|
|
unsigned netfs_debug;
|
|
module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
|
|
MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
|
|
|
|
#ifdef CONFIG_PROC_FS
|
|
/* List of in-flight I/O requests, linked via rreq->proc_link and shown
 * through /proc/fs/netfs/requests (see the seq_file ops below).
 */
LIST_HEAD(netfs_io_requests);
/* NOTE(review): presumably serialises addition/removal on the list above
 * by the request alloc/free paths elsewhere in netfs — confirm at callers.
 */
DEFINE_SPINLOCK(netfs_proc_lock);

/* Two-letter tags for each request origin, indexed by enum netfs_io_origin;
 * printed in the "OR" column of /proc/fs/netfs/requests.
 */
static const char *netfs_origins[nr__netfs_io_origin] = {
	[NETFS_READAHEAD]		= "RA",
	[NETFS_READPAGE]		= "RP",
	[NETFS_READ_FOR_WRITE]		= "RW",
	[NETFS_COPY_TO_CACHE]		= "CC",
	[NETFS_WRITEBACK]		= "WB",
	[NETFS_WRITETHROUGH]		= "WT",
	[NETFS_LAUNDER_WRITE]		= "LW",
	[NETFS_UNBUFFERED_WRITE]	= "UW",
	[NETFS_DIO_READ]		= "DR",
	[NETFS_DIO_WRITE]		= "DW",
};
|
|
|
|
/*
|
|
* Generate a list of I/O requests in /proc/fs/netfs/requests
|
|
*/
|
|
static int netfs_requests_seq_show(struct seq_file *m, void *v)
|
|
{
|
|
struct netfs_io_request *rreq;
|
|
|
|
if (v == &netfs_io_requests) {
|
|
seq_puts(m,
|
|
"REQUEST OR REF FL ERR OPS COVERAGE\n"
|
|
"======== == === == ==== === =========\n"
|
|
);
|
|
return 0;
|
|
}
|
|
|
|
rreq = list_entry(v, struct netfs_io_request, proc_link);
|
|
seq_printf(m,
|
|
"%08x %s %3d %2lx %4d %3d @%04llx %zx/%zx",
|
|
rreq->debug_id,
|
|
netfs_origins[rreq->origin],
|
|
refcount_read(&rreq->ref),
|
|
rreq->flags,
|
|
rreq->error,
|
|
atomic_read(&rreq->nr_outstanding),
|
|
rreq->start, rreq->submitted, rreq->len);
|
|
seq_putc(m, '\n');
|
|
return 0;
|
|
}
|
|
|
|
/*
 * Begin iteration of the request list for the seq_file, under the RCU
 * read lock (released in netfs_requests_seq_stop).  Returns the list
 * head first so that the header row is emitted.
 */
static void *netfs_requests_seq_start(struct seq_file *m, loff_t *_pos)
	__acquires(rcu)
{
	void *entry;

	rcu_read_lock();
	entry = seq_list_start_head(&netfs_io_requests, *_pos);
	return entry;
}
|
|
|
|
/* Step to the next request in the list (or NULL at the end). */
static void *netfs_requests_seq_next(struct seq_file *m, void *v, loff_t *_pos)
{
	struct list_head *head = &netfs_io_requests;

	return seq_list_next(v, head, _pos);
}
|
|
|
|
static void netfs_requests_seq_stop(struct seq_file *m, void *v)
|
|
__releases(rcu)
|
|
{
|
|
rcu_read_unlock();
|
|
}
|
|
|
|
static const struct seq_operations netfs_requests_seq_ops = {
|
|
.start = netfs_requests_seq_start,
|
|
.next = netfs_requests_seq_next,
|
|
.stop = netfs_requests_seq_stop,
|
|
.show = netfs_requests_seq_show,
|
|
};
|
|
#endif /* CONFIG_PROC_FS */
|
|
|
|
/*
 * Bring up the netfs library: create the /proc/fs/netfs directory and
 * its entries, then initialise the local caching (fscache) layer.
 *
 * Returns 0 on success or a negative errno on failure, with any
 * partially-created procfs state torn down.
 */
static int __init netfs_init(void)
{
	int ret = -ENOMEM;

	if (!proc_mkdir("fs/netfs", NULL))
		goto error;
	if (!proc_create_seq("fs/netfs/requests", S_IFREG | 0444, NULL,
			     &netfs_requests_seq_ops))
		goto error_proc;
#ifdef CONFIG_FSCACHE_STATS
	if (!proc_create_single("fs/netfs/stats", S_IFREG | 0444, NULL,
				netfs_stats_show))
		goto error_proc;
#endif

	ret = fscache_init();
	if (ret < 0)
		goto error_proc;
	return 0;

error_proc:
	/* By the time we get here, "fs/netfs" may already contain the
	 * "requests" (and possibly "stats") entries.  remove_proc_entry()
	 * WARNs and leaks the children of a non-empty directory, so take
	 * down the whole subtree instead.
	 */
	remove_proc_subtree("fs/netfs", NULL);
error:
	return ret;
}
fs_initcall(netfs_init);
|
|
|
|
/*
 * Tear down the netfs library: shut down the caching layer, then remove
 * the whole /proc/fs/netfs subtree.  The directory is non-empty at this
 * point (it holds at least "requests"), so remove_proc_subtree() is
 * used rather than remove_proc_entry(), which would WARN and leak the
 * child entries of a non-empty directory.
 */
static void __exit netfs_exit(void)
{
	fscache_exit();
	remove_proc_subtree("fs/netfs", NULL);
}
module_exit(netfs_exit);
|