mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-28 16:52:18 +00:00
b4f239c91f
Change the way netfslib collects read results to do all the collection for a particular read request using a single work item that walks along the subrequest queue as subrequests make progress or complete, unlocking folios progressively rather than doing the unlock in parallel as parallel requests come in. The code is remodelled to be more like the write-side code, though only using a single stream. This makes it more directly comparable and thus easier to duplicate fixes between the two sides. This has a number of advantages: (1) It's simpler. There doesn't need to be a complex donation mechanism to handle mismatches between the size and alignment of subrequests and folios. The collector unlocks folios as the subrequests covering each complete. (2) It should cause less scheduler overhead as there's a single work item in play unlocking pages in parallel when a read gets split up into a lot of subrequests instead of one per subrequest. Whilst the parallellism is nice in theory, in practice, the vast majority of loads are sequential reads of the whole file, so committing a bunch of threads to unlocking folios out of order doesn't help in those cases. (3) It should make it easier to implement content decryption. A folio cannot be decrypted until all the requests that contribute to it have completed - and, again, most loads are sequential and so, most of the time, we want to begin decryption sequentially (though it's great if the decryption can happen in parallel). There is a disadvantage in that we're losing the ability to decrypt and unlock things on an as-things-arrive basis which may affect some applications. Signed-off-by: David Howells <dhowells@redhat.com> Link: https://lore.kernel.org/r/20241108173236.1382366-29-dhowells@redhat.com cc: Jeff Layton <jlayton@kernel.org> cc: netfs@lists.linux.dev cc: linux-fsdevel@vger.kernel.org Signed-off-by: Christian Brauner <brauner@kernel.org>
166 lines
4.4 KiB
C
166 lines
4.4 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* This file contains vfs address (mmap) ops for 9P2000.
|
|
*
|
|
* Copyright (C) 2005 by Eric Van Hensbergen <ericvh@gmail.com>
|
|
* Copyright (C) 2002 by Ron Minnich <rminnich@lanl.gov>
|
|
*/
|
|
|
|
#include <linux/module.h>
|
|
#include <linux/errno.h>
|
|
#include <linux/fs.h>
|
|
#include <linux/file.h>
|
|
#include <linux/stat.h>
|
|
#include <linux/string.h>
|
|
#include <linux/pagemap.h>
|
|
#include <linux/sched.h>
|
|
#include <linux/swap.h>
|
|
#include <linux/uio.h>
|
|
#include <linux/netfs.h>
|
|
#include <net/9p/9p.h>
|
|
#include <net/9p/client.h>
|
|
#include <trace/events/netfs.h>
|
|
|
|
#include "v9fs.h"
|
|
#include "v9fs_vfs.h"
|
|
#include "cache.h"
|
|
#include "fid.h"
|
|
|
|
/*
|
|
* Writeback calls this when it finds a folio that needs uploading. This isn't
|
|
* called if writeback only has copy-to-cache to deal with.
|
|
*/
|
|
static void v9fs_begin_writeback(struct netfs_io_request *wreq)
|
|
{
|
|
struct p9_fid *fid;
|
|
|
|
fid = v9fs_fid_find_inode(wreq->inode, true, INVALID_UID, true);
|
|
if (!fid) {
|
|
WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
|
|
wreq->inode->i_ino);
|
|
return;
|
|
}
|
|
|
|
wreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
|
|
if (fid->iounit)
|
|
wreq->wsize = min(wreq->wsize, fid->iounit);
|
|
wreq->netfs_priv = fid;
|
|
wreq->io_streams[0].avail = true;
|
|
}
|
|
|
|
/*
|
|
* Issue a subrequest to write to the server.
|
|
*/
|
|
static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
|
|
{
|
|
struct p9_fid *fid = subreq->rreq->netfs_priv;
|
|
int err, len;
|
|
|
|
len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
|
|
netfs_write_subrequest_terminated(subreq, len ?: err, false);
|
|
}
|
|
|
|
/**
|
|
* v9fs_issue_read - Issue a read from 9P
|
|
* @subreq: The read to make
|
|
*/
|
|
static void v9fs_issue_read(struct netfs_io_subrequest *subreq)
|
|
{
|
|
struct netfs_io_request *rreq = subreq->rreq;
|
|
struct p9_fid *fid = rreq->netfs_priv;
|
|
unsigned long long pos = subreq->start + subreq->transferred;
|
|
int total, err;
|
|
|
|
total = p9_client_read(fid, pos, &subreq->io_iter, &err);
|
|
|
|
/* if we just extended the file size, any portion not in
|
|
* cache won't be on server and is zeroes */
|
|
if (subreq->rreq->origin != NETFS_DIO_READ)
|
|
__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
|
|
if (pos + total >= i_size_read(rreq->inode))
|
|
__set_bit(NETFS_SREQ_HIT_EOF, &subreq->flags);
|
|
if (!err && total) {
|
|
__set_bit(NETFS_SREQ_MADE_PROGRESS, &subreq->flags);
|
|
subreq->transferred += total;
|
|
}
|
|
|
|
subreq->error = err;
|
|
netfs_read_subreq_terminated(subreq);
|
|
}
|
|
|
|
/**
|
|
* v9fs_init_request - Initialise a request
|
|
* @rreq: The read request
|
|
* @file: The file being read from
|
|
*/
|
|
static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
|
|
{
|
|
struct p9_fid *fid;
|
|
bool writing = (rreq->origin == NETFS_READ_FOR_WRITE ||
|
|
rreq->origin == NETFS_WRITETHROUGH ||
|
|
rreq->origin == NETFS_UNBUFFERED_WRITE ||
|
|
rreq->origin == NETFS_DIO_WRITE);
|
|
|
|
if (rreq->origin == NETFS_WRITEBACK)
|
|
return 0; /* We don't get the write handle until we find we
|
|
* have actually dirty data and not just
|
|
* copy-to-cache data.
|
|
*/
|
|
|
|
if (file) {
|
|
fid = file->private_data;
|
|
if (!fid)
|
|
goto no_fid;
|
|
p9_fid_get(fid);
|
|
} else {
|
|
fid = v9fs_fid_find_inode(rreq->inode, writing, INVALID_UID, true);
|
|
if (!fid)
|
|
goto no_fid;
|
|
}
|
|
|
|
rreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
|
|
if (fid->iounit)
|
|
rreq->wsize = min(rreq->wsize, fid->iounit);
|
|
|
|
/* we might need to read from a fid that was opened write-only
|
|
* for read-modify-write of page cache, use the writeback fid
|
|
* for that */
|
|
WARN_ON(rreq->origin == NETFS_READ_FOR_WRITE && !(fid->mode & P9_ORDWR));
|
|
rreq->netfs_priv = fid;
|
|
return 0;
|
|
|
|
no_fid:
|
|
WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
|
|
rreq->inode->i_ino);
|
|
return -EINVAL;
|
|
}
|
|
|
|
/**
|
|
* v9fs_free_request - Cleanup request initialized by v9fs_init_rreq
|
|
* @rreq: The I/O request to clean up
|
|
*/
|
|
static void v9fs_free_request(struct netfs_io_request *rreq)
|
|
{
|
|
struct p9_fid *fid = rreq->netfs_priv;
|
|
|
|
p9_fid_put(fid);
|
|
}
|
|
|
|
/* Hooks netfslib calls back into to drive 9P I/O. */
const struct netfs_request_ops v9fs_req_ops = {
	.init_request = v9fs_init_request,
	.free_request = v9fs_free_request,
	.issue_read = v9fs_issue_read,
	.begin_writeback = v9fs_begin_writeback,
	.issue_write = v9fs_issue_write,
};
|
|
|
|
/* Address-space ops for 9P regular files: all paths delegate to netfslib. */
const struct address_space_operations v9fs_addr_operations = {
	.read_folio = netfs_read_folio,
	.readahead = netfs_readahead,
	.dirty_folio = netfs_dirty_folio,
	.release_folio = netfs_release_folio,
	.invalidate_folio = netfs_invalidate_folio,
	.direct_IO = noop_direct_IO,
	.writepages = netfs_writepages,
};
|