mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
vfs-6.8-rc2.netfs
-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCZbFEvgAKCRCRxhvAZXjc
oiv6AP44QuZZP0qp+7YQrIn4jcpRcMowOjGsa9n9c5TYQna+ggEA+JLencaRkihi
NjsT0McUPKzfi58pKW+6a8AOudwNrgc=
=zpMk
-----END PGP SIGNATURE-----

Merge tag 'vfs-6.8-rc2.netfs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull netfs fixes from Christian Brauner:
 "This contains various fixes for the netfs work merged earlier this
  cycle:

  afs:
   - Fix locking imbalance in afs_proc_addr_prefs_show()
   - Remove afs_dynroot_d_revalidate(), which is redundant
   - Fix error handling during lookup
   - Hide silly-rename files from userspace. This fixes a race between
     silly-rename files being created/removed and userspace iterating
     over directory entries
   - Don't use unnecessary folio_*() functions

  cifs:
   - Don't use unnecessary folio_*() functions

  cachefiles:
   - erofs: Fix NULL dereference when cachefiles is not doing
     ondemand-mode
   - Update the mailing list

  netfs library:
   - Add Jeff Layton as reviewer
   - Update the mailing list
   - Fix an error check in netfs_perform_write()
   - fscache: Check for error before dereferencing
   - Don't use unnecessary folio_*() functions"

* tag 'vfs-6.8-rc2.netfs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  afs: Fix missing/incorrect unlocking of RCU read lock
  afs: Remove afs_dynroot_d_revalidate() as it is redundant
  afs: Fix error handling with lookup via FS.InlineBulkStatus
  afs: Hide silly-rename files from userspace
  cachefiles, erofs: Fix NULL deref when cachefiles is not doing ondemand-mode
  netfs: Fix a NULL vs IS_ERR() check in netfs_perform_write()
  netfs, fscache: Prevent Oops in fscache_put_cache()
  cifs: Don't use certain unnecessary folio_*() functions
  afs: Don't use certain unnecessary folio_*() functions
  netfs: Don't use certain unnecessary folio_*() functions
  netfs: Add Jeff Layton as reviewer
  netfs, cachefiles: Change mailing list
commit a658e0e986
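Note on the folio_*() cleanups throughout this diff: for file-backed pagecache folios, folio_index(), folio_mapping() and folio_file_mapping() reduce to plain folio->index and folio->mapping; the wrappers only differ for swap-cache folios, which never occur on these netfs/afs/cifs paths. A rough paraphrase of the wrapper being bypassed (the swap-cache branch is simplified and swap_index_of() is a stand-in name, not the real helper; see folio_index() in include/linux/pagemap.h):

static inline pgoff_t folio_index(struct folio *folio)
{
        /* Swap-cache folios keep their offset elsewhere; this call is
         * a placeholder for that lookup. */
        if (unlikely(folio_test_swapcache(folio)))
                return swap_index_of(folio);
        /* Ordinary pagecache folios store it right here. */
        return folio->index;
}

Dropping the wrapper therefore changes no behaviour on these paths; it just removes a dead test.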
MAINTAINERS

@@ -4547,7 +4547,7 @@ F: drivers/net/ieee802154/ca8210.c
 
 CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
 M: David Howells <dhowells@redhat.com>
-L: linux-cachefs@redhat.com (moderated for non-subscribers)
+L: netfs@lists.linux.dev
 S: Supported
 F: Documentation/filesystems/caching/cachefiles.rst
 F: fs/cachefiles/
@@ -8224,7 +8224,8 @@ F: include/linux/iomap.h
 
 FILESYSTEMS [NETFS LIBRARY]
 M: David Howells <dhowells@redhat.com>
-L: linux-cachefs@redhat.com (moderated for non-subscribers)
+R: Jeff Layton <jlayton@kernel.org>
+L: netfs@lists.linux.dev
 L: linux-fsdevel@vger.kernel.org
 S: Supported
 F: Documentation/filesystems/caching/
fs/afs/dir.c
@@ -124,7 +124,7 @@ static void afs_dir_read_cleanup(struct afs_read *req)
                if (xas_retry(&xas, folio))
                        continue;
                BUG_ON(xa_is_value(folio));
-               ASSERTCMP(folio_file_mapping(folio), ==, mapping);
+               ASSERTCMP(folio->mapping, ==, mapping);
 
                folio_put(folio);
        }
@@ -202,12 +202,12 @@ static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
                if (xas_retry(&xas, folio))
                        continue;
 
-               BUG_ON(folio_file_mapping(folio) != mapping);
+               BUG_ON(folio->mapping != mapping);
 
                size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio));
                for (offset = 0; offset < size; offset += sizeof(*block)) {
                        block = kmap_local_folio(folio, offset);
-                       pr_warn("[%02lx] %32phN\n", folio_index(folio) + offset, block);
+                       pr_warn("[%02lx] %32phN\n", folio->index + offset, block);
                        kunmap_local(block);
                }
        }
@@ -233,7 +233,7 @@ static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
                if (xas_retry(&xas, folio))
                        continue;
 
-               BUG_ON(folio_file_mapping(folio) != mapping);
+               BUG_ON(folio->mapping != mapping);
 
                if (!afs_dir_check_folio(dvnode, folio, req->actual_len)) {
                        afs_dir_dump(dvnode, req);
@@ -474,6 +474,14 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
                        continue;
                }
 
+               /* Don't expose silly rename entries to userspace. */
+               if (nlen > 6 &&
+                   dire->u.name[0] == '.' &&
+                   ctx->actor != afs_lookup_filldir &&
+                   ctx->actor != afs_lookup_one_filldir &&
+                   memcmp(dire->u.name, ".__afs", 6) == 0)
+                       continue;
+
                /* found the next entry */
                if (!dir_emit(ctx, dire->u.name, nlen,
                              ntohl(dire->u.vnode),
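For illustration only (this helper is hypothetical, not part of the patch): AFS names its silly-rename artifacts ".__afs" followed by hex digits (see afs_sillyrename()), so the filter above matches exactly those entries while the lookup filldir callbacks, which need to see them, are exempted. A stand-alone mimic of the name test:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-alone version of the check above: true for
 * names like ".__afs1234", which AFS creates when an open file is
 * unlinked. */
static bool is_afs_silly_rename(const char *name, size_t nlen)
{
        return nlen > 6 && name[0] == '.' &&
               memcmp(name, ".__afs", 6) == 0;
}

int main(void)
{
        printf("%d %d\n", is_afs_silly_rename(".__afs1234", 10),
               is_afs_silly_rename("README", 6));      /* prints: 1 0 */
        return 0;
}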
@@ -708,6 +716,8 @@ static void afs_do_lookup_success(struct afs_operation *op)
                        break;
                }
 
+               if (vp->scb.status.abort_code)
+                       trace_afs_bulkstat_error(op, &vp->fid, i, vp->scb.status.abort_code);
                if (!vp->scb.have_status && !vp->scb.have_error)
                        continue;
 
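The trace_afs_bulkstat_error() call introduced here is backed by the TRACE_EVENT(afs_bulkstat_error) definition added to include/trace/events/afs.h at the end of this diff.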
@@ -897,12 +907,16 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
                afs_begin_vnode_operation(op);
                afs_wait_for_operation(op);
        }
-       inode = ERR_PTR(afs_op_error(op));
 
 out_op:
        if (!afs_op_error(op)) {
-               inode = &op->file[1].vnode->netfs.inode;
-               op->file[1].vnode = NULL;
+               if (op->file[1].scb.status.abort_code) {
+                       afs_op_accumulate_error(op, -ECONNABORTED,
+                                               op->file[1].scb.status.abort_code);
+               } else {
+                       inode = &op->file[1].vnode->netfs.inode;
+                       op->file[1].vnode = NULL;
+               }
        }
 
        if (op->file[0].scb.have_status)
@@ -2022,7 +2036,7 @@ static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
        struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
 
-       _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio_index(folio));
+       _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio->index);
 
        folio_detach_private(folio);
fs/afs/dynroot.c

@@ -258,16 +258,7 @@ const struct inode_operations afs_dynroot_inode_operations = {
        .lookup         = afs_dynroot_lookup,
 };
 
-/*
- * Dirs in the dynamic root don't need revalidation.
- */
-static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags)
-{
-       return 1;
-}
-
 const struct dentry_operations afs_dynroot_dentry_operations = {
-       .d_revalidate   = afs_dynroot_d_revalidate,
        .d_delete       = always_delete_dentry,
        .d_release      = afs_d_release,
        .d_automount    = afs_d_automount,
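Removing the op outright is behaviour-preserving: the VFS treats a NULL ->d_revalidate as "always valid", which is exactly what unconditionally returning 1 did, so deleting it just saves a pointless indirect call on every lookup.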
fs/afs/proc.c

@@ -166,7 +166,7 @@ static int afs_proc_addr_prefs_show(struct seq_file *m, void *v)
 
        if (!preflist) {
                seq_puts(m, "NO PREFS\n");
-               return 0;
+               goto out;
        }
 
        seq_printf(m, "PROT SUBNET PRIOR (v=%u n=%u/%u/%u)\n",
@@ -191,7 +191,8 @@ static int afs_proc_addr_prefs_show(struct seq_file *m, void *v)
                }
        }
 
-       rcu_read_lock();
+out:
+       rcu_read_unlock();
        return 0;
 }
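Two bugs are fixed here: the "NO PREFS" path returned while still holding the RCU read lock, and the function ended with a second rcu_read_lock() where an unlock belonged. The shape of the fix, as a minimal runnable sketch (a userspace analogue with a mutex standing in for the RCU read lock; not kernel code): every exit path funnels through a single unlock site.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int show_prefs(int have_prefs)
{
        pthread_mutex_lock(&lock);

        if (!have_prefs) {
                puts("NO PREFS");
                goto out;       /* an early "return 0" here would leak the lock */
        }
        puts("PROT SUBNET PRIOR ...");
out:
        pthread_mutex_unlock(&lock);
        return 0;
}

int main(void)
{
        show_prefs(0);
        show_prefs(1);
        return 0;
}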
fs/cachefiles/ondemand.c

@@ -539,6 +539,9 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
        struct fscache_volume *volume = object->volume->vcookie;
        size_t volume_key_size, cookie_key_size, data_len;
 
+       if (!object->ondemand)
+               return 0;
+
        /*
         * CacheFiles will firstly check the cache file under the root cache
         * directory. If the coherency check failed, it will fallback to
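object->ondemand is only allocated when the cache runs in ondemand mode; using cachefiles for erofs without it left the pointer NULL, and initialisation then dereferenced it. The guard makes initialisation a no-op in that configuration. In miniature (illustrative types, not the real structures):

struct ondemand_info;                           /* opaque here */
struct object { struct ondemand_info *ondemand; };

/* Illustrative shape of the fix: absent ondemand state means
 * "nothing to initialise", not "crash". */
static int init_object(struct object *object)
{
        if (!object->ondemand)
                return 0;                       /* not in ondemand mode */
        /* ... build and send the ondemand OPEN request ... */
        return 0;
}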
fs/netfs/buffered_read.c

@@ -101,7 +101,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
                }
 
                if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
-                       if (folio_index(folio) == rreq->no_unlock_folio &&
+                       if (folio->index == rreq->no_unlock_folio &&
                            test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
                                _debug("no unlock");
                        else
@@ -246,13 +246,13 @@ EXPORT_SYMBOL(netfs_readahead);
  */
 int netfs_read_folio(struct file *file, struct folio *folio)
 {
-       struct address_space *mapping = folio_file_mapping(folio);
+       struct address_space *mapping = folio->mapping;
        struct netfs_io_request *rreq;
        struct netfs_inode *ctx = netfs_inode(mapping->host);
        struct folio *sink = NULL;
        int ret;
 
-       _enter("%lx", folio_index(folio));
+       _enter("%lx", folio->index);
 
        rreq = netfs_alloc_request(mapping, file,
                                   folio_file_pos(folio), folio_size(folio),
@@ -460,7 +460,7 @@ retry:
                ret = PTR_ERR(rreq);
                goto error;
        }
-       rreq->no_unlock_folio = folio_index(folio);
+       rreq->no_unlock_folio = folio->index;
        __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
 
        ret = netfs_begin_cache_read(rreq, ctx);
@@ -518,7 +518,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
                             size_t offset, size_t len)
 {
        struct netfs_io_request *rreq;
-       struct address_space *mapping = folio_file_mapping(folio);
+       struct address_space *mapping = folio->mapping;
        struct netfs_inode *ctx = netfs_inode(mapping->host);
        unsigned long long start = folio_pos(folio);
        size_t flen = folio_size(folio);
@@ -535,7 +535,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
                goto error;
        }
 
-       rreq->no_unlock_folio = folio_index(folio);
+       rreq->no_unlock_folio = folio->index;
        __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
        ret = netfs_begin_cache_read(rreq, ctx);
        if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
fs/netfs/buffered_write.c

@@ -221,10 +221,11 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
                if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
                        break;
 
-               ret = -ENOMEM;
                folio = netfs_grab_folio_for_write(mapping, pos, part);
-               if (!folio)
+               if (IS_ERR(folio)) {
+                       ret = PTR_ERR(folio);
                        break;
+               }
 
                flen = folio_size(folio);
                offset = pos & (flen - 1);
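netfs_grab_folio_for_write() reports failure via ERR_PTR(), not NULL (it is built on __filemap_get_folio(), which returns error pointers), so the old "!folio" test could never fire and the blanket "ret = -ENOMEM" masked the real error. A self-contained mimic of the convention (a userspace re-implementation of the include/linux/err.h idea, for illustration only):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Userspace mimic of the kernel's error-pointer convention: an errno
 * is encoded in the top 4095 values of the address space, so a failed
 * "allocation" is non-NULL and a NULL check never sees it. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline bool IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
        void *folio = ERR_PTR(-ENOMEM);         /* a failed "allocation" */

        printf("!folio        -> %d (misses the error)\n", !folio);
        printf("IS_ERR(folio) -> %d\n", IS_ERR(folio));
        if (IS_ERR(folio))
                printf("PTR_ERR(folio) -> %ld\n", PTR_ERR(folio));
        return 0;
}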
@@ -343,7 +344,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
                        break;
                default:
                        WARN(true, "Unexpected modify type %u ix=%lx\n",
-                            howto, folio_index(folio));
+                            howto, folio->index);
                        ret = -EIO;
                        goto error_folio_unlock;
                }
@@ -648,7 +649,7 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq)
        xas_for_each(&xas, folio, last) {
                WARN(!folio_test_writeback(folio),
                     "bad %zx @%llx page %lx %lx\n",
-                    wreq->len, wreq->start, folio_index(folio), last);
+                    wreq->len, wreq->start, folio->index, last);
 
                if ((finfo = netfs_folio_info(folio))) {
                        /* Streaming writes cannot be redirtied whilst under
@@ -795,7 +796,7 @@ static void netfs_extend_writeback(struct address_space *mapping,
                        continue;
                if (xa_is_value(folio))
                        break;
-               if (folio_index(folio) != index) {
+               if (folio->index != index) {
                        xas_reset(xas);
                        break;
                }
@@ -901,7 +902,7 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
        long count = wbc->nr_to_write;
        int ret;
 
-       _enter(",%lx,%llx-%llx,%u", folio_index(folio), start, end, caching);
+       _enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching);
 
        wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
                                   NETFS_WRITEBACK);
@@ -1047,7 +1048,7 @@ search_again:
 
        start = folio_pos(folio); /* May regress with THPs */
 
-       _debug("wback %lx", folio_index(folio));
+       _debug("wback %lx", folio->index);
 
        /* At this point we hold neither the i_pages lock nor the page lock:
        * the page may be truncated or invalidated (changing page->mapping to
fs/netfs/fscache_cache.c

@@ -179,13 +179,14 @@ EXPORT_SYMBOL(fscache_acquire_cache);
 void fscache_put_cache(struct fscache_cache *cache,
                       enum fscache_cache_trace where)
 {
-       unsigned int debug_id = cache->debug_id;
+       unsigned int debug_id;
        bool zero;
        int ref;
 
        if (IS_ERR_OR_NULL(cache))
                return;
 
+       debug_id = cache->debug_id;
        zero = __refcount_dec_and_test(&cache->ref, &ref);
        trace_fscache_cache(debug_id, ref - 1, where);
 
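The Oops came from C initialiser ordering: "unsigned int debug_id = cache->debug_id;" executes as the declaration runs, before the IS_ERR_OR_NULL() guard gets a chance, so passing an error pointer crashed immediately. In miniature (illustrative only, not the kernel structures):

#include <stdio.h>

struct cache { unsigned int debug_id; };

/* Buggy shape: the declaration's initialiser dereferences the
 * argument before the validity check runs. */
static unsigned int get_id_buggy(struct cache *c)
{
        unsigned int id = c->debug_id;  /* crashes first if c is bad */

        if (!c)
                return 0;               /* too late */
        return id;
}

/* Fixed shape: validate first, dereference after. */
static unsigned int get_id_fixed(struct cache *c)
{
        unsigned int id;

        if (!c)
                return 0;
        id = c->debug_id;
        return id;
}

int main(void)
{
        struct cache c = { .debug_id = 42 };

        printf("%u %u\n", get_id_fixed(&c), get_id_buggy(&c));
        printf("%u\n", get_id_fixed(NULL)); /* safe; the buggy shape would crash */
        return 0;
}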
fs/netfs/io.c

@@ -124,7 +124,7 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
                /* We might have multiple writes from the same huge
                 * folio, but we mustn't unlock a folio more than once.
                 */
-               if (have_unlocked && folio_index(folio) <= unlocked)
+               if (have_unlocked && folio->index <= unlocked)
                        continue;
                unlocked = folio_next_index(folio) - 1;
                trace_netfs_folio(folio, netfs_folio_trace_end_copy);
fs/netfs/misc.c

@@ -180,7 +180,7 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
        struct netfs_folio *finfo = NULL;
        size_t flen = folio_size(folio);
 
-       _enter("{%lx},%zx,%zx", folio_index(folio), offset, length);
+       _enter("{%lx},%zx,%zx", folio->index, offset, length);
 
        folio_wait_fscache(folio);
 
fs/smb/client/file.c

@@ -87,7 +87,7 @@ void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len)
                        continue;
                if (!folio_test_writeback(folio)) {
                        WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
-                                 len, start, folio_index(folio), end);
+                                 len, start, folio->index, end);
                        continue;
                }
 
@@ -120,7 +120,7 @@ void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len)
                        continue;
                if (!folio_test_writeback(folio)) {
                        WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
-                                 len, start, folio_index(folio), end);
+                                 len, start, folio->index, end);
                        continue;
                }
 
@@ -151,7 +151,7 @@ void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int len)
        xas_for_each(&xas, folio, end) {
                if (!folio_test_writeback(folio)) {
                        WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
-                                 len, start, folio_index(folio), end);
+                                 len, start, folio->index, end);
                        continue;
                }
 
@@ -2651,7 +2651,7 @@ static void cifs_extend_writeback(struct address_space *mapping,
                        continue;
                if (xa_is_value(folio))
                        break;
-               if (folio_index(folio) != index)
+               if (folio->index != index)
                        break;
                if (!folio_try_get_rcu(folio)) {
                        xas_reset(&xas);
@@ -2899,7 +2899,7 @@ redo_folio:
                        goto skip_write;
                }
 
-               if (folio_mapping(folio) != mapping ||
+               if (folio->mapping != mapping ||
                    !folio_test_dirty(folio)) {
                        start += folio_size(folio);
                        folio_unlock(folio);
include/trace/events/afs.h

@@ -1071,6 +1071,31 @@ TRACE_EVENT(afs_file_error,
                      __print_symbolic(__entry->where, afs_file_errors))
            );
 
+TRACE_EVENT(afs_bulkstat_error,
+           TP_PROTO(struct afs_operation *op, struct afs_fid *fid, unsigned int index, s32 abort),
+
+           TP_ARGS(op, fid, index, abort),
+
+           TP_STRUCT__entry(
+                   __field_struct(struct afs_fid,      fid)
+                   __field(unsigned int,               op)
+                   __field(unsigned int,               index)
+                   __field(s32,                        abort)
+                            ),
+
+           TP_fast_assign(
+                   __entry->op = op->debug_id;
+                   __entry->fid = *fid;
+                   __entry->index = index;
+                   __entry->abort = abort;
+                          ),
+
+           TP_printk("OP=%08x[%02x] %llx:%llx:%x a=%d",
+                     __entry->op, __entry->index,
+                     __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
+                     __entry->abort)
+           );
+
 TRACE_EVENT(afs_cm_no_server,
            TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
 
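Once merged, this tracepoint appears under tracefs (typically /sys/kernel/tracing) as events/afs/afs_bulkstat_error, and fires from afs_do_lookup_success() whenever an FS.InlineBulkStatus entry carries an abort code (see the fs/afs/dir.c hunk above).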