From 0d24852bd71ec85ca0016b6d6fc997e6a3381552 Mon Sep 17 00:00:00 2001
From: Omar Sandoval
Date: Mon, 30 Sep 2024 11:55:00 -0700
Subject: [PATCH 1/7] iov_iter: fix advancing slot in iter_folioq_get_pages()

iter_folioq_get_pages() decides to advance to the next folioq slot when
it has reached the end of the current folio. However, it is checking
offset, which is the beginning of the current part, instead of
iov_offset, which is adjusted to the end of the current part, so it
doesn't advance the slot when it's supposed to. As a result, on the
next iteration, we'll use the same folio with an out-of-bounds offset
and return an unrelated page.

This manifested as various crashes and other failures in 9pfs in drgn's
VM testing setup and BPF CI.

Fixes: db0aa2e9566f ("mm: Define struct folio_queue and ITER_FOLIOQ to handle a sequence of folios")
Link: https://lore.kernel.org/linux-fsdevel/20240923183432.1876750-1-chantr4@gmail.com/
Tested-by: Manu Bretelle
Signed-off-by: Omar Sandoval
Link: https://lore.kernel.org/r/cbaf141ba6c0e2e209717d02746584072844841a.1727722269.git.osandov@fb.com
Tested-by: Eduard Zingerman
Tested-by: Leon Romanovsky
Tested-by: Joey Gouly
Acked-by: David Howells
Signed-off-by: Christian Brauner
---
 lib/iov_iter.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 97003155bfac..1abb32c0da50 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1033,7 +1033,7 @@ static ssize_t iter_folioq_get_pages(struct iov_iter *iter,
 		if (maxpages == 0 || extracted >= maxsize)
 			break;
 
-		if (offset >= fsize) {
+		if (iov_offset >= fsize) {
 			iov_offset = 0;
 			slot++;
 			if (slot == folioq_nr_slots(folioq) && folioq->next) {

From f6023535b52f5a066fa52fcfd0dc51c7f7894ce6 Mon Sep 17 00:00:00 2001
From: Chang Yu
Date: Mon, 30 Sep 2024 23:31:52 -0700
Subject: [PATCH 2/7] netfs: Fix a KMSAN uninit-value error in netfs_clear_buffer

Use folioq_count() instead of folioq_nr_slots() to fix a KMSAN
uninit-value error in netfs_clear_buffer(): the latter returns the
segment's capacity, so the loop also visited slots that were never
filled in.

Signed-off-by: Chang Yu
Link: https://lore.kernel.org/r/ZvuXWC2bYpvQsWgS@gmail.com
Fixes: cd0277ed0c18 ("netfs: Use new folio_queue data type and iterator instead of xarray iter")
Acked-by: David Howells
Reported-by: syzbot+921873345a95f4dae7e9@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=921873345a95f4dae7e9
Signed-off-by: Christian Brauner
---
 fs/netfs/misc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fs/netfs/misc.c b/fs/netfs/misc.c
index 63280791de3b..78fe5796b2b2 100644
--- a/fs/netfs/misc.c
+++ b/fs/netfs/misc.c
@@ -102,7 +102,7 @@ void netfs_clear_buffer(struct netfs_io_request *rreq)
 
 	while ((p = rreq->buffer)) {
 		rreq->buffer = p->next;
-		for (int slot = 0; slot < folioq_nr_slots(p); slot++) {
+		for (int slot = 0; slot < folioq_count(p); slot++) {
 			struct folio *folio = folioq_folio(p, slot);
 			if (!folio)
 				continue;
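To make the off-by-one in patch 1 concrete, here is a minimal userspace
sketch of the advance check. The types, sizes, and variable names are
simplified stand-ins that mirror iter_folioq_get_pages(), not the kernel
code itself:

	#include <stdio.h>
	#include <stddef.h>

	#define NR_SLOTS 3

	int main(void)
	{
		size_t fsizes[NR_SLOTS] = { 4096, 4096, 4096 }; /* folio size per slot */
		size_t iov_offset = 0;   /* offset into the current folio */
		unsigned int slot = 0;
		size_t maxsize = 12288;  /* total bytes to extract */
		size_t extracted = 0;

		while (extracted < maxsize && slot < NR_SLOTS) {
			size_t offset = iov_offset;   /* start of this part */
			size_t fsize = fsizes[slot];
			size_t part = fsize - offset; /* take the rest of the folio */

			extracted += part;
			iov_offset += part;           /* now at the *end* of the part */

			/*
			 * The bug: testing "offset >= fsize" uses the offset at
			 * the start of the part, which is still inside the folio,
			 * so the slot never advances and the next pass reuses the
			 * same folio with an out-of-bounds offset.  Testing
			 * iov_offset (the end of the part) advances exactly when
			 * the folio has been consumed.
			 */
			if (iov_offset >= fsize) {
				iov_offset = 0;
				slot++;
			}
			printf("extracted=%zu slot=%u iov_offset=%zu\n",
			       extracted, slot, iov_offset);
		}
		return 0;
	}

With the buggy check, the second pass would compute part = 0 and spin on
slot 0 forever; in the kernel the equivalent state returned an unrelated
page instead.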
From f5c82730bedbc4a424cb94d2653bcb8be9dbd2ec Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Tue, 1 Oct 2024 17:01:40 +0200
Subject: [PATCH 3/7] folio_queue: fix documentation

s/folioq_count/folioq_full/

Reported-by: Stephen Rothwell
Link: https://lore.kernel.org/r/20241001134729.3f65ae78@canb.auug.org.au
Signed-off-by: Christian Brauner
---
 include/linux/folio_queue.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h
index af871405ae55..3abe614ef5f0 100644
--- a/include/linux/folio_queue.h
+++ b/include/linux/folio_queue.h
@@ -81,7 +81,7 @@ static inline unsigned int folioq_count(struct folio_queue *folioq)
 }
 
 /**
- * folioq_count: Query if a folio queue segment is full
+ * folioq_full: Query if a folio queue segment is full
  * @folioq: The segment to query
  *
  * Query if a folio queue segment is fully occupied. Note that this does not

From 59d39b9259e4d15b6e4c6da758ab318a76a10ca4 Mon Sep 17 00:00:00 2001
From: Christian Brauner
Date: Tue, 1 Oct 2024 17:04:32 +0200
Subject: [PATCH 4/7] Documentation: add missing folio_queue entry

Add the missing folio_queue entry to the core-api documentation index.

Reported-by: Stephen Rothwell
Link: https://lore.kernel.org/r/20241001133920.6e28637b@canb.auug.org.au
Signed-off-by: Christian Brauner
---
 Documentation/core-api/index.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Documentation/core-api/index.rst b/Documentation/core-api/index.rst
index a331d2c814f5..6a875743dd4b 100644
--- a/Documentation/core-api/index.rst
+++ b/Documentation/core-api/index.rst
@@ -37,6 +37,7 @@ Library functionality that is used throughout the kernel.
    kref
    cleanup
    assoc_array
+   folio_queue
    xarray
    maple_tree
    idr
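The distinction that patches 2 and 3 both turn on - segment capacity
(folioq_nr_slots()) versus occupied slots (folioq_count()) - can be
sketched in isolation. This is a simplified model assuming sequentially
filled slots; the struct below is an invented stand-in, not the kernel's
struct folio_queue:

	#include <stdio.h>

	#define SLOTS_PER_SEGMENT 8     /* fixed capacity of one segment */

	struct folio;                   /* opaque stand-in */

	struct segment_model {
		struct folio *folios[SLOTS_PER_SEGMENT];
		unsigned int nr;        /* slots actually filled in */
	};

	/* Capacity: how many slots the segment *could* hold. */
	static unsigned int model_nr_slots(const struct segment_model *s)
	{
		return sizeof(s->folios) / sizeof(s->folios[0]);
	}

	/* Occupancy: how many slots were actually filled in. */
	static unsigned int model_count(const struct segment_model *s)
	{
		return s->nr;
	}

	int main(void)
	{
		struct segment_model seg = { .nr = 3 }; /* 3 of 8 slots populated */

		/*
		 * A cleanup loop bounded by capacity touches folios[3..7],
		 * which were never written - the uninitialized reads that
		 * KMSAN flagged in netfs_clear_buffer().  Bounding the loop
		 * by occupancy stays within the initialized prefix.
		 */
		printf("capacity=%u occupied=%u\n",
		       model_nr_slots(&seg), model_count(&seg));
		return 0;
	}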
From 1ca4169c391c370e0f3a92938df2862900575096 Mon Sep 17 00:00:00 2001
From: David Howells
Date: Wed, 2 Oct 2024 15:45:50 +0100
Subject: [PATCH 5/7] netfs: Fix missing wakeup after issuing writes

After dividing up a proposed write into subrequests, netfslib sets
NETFS_RREQ_ALL_QUEUED to indicate to the collector that it can move on
to the final cleanup once it has emptied the subrequest queues.

Now, whilst the collector will normally end up running at least once
after this bit is set, simply because it takes a while to process all
the write subrequests before it runs out of them, there exists the
possibility that the issuing thread will be forced to sleep and the
collector thread will clean up all the subrequests before ALL_QUEUED
gets set.

In such a case, the collector thread will not get triggered again and
will never clear NETFS_RREQ_IN_PROGRESS, thus leaving the request
uncompleted and causing a potential future hang.

Fix this by scheduling the write collector if all the subrequest queues
are empty (and thus no writes are pending issuance).

Note that ideally we'd do this before queuing the subrequest, but in
the case of buffered writeback, at least, we can't find out that we've
run out of folios until after we've called writeback_iter() and it has
returned NULL - at which point we might not actually have any
subrequests still under construction.

Fixes: 288ace2f57c9 ("netfs: New writeback implementation")
Signed-off-by: David Howells
Link: https://lore.kernel.org/r/3317784.1727880350@warthog.procyon.org.uk
cc: Jeff Layton
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
Signed-off-by: Christian Brauner
---
 fs/netfs/write_issue.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)

diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
index 6293f547e4c3..bf6d507578e5 100644
--- a/fs/netfs/write_issue.c
+++ b/fs/netfs/write_issue.c
@@ -508,6 +508,30 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 	return 0;
 }
 
+/*
+ * End the issuing of writes, letting the collector know we're done.
+ */
+static void netfs_end_issue_write(struct netfs_io_request *wreq)
+{
+	bool needs_poke = true;
+
+	smp_wmb(); /* Write subreq lists before ALL_QUEUED. */
+	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+
+	for (int s = 0; s < NR_IO_STREAMS; s++) {
+		struct netfs_io_stream *stream = &wreq->io_streams[s];
+
+		if (!stream->active)
+			continue;
+		if (!list_empty(&stream->subrequests))
+			needs_poke = false;
+		netfs_issue_write(wreq, stream);
+	}
+
+	if (needs_poke)
+		netfs_wake_write_collector(wreq, false);
+}
+
 /*
  * Write some of the pending data back to the server
  */
@@ -559,10 +583,7 @@ int netfs_writepages(struct address_space *mapping,
 			break;
 	} while ((folio = writeback_iter(mapping, wbc, folio, &error)));
 
-	for (int s = 0; s < NR_IO_STREAMS; s++)
-		netfs_issue_write(wreq, &wreq->io_streams[s]);
-	smp_wmb(); /* Write lists before ALL_QUEUED. */
-	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+	netfs_end_issue_write(wreq);
 
 	mutex_unlock(&ictx->wb_lock);
 
@@ -650,10 +671,7 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
 	if (writethrough_cache)
 		netfs_write_folio(wreq, wbc, writethrough_cache);
 
-	netfs_issue_write(wreq, &wreq->io_streams[0]);
-	netfs_issue_write(wreq, &wreq->io_streams[1]);
-	smp_wmb(); /* Write lists before ALL_QUEUED. */
-	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+	netfs_end_issue_write(wreq);
 
 	mutex_unlock(&ictx->wb_lock);
 
@@ -699,13 +717,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
 			break;
 	}
 
-	netfs_issue_write(wreq, upload);
-
-	smp_wmb(); /* Write lists before ALL_QUEUED. */
-	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
-	if (list_empty(&upload->subrequests))
-		netfs_wake_write_collector(wreq, false);
-
+	netfs_end_issue_write(wreq);
 	_leave(" = %d", error);
 	return error;
 }
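The race fixed above comes down to one decision: after setting
ALL_QUEUED, the issuer must poke the collector itself if no queued
subrequest remains to trigger it. The standalone sketch below models
that decision only - streams are reduced to a flag and a counter, and
none of the names are netfs API:

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_STREAMS 2

	struct stream_model {
		bool active;
		int nr_subreqs;   /* stands in for the subrequest list */
	};

	/*
	 * After ALL_QUEUED is set, another collector pass is only guaranteed
	 * if some subrequest is still queued: its completion will trigger the
	 * collector.  If every active stream is already empty - e.g. the
	 * collector raced ahead and drained them while the issuer slept -
	 * nobody will run the collector again, so the issuer must wake it
	 * explicitly or the request never completes.
	 */
	static bool needs_explicit_wakeup(const struct stream_model *streams)
	{
		bool needs_poke = true;

		for (int s = 0; s < NR_STREAMS; s++) {
			if (!streams[s].active)
				continue;
			if (streams[s].nr_subreqs > 0)
				needs_poke = false; /* a completion will poke it */
		}
		return needs_poke;
	}

	int main(void)
	{
		struct stream_model racy[NR_STREAMS]   = { { true, 0 }, { false, 0 } };
		struct stream_model normal[NR_STREAMS] = { { true, 3 }, { false, 0 } };

		printf("drained streams -> wake collector: %d\n",
		       needs_explicit_wakeup(racy));   /* 1: must poke or hang */
		printf("pending writes  -> wake collector: %d\n",
		       needs_explicit_wakeup(normal)); /* 0: completion will poke */
		return 0;
	}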
From f7a4874d977bf4202ad575031222e78809a36292 Mon Sep 17 00:00:00 2001
From: "Darrick J. Wong"
Date: Wed, 2 Oct 2024 08:00:40 -0700
Subject: [PATCH 6/7] iomap: don't bother unsharing delalloc extents

If unshare encounters a delalloc reservation in the srcmap, that means
the file range isn't shared, because delalloc reservations cannot be
reflinked. Therefore, don't try to unshare them.

Signed-off-by: Darrick J. Wong
Link: https://lore.kernel.org/r/20241002150040.GB21853@frogsfrogsfrogs
Reviewed-by: Christoph Hellwig
Reviewed-by: Brian Foster
Signed-off-by: Christian Brauner
---
 fs/iomap/buffered-io.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index 11ea747228ae..c1c559e0cc07 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1321,7 +1321,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 		return length;
 
 	/*
-	 * Don't bother with holes or unwritten extents.
+	 * Don't bother with delalloc reservations, holes or unwritten extents.
 	 *
 	 * Note that we use srcmap directly instead of iomap_iter_srcmap as
 	 * unsharing requires providing a separate source map, and the presence
@@ -1330,6 +1330,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 	 * fork for XFS.
 	 */
 	if (iter->srcmap.type == IOMAP_HOLE ||
+	    iter->srcmap.type == IOMAP_DELALLOC ||
 	    iter->srcmap.type == IOMAP_UNWRITTEN)
 		return length;

From a311a08a4237241fb5b9d219d3e33346de6e83e0 Mon Sep 17 00:00:00 2001
From: "Darrick J. Wong"
Date: Wed, 2 Oct 2024 08:02:13 -0700
Subject: [PATCH 7/7] iomap: constrain the file range passed to iomap_file_unshare

File contents can only be shared (i.e. reflinked) below EOF, so it
makes no sense to try to unshare ranges beyond EOF. Constrain the file
range parameters here so that we don't have to do that in the callers.

Fixes: 5f4e5752a8a3 ("fs: add iomap_file_dirty")
Signed-off-by: Darrick J. Wong
Link: https://lore.kernel.org/r/20241002150213.GC21853@frogsfrogsfrogs
Reviewed-by: Christoph Hellwig
Reviewed-by: Brian Foster
Signed-off-by: Christian Brauner
---
 fs/dax.c               | 6 +++++-
 fs/iomap/buffered-io.c | 6 +++++-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/fs/dax.c b/fs/dax.c
index becb4a6920c6..c62acd2812f8 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -1305,11 +1305,15 @@ int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 	struct iomap_iter iter = {
 		.inode		= inode,
 		.pos		= pos,
-		.len		= len,
 		.flags		= IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
 	};
+	loff_t size = i_size_read(inode);
 	int ret;
 
+	if (pos < 0 || pos >= size)
+		return 0;
+
+	iter.len = min(len, size - pos);
 	while ((ret = iomap_iter(&iter, ops)) > 0)
 		iter.processed = dax_unshare_iter(&iter);
 	return ret;

diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c
index c1c559e0cc07..78ebd265f425 100644
--- a/fs/iomap/buffered-io.c
+++ b/fs/iomap/buffered-io.c
@@ -1375,11 +1375,15 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
 	struct iomap_iter iter = {
 		.inode		= inode,
 		.pos		= pos,
-		.len		= len,
 		.flags		= IOMAP_WRITE | IOMAP_UNSHARE,
 	};
+	loff_t size = i_size_read(inode);
 	int ret;
 
+	if (pos < 0 || pos >= size)
+		return 0;
+
+	iter.len = min(len, size - pos);
 	while ((ret = iomap_iter(&iter, ops)) > 0)
 		iter.processed = iomap_unshare_iter(&iter);
 	return ret;
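The range constraint added in the final patch can be exercised in
isolation. The sketch below is a userspace model: clamp_unshare_range()
is a hypothetical helper, i_size is passed as a plain parameter standing
in for i_size_read(), and only the clamping arithmetic from the patch is
reproduced:

	#include <stdio.h>

	typedef long long loff_t_model;

	/* Clamp an unshare request to [pos, EOF); returns the usable length. */
	static loff_t_model clamp_unshare_range(loff_t_model pos,
						loff_t_model len,
						loff_t_model i_size)
	{
		/* Nothing is shareable at or beyond EOF, or at a negative pos. */
		if (pos < 0 || pos >= i_size)
			return 0;

		/* Equivalent of iter.len = min(len, size - pos) in the patch. */
		return len < i_size - pos ? len : i_size - pos;
	}

	int main(void)
	{
		loff_t_model i_size = 8192;

		printf("%lld\n", clamp_unshare_range(0, 4096, i_size));    /* 4096 */
		printf("%lld\n", clamp_unshare_range(4096, 8192, i_size)); /* 4096, trimmed to EOF */
		printf("%lld\n", clamp_unshare_range(8192, 4096, i_size)); /* 0, starts at EOF */
		printf("%lld\n", clamp_unshare_range(-1, 4096, i_size));   /* 0, invalid pos */
		return 0;
	}

Doing this once at the top of dax_file_unshare() and iomap_file_unshare()
means every caller sees the same EOF policy instead of each open-coding it.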