vfs-6.12-rc2.fixes.2

-----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQRAhzRXHqcMeLMyaSiRxhvAZXjcogUCZv5Y3gAKCRCRxhvAZXjc
 ojFPAP45kz5JgVKFn8iZmwfjPa7qbCa11gEzmx0SbUt3zZ3mJAD/fL9k9KaNU+qA
 LIcZW5BJn/p5fumUAw8/fKoz4ajCWQk=
 =LIz1
 -----END PGP SIGNATURE-----

Merge tag 'vfs-6.12-rc2.fixes.2' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull vfs fixes from Christian Brauner:
 "vfs:

   - Ensure that iter_folioq_get_pages() advances to the next slot,
     otherwise it will end up using the same folio with an out-of-bounds
     offset.

  iomap:

   - Don't unshare delalloc extents which can't be reflinked, and thus
     can't be shared.

   - Constrain the file range passed to iomap_file_unshare() directly in
     iomap instead of requiring the callers to do it.

  netfs:

   - Use folioq_count() instead of folioq_nr_slots() to prevent an
     uninitialized value warning in netfs_clear_buffer().

   - Fix missing wakeup after issuing writes by scheduling the write
     collector only if all the subrequest queues are empty and thus no
     writes are pending.

   - Fix two minor documentation bugs"

* tag 'vfs-6.12-rc2.fixes.2' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs:
  iomap: constrain the file range passed to iomap_file_unshare
  iomap: don't bother unsharing delalloc extents
  netfs: Fix missing wakeup after issuing writes
  Documentation: add missing folio_queue entry
  folio_queue: fix documentation
  netfs: Fix a KMSAN uninit-value error in netfs_clear_buffer
  iov_iter: fix advancing slot in iter_folioq_get_pages()
Linus Torvalds 2024-10-03 09:22:50 -07:00
commit 20c2474fa5
7 changed files with 43 additions and 21 deletions

@@ -37,6 +37,7 @@ Library functionality that is used throughout the kernel.
 kref
 cleanup
 assoc_array
+folio_queue
 xarray
 maple_tree
 idr

@@ -1305,11 +1305,15 @@ int dax_file_unshare(struct inode *inode, loff_t pos, loff_t len,
         struct iomap_iter iter = {
                 .inode = inode,
                 .pos = pos,
-                .len = len,
                 .flags = IOMAP_WRITE | IOMAP_UNSHARE | IOMAP_DAX,
         };
+        loff_t size = i_size_read(inode);
         int ret;
 
+        if (pos < 0 || pos >= size)
+                return 0;
+
+        iter.len = min(len, size - pos);
         while ((ret = iomap_iter(&iter, ops)) > 0)
                 iter.processed = dax_unshare_iter(&iter);
         return ret;

@@ -1321,7 +1321,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
                 return length;
 
         /*
-         * Don't bother with holes or unwritten extents.
+         * Don't bother with delalloc reservations, holes or unwritten extents.
          *
          * Note that we use srcmap directly instead of iomap_iter_srcmap as
         * unsharing requires providing a separate source map, and the presence
@@ -1330,6 +1330,7 @@ static loff_t iomap_unshare_iter(struct iomap_iter *iter)
          * fork for XFS.
          */
         if (iter->srcmap.type == IOMAP_HOLE ||
+            iter->srcmap.type == IOMAP_DELALLOC ||
             iter->srcmap.type == IOMAP_UNWRITTEN)
                 return length;
 
@@ -1374,11 +1375,15 @@ iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
         struct iomap_iter iter = {
                 .inode = inode,
                 .pos = pos,
-                .len = len,
                 .flags = IOMAP_WRITE | IOMAP_UNSHARE,
         };
+        loff_t size = i_size_read(inode);
         int ret;
 
+        if (pos < 0 || pos >= size)
+                return 0;
+
+        iter.len = min(len, size - pos);
         while ((ret = iomap_iter(&iter, ops)) > 0)
                 iter.processed = iomap_unshare_iter(&iter);
         return ret;
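
With the clamp now applied inside iomap_file_unshare() itself, callers no longer have to trim the range against i_size before calling in; a request that starts at or beyond EOF simply becomes a no-op. A minimal caller sketch (hypothetical helper, not part of this series), assuming the standard iomap API:

#include <linux/fs.h>
#include <linux/iomap.h>

/* Hypothetical example: pass the user-supplied range straight through;
 * iomap_file_unshare() now bounds it against i_size internally. */
static int example_unshare_range(struct inode *inode, loff_t pos, loff_t len,
                                 const struct iomap_ops *ops)
{
        return iomap_file_unshare(inode, pos, len, ops);
}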

@@ -102,7 +102,7 @@ void netfs_clear_buffer(struct netfs_io_request *rreq)
         while ((p = rreq->buffer)) {
                 rreq->buffer = p->next;
 
-                for (int slot = 0; slot < folioq_nr_slots(p); slot++) {
+                for (int slot = 0; slot < folioq_count(p); slot++) {
                         struct folio *folio = folioq_folio(p, slot);
                         if (!folio)
                                 continue;
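
For context: folioq_nr_slots() reports a segment's capacity, while folioq_count() reports how many slots have actually been filled, so bounding the loop by the capacity walks slots that were never initialised, which is what KMSAN complained about. A small sketch of walking only the occupied slots (the helper below is illustrative, not the netfs code itself):

#include <linux/folio_queue.h>
#include <linux/mm.h>

/* Illustrative only: drop a reference on every folio actually stored in
 * one folio_queue segment.  The walk is bounded by folioq_count(), the
 * number of occupied slots, not by folioq_nr_slots(), the capacity. */
static void example_put_segment_folios(struct folio_queue *fq)
{
        for (unsigned int slot = 0; slot < folioq_count(fq); slot++) {
                struct folio *folio = folioq_folio(fq, slot);

                if (folio)
                        folio_put(folio);
        }
}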

@@ -508,6 +508,30 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
         return 0;
 }
 
+/*
+ * End the issuing of writes, letting the collector know we're done.
+ */
+static void netfs_end_issue_write(struct netfs_io_request *wreq)
+{
+        bool needs_poke = true;
+
+        smp_wmb(); /* Write subreq lists before ALL_QUEUED. */
+        set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+
+        for (int s = 0; s < NR_IO_STREAMS; s++) {
+                struct netfs_io_stream *stream = &wreq->io_streams[s];
+
+                if (!stream->active)
+                        continue;
+                if (!list_empty(&stream->subrequests))
+                        needs_poke = false;
+                netfs_issue_write(wreq, stream);
+        }
+
+        if (needs_poke)
+                netfs_wake_write_collector(wreq, false);
+}
+
 /*
  * Write some of the pending data back to the server
  */
@@ -559,10 +583,7 @@ int netfs_writepages(struct address_space *mapping,
                         break;
         } while ((folio = writeback_iter(mapping, wbc, folio, &error)));
 
-        for (int s = 0; s < NR_IO_STREAMS; s++)
-                netfs_issue_write(wreq, &wreq->io_streams[s]);
-        smp_wmb(); /* Write lists before ALL_QUEUED. */
-        set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+        netfs_end_issue_write(wreq);
 
         mutex_unlock(&ictx->wb_lock);
 
@@ -650,10 +671,7 @@ int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_contr
         if (writethrough_cache)
                 netfs_write_folio(wreq, wbc, writethrough_cache);
 
-        netfs_issue_write(wreq, &wreq->io_streams[0]);
-        netfs_issue_write(wreq, &wreq->io_streams[1]);
-        smp_wmb(); /* Write lists before ALL_QUEUED. */
-        set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
+        netfs_end_issue_write(wreq);
 
         mutex_unlock(&ictx->wb_lock);
 
@@ -699,13 +717,7 @@ int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t
                         break;
         }
 
-        netfs_issue_write(wreq, upload);
-
-        smp_wmb(); /* Write lists before ALL_QUEUED. */
-        set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);
-        if (list_empty(&upload->subrequests))
-                netfs_wake_write_collector(wreq, false);
-
+        netfs_end_issue_write(wreq);
         _leave(" = %d", error);
         return error;
 }

@@ -81,7 +81,7 @@ static inline unsigned int folioq_count(struct folio_queue *folioq)
 }
 
 /**
- * folioq_count: Query if a folio queue segment is full
+ * folioq_full: Query if a folio queue segment is full
  * @folioq: The segment to query
  *
  * Query if a folio queue segment is fully occupied. Note that this does not
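
The kerneldoc block above actually documents folioq_full(), which reports whether a segment has any free slots left. A tiny usage sketch (illustrative helper name, assuming the folio_queue API declared in this header):

#include <linux/folio_queue.h>

/* Illustrative only: a producer would append a fresh segment once the
 * current one reports itself full. */
static bool example_needs_new_segment(struct folio_queue *fq)
{
        return folioq_full(fq);
}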

@@ -1033,7 +1033,7 @@ static ssize_t iter_folioq_get_pages(struct iov_iter *iter,
                 if (maxpages == 0 || extracted >= maxsize)
                         break;
 
-                if (offset >= fsize) {
+                if (iov_offset >= fsize) {
                         iov_offset = 0;
                         slot++;
                         if (slot == folioq_nr_slots(folioq) && folioq->next) {
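
The slot-advance check previously compared the wrong offset variable, so the iterator could stay on the same folio after the running offset had passed the end of that folio. A standalone userspace sketch of the advance-and-reset pattern (all names and types here are illustrative, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define SLOTS_PER_SEG 3

struct seg {
        size_t folio_size[SLOTS_PER_SEG];       /* size of the folio in each slot */
        struct seg *next;                       /* next segment in the queue */
};

/* Walk 'count' bytes through the queue, printing which slot serves each chunk. */
static void walk(const struct seg *seg, size_t iov_offset, size_t count)
{
        unsigned int slot = 0;

        while (count && seg) {
                size_t fsize = seg->folio_size[slot];
                size_t part = fsize - iov_offset;

                if (part > count)
                        part = count;
                printf("slot %u: offset %zu, %zu bytes\n", slot, iov_offset, part);
                iov_offset += part;
                count -= part;

                /* Compare the running offset and reset it when advancing,
                 * otherwise the same slot is reused with an offset that is
                 * already past the end of its folio. */
                if (iov_offset >= fsize) {
                        iov_offset = 0;
                        slot++;
                        if (slot == SLOTS_PER_SEG && seg->next) {
                                seg = seg->next;
                                slot = 0;
                        }
                        if (slot == SLOTS_PER_SEG)
                                break;  /* ran off the end of the queue */
                }
        }
}

int main(void)
{
        struct seg b = { .folio_size = { 4096, 4096, 4096 } };
        struct seg a = { .folio_size = { 4096, 4096, 4096 }, .next = &b };

        walk(&a, 512, 20000);
        return 0;
}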