iomap: remove iomap_file_buffered_write_punch_delalloc
Currently iomap_file_buffered_write_punch_delalloc can be called from XFS either with the invalidate lock held or not. To fix this while keeping the locking in the file system and not in the iomap library code, we'll need to lift the locking up into the file system. To prepare for that, open code iomap_file_buffered_write_punch_delalloc in its only caller, and instead export iomap_write_delalloc_release.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Carlos Maiolino <cem@kernel.org>
commit caf0ea451d
parent c0adf8c3a9
Documentation/filesystems/iomap/operations.rst
@@ -208,7 +208,7 @@ The filesystem must arrange to `cancel
 such `reservations
 <https://lore.kernel.org/linux-xfs/20220817093627.GZ3600936@dread.disaster.area/>`_
 because writeback will not consume the reservation.
-The ``iomap_file_buffered_write_punch_delalloc`` can be called from a
+The ``iomap_write_delalloc_release`` can be called from a
 ``->iomap_end`` function to find all the clean areas of the folios
 caching a fresh (``IOMAP_F_NEW``) delalloc mapping.
 It takes the ``invalidate_lock``.
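To make the documented calling convention concrete, here is a minimal sketch of a ->iomap_end implementation built on the newly exported helper. Every example_-prefixed name is hypothetical (the real caller is the XFS hunk further down); iomap_write_delalloc_release, iomap_last_written_block, round_up and i_blocksize are the interfaces this commit actually uses.

static int example_buffered_write_iomap_end(struct inode *inode, loff_t pos,
		loff_t length, ssize_t written, unsigned flags,
		struct iomap *iomap)
{
	loff_t start_byte, end_byte;

	/* Only fresh delalloc reservations may be punched out. */
	if (iomap->type != IOMAP_DELALLOC || !(iomap->flags & IOMAP_F_NEW))
		return 0;

	/*
	 * Keep the blocks that were actually written; they still need
	 * their delalloc backing for writeback.
	 */
	start_byte = iomap_last_written_block(inode, pos, written);
	end_byte = round_up(pos + length, i_blocksize(inode));
	if (start_byte >= end_byte)
		return 0;

	/*
	 * Takes the invalidate_lock and calls back into the filesystem
	 * through the punch callback for each stale delalloc range.
	 */
	iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
			iomap, example_punch_delalloc);
	return 0;
}

(example_punch_delalloc is a hypothetical iomap_punch_t; a sketch of the contract it must honor follows the locking comment below.)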
fs/iomap/buffered-io.c
@@ -1145,10 +1145,36 @@ static void iomap_write_delalloc_scan(struct inode *inode,
 }
 
 /*
+ * When a short write occurs, the filesystem might need to use ->iomap_end
+ * to remove space reservations created in ->iomap_begin.
+ *
+ * For filesystems that use delayed allocation, there can be dirty pages over
+ * the delalloc extent outside the range of a short write but still within the
+ * delalloc extent allocated for this iomap if the write raced with page
+ * faults.
+ *
  * Punch out all the delalloc blocks in the range given except for those that
  * have dirty data still pending in the page cache - those are going to be
  * written and so must still retain the delalloc backing for writeback.
  *
+ * The punch() callback *must* only punch delalloc extents in the range passed
+ * to it. It must skip over all other types of extents in the range and leave
+ * them completely unchanged. It must do this punch atomically with respect to
+ * other extent modifications.
+ *
+ * The punch() callback may be called with a folio locked to prevent writeback
+ * extent allocation racing at the edge of the range we are currently punching.
+ * The locked folio may or may not cover the range being punched, so it is not
+ * safe for the punch() callback to lock folios itself.
+ *
+ * Lock order is:
+ *
+ * inode->i_rwsem (shared or exclusive)
+ *   inode->i_mapping->invalidate_lock (exclusive)
+ *     folio_lock()
+ *       ->punch
+ *         internal filesystem allocation lock
+ *
  * As we are scanning the page cache for data, we don't need to reimplement the
  * wheel - mapping_seek_hole_data() does exactly what we need to identify the
  * start and end of data ranges correctly even for sub-folio block sizes. This
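The comment above pins down the punch() contract, but no implementation appears in this hunk, so a hedged sketch may help. example_punch_delalloc and example_remove_delalloc_extents are hypothetical names; XFS's real callback is xfs_buffered_write_delalloc_punch, visible in the hunk below.

/* Hypothetical iomap_punch_t honoring the contract documented above. */
static void example_punch_delalloc(struct inode *inode, loff_t offset,
		loff_t length, struct iomap *iomap)
{
	/*
	 * No folio_lock() here: the caller may already hold a folio lock
	 * covering part of [offset, offset + length).
	 *
	 * Only delalloc extents in the range may be removed; real,
	 * unwritten and hole extents must be skipped unchanged, and the
	 * removal must be atomic against other extent modifications,
	 * i.e. done under the filesystem's internal allocation lock,
	 * the innermost level in the lock order above.
	 */
	example_remove_delalloc_extents(inode, offset, length);
}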
@@ -1177,7 +1203,7 @@ static void iomap_write_delalloc_scan(struct inode *inode,
  * require sprinkling this code with magic "+ 1" and "- 1" arithmetic and expose
  * the code to subtle off-by-one bugs....
  */
-static void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
+void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 		loff_t end_byte, unsigned flags, struct iomap *iomap,
 		iomap_punch_t punch)
 {
@@ -1243,62 +1269,7 @@ static void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
 out_unlock:
 	filemap_invalidate_unlock(inode->i_mapping);
 }
-
-/*
- * When a short write occurs, the filesystem may need to remove reserved space
- * that was allocated in ->iomap_begin from it's ->iomap_end method. For
- * filesystems that use delayed allocation, we need to punch out delalloc
- * extents from the range that are not dirty in the page cache. As the write can
- * race with page faults, there can be dirty pages over the delalloc extent
- * outside the range of a short write but still within the delalloc extent
- * allocated for this iomap.
- *
- * This function uses [start_byte, end_byte) intervals (i.e. open ended) to
- * simplify range iterations.
- *
- * The punch() callback *must* only punch delalloc extents in the range passed
- * to it. It must skip over all other types of extents in the range and leave
- * them completely unchanged. It must do this punch atomically with respect to
- * other extent modifications.
- *
- * The punch() callback may be called with a folio locked to prevent writeback
- * extent allocation racing at the edge of the range we are currently punching.
- * The locked folio may or may not cover the range being punched, so it is not
- * safe for the punch() callback to lock folios itself.
- *
- * Lock order is:
- *
- * inode->i_rwsem (shared or exclusive)
- *   inode->i_mapping->invalidate_lock (exclusive)
- *     folio_lock()
- *       ->punch
- *         internal filesystem allocation lock
- */
-void iomap_file_buffered_write_punch_delalloc(struct inode *inode,
-		loff_t pos, loff_t length, ssize_t written, unsigned flags,
-		struct iomap *iomap, iomap_punch_t punch)
-{
-	loff_t start_byte;
-	loff_t end_byte;
-
-	if (iomap->type != IOMAP_DELALLOC)
-		return;
-
-	/* If we didn't reserve the blocks, we're not allowed to punch them. */
-	if (!(iomap->flags & IOMAP_F_NEW))
-		return;
-
-	start_byte = iomap_last_written_block(inode, pos, written);
-	end_byte = round_up(pos + length, i_blocksize(inode));
-
-	/* Nothing to do if we've written the entire delalloc extent */
-	if (start_byte >= end_byte)
-		return;
-
-	iomap_write_delalloc_release(inode, start_byte, end_byte, flags, iomap,
-			punch);
-}
-EXPORT_SYMBOL_GPL(iomap_file_buffered_write_punch_delalloc);
+EXPORT_SYMBOL_GPL(iomap_write_delalloc_release);
 
 static loff_t iomap_unshare_iter(struct iomap_iter *iter)
 {
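The scanning machinery these comments refer to is built on mapping_seek_hole_data(). As a rough sketch of the loop shape, modeled on the kernel's SEEK_DATA/SEEK_HOLE pattern (the example_ name is hypothetical; the real logic lives in iomap_write_delalloc_scan/iomap_write_delalloc_release):

static void example_walk_dirty_ranges(struct inode *inode, loff_t start_byte,
		loff_t end_byte)
{
	struct address_space *mapping = inode->i_mapping;

	while (start_byte < end_byte) {
		loff_t data_end;

		/* Jump to the next cached data; -ENXIO means none left. */
		start_byte = mapping_seek_hole_data(mapping, start_byte,
				end_byte, SEEK_DATA);
		if (start_byte == -ENXIO || start_byte == end_byte)
			break;

		/* Find where this run of data stops. */
		data_end = mapping_seek_hole_data(mapping, start_byte,
				end_byte, SEEK_HOLE);

		/* ... decide per range whether to keep or punch ... */

		start_byte = data_end;
	}
}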
fs/xfs/xfs_iomap.c
@@ -1227,8 +1227,20 @@ xfs_buffered_write_iomap_end(
 	unsigned flags,
 	struct iomap *iomap)
 {
-	iomap_file_buffered_write_punch_delalloc(inode, offset, length, written,
-			flags, iomap, &xfs_buffered_write_delalloc_punch);
+	loff_t start_byte, end_byte;
+
+	/* If we didn't reserve the blocks, we're not allowed to punch them. */
+	if (iomap->type != IOMAP_DELALLOC || !(iomap->flags & IOMAP_F_NEW))
+		return 0;
+
+	/* Nothing to do if we've written the entire delalloc extent */
+	start_byte = iomap_last_written_block(inode, offset, written);
+	end_byte = round_up(offset + length, i_blocksize(inode));
+	if (start_byte >= end_byte)
+		return 0;
+
+	iomap_write_delalloc_release(inode, start_byte, end_byte, flags, iomap,
+			xfs_buffered_write_delalloc_punch);
 	return 0;
 }
 
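Note the design choice here: open-coding the IOMAP_DELALLOC/IOMAP_F_NEW and range checks in xfs_buffered_write_iomap_end duplicates a few lines from the removed helper, but it moves the decision to punch into the filesystem itself. Per the commit message, that is what allows a follow-up to lift the invalidate_lock out of the iomap library and into XFS.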
include/linux/iomap.h
@@ -290,9 +290,9 @@ vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf,
 
 typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length,
 		struct iomap *iomap);
-void iomap_file_buffered_write_punch_delalloc(struct inode *inode, loff_t pos,
-		loff_t length, ssize_t written, unsigned flag,
-		struct iomap *iomap, iomap_punch_t punch);
+void iomap_write_delalloc_release(struct inode *inode, loff_t start_byte,
+		loff_t end_byte, unsigned flags, struct iomap *iomap,
+		iomap_punch_t punch);
 
 int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		u64 start, u64 len, const struct iomap_ops *ops);