mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
btrfs: rename btrfs_folio_(set|start|end)_writer_lock()
Since there are no users of the reader locks, rename the writer locks into
more generic names by removing the "_writer" part. Also rename
btrfs_subpage::writers to btrfs_subpage::nr_locked to match the new naming.

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
commit 0f71202665
parent 336e69f302
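The rename is purely mechanical: btrfs_folio_set_writer_lock() becomes
btrfs_folio_set_lock(), btrfs_folio_end_writer_lock() becomes
btrfs_folio_end_lock(), btrfs_folio_end_writer_lock_bitmap() becomes
btrfs_folio_end_lock_bitmap(), and the counter btrfs_subpage::writers becomes
btrfs_subpage::nr_locked. To make the semantics behind the new names concrete,
here is a minimal userspace sketch of the scheme: a per-page bitmap tracks
which sectors are locked and an atomic counter tracks how many, so the page
itself can be unlocked when the last sector lock is dropped. This is an
illustration only, not the kernel code; the names, sizes, and single-threaded
bitmap handling are assumptions (the real helpers take fs_info/folio and
protect the bitmap with subpage::lock).

/*
 * Minimal userspace model of the subpage locking scheme this commit
 * renames.  NOT the kernel implementation; all names and sizes here
 * are illustrative.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE   16384u        /* e.g. 16K page */
#define MODEL_SECTORSIZE   4096u        /* e.g. 4K sector */
#define SECTORS_PER_PAGE  (MODEL_PAGE_SIZE / MODEL_SECTORSIZE)

struct model_subpage {
        unsigned long locked_bitmap;    /* one bit per locked sector */
        atomic_uint nr_locked;          /* how many sectors are locked */
};

/* Mirrors the renamed btrfs_folio_set_lock(): mark sectors locked. */
static void model_set_lock(struct model_subpage *sp, unsigned int start,
                           unsigned int len)
{
        unsigned int first = start / MODEL_SECTORSIZE;
        unsigned int nbits = len / MODEL_SECTORSIZE;

        for (unsigned int i = first; i < first + nbits; i++) {
                assert(!(sp->locked_bitmap & (1ul << i))); /* not yet locked */
                sp->locked_bitmap |= 1ul << i;
        }
        atomic_fetch_add(&sp->nr_locked, nbits);
        assert(atomic_load(&sp->nr_locked) <= SECTORS_PER_PAGE);
}

/* Mirrors btrfs_folio_end_lock(): clear sectors, report if the page
 * can now be unlocked (the counter dropped to zero). */
static bool model_end_lock(struct model_subpage *sp, unsigned int start,
                           unsigned int len)
{
        unsigned int first = start / MODEL_SECTORSIZE;
        unsigned int nbits = len / MODEL_SECTORSIZE;
        unsigned int cleared = 0;

        for (unsigned int i = first; i < first + nbits; i++) {
                if (sp->locked_bitmap & (1ul << i)) {
                        sp->locked_bitmap &= ~(1ul << i);
                        cleared++;
                }
        }
        /* Old value == cleared means the new value is zero. */
        return atomic_fetch_sub(&sp->nr_locked, cleared) == cleared;
}

int main(void)
{
        struct model_subpage sp = { 0 };

        model_set_lock(&sp, 0, MODEL_PAGE_SIZE);              /* all 4 sectors */
        printf("last=%d\n", model_end_lock(&sp, 0, 8192));    /* 0: 2 left */
        printf("last=%d\n", model_end_lock(&sp, 8192, 8192)); /* 1: unlockable */
        return 0;
}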
fs/btrfs/compression.c
@@ -545,7 +545,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                 * subpage::readers and to unlock the page.
                 */
                if (fs_info->sectorsize < PAGE_SIZE)
-                       btrfs_folio_set_writer_lock(fs_info, folio, cur, add_size);
+                       btrfs_folio_set_lock(fs_info, folio, cur, add_size);
                folio_put(folio);
                cur += add_size;
        }
fs/btrfs/extent_io.c
@@ -190,7 +190,7 @@ static void process_one_folio(struct btrfs_fs_info *fs_info,
                btrfs_folio_clamp_clear_writeback(fs_info, folio, start, len);

        if (folio != locked_folio && (page_ops & PAGE_UNLOCK))
-               btrfs_folio_end_writer_lock(fs_info, folio, start, len);
+               btrfs_folio_end_lock(fs_info, folio, start, len);
 }

 static void __process_folios_contig(struct address_space *mapping,
@@ -276,7 +276,7 @@ static noinline int lock_delalloc_folios(struct inode *inode,
                        range_start = max_t(u64, folio_pos(folio), start);
                        range_len = min_t(u64, folio_pos(folio) + folio_size(folio),
                                          end + 1) - range_start;
-                       btrfs_folio_set_writer_lock(fs_info, folio, range_start, range_len);
+                       btrfs_folio_set_lock(fs_info, folio, range_start, range_len);

                        processed_end = range_start + range_len - 1;
                }
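The two clamp lines in the hunk above intersect the delalloc range
[start, end] with the current folio: the range start is pulled up to the
folio start, and the length is cut off at the folio end. A standalone check
of the same max_t/min_t arithmetic, with made-up values for illustration:

#include <stdint.h>
#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
        /* Hypothetical values: a 16K folio at offset 64K, delalloc
         * range [60K, 100K).  Same arithmetic as the hunk above. */
        uint64_t folio_pos = 65536, folio_size = 16384;
        uint64_t start = 61440, end = 102400 - 1;

        uint64_t range_start = MAX(folio_pos, start);
        uint64_t range_len = MIN(folio_pos + folio_size, end + 1) - range_start;

        /* Expect the folio-sized clamp: start 65536, len 16384. */
        printf("range_start=%llu range_len=%llu\n",
               (unsigned long long)range_start, (unsigned long long)range_len);
        return 0;
}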
@@ -438,7 +438,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len
        if (!btrfs_is_subpage(fs_info, folio->mapping))
                folio_unlock(folio);
        else
-               btrfs_folio_end_writer_lock(fs_info, folio, start, len);
+               btrfs_folio_end_lock(fs_info, folio, start, len);
 }

 /*
@@ -495,7 +495,7 @@ static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
                return;

        ASSERT(folio_test_private(folio));
-       btrfs_folio_set_writer_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
+       btrfs_folio_set_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
 }

 /*
@@ -1184,7 +1184,7 @@ static noinline_for_stack int writepage_delalloc(struct btrfs_inode *inode,
        for_each_set_bit(bit, &bio_ctrl->submit_bitmap, fs_info->sectors_per_page) {
                u64 start = page_start + (bit << fs_info->sectorsize_bits);

-               btrfs_folio_set_writer_lock(fs_info, folio, start, fs_info->sectorsize);
+               btrfs_folio_set_lock(fs_info, folio, start, fs_info->sectorsize);
        }

        /* Lock all (subpage) delalloc ranges inside the folio first. */
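In the hunk above, each set bit in submit_bitmap identifies one sector, and
shifting the bit index by sectorsize_bits converts it back to a byte offset
from page_start. A standalone illustration of that shift with assumed values
(4K sectors, so sectorsize_bits is 12; the page_start value is invented):

#include <stdio.h>

int main(void)
{
        const unsigned int sectorsize_bits = 12;       /* log2(4096) */
        const unsigned long long page_start = 0x10000; /* hypothetical */

        /* Bit N in the per-page bitmap maps to page_start + (N << 12). */
        for (unsigned int bit = 0; bit < 4; bit++)
                printf("bit %u -> start 0x%llx\n", bit,
                       page_start + ((unsigned long long)bit << sectorsize_bits));
        return 0;
}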
@@ -1520,7 +1520,7 @@ static int extent_writepage(struct folio *folio, struct btrfs_bio_ctrl *bio_ctrl
         * Only unlock ranges that are submitted. As there can be some async
         * submitted ranges inside the folio.
         */
-       btrfs_folio_end_writer_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
+       btrfs_folio_end_lock_bitmap(fs_info, folio, bio_ctrl->submit_bitmap);
        ASSERT(ret <= 0);
        return ret;
 }
@@ -2298,7 +2298,7 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f
                                               cur, cur_len, !ret);
                        mapping_set_error(mapping, ret);
                }
-               btrfs_folio_end_writer_lock(fs_info, folio, cur, cur_len);
+               btrfs_folio_end_lock(fs_info, folio, cur, cur_len);
                if (ret < 0)
                        found_error = true;
next_page:
fs/btrfs/subpage.c
@@ -143,7 +143,7 @@ struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
        if (type == BTRFS_SUBPAGE_METADATA)
                atomic_set(&ret->eb_refs, 0);
        else
-               atomic_set(&ret->writers, 0);
+               atomic_set(&ret->nr_locked, 0);
        return ret;
 }

@@ -237,7 +237,7 @@ static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
                       orig_start + orig_len) - *start;
 }

-static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
-                                             struct folio *folio, u64 start, u32 len)
+static bool btrfs_subpage_end_and_test_lock(const struct btrfs_fs_info *fs_info,
+                                           struct folio *folio, u64 start, u32 len)
 {
        struct btrfs_subpage *subpage = folio_get_private(folio);
@@ -256,9 +256,9 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
         * extent_clear_unlock_delalloc() for compression path.
         *
         * This @locked_page is locked by plain lock_page(), thus its
-        * subpage::writers is 0. Handle them in a special way.
+        * subpage::locked is 0. Handle them in a special way.
         */
-       if (atomic_read(&subpage->writers) == 0) {
+       if (atomic_read(&subpage->nr_locked) == 0) {
                spin_unlock_irqrestore(&subpage->lock, flags);
                return true;
        }
@@ -267,8 +267,8 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
                clear_bit(bit, subpage->bitmaps);
                cleared++;
        }
-       ASSERT(atomic_read(&subpage->writers) >= cleared);
-       last = atomic_sub_and_test(cleared, &subpage->writers);
+       ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
+       last = atomic_sub_and_test(cleared, &subpage->nr_locked);
        spin_unlock_irqrestore(&subpage->lock, flags);
        return last;
 }
@@ -289,7 +289,7 @@ static bool btrfs_subpage_end_and_test_writer(const struct btrfs_fs_info *fs_info,
  * bitmap, reduce the writer lock number, and unlock the page if that's
  * the last locked range.
  */
-void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
-                                struct folio *folio, u64 start, u32 len)
+void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
+                         struct folio *folio, u64 start, u32 len)
 {
        struct btrfs_subpage *subpage = folio_get_private(folio);
@@ -303,23 +303,23 @@ void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,

        /*
         * For subpage case, there are two types of locked page. With or
-        * without writers number.
+        * without locked number.
         *
-        * Since we own the page lock, no one else could touch subpage::writers
+        * Since we own the page lock, no one else could touch subpage::locked
         * and we are safe to do several atomic operations without spinlock.
         */
-       if (atomic_read(&subpage->writers) == 0) {
-               /* No writers, locked by plain lock_page(). */
+       if (atomic_read(&subpage->nr_locked) == 0) {
+               /* No subpage lock, locked by plain lock_page(). */
                folio_unlock(folio);
                return;
        }

        btrfs_subpage_clamp_range(folio, &start, &len);
-       if (btrfs_subpage_end_and_test_writer(fs_info, folio, start, len))
+       if (btrfs_subpage_end_and_test_lock(fs_info, folio, start, len))
                folio_unlock(folio);
 }

-void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
-                                       struct folio *folio, unsigned long bitmap)
+void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
+                                struct folio *folio, unsigned long bitmap)
 {
        struct btrfs_subpage *subpage = folio_get_private(folio);
@@ -334,8 +334,8 @@ void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
                return;
        }

-       if (atomic_read(&subpage->writers) == 0) {
-               /* No writers, locked by plain lock_page(). */
+       if (atomic_read(&subpage->nr_locked) == 0) {
+               /* No subpage lock, locked by plain lock_page(). */
                folio_unlock(folio);
                return;
        }
@@ -345,8 +345,8 @@ void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
                if (test_and_clear_bit(bit + start_bit, subpage->bitmaps))
                        cleared++;
        }
-       ASSERT(atomic_read(&subpage->writers) >= cleared);
-       last = atomic_sub_and_test(cleared, &subpage->writers);
+       ASSERT(atomic_read(&subpage->nr_locked) >= cleared);
+       last = atomic_sub_and_test(cleared, &subpage->nr_locked);
        spin_unlock_irqrestore(&subpage->lock, flags);
        if (last)
                folio_unlock(folio);
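The pattern in the last two hunks (count what was actually cleared, then
subtract) matters because some bits selected by the caller's bitmap may
already be unlocked, so cleared can be smaller than the number of bits
requested. A userspace sketch of the same end-and-test pattern using C11
atomics; the function name and the bit loop are illustrative stand-ins for
the kernel's test_and_clear_bit()/atomic_sub_and_test():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Mirrors the shape of btrfs_folio_end_lock_bitmap(): clear the locked
 * bits selected by @bitmap, subtract how many were actually cleared,
 * and report whether that dropped the count to zero (time to unlock
 * the page).  Illustrative model, not the kernel code. */
static bool end_lock_bitmap(unsigned long *locked, atomic_uint *nr_locked,
                            unsigned long bitmap, unsigned int nbits)
{
        unsigned int cleared = 0;

        for (unsigned int bit = 0; bit < nbits; bit++) {
                if (!(bitmap & (1ul << bit)))
                        continue;
                if (*locked & (1ul << bit)) {   /* test_and_clear_bit() */
                        *locked &= ~(1ul << bit);
                        cleared++;
                }
        }
        /* atomic_sub_and_test(): true when the new value is zero. */
        return atomic_fetch_sub(nr_locked, cleared) == cleared;
}

int main(void)
{
        unsigned long locked = 0x3;     /* sectors 0 and 1 locked */
        atomic_uint nr_locked = 2;

        /* Ask to end bits 0..2; bit 2 was never locked, so only two
         * bits are cleared and the counter still reaches zero. */
        printf("last=%d\n", end_lock_bitmap(&locked, &nr_locked, 0x7, 4));
        return 0;
}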
@@ -671,7 +671,7 @@ void btrfs_folio_assert_not_dirty(const struct btrfs_fs_info *fs_info,
  * This populates the involved subpage ranges so that subpage helpers can
  * properly unlock them.
  */
-void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
-                                struct folio *folio, u64 start, u32 len)
+void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
+                         struct folio *folio, u64 start, u32 len)
 {
        struct btrfs_subpage *subpage;
@@ -691,7 +691,7 @@ void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
        /* Target range should not yet be locked. */
        ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
        bitmap_set(subpage->bitmaps, start_bit, nbits);
-       ret = atomic_add_return(nbits, &subpage->writers);
+       ret = atomic_add_return(nbits, &subpage->nr_locked);
        ASSERT(ret <= fs_info->sectors_per_page);
        spin_unlock_irqrestore(&subpage->lock, flags);
 }
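The acquire side shown above enforces three invariants: the target range is
not yet locked, its bits get set, and the counter never exceeds the number
of sectors in the page. A runnable userspace sketch of the same sequence;
names and the sector count are assumptions, and plain assert() stands in
for the kernel's ASSERT():

#include <assert.h>
#include <stdatomic.h>
#include <stdio.h>

#define SECTORS_PER_PAGE 4u

/* Sketch of the acquire side in the hunk above: the target range must
 * be free, its bits are set, and the counter may never exceed the
 * sectors in the page.  Illustrative, not kernel code. */
static void set_lock(unsigned long *locked, atomic_uint *nr_locked,
                     unsigned int start_bit, unsigned int nbits)
{
        for (unsigned int i = start_bit; i < start_bit + nbits; i++) {
                assert(!(*locked & (1ul << i)));  /* range must be free */
                *locked |= 1ul << i;              /* bitmap_set() */
        }
        /* atomic_add_return() equivalent: new value after the add. */
        unsigned int ret = atomic_fetch_add(nr_locked, nbits) + nbits;
        assert(ret <= SECTORS_PER_PAGE);
        printf("locked=%#lx nr_locked=%u\n", *locked, ret);
}

int main(void)
{
        unsigned long locked = 0;
        atomic_uint nr_locked = 0;

        set_lock(&locked, &nr_locked, 0, 2);  /* sectors 0-1 */
        set_lock(&locked, &nr_locked, 2, 2);  /* sectors 2-3 */
        return 0;
}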
fs/btrfs/subpage.h
@@ -54,8 +54,12 @@ struct btrfs_subpage {
                 */
                atomic_t eb_refs;

-               /* Structures only used by data */
-               atomic_t writers;
+               /*
+                * Structures only used by data,
+                *
+                * How many sectors inside the page is locked.
+                */
+               atomic_t nr_locked;
        };
        unsigned long bitmaps[];
 };
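The header change replaces the bare writers counter with a documented
nr_locked. Note that both counters sit inside a union (the "};" before
bitmaps[] closes it): a given page holds either metadata, which needs
eb_refs, or data, which needs the lock count, so the two can share storage.
A userspace mirror of that layout, purely illustrative and with invented
names:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative mirror of struct btrfs_subpage's union: metadata pages
 * count extent-buffer references, data pages count locked sectors, and
 * the two share storage because a page is only ever one of the two. */
struct model_subpage {
        union {
                atomic_int eb_refs;    /* metadata only */
                atomic_int nr_locked;  /* data only: locked sectors */
        };
        unsigned long bitmaps[1];      /* flexible array in the real struct */
};

int main(void)
{
        struct model_subpage sp = { .nr_locked = 0 };

        atomic_fetch_add(&sp.nr_locked, 1);
        printf("nr_locked=%d\n", atomic_load(&sp.nr_locked));
        return 0;
}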
@@ -87,11 +91,11 @@ void btrfs_free_subpage(struct btrfs_subpage *subpage);
 void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
 void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);

-void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
-                                struct folio *folio, u64 start, u32 len);
+void btrfs_folio_end_lock(const struct btrfs_fs_info *fs_info,
+                         struct folio *folio, u64 start, u32 len);
-void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,
-                                struct folio *folio, u64 start, u32 len);
+void btrfs_folio_set_lock(const struct btrfs_fs_info *fs_info,
+                         struct folio *folio, u64 start, u32 len);
-void btrfs_folio_end_writer_lock_bitmap(const struct btrfs_fs_info *fs_info,
-                                       struct folio *folio, unsigned long bitmap);
+void btrfs_folio_end_lock_bitmap(const struct btrfs_fs_info *fs_info,
+                                struct folio *folio, unsigned long bitmap);

 /*
  * Template for subpage related operations.