btrfs: unify to use writer locks for subpage locking

Since commit d7172f52e993 ("btrfs: use per-buffer locking for
extent_buffer reading"), metadata read no longer relies on the subpage
reader locking.

This means we do not need to maintain a different metadata/data split
for locking, so we can convert the existing reader lock users by:

- add_ra_bio_pages()
  Convert to btrfs_folio_set_writer_lock()

- end_folio_read()
  Convert to btrfs_folio_end_writer_lock()

- begin_folio_read()
  Convert to btrfs_folio_set_writer_lock()

- folio_range_has_eb()
  Remove the subpage->readers checks, since the reader count is now always 0.

- Remove btrfs_subpage_start_reader() and btrfs_subpage_end_reader()

Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
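
[Annotation] The conversions above all land on the same bookkeeping: a per-folio
bitmap with one bit per locked sector, plus an atomic counter of locked sectors,
where clearing the last sector is what finally unlocks the folio (visible in the
btrfs_subpage_end_reader() body removed below). The following is a minimal
user-space model of that accounting, for illustration only: every name in it is
invented for the sketch, locking/irq handling is omitted, and the real helpers
(btrfs_folio_set_writer_lock() / btrfs_folio_end_writer_lock()) operate on
struct folio under spin_lock_irqsave().

/*
 * Minimal user-space model of subpage range-lock accounting: one bit per
 * locked sector plus a counter of locked sectors. Illustration only.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_SECTORSIZE  4096u
#define MODEL_PAGE_SIZE   65536u	/* e.g. 64K folio, 4K sectors -> 16 bits */
#define MODEL_NR_SECTORS  (MODEL_PAGE_SIZE / MODEL_SECTORSIZE)

struct subpage_model {
	uint32_t locked_bitmap;	/* which sectors of the folio are locked */
	atomic_int writers;	/* total number of locked sectors */
	bool folio_locked;	/* stands in for the folio lock bit */
};

/* Lock @len bytes at @start: set the sector bits, bump the counter. */
static void model_set_writer_lock(struct subpage_model *sp, uint32_t start, uint32_t len)
{
	const unsigned int first = start / MODEL_SECTORSIZE;
	const unsigned int nbits = len / MODEL_SECTORSIZE;

	assert(first + nbits <= MODEL_NR_SECTORS);
	for (unsigned int i = first; i < first + nbits; i++) {
		assert(!(sp->locked_bitmap & (1u << i)));	/* no double lock */
		sp->locked_bitmap |= 1u << i;
	}
	atomic_fetch_add(&sp->writers, (int)nbits);
}

/*
 * Unlock the range; returns true when the last locked sector was released,
 * which is the caller's cue to unlock the whole folio. The decrement must
 * always execute, mirroring the @last variable in the removed
 * btrfs_subpage_end_reader() further down.
 */
static bool model_end_writer_lock(struct subpage_model *sp, uint32_t start, uint32_t len)
{
	const unsigned int first = start / MODEL_SECTORSIZE;
	const unsigned int nbits = len / MODEL_SECTORSIZE;

	for (unsigned int i = first; i < first + nbits; i++) {
		assert(sp->locked_bitmap & (1u << i));	/* range must be locked */
		sp->locked_bitmap &= ~(1u << i);
	}
	return atomic_fetch_sub(&sp->writers, (int)nbits) == (int)nbits;
}

int main(void)
{
	struct subpage_model sp = { .folio_locked = true };

	/* Two independent ranges of the same folio get locked... */
	model_set_writer_lock(&sp, 0, 2 * MODEL_SECTORSIZE);
	model_set_writer_lock(&sp, 8 * MODEL_SECTORSIZE, MODEL_SECTORSIZE);

	/* ...and only releasing the last one unlocks the folio. */
	if (model_end_writer_lock(&sp, 0, 2 * MODEL_SECTORSIZE))
		sp.folio_locked = false;	/* not taken: one sector left */
	if (model_end_writer_lock(&sp, 8 * MODEL_SECTORSIZE, MODEL_SECTORSIZE))
		sp.folio_locked = false;	/* taken: last sector released */

	printf("folio locked: %d\n", sp.folio_locked);	/* prints 0 */
	return 0;
}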

--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -545,8 +545,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		 * subpage::readers and to unlock the page.
 		 */
 		if (fs_info->sectorsize < PAGE_SIZE)
-			btrfs_subpage_start_reader(fs_info, folio, cur,
-						   add_size);
+			btrfs_folio_set_writer_lock(fs_info, folio, cur, add_size);
 		folio_put(folio);
 		cur += add_size;
 	}

--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -438,7 +438,7 @@ static void end_folio_read(struct folio *folio, bool uptodate, u64 start, u32 len)
 	if (!btrfs_is_subpage(fs_info, folio->mapping))
 		folio_unlock(folio);
 	else
-		btrfs_subpage_end_reader(fs_info, folio, start, len);
+		btrfs_folio_end_writer_lock(fs_info, folio, start, len);
 }
 
 /*
@@ -495,7 +495,7 @@ static void begin_folio_read(struct btrfs_fs_info *fs_info, struct folio *folio)
 		return;
 
 	ASSERT(folio_test_private(folio));
-	btrfs_subpage_start_reader(fs_info, folio, folio_pos(folio), PAGE_SIZE);
+	btrfs_folio_set_writer_lock(fs_info, folio, folio_pos(folio), PAGE_SIZE);
 }
 
 /*
@@ -2517,12 +2517,6 @@ static bool folio_range_has_eb(struct folio *folio)
 		subpage = folio_get_private(folio);
 		if (atomic_read(&subpage->eb_refs))
 			return true;
-		/*
-		 * Even there is no eb refs here, we may still have
-		 * end_folio_read() call relying on page::private.
-		 */
-		if (atomic_read(&subpage->readers))
-			return true;
 	}
 	return false;
 }
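
[Annotation] For reference, the function this hunk leaves behind checks only
eb_refs. A reconstruction of the post-patch body from the context lines above;
the opening lines of the function sit outside the hunk and are assumed
unchanged:

/* Reconstructed from the hunk; lines outside it are assumed unchanged. */
static bool folio_range_has_eb(struct folio *folio)
{
	struct btrfs_subpage *subpage;

	if (folio_test_private(folio)) {
		subpage = folio_get_private(folio);
		if (atomic_read(&subpage->eb_refs))
			return true;
	}
	return false;
}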

--- a/fs/btrfs/subpage.c
+++ b/fs/btrfs/subpage.c
@@ -140,12 +140,10 @@ struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
 		return ERR_PTR(-ENOMEM);
 
 	spin_lock_init(&ret->lock);
-	if (type == BTRFS_SUBPAGE_METADATA) {
+	if (type == BTRFS_SUBPAGE_METADATA)
 		atomic_set(&ret->eb_refs, 0);
-	} else {
-		atomic_set(&ret->readers, 0);
+	else
 		atomic_set(&ret->writers, 0);
-	}
 	return ret;
 }
@@ -221,62 +219,6 @@ static void btrfs_subpage_assert(const struct btrfs_fs_info *fs_info,
 	__start_bit;							\
 })
 
-void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
-				struct folio *folio, u64 start, u32 len)
-{
-	struct btrfs_subpage *subpage = folio_get_private(folio);
-	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
-	const int nbits = len >> fs_info->sectorsize_bits;
-	unsigned long flags;
-
-	btrfs_subpage_assert(fs_info, folio, start, len);
-
-	spin_lock_irqsave(&subpage->lock, flags);
-	/*
-	 * Even though it's just for reading the page, no one should have
-	 * locked the subpage range.
-	 */
-	ASSERT(bitmap_test_range_all_zero(subpage->bitmaps, start_bit, nbits));
-	bitmap_set(subpage->bitmaps, start_bit, nbits);
-	atomic_add(nbits, &subpage->readers);
-	spin_unlock_irqrestore(&subpage->lock, flags);
-}
-
-void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
-			      struct folio *folio, u64 start, u32 len)
-{
-	struct btrfs_subpage *subpage = folio_get_private(folio);
-	const int start_bit = subpage_calc_start_bit(fs_info, folio, locked, start, len);
-	const int nbits = len >> fs_info->sectorsize_bits;
-	unsigned long flags;
-	bool is_data;
-	bool last;
-
-	btrfs_subpage_assert(fs_info, folio, start, len);
-	is_data = is_data_inode(BTRFS_I(folio->mapping->host));
-
-	spin_lock_irqsave(&subpage->lock, flags);
-
-	/* The range should have already been locked. */
-	ASSERT(bitmap_test_range_all_set(subpage->bitmaps, start_bit, nbits));
-	ASSERT(atomic_read(&subpage->readers) >= nbits);
-
-	bitmap_clear(subpage->bitmaps, start_bit, nbits);
-	last = atomic_sub_and_test(nbits, &subpage->readers);
-
-	/*
-	 * For data we need to unlock the page if the last read has finished.
-	 *
-	 * And please don't replace @last with atomic_sub_and_test() call
-	 * inside if () condition.
-	 * As we want the atomic_sub_and_test() to be always executed.
-	 */
-	if (is_data && last)
-		folio_unlock(folio);
-	spin_unlock_irqrestore(&subpage->lock, flags);
-}
-
 static void btrfs_subpage_clamp_range(struct folio *folio, u64 *start, u32 *len)
 {
 	u64 orig_start = *start;

--- a/fs/btrfs/subpage.h
+++ b/fs/btrfs/subpage.h
@@ -45,14 +45,6 @@ enum {
 struct btrfs_subpage {
 	/* Common members for both data and metadata pages */
 	spinlock_t lock;
-	/*
-	 * Both data and metadata needs to track how many readers are for the
-	 * page.
-	 * Data relies on @readers to unlock the page when last reader finished.
-	 * While metadata doesn't need page unlock, it needs to prevent
-	 * page::private get cleared before the last end_page_read().
-	 */
-	atomic_t readers;
 	union {
 		/*
 		 * Structures only used by metadata
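
[Annotation] With @readers gone, the struct keeps only the spinlock, the
metadata/data union, and the sector bitmaps. A sketch of the resulting layout;
the union's data member comes from btrfs_alloc_subpage() above, while the
comment text and the trailing bitmaps[] array sit outside the hunks and are
assumptions:

/* Sketch of struct btrfs_subpage after this commit; members not visible
 * in the hunks are assumed from context. */
struct btrfs_subpage {
	/* Common members for both data and metadata pages */
	spinlock_t lock;
	union {
		/* Structures only used by metadata */
		atomic_t eb_refs;
		/* Structures only used by data */
		atomic_t writers;
	};
	unsigned long bitmaps[];
};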
@@ -95,11 +87,6 @@ void btrfs_free_subpage(struct btrfs_subpage *subpage);
 void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
 void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
 
-void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
-				struct folio *folio, u64 start, u32 len);
-void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
-			      struct folio *folio, u64 start, u32 len);
-
 void btrfs_folio_end_writer_lock(const struct btrfs_fs_info *fs_info,
 				 struct folio *folio, u64 start, u32 len);
 void btrfs_folio_set_writer_lock(const struct btrfs_fs_info *fs_info,