nilfs2: convert to __nilfs_clear_folio_dirty()
All callers now have a folio, so convert to pass a folio.  No caller
uses the return value, so make it return void.  Removes a couple of
hidden calls to compound_head().

Link: https://lkml.kernel.org/r/20231114084436.2755-10-konishi.ryusuke@gmail.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5d3b5903d4
commit 6609e23576
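Note (not part of the patch): a minimal sketch of what the commit message describes, assuming the stock kernel page/folio flag helpers from <linux/mm.h> and <linux/pagemap.h>. PageDirty() and clear_page_dirty_for_io() on a struct page each hide a compound_head() lookup to find the head page, and no caller ever used the old int result; a folio already refers to the head page, so folio_test_dirty() and folio_clear_dirty_for_io() are direct and the helper can return void. The example_* function names below are invented for illustration.

/* Illustrative sketch only, not from this commit. */

/* Before: page-based.  PageDirty() and clear_page_dirty_for_io() each
 * resolve the head page via an implicit compound_head(), and the int
 * return value was ignored by every caller.
 */
static int example_clear_dirty_page(struct page *page)
{
        if (PageDirty(page))                    /* hidden compound_head(page) */
                return clear_page_dirty_for_io(page);
        return 0;
}

/* After: folio-based.  The folio is the head page, so the dirty test is
 * direct, and returning void matches how the callers actually use it.
 */
static void example_clear_dirty_folio(struct folio *folio)
{
        if (folio_test_dirty(folio))            /* no hidden compound_head() */
                folio_clear_dirty_for_io(folio);
}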
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
@@ -82,7 +82,7 @@ void nilfs_forget_buffer(struct buffer_head *bh)
 	lock_buffer(bh);
 	set_mask_bits(&bh->b_state, clear_bits, 0);
 	if (nilfs_folio_buffers_clean(folio))
-		__nilfs_clear_page_dirty(&folio->page);
+		__nilfs_clear_folio_dirty(folio);
 
 	bh->b_blocknr = -1;
 	folio_clear_uptodate(folio);
@@ -428,7 +428,7 @@ void nilfs_clear_folio_dirty(struct folio *folio, bool silent)
 		} while (bh = bh->b_this_page, bh != head);
 	}
 
-	__nilfs_clear_page_dirty(&folio->page);
+	__nilfs_clear_folio_dirty(folio);
 }
 
 unsigned int nilfs_page_count_clean_buffers(struct page *page,
@@ -458,22 +458,23 @@ unsigned int nilfs_page_count_clean_buffers(struct page *page,
  * 2) Some B-tree operations like insertion or deletion may dispose buffers
  *    in dirty state, and this needs to cancel the dirty state of their pages.
  */
-int __nilfs_clear_page_dirty(struct page *page)
+void __nilfs_clear_folio_dirty(struct folio *folio)
 {
-	struct address_space *mapping = page->mapping;
+	struct address_space *mapping = folio->mapping;
 
 	if (mapping) {
 		xa_lock_irq(&mapping->i_pages);
-		if (test_bit(PG_dirty, &page->flags)) {
-			__xa_clear_mark(&mapping->i_pages, page_index(page),
+		if (folio_test_dirty(folio)) {
+			__xa_clear_mark(&mapping->i_pages, folio->index,
 					     PAGECACHE_TAG_DIRTY);
 			xa_unlock_irq(&mapping->i_pages);
-			return clear_page_dirty_for_io(page);
+			folio_clear_dirty_for_io(folio);
+			return;
 		}
 		xa_unlock_irq(&mapping->i_pages);
-		return 0;
+		return;
 	}
-	return TestClearPageDirty(page);
+	folio_clear_dirty(folio);
 }
 
 /**
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
@@ -30,7 +30,7 @@ BUFFER_FNS(NILFS_Checked, nilfs_checked)	/* buffer is verified */
 BUFFER_FNS(NILFS_Redirected, nilfs_redirected)	/* redirected to a copy */
 
 
-int __nilfs_clear_page_dirty(struct page *);
+void __nilfs_clear_folio_dirty(struct folio *);
 
 struct buffer_head *nilfs_grab_buffer(struct inode *, struct address_space *,
 				      unsigned long, unsigned long);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
@@ -1760,7 +1760,7 @@ static void nilfs_end_folio_io(struct folio *folio, int err)
 			 */
 			folio_lock(folio);
 			if (nilfs_folio_buffers_clean(folio))
-				__nilfs_clear_page_dirty(&folio->page);
+				__nilfs_clear_folio_dirty(folio);
 			folio_unlock(folio);
 		}
 		return;