Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
mm/writeback: add folio_mark_dirty_lock()
Add a new convenience helper, folio_mark_dirty_lock(), that grabs the
folio lock before calling folio_mark_dirty().

Refactor set_page_dirty_lock() to use folio_mark_dirty_lock() directly.

Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
This commit is contained in:
parent cbe9c115b7
commit 7fce207af5
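For context, the new helper just wraps folio_mark_dirty() with the folio lock, so a caller that open-codes the lock/dirty/unlock sequence can collapse it into one call. A minimal sketch of the before/after, using a hypothetical caller (example_dirty_folio_* is illustrative and not part of this commit):

/* Hypothetical illustration only; not part of this commit. */

/* Before: callers open-code the lock/dirty/unlock sequence. */
static void example_dirty_folio_old(struct folio *folio)
{
        folio_lock(folio);
        folio_mark_dirty(folio);
        folio_unlock(folio);
}

/* After: the same sequence through the new convenience helper. */
static void example_dirty_folio_new(struct folio *folio)
{
        folio_mark_dirty_lock(folio);
}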
include/linux/mm.h
@@ -2539,6 +2539,7 @@ struct kvec;
 struct page *get_dump_page(unsigned long addr);
 
 bool folio_mark_dirty(struct folio *folio);
+bool folio_mark_dirty_lock(struct folio *folio);
 bool set_page_dirty(struct page *page);
 int set_page_dirty_lock(struct page *page);
 
mm/folio-compat.c
@@ -52,6 +52,12 @@ bool set_page_dirty(struct page *page)
 }
 EXPORT_SYMBOL(set_page_dirty);
 
+int set_page_dirty_lock(struct page *page)
+{
+        return folio_mark_dirty_lock(page_folio(page));
+}
+EXPORT_SYMBOL(set_page_dirty_lock);
+
 bool clear_page_dirty_for_io(struct page *page)
 {
         return folio_clear_dirty_for_io(page_folio(page));
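With this compat wrapper, legacy page-based callers keep working unchanged: set_page_dirty_lock() now converts the page to its folio and defers to folio_mark_dirty_lock(). A rough sketch of a typical legacy call site (example_put_dirty_pages() is hypothetical, not taken from this commit):

/* Hypothetical legacy caller: a driver releasing pinned user pages after
 * writing to them, still using the page-based API. Each iteration now goes
 * through folio_mark_dirty_lock(page_folio(page)) under the hood.
 */
static void example_put_dirty_pages(struct page **pages, unsigned long npages)
{
        unsigned long i;

        for (i = 0; i < npages; i++) {
                set_page_dirty_lock(pages[i]);
                put_page(pages[i]);
        }
}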
mm/page-writeback.c
@@ -2914,25 +2914,25 @@ bool folio_mark_dirty(struct folio *folio)
 EXPORT_SYMBOL(folio_mark_dirty);
 
 /*
- * set_page_dirty() is racy if the caller has no reference against
- * page->mapping->host, and if the page is unlocked. This is because another
- * CPU could truncate the page off the mapping and then free the mapping.
+ * folio_mark_dirty() is racy if the caller has no reference against
+ * folio->mapping->host, and if the folio is unlocked. This is because another
+ * CPU could truncate the folio off the mapping and then free the mapping.
  *
- * Usually, the page _is_ locked, or the caller is a user-space process which
+ * Usually, the folio _is_ locked, or the caller is a user-space process which
  * holds a reference on the inode by having an open file.
  *
- * In other cases, the page should be locked before running set_page_dirty().
+ * In other cases, the folio should be locked before running folio_mark_dirty().
  */
-int set_page_dirty_lock(struct page *page)
+bool folio_mark_dirty_lock(struct folio *folio)
 {
-        int ret;
+        bool ret;
 
-        lock_page(page);
-        ret = set_page_dirty(page);
-        unlock_page(page);
+        folio_lock(folio);
+        ret = folio_mark_dirty(folio);
+        folio_unlock(folio);
         return ret;
 }
-EXPORT_SYMBOL(set_page_dirty_lock);
+EXPORT_SYMBOL(folio_mark_dirty_lock);
 
 /*
  * This cancels just the dirty bit on the kernel page itself, it does NOT