memcg-v1: no need for memcg locking for dirty tracking

During the era of memcg charge migration, the kernel had to make sure
that dirty stat updates did not race with charge migration; otherwise
it might update the dirty stats of the wrong memcg.  Now that memcg
charge migration is gone, dirty stat updates can no longer race with
it, and the previous locking can be removed.
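To illustrate, here is a minimal sketch of the pattern this patch
removes, distilled from the hunks below rather than quoted from any
single call site (the comments are the editor's):

	/*
	 * Before: the PageDirty flag and the per-memcg dirty stat were
	 * updated under folio_memcg_lock(), which kept charge migration
	 * (mem_cgroup_move_account()) from changing folio_memcg() between
	 * the two updates.
	 */
	folio_memcg_lock(folio);
	if (!folio_test_set_dirty(folio))
		__folio_mark_dirty(folio, mapping, 0);
	folio_memcg_unlock(folio);

	/*
	 * After: with migration gone, folio_memcg() is stable for the
	 * lifetime of the charge, so the atomic test-and-set of the
	 * dirty flag alone is enough.
	 */
	if (!folio_test_set_dirty(folio))
		__folio_mark_dirty(folio, mapping, 0);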

Link: https://lkml.kernel.org/r/20241025012304.2473312-4-shakeel.butt@linux.dev
Signed-off-by: Shakeel Butt <shakeel.butt@linux.dev>
Acked-by: Michal Hocko <mhocko@suse.com>
Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Shakeel Butt authored 2024-10-24 18:23:00 -07:00; committed by Andrew Morton
commit a8cd9d4ce3 (parent 6b611388b6)
2 changed files with 3 additions and 18 deletions

fs/buffer.c

@@ -736,15 +736,12 @@ bool block_dirty_folio(struct address_space *mapping, struct folio *folio)
 	/*
 	 * Lock out page's memcg migration to keep PageDirty
 	 * synchronized with per-memcg dirty page counters.
 	 */
-	folio_memcg_lock(folio);
 	newly_dirty = !folio_test_set_dirty(folio);
 	spin_unlock(&mapping->i_private_lock);
 
 	if (newly_dirty)
 		__folio_mark_dirty(folio, mapping, 1);
 
-	folio_memcg_unlock(folio);
-
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
@@ -1194,13 +1191,11 @@ void mark_buffer_dirty(struct buffer_head *bh)
 		struct folio *folio = bh->b_folio;
 		struct address_space *mapping = NULL;
 
-		folio_memcg_lock(folio);
 		if (!folio_test_set_dirty(folio)) {
 			mapping = folio->mapping;
 			if (mapping)
 				__folio_mark_dirty(folio, mapping, 0);
 		}
-		folio_memcg_unlock(folio);
 		if (mapping)
 			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
 	}

mm/page-writeback.c

@@ -2743,8 +2743,6 @@ EXPORT_SYMBOL(noop_dirty_folio);
 /*
  * Helper function for set_page_dirty family.
  *
- * Caller must hold folio_memcg_lock().
- *
  * NOTE: This relies on being atomic wrt interrupts.
  */
 static void folio_account_dirtied(struct folio *folio,
@@ -2777,7 +2775,6 @@ static void folio_account_dirtied(struct folio *folio,
 /*
  * Helper function for deaccounting dirty page without writeback.
  *
- * Caller must hold folio_memcg_lock().
  */
 void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
 {
@@ -2795,9 +2792,8 @@ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
  * If warn is true, then emit a warning if the folio is not uptodate and has
  * not been truncated.
  *
- * The caller must hold folio_memcg_lock(). It is the caller's
- * responsibility to prevent the folio from being truncated while
- * this function is in progress, although it may have been truncated
+ * It is the caller's responsibility to prevent the folio from being truncated
+ * while this function is in progress, although it may have been truncated
  * before this function is called. Most callers have the folio locked.
  * A few have the folio blocked from truncation through other means (e.g.
  * zap_vma_pages() has it mapped and is holding the page table lock).
@@ -2841,14 +2837,10 @@ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
  */
 bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
 {
-	folio_memcg_lock(folio);
-	if (folio_test_set_dirty(folio)) {
-		folio_memcg_unlock(folio);
+	if (folio_test_set_dirty(folio))
 		return false;
-	}
 
 	__folio_mark_dirty(folio, mapping, !folio_test_private(folio));
-	folio_memcg_unlock(folio);
 
 	if (mapping->host) {
 		/* !PageAnon && !swapper_space */
@@ -2975,14 +2967,12 @@ void __folio_cancel_dirty(struct folio *folio)
 		struct bdi_writeback *wb;
 		struct wb_lock_cookie cookie = {};
 
-		folio_memcg_lock(folio);
 		wb = unlocked_inode_to_wb_begin(inode, &cookie);
 
 		if (folio_test_clear_dirty(folio))
 			folio_account_cleaned(folio, wb);
 
 		unlocked_inode_to_wb_end(inode, &cookie);
-		folio_memcg_unlock(folio);
 	} else {
 		folio_clear_dirty(folio);
 	}
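
For reference, the resulting filemap_dirty_folio() after this patch,
reconstructed from the hunks above; the __mark_inode_dirty() tail and
the return value are carried over from unmodified context that the
diff does not show, so treat them as a best-effort reconstruction:

bool filemap_dirty_folio(struct address_space *mapping, struct folio *folio)
{
	if (folio_test_set_dirty(folio))
		return false;

	__folio_mark_dirty(folio, mapping, !folio_test_private(folio));

	if (mapping->host) {
		/* !PageAnon && !swapper_space */
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	}
	return true;
}

Note that the remaining serialization burden falls entirely on
folio_test_set_dirty(), an atomic test-and-set of the dirty flag, so
only one of several racing callers proceeds to the stat accounting.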