mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
mm/memory-failure: convert memory_failure() to use a folio
Saves dozens of calls to compound_head().

Link: https://lkml.kernel.org/r/20240412193510.2356957-8-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 5dba5c356a
parent 6e8cda4c2c
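The point of the conversion below is that memory_failure() now resolves the folio once and every later test, lock, and put operates on it directly, instead of each PageLRU()/lock_page()/put_page()-style call re-deriving the head page internally. A minimal user-space sketch of that pattern follows (not kernel code: struct page, struct folio, PG_LRU and the helpers here are simplified stand-ins for illustration only):

/*
 * Minimal user-space sketch, NOT kernel code: the types and helpers below
 * are simplified stand-ins used only to illustrate the conversion pattern.
 */
#include <stdbool.h>
#include <stdio.h>

struct folio { unsigned long flags; };
struct page  { struct folio *head; };	/* pretend every page knows its folio */

#define PG_LRU	0x1UL

/* Stand-in for compound_head()/page_folio(): resolve the owning folio. */
static struct folio *page_folio(struct page *page)
{
	return page->head;
}

/* Page-based predicate: re-derives the folio on every call. */
static bool PageLRU(struct page *page)
{
	return page_folio(page)->flags & PG_LRU;
}

/* Folio-based predicate: works on a folio the caller resolved once. */
static bool folio_test_lru(struct folio *folio)
{
	return folio->flags & PG_LRU;
}

int main(void)
{
	struct folio f = { .flags = PG_LRU };
	struct page p = { .head = &f };

	/* Before: each page-based call repeats the head lookup. */
	printf("PageLRU:        %d\n", PageLRU(&p));

	/* After: look the folio up once, then test it directly. */
	struct folio *folio = page_folio(&p);
	printf("folio_test_lru: %d\n", folio_test_lru(folio));
	return 0;
}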
mm/memory-failure.c
@@ -2189,7 +2189,7 @@ static int memory_failure_dev_pagemap(unsigned long pfn, int flags,
 int memory_failure(unsigned long pfn, int flags)
 {
 	struct page *p;
-	struct page *hpage;
+	struct folio *folio;
 	struct dev_pagemap *pgmap;
 	int res = 0;
 	unsigned long page_flags;
@@ -2277,8 +2277,8 @@ int memory_failure(unsigned long pfn, int flags)
 		}
 	}
 
-	hpage = compound_head(p);
-	if (PageTransHuge(hpage)) {
+	folio = page_folio(p);
+	if (folio_test_large(folio)) {
 		/*
 		 * The flag must be set after the refcount is bumped
 		 * otherwise it may race with THP split.
@@ -2292,12 +2292,13 @@ int memory_failure(unsigned long pfn, int flags)
 		 * or unhandlable page. The refcount is bumped iff the
 		 * page is a valid handlable page.
 		 */
-		SetPageHasHWPoisoned(hpage);
+		folio_set_has_hwpoisoned(folio);
 		if (try_to_split_thp_page(p) < 0) {
 			res = action_result(pfn, MF_MSG_UNSPLIT_THP, MF_IGNORED);
 			goto unlock_mutex;
 		}
 		VM_BUG_ON_PAGE(!page_count(p), p);
+		folio = page_folio(p);
 	}
 
 	/*
@@ -2308,9 +2309,9 @@ int memory_failure(unsigned long pfn, int flags)
 	 * The check (unnecessarily) ignores LRU pages being isolated and
 	 * walked by the page reclaim code, however that's not a big loss.
 	 */
-	shake_page(p);
+	shake_folio(folio);
 
-	lock_page(p);
+	folio_lock(folio);
 
 	/*
 	 * We're only intended to deal with the non-Compound page here.
@@ -2318,11 +2319,11 @@ int memory_failure(unsigned long pfn, int flags)
 	 * race window. If this happens, we could try again to hopefully
 	 * handle the page next round.
 	 */
-	if (PageCompound(p)) {
+	if (folio_test_large(folio)) {
 		if (retry) {
 			ClearPageHWPoison(p);
-			unlock_page(p);
-			put_page(p);
+			folio_unlock(folio);
+			folio_put(folio);
 			flags &= ~MF_COUNT_INCREASED;
 			retry = false;
 			goto try_again;
@@ -2338,29 +2339,29 @@ int memory_failure(unsigned long pfn, int flags)
 	 * folio_remove_rmap_*() in try_to_unmap_one(). So to determine page
 	 * status correctly, we save a copy of the page flags at this time.
 	 */
-	page_flags = p->flags;
+	page_flags = folio->flags;
 
 	if (hwpoison_filter(p)) {
 		ClearPageHWPoison(p);
-		unlock_page(p);
-		put_page(p);
+		folio_unlock(folio);
+		folio_put(folio);
 		res = -EOPNOTSUPP;
 		goto unlock_mutex;
 	}
 
 	/*
-	 * __munlock_folio() may clear a writeback page's LRU flag without
-	 * page_lock. We need wait writeback completion for this page or it
-	 * may trigger vfs BUG while evict inode.
+	 * __munlock_folio() may clear a writeback folio's LRU flag without
+	 * the folio lock. We need to wait for writeback completion for this
+	 * folio or it may trigger a vfs BUG while evicting inode.
 	 */
-	if (!PageLRU(p) && !PageWriteback(p))
+	if (!folio_test_lru(folio) && !folio_test_writeback(folio))
 		goto identify_page_state;
 
 	/*
 	 * It's very difficult to mess with pages currently under IO
 	 * and in many cases impossible, so we just avoid it here.
 	 */
-	wait_on_page_writeback(p);
+	folio_wait_writeback(folio);
 
 	/*
 	 * Now take care of user space mappings.
@@ -2374,7 +2375,8 @@ int memory_failure(unsigned long pfn, int flags)
 	/*
 	 * Torn down by someone else?
 	 */
-	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
+	if (folio_test_lru(folio) && !folio_test_swapcache(folio) &&
+	    folio->mapping == NULL) {
 		res = action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
 		goto unlock_page;
 	}
@@ -2384,7 +2386,7 @@ int memory_failure(unsigned long pfn, int flags)
 	mutex_unlock(&mf_mutex);
 	return res;
 unlock_page:
-	unlock_page(p);
+	folio_unlock(folio);
 unlock_mutex:
 	mutex_unlock(&mf_mutex);
 	return res;