mm/memory-failure: convert hwpoison_user_mappings to take a folio
Pass the folio from the callers, and use it throughout instead of hpage.
Saves dozens of calls to compound_head().

Link: https://lkml.kernel.org/r/20240412193510.2356957-9-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Reviewed-by: Jane Chu <jane.chu@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5dba5c356a
commit 03468a0f52
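As an illustration of what the conversion buys (not part of the patch; the types and helpers below are simplified userspace stand-ins, not the real kernel definitions): every PageXxx() test on a page that may be a tail page has to resolve the head page first via compound_head(), while the folio_test_xxx() variants start from the folio the callers already hold, so passing the folio down removes that repeated lookup.

/*
 * Minimal sketch only -- simplified stand-ins, NOT the kernel's
 * struct page / struct folio or its real helpers.
 */
#include <stdbool.h>

struct folio { unsigned long flags; };
struct page  { struct folio *head; };

#define PG_dirty (1UL << 0)

/* stand-in for page_folio()/compound_head(): resolve the head page */
static struct folio *page_folio(struct page *p)
{
	return p->head;
}

/* page-based test: pays for the head lookup on every call */
static bool PageDirty(struct page *p)
{
	return page_folio(p)->flags & PG_dirty;
}

/* folio-based test: the caller already holds the head */
static bool folio_test_dirty(struct folio *folio)
{
	return folio->flags & PG_dirty;
}

/* old shape: each PageXxx(hpage) hides another head lookup */
static bool any_work_old(struct page *hpage)
{
	return PageDirty(hpage);
}

/* new shape: callers resolve the folio once and pass it down */
static bool any_work_new(struct folio *folio)
{
	return folio_test_dirty(folio);
}

int main(void)
{
	struct folio f = { .flags = PG_dirty };
	struct page p = { .head = &f };

	return (any_work_old(&p) == any_work_new(&f)) ? 0 : 1;
}

The real hwpoison_user_mappings() applies the same pattern with the kernel's folio_test_dirty(), folio_test_hugetlb() and related helpers, as the diff below shows.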
mm/memory-failure.c
@@ -1559,24 +1559,24 @@ static int get_hwpoison_page(struct page *p, unsigned long flags)
  * Do all that is necessary to remove user space mappings. Unmap
  * the pages and send SIGBUS to the processes if the data was dirty.
  */
-static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
-                                int flags, struct page *hpage)
+static bool hwpoison_user_mappings(struct folio *folio, struct page *p,
+                unsigned long pfn, int flags)
 {
-        struct folio *folio = page_folio(hpage);
         enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_SYNC | TTU_HWPOISON;
         struct address_space *mapping;
         LIST_HEAD(tokill);
         bool unmap_success;
         int forcekill;
-        bool mlocked = PageMlocked(hpage);
+        bool mlocked = folio_test_mlocked(folio);
 
         /*
          * Here we are interested only in user-mapped pages, so skip any
          * other types of pages.
          */
-        if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p))
+        if (folio_test_reserved(folio) || folio_test_slab(folio) ||
+            folio_test_pgtable(folio) || folio_test_offline(folio))
                 return true;
-        if (!(PageLRU(hpage) || PageHuge(p)))
+        if (!(folio_test_lru(folio) || folio_test_hugetlb(folio)))
                 return true;
 
         /*
@@ -1586,7 +1586,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
         if (!page_mapped(p))
                 return true;
 
-        if (PageSwapCache(p)) {
+        if (folio_test_swapcache(folio)) {
                 pr_err("%#lx: keeping poisoned page in swap cache\n", pfn);
                 ttu &= ~TTU_HWPOISON;
         }
@@ -1597,11 +1597,11 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
          * XXX: the dirty test could be racy: set_page_dirty() may not always
          * be called inside page lock (it's recommended but not enforced).
          */
-        mapping = page_mapping(hpage);
-        if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
+        mapping = folio_mapping(folio);
+        if (!(flags & MF_MUST_KILL) && !folio_test_dirty(folio) && mapping &&
             mapping_can_writeback(mapping)) {
-                if (page_mkclean(hpage)) {
-                        SetPageDirty(hpage);
+                if (folio_mkclean(folio)) {
+                        folio_set_dirty(folio);
                 } else {
                         ttu &= ~TTU_HWPOISON;
                         pr_info("%#lx: corrupted page was clean: dropped without side effects\n",
@@ -1616,7 +1616,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
          */
         collect_procs(folio, p, &tokill, flags & MF_ACTION_REQUIRED);
 
-        if (PageHuge(hpage) && !PageAnon(hpage)) {
+        if (folio_test_hugetlb(folio) && !folio_test_anon(folio)) {
                 /*
                  * For hugetlb pages in shared mappings, try_to_unmap
                  * could potentially call huge_pmd_unshare. Because of
@@ -1656,7 +1656,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
          * use a more force-full uncatchable kill to prevent
          * any accesses to the poisoned memory.
          */
-        forcekill = PageDirty(hpage) || (flags & MF_MUST_KILL) ||
+        forcekill = folio_test_dirty(folio) || (flags & MF_MUST_KILL) ||
                     !unmap_success;
         kill_procs(&tokill, forcekill, !unmap_success, pfn, flags);
 
@@ -2100,7 +2100,7 @@ retry:
 
         page_flags = folio->flags;
 
-        if (!hwpoison_user_mappings(p, pfn, flags, &folio->page)) {
+        if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
                 folio_unlock(folio);
                 return action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
         }
@@ -2367,7 +2367,7 @@ try_again:
          * Now take care of user space mappings.
          * Abort on fail: __filemap_remove_folio() assumes unmapped page.
          */
-        if (!hwpoison_user_mappings(p, pfn, flags, p)) {
+        if (!hwpoison_user_mappings(folio, p, pfn, flags)) {
                 res = action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
                 goto unlock_page;
         }