Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-01-16 18:08:20 +00:00)
mm, page_alloc: use check_pages_enabled static key to check tail pages
Commit 700d2e9a36b9 ("mm, page_alloc: reduce page alloc/free sanity checks") has
introduced a new static key check_pages_enabled to control when struct pages are
sanity checked during allocation and freeing. Mel Gorman suggested that
free_tail_pages_check() could use this static key as well, instead of relying on
CONFIG_DEBUG_VM. That makes sense, so do that. Also rename the function to
free_tail_page_prepare() because it works on a single tail page and has a struct
page preparation component as well as the optional checking component.

Also remove some unnecessary unlikely() within static_branch_unlikely()
statements that Mel pointed out for commit 700d2e9a36b9.

Link: https://lkml.kernel.org/r/20230405142840.11068-1-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Suggested-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Cc: Alexander Halbuer <halbuer@sra.uni-hannover.de>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
commit 8666925c49
parent 27da93d8e6
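For readers unfamiliar with the mechanism: the patch replaces a compile-time
IS_ENABLED(CONFIG_DEBUG_VM) test with a runtime-patched static key, so the
tail-page sanity checks can be switched on without a debug kernel build and
cost essentially nothing when off. Below is a minimal sketch of the jump-label
pattern, assuming the standard <linux/jump_label.h> API; enable_page_checks()
and should_check_pages() are illustrative names, and the kernel's actual key
definition (from commit 700d2e9a36b9) may tie its default to CONFIG_DEBUG_VM
rather than using DEFINE_STATIC_KEY_FALSE.

#include <linux/jump_label.h>

/* Sketch only: key defaults to disabled; every check site compiles to a
 * no-op jump until the key is flipped at runtime. */
DEFINE_STATIC_KEY_FALSE(check_pages_enabled);

/* Illustrative: flipped once, e.g. during early init when extra page
 * checking is requested. */
static void enable_page_checks(void)
{
	static_branch_enable(&check_pages_enabled);
}

/* Illustrative check-site wrapper. static_branch_unlikely() both tests
 * the key and already hints the guarded path as unlikely, which is why
 * additional unlikely() wrappers inside such blocks add nothing and are
 * dropped by this patch. */
static bool should_check_pages(void)
{
	return static_branch_unlikely(&check_pages_enabled);
}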
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -264,7 +264,7 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr,
 	 * How many struct page structs need to be reset. When we reuse the head
 	 * struct page, the special metadata (e.g. page->flags or page->mapping)
 	 * cannot copy to the tail struct page structs. The invalid value will be
-	 * checked in the free_tail_pages_check(). In order to avoid the message
+	 * checked in the free_tail_page_prepare(). In order to avoid the message
 	 * of "corrupted mapping in tail page". We need to reset at least 3 (one
 	 * head struct page struct and two tail struct page structs) struct page
 	 * structs.
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1131,7 +1131,7 @@ static inline bool free_page_is_bad(struct page *page)
 	return true;
 }
 
-static int free_tail_pages_check(struct page *head_page, struct page *page)
+static int free_tail_page_prepare(struct page *head_page, struct page *page)
 {
 	struct folio *folio = (struct folio *)head_page;
 	int ret = 1;
@@ -1142,7 +1142,7 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 	 */
 	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);
 
-	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
+	if (!static_branch_unlikely(&check_pages_enabled)) {
 		ret = 0;
 		goto out;
 	}
@@ -1276,9 +1276,9 @@ static __always_inline bool free_pages_prepare(struct page *page,
 		ClearPageHasHWPoisoned(page);
 		for (i = 1; i < (1 << order); i++) {
 			if (compound)
-				bad += free_tail_pages_check(page, page + i);
+				bad += free_tail_page_prepare(page, page + i);
 			if (is_check_pages_enabled()) {
-				if (unlikely(free_page_is_bad(page + i))) {
+				if (free_page_is_bad(page + i)) {
 					bad++;
 					continue;
 				}
@@ -1627,7 +1627,7 @@ static inline bool check_new_pages(struct page *page, unsigned int order)
 		for (int i = 0; i < (1 << order); i++) {
 			struct page *p = page + i;
 
-			if (unlikely(check_new_page(p)))
+			if (check_new_page(p))
 				return true;
 		}
 	}