mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2024-12-29 09:13:38 +00:00)
mm: add __dump_folio()
Turn __dump_page() into a wrapper around __dump_folio().  Snapshot the
page & folio into a stack variable so we don't hit BUG_ON() if an
allocation is freed under us and what was a folio pointer becomes a
pointer to a tail page.

[willy@infradead.org: fix build issue]
  Link: https://lkml.kernel.org/r/ZeAKCyTn_xS3O9cE@casper.infradead.org
[willy@infradead.org: fix __dump_folio]
  Link: https://lkml.kernel.org/r/ZeJJegP8zM7S9GTy@casper.infradead.org
[willy@infradead.org: fix pointer confusion]
  Link: https://lkml.kernel.org/r/ZeYa00ixxC4k1ot-@casper.infradead.org
[akpm@linux-foundation.org: s/printk/pr_warn/]
Link: https://lkml.kernel.org/r/20240227192337.757313-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
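The technique the message describes, sketched as standalone C (struct obj, obj_consistent() and snapshot_obj() are illustrative stand-ins, not kernel API): copy the possibly-live structure into private memory, validate only the copy, and retry a bounded number of times if the copy looks torn.

#include <stdbool.h>
#include <string.h>

struct obj {
	unsigned long head;	/* analogous to page->compound_head */
	unsigned long nr;	/* analogous to folio_nr_pages()    */
};

/* The consistency check runs on the *copy*, never on the live object. */
static bool obj_consistent(const struct obj *snap)
{
	return snap->head <= snap->nr;
}

static bool snapshot_obj(const struct obj *live, struct obj *snap)
{
	int loops = 5;		/* bounded retries, as in __dump_page() below */

	do {
		memcpy(snap, live, sizeof(*snap));	/* may race; copy is private */
		if (obj_consistent(snap))
			return true;
	} while (loops-- > 0);

	return false;		/* caller reports best-effort state anyway */
}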
parent 7da8988c7c
commit fae7d834c4
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2066,6 +2066,13 @@ static inline long folio_nr_pages(struct folio *folio)
 #endif
 }
 
+/* Only hugetlbfs can allocate folios larger than MAX_ORDER */
+#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
+#define MAX_FOLIO_NR_PAGES	(1UL << PUD_ORDER)
+#else
+#define MAX_FOLIO_NR_PAGES	MAX_ORDER_NR_PAGES
+#endif
+
 /*
  * compound_nr() returns the number of pages in this potentially compound
  * page.  compound_nr() can be called on a tail page, and is defined to
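A quick sanity check on what this new constant bounds, as a userspace sketch; the PAGE_SHIFT and PUD_ORDER values are assumed x86-64 defaults (4 KiB pages, 1 GiB PUD), not taken from this patch. __dump_page() below uses MAX_FOLIO_NR_PAGES to decide whether a tail-page index could plausibly belong to a real folio before trusting the snapshot.

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed: 4 KiB pages */
#define PUD_ORDER	(30 - PAGE_SHIFT)	/* assumed: 1 GiB PUD => order 18 */
#define MAX_FOLIO_NR_PAGES	(1UL << PUD_ORDER)

int main(void)
{
	/* 262144 pages * 4 KiB = 1 GiB, the largest (hugetlb) folio. */
	printf("%lu pages = %lu MiB\n", MAX_FOLIO_NR_PAGES,
	       (MAX_FOLIO_NR_PAGES << PAGE_SHIFT) >> 20);
	return 0;
}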
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -76,9 +76,12 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
 #ifdef CONFIG_CMA
 #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
 #  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
+#  define is_migrate_cma_folio(folio, pfn)	(MIGRATE_CMA ==		\
+	get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK))
 #else
 #  define is_migrate_cma(migratetype) false
 #  define is_migrate_cma_page(_page) false
 #  define is_migrate_cma_folio(folio, pfn) false
 #endif
 
 static inline bool is_migrate_movable(int mt)
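Unlike is_migrate_cma_page(), which derives everything from the page pointer, the folio variant takes the pfn explicitly. That is what lets __dump_folio() below call it on a stack snapshot: the copy's address has no meaningful pfn, while the zone/migratetype lookup still works from the flags preserved inside the copy. A sketch of that calling convention (the wrapper function and names are illustrative only):

/* Sketch only: how __dump_page() below supplies the pfn separately. */
static void dump_cma_note(const struct page *page)
{
	unsigned long pfn = page_to_pfn(page);	/* taken from the real page */
	struct folio snap, *fp = &snap;

	/* Copy the first two struct pages, as __dump_page() does. */
	memcpy(&snap, page_folio(page), 2 * sizeof(struct page));
	if (is_migrate_cma_folio(fp, pfn))	/* pfn of the original, not of &snap */
		pr_warn("folio sits in a CMA pageblock\n");
}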
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -51,84 +51,102 @@ const struct trace_print_flags vmaflag_names[] = {
 	{0, NULL}
 };
 
-static void __dump_page(struct page *page)
+static void __dump_folio(struct folio *folio, struct page *page,
+		unsigned long pfn, unsigned long idx)
 {
-	struct folio *folio = page_folio(page);
-	struct page *head = &folio->page;
-	struct address_space *mapping;
-	bool compound = PageCompound(page);
-	/*
-	 * Accessing the pageblock without the zone lock. It could change to
-	 * "isolate" again in the meantime, but since we are just dumping the
-	 * state for debugging, it should be fine to accept a bit of
-	 * inaccuracy here due to racing.
-	 */
-	bool page_cma = is_migrate_cma_page(page);
-	int mapcount;
+	struct address_space *mapping = folio_mapping(folio);
+	int mapcount = 0;
 	char *type = "";
 
-	if (page < head || (page >= head + MAX_ORDER_NR_PAGES)) {
-		/*
-		 * Corrupt page, so we cannot call page_mapping. Instead, do a
-		 * safe subset of the steps that page_mapping() does. Caution:
-		 * this will be misleading for tail pages, PageSwapCache pages,
-		 * and potentially other situations.  (See the page_mapping()
-		 * implementation for what's missing here.)
-		 */
-		unsigned long tmp = (unsigned long)page->mapping;
-
-		if (tmp & PAGE_MAPPING_ANON)
-			mapping = NULL;
-		else
-			mapping = (void *)(tmp & ~PAGE_MAPPING_FLAGS);
-		head = page;
-		folio = (struct folio *)page;
-		compound = false;
-	} else {
-		mapping = page_mapping(page);
+	/*
+	 * page->_mapcount space in struct page is used by slab pages to
+	 * encode own info, and we must avoid calling page_folio() again.
+	 */
+	if (!folio_test_slab(folio)) {
+		mapcount = atomic_read(&page->_mapcount) + 1;
+		if (folio_test_large(folio))
+			mapcount += folio_entire_mapcount(folio);
 	}
 
-	/*
-	 * Avoid VM_BUG_ON() in page_mapcount().
-	 * page->_mapcount space in struct page is used by sl[aou]b pages to
-	 * encode own info.
-	 */
-	mapcount = PageSlab(head) ? 0 : page_mapcount(page);
-
-	pr_warn("page:%p refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
-			page, page_ref_count(head), mapcount, mapping,
-			page_to_pgoff(page), page_to_pfn(page));
-	if (compound) {
-		pr_warn("head:%p order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
-				head, compound_order(head),
+	pr_warn("page: refcount:%d mapcount:%d mapping:%p index:%#lx pfn:%#lx\n",
+			folio_ref_count(folio), mapcount, mapping,
+			folio->index + idx, pfn);
+	if (folio_test_large(folio)) {
+		pr_warn("head: order:%u entire_mapcount:%d nr_pages_mapped:%d pincount:%d\n",
+				folio_order(folio),
 				folio_entire_mapcount(folio),
 				folio_nr_pages_mapped(folio),
 				atomic_read(&folio->_pincount));
 	}
 
 #ifdef CONFIG_MEMCG
-	if (head->memcg_data)
-		pr_warn("memcg:%lx\n", head->memcg_data);
+	if (folio->memcg_data)
+		pr_warn("memcg:%lx\n", folio->memcg_data);
 #endif
-	if (PageKsm(page))
+	if (folio_test_ksm(folio))
 		type = "ksm ";
-	else if (PageAnon(page))
+	else if (folio_test_anon(folio))
 		type = "anon ";
 	else if (mapping)
 		dump_mapping(mapping);
 	BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS + 1);
 
-	pr_warn("%sflags: %pGp%s\n", type, &head->flags,
-		page_cma ? " CMA" : "");
-	pr_warn("page_type: %pGt\n", &head->page_type);
+	/*
+	 * Accessing the pageblock without the zone lock. It could change to
+	 * "isolate" again in the meantime, but since we are just dumping the
+	 * state for debugging, it should be fine to accept a bit of
+	 * inaccuracy here due to racing.
+	 */
+	pr_warn("%sflags: %pGp%s\n", type, &folio->flags,
+		is_migrate_cma_folio(folio, pfn) ? " CMA" : "");
+	pr_warn("page_type: %pGt\n", &folio->page.page_type);
 
 	print_hex_dump(KERN_WARNING, "raw: ", DUMP_PREFIX_NONE, 32,
 			sizeof(unsigned long), page,
 			sizeof(struct page), false);
-	if (head != page)
+	if (folio_test_large(folio))
 		print_hex_dump(KERN_WARNING, "head: ", DUMP_PREFIX_NONE, 32,
-			sizeof(unsigned long), head,
-			sizeof(struct page), false);
+			sizeof(unsigned long), folio,
+			2 * sizeof(struct page), false);
+}
+
+static void __dump_page(const struct page *page)
+{
+	struct folio *foliop, folio;
+	struct page precise;
+	unsigned long pfn = page_to_pfn(page);
+	unsigned long idx, nr_pages = 1;
+	int loops = 5;
+
+again:
+	memcpy(&precise, page, sizeof(*page));
+	foliop = page_folio(&precise);
+	if (foliop == (struct folio *)&precise) {
+		idx = 0;
+		if (!folio_test_large(foliop))
+			goto dump;
+		foliop = (struct folio *)page;
+	} else {
+		idx = folio_page_idx(foliop, page);
+	}
+
+	if (idx < MAX_FOLIO_NR_PAGES) {
+		memcpy(&folio, foliop, 2 * sizeof(struct page));
+		nr_pages = folio_nr_pages(&folio);
+		foliop = &folio;
+	}
+
+	if (idx > nr_pages) {
+		if (loops-- > 0)
+			goto again;
+		pr_warn("page does not match folio\n");
+		precise.compound_head &= ~1UL;
+		foliop = (struct folio *)&precise;
+		idx = 0;
+	}
+
+dump:
+	__dump_folio(foliop, &precise, pfn, idx);
+}
 
 void dump_page(struct page *page, const char *reason)
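For completeness, a sketch of a typical caller; the wrapper function below is illustrative and not part of this patch, while dump_page() itself is the real entry point shown in the context line above. The point of the change is that dump_page() is now safe to call even while the page can be freed or re-split concurrently.

/* Illustrative caller only. */
static void report_suspect_page(struct page *page)
{
	/*
	 * __dump_page() now copies the page and folio to the stack and
	 * validates the copy before printing, so a concurrent free no
	 * longer trips BUG_ON() via a stale tail-page pointer.
	 */
	dump_page(page, "suspect refcount");
}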