From 9a61f8bff87f5e3bfe7924b3fa5a90603b58e52b Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Mon, 25 Nov 2024 02:41:56 +0000 Subject: [PATCH 001/504] maple_tree: use mas_next_slot() directly The loop condition makes sure (mas.last < max), so we can directly use mas_next_slot() here. Since no other use of mas_next_entry(), it is removed. Link: https://lkml.kernel.org/r/20241125024156.26093-1-richard.weiyang@gmail.com Signed-off-by: Wei Yang Reviewed-by: Liam R. Howlett Cc: Sidhartha Kumar Cc: Lorenzo Stoakes Signed-off-by: Andrew Morton --- lib/maple_tree.c | 25 +------------------------ 1 file changed, 1 insertion(+), 24 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 047397136f15..36e603645a30 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -4745,29 +4745,6 @@ again: return entry; } -/* - * mas_next_entry() - Internal function to get the next entry. - * @mas: The maple state - * @limit: The maximum range start. - * - * Set the @mas->node to the next entry and the range_start to - * the beginning value for the entry. Does not check beyond @limit. - * Sets @mas->index and @mas->last to the range, Does not update @mas->index and - * @mas->last on overflow. - * Restarts on dead nodes. - * - * Return: the next entry or %NULL. - */ -static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) -{ - if (mas->last >= limit) { - mas->status = ma_overflow; - return NULL; - } - - return mas_next_slot(mas, limit, false); -} - /* * mas_rev_awalk() - Internal function. Reverse allocation walk. Find the * highest gap address of a given size in a given node and descend. @@ -6938,7 +6915,7 @@ retry: goto unlock; while (mas_is_active(&mas) && (mas.last < max)) { - entry = mas_next_entry(&mas, max); + entry = mas_next_slot(&mas, max, false); if (likely(entry && !xa_is_zero(entry))) break; } From 0a54f592b755afb1d7183d5370b1ee0bdd1a54e2 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Wed, 27 Nov 2024 13:53:29 +0000 Subject: [PATCH 002/504] mm/zswap: add LRU_STOP to comment about dropping the lru lock This function has been able to return LRU_STOP since commit b49547ade38a ("mm/zswap: stop lru list shrinking when encounter warm region"). To reduce confusion, update the comment to also list LRU_STOP as an option. Link: https://lkml.kernel.org/r/20241127-lru-stop-comment-v1-1-f54a7cba9429@google.com Signed-off-by: Alice Ryhl Acked-by: Johannes Weiner Reviewed-by: Chengming Zhou Cc: Alice Ryhl Cc: Nhat Pham Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/zswap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/zswap.c b/mm/zswap.c index 30f5a27a6862..167ae641379f 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -1186,7 +1186,7 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o /* * It's safe to drop the lock here because we return either - * LRU_REMOVED_RETRY or LRU_RETRY. + * LRU_REMOVED_RETRY, LRU_RETRY or LRU_STOP. */ spin_unlock(&l->lock); From 664b424d82dae248bfc5a3ddc5104e89586fe126 Mon Sep 17 00:00:00 2001 From: Donet Tom Date: Tue, 26 Nov 2024 09:56:54 -0600 Subject: [PATCH 003/504] mm: migrate: remove unused argument vma from migrate_misplaced_folio() Commit ee86814b0562 ("mm/migrate: move NUMA hinting fault folio isolation + checks under PTL") removed the code that had used the vma argument in migrate_misplaced_folio. Since the vma argument was no longer used in migrate_misplaced_folio, this patch removes it. 
Link: https://lkml.kernel.org/r/20241126155655.466186-1-donettom@linux.ibm.com Signed-off-by: Donet Tom Reviewed-by: Baolin Wang Reviewed-by: Zi Yan Acked-by: David Hildenbrand Cc: Ritesh Harjani (IBM) Signed-off-by: Andrew Morton --- include/linux/migrate.h | 6 ++---- mm/huge_memory.c | 2 +- mm/memory.c | 2 +- mm/migrate.c | 3 +-- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 002e49b2ebd9..29919faea2f1 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -144,16 +144,14 @@ const struct movable_operations *page_movable_ops(struct page *page) #ifdef CONFIG_NUMA_BALANCING int migrate_misplaced_folio_prepare(struct folio *folio, struct vm_area_struct *vma, int node); -int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, - int node); +int migrate_misplaced_folio(struct folio *folio, int node); #else static inline int migrate_misplaced_folio_prepare(struct folio *folio, struct vm_area_struct *vma, int node) { return -EAGAIN; /* can't migrate now */ } -static inline int migrate_misplaced_folio(struct folio *folio, - struct vm_area_struct *vma, int node) +static inline int migrate_misplaced_folio(struct folio *folio, int node) { return -EAGAIN; /* can't migrate now */ } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index db64116a4f84..45901dc6710c 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2003,7 +2003,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf) spin_unlock(vmf->ptl); writable = false; - if (!migrate_misplaced_folio(folio, vma, target_nid)) { + if (!migrate_misplaced_folio(folio, target_nid)) { flags |= TNF_MIGRATED; nid = target_nid; task_numa_fault(last_cpupid, nid, HPAGE_PMD_NR, flags); diff --git a/mm/memory.c b/mm/memory.c index 398c031be9ba..78b6741ae593 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5625,7 +5625,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) ignore_writable = true; /* Migrate to the requested node */ - if (!migrate_misplaced_folio(folio, vma, target_nid)) { + if (!migrate_misplaced_folio(folio, target_nid)) { nid = target_nid; flags |= TNF_MIGRATED; task_numa_fault(last_cpupid, nid, nr_pages, flags); diff --git a/mm/migrate.c b/mm/migrate.c index cc68583c86f9..e9e00d1d1d19 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -2683,8 +2683,7 @@ int migrate_misplaced_folio_prepare(struct folio *folio, * elevated reference count on the folio. This function will un-isolate the * folio, dereferencing the folio before returning. */ -int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma, - int node) +int migrate_misplaced_folio(struct folio *folio, int node) { pg_data_t *pgdat = NODE_DATA(node); int nr_remaining; From 52a217d824de25f3fa5b4dfd008a629ffe26e029 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:33 +0000 Subject: [PATCH 004/504] mm/page_alloc: cache page_zone() result in free_unref_page() Patch series "Allocate and free frozen pages", v3. Slab does not need to use the page refcount at all, and it can avoid an atomic operation on page free. Hugetlb wants to delay setting the refcount until it has assembled a complete gigantic page. We already have the ability to freeze a page (safely reduce its reference count to 0), so this patchset adds APIs to allocate and free pages which are in a frozen state. 
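As a rough user-space sketch of what "freezing" means here (modelled on the page_ref_freeze() semantics referred to above; obj and obj_ref_freeze are illustrative names, not kernel API):

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdio.h>

  struct obj {
          atomic_int refcount;
  };

  /* Atomically replace an expected count with 0 so no new reference can
   * be taken; fails if someone else still holds a reference. */
  static bool obj_ref_freeze(struct obj *o, int expected)
  {
          return atomic_compare_exchange_strong(&o->refcount, &expected, 0);
  }

  static void obj_ref_unfreeze(struct obj *o, int count)
  {
          atomic_store(&o->refcount, count);
  }

  int main(void)
  {
          struct obj o = { .refcount = 2 };

          printf("freeze expecting 1: %s\n", obj_ref_freeze(&o, 1) ? "ok" : "failed");
          printf("freeze expecting 2: %s\n", obj_ref_freeze(&o, 2) ? "ok" : "failed");
          obj_ref_unfreeze(&o, 1);        /* back to a normal counted object */
          return 0;
  }

A page allocated in the frozen state simply never has its count raised in the first place, so both the allocation and the eventual free can skip the refcount.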
This patchset is also a step towards the Glorious Future in which struct page doesn't have a refcount; the users which need a refcount will have one in their per-allocation memdesc. This patch (of 15): Save 17 bytes of text by calculating page_zone() once instead of twice. Link: https://lkml.kernel.org/r/20241125210149.2976098-1-willy@infradead.org Link: https://lkml.kernel.org/r/20241125210149.2976098-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Miaohe Lin Reviewed-by: Muchun Song Acked-by: Mel Gorman Reviewed-by: Zi Yan Acked-by: David Hildenbrand Reviewed-by: Vlastimil Babka Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: William Kucharski Signed-off-by: Andrew Morton --- mm/page_alloc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 01eab25edf89..9b32cdf5d767 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2666,16 +2666,16 @@ void free_unref_page(struct page *page, unsigned int order) * get those areas back if necessary. Otherwise, we may have to free * excessively into the page allocator */ + zone = page_zone(page); migratetype = get_pfnblock_migratetype(page, pfn); if (unlikely(migratetype >= MIGRATE_PCPTYPES)) { if (unlikely(is_migrate_isolate(migratetype))) { - free_one_page(page_zone(page), page, pfn, order, FPI_NONE); + free_one_page(zone, page, pfn, order, FPI_NONE); return; } migratetype = MIGRATE_MOVABLE; } - zone = page_zone(page); pcp_trylock_prepare(UP_flags); pcp = pcp_spin_trylock(zone->per_cpu_pageset); if (pcp) { From f4cf47873fb3f2864f5f467b5db24f6369fa63c6 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:34 +0000 Subject: [PATCH 005/504] mm: make alloc_pages_mpol() static All callers outside mempolicy.c now use folio_alloc_mpol() thanks to Kefeng's cleanups, so we can remove this as a visible symbol. And also remove the alloc_hooks for alloc_pages_mpol(), since all users in mempolicy.c are using the nonprof version. 
Link: https://lkml.kernel.org/r/20241125210149.2976098-3-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Zi Yan Acked-by: David Hildenbrand Reviewed-by: Vlastimil Babka Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Mel Gorman Cc: Miaohe Lin Cc: Muchun Song Cc: William Kucharski Signed-off-by: Andrew Morton --- include/linux/gfp.h | 8 -------- mm/mempolicy.c | 8 ++++---- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index b0fe9f62d15b..c96d5d7f7b89 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -300,8 +300,6 @@ static inline struct page *alloc_pages_node_noprof(int nid, gfp_t gfp_mask, #ifdef CONFIG_NUMA struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order); -struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, - struct mempolicy *mpol, pgoff_t ilx, int nid); struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order); struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *mpol, pgoff_t ilx, int nid); @@ -312,11 +310,6 @@ static inline struct page *alloc_pages_noprof(gfp_t gfp_mask, unsigned int order { return alloc_pages_node_noprof(numa_node_id(), gfp_mask, order); } -static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, - struct mempolicy *mpol, pgoff_t ilx, int nid) -{ - return alloc_pages_noprof(gfp, order); -} static inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order) { return __folio_alloc_node_noprof(gfp, order, numa_node_id()); @@ -331,7 +324,6 @@ static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int orde #endif #define alloc_pages(...) alloc_hooks(alloc_pages_noprof(__VA_ARGS__)) -#define alloc_pages_mpol(...) alloc_hooks(alloc_pages_mpol_noprof(__VA_ARGS__)) #define folio_alloc(...) alloc_hooks(folio_alloc_noprof(__VA_ARGS__)) #define folio_alloc_mpol(...) alloc_hooks(folio_alloc_mpol_noprof(__VA_ARGS__)) #define vma_alloc_folio(...) alloc_hooks(vma_alloc_folio_noprof(__VA_ARGS__)) diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 162407fbf2bc..e092aff55e2d 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2222,7 +2222,7 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, * * Return: The page on success or NULL if allocation fails. */ -struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, +static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, struct mempolicy *pol, pgoff_t ilx, int nid) { nodemask_t *nodemask; @@ -2285,7 +2285,7 @@ struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order, struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *pol, pgoff_t ilx, int nid) { - return page_rmappable_folio(alloc_pages_mpol_noprof(gfp | __GFP_COMP, + return page_rmappable_folio(alloc_pages_mpol(gfp | __GFP_COMP, order, pol, ilx, nid)); } @@ -2300,7 +2300,7 @@ struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, * NUMA policy. The caller must hold the mmap_lock of the mm_struct of the * VMA to prevent it from going away. Should be used for all allocations * for folios that will be mapped into user space, excepting hugetlbfs, and - * excepting where direct use of alloc_pages_mpol() is more appropriate. + * excepting where direct use of folio_alloc_mpol() is more appropriate. * * Return: The folio on success or NULL if allocation fails. 
*/ @@ -2346,7 +2346,7 @@ struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order) if (!in_interrupt() && !(gfp & __GFP_THISNODE)) pol = get_task_policy(current); - return alloc_pages_mpol_noprof(gfp, order, pol, NO_INTERLEAVE_INDEX, + return alloc_pages_mpol(gfp, order, pol, NO_INTERLEAVE_INDEX, numa_node_id()); } EXPORT_SYMBOL(alloc_pages_noprof); From 3f92e3edb836227ac2e09d76bef77f9b297c18c4 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:35 +0000 Subject: [PATCH 006/504] mm/page_alloc: export free_frozen_pages() instead of free_unref_page() We already have the concept of "frozen pages" (eg page_ref_freeze()), so let's not complicate things by also having the concept of "unref pages". Link: https://lkml.kernel.org/r/20241125210149.2976098-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: David Hildenbrand Reviewed-by: William Kucharski Reviewed-by: Miaohe Lin Reviewed-by: Muchun Song Reviewed-by: Zi Yan Reviewed-by: Vlastimil Babka Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Mel Gorman Signed-off-by: Andrew Morton --- mm/internal.h | 2 +- mm/page_alloc.c | 18 +++++++++--------- mm/page_frag_cache.c | 6 +++--- mm/swap.c | 2 +- 4 files changed, 14 insertions(+), 14 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index 9826f7dce607..b650a7cb7b46 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -741,7 +741,7 @@ extern bool free_pages_prepare(struct page *page, unsigned int order); extern int user_min_free_kbytes; -void free_unref_page(struct page *page, unsigned int order); +void free_frozen_pages(struct page *page, unsigned int order); void free_unref_folios(struct folio_batch *fbatch); extern void zone_pcp_reset(struct zone *zone); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9b32cdf5d767..ddb6abb413fa 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2592,9 +2592,9 @@ static int nr_pcp_high(struct per_cpu_pages *pcp, struct zone *zone, return high; } -static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, - struct page *page, int migratetype, - unsigned int order) +static void free_frozen_page_commit(struct zone *zone, + struct per_cpu_pages *pcp, struct page *page, int migratetype, + unsigned int order) { int high, batch; int pindex; @@ -2643,7 +2643,7 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp, /* * Free a pcp page */ -void free_unref_page(struct page *page, unsigned int order) +void free_frozen_pages(struct page *page, unsigned int order) { unsigned long __maybe_unused UP_flags; struct per_cpu_pages *pcp; @@ -2679,7 +2679,7 @@ void free_unref_page(struct page *page, unsigned int order) pcp_trylock_prepare(UP_flags); pcp = pcp_spin_trylock(zone->per_cpu_pageset); if (pcp) { - free_unref_page_commit(zone, pcp, page, migratetype, order); + free_frozen_page_commit(zone, pcp, page, migratetype, order); pcp_spin_unlock(pcp); } else { free_one_page(zone, page, pfn, order, FPI_NONE); @@ -2743,7 +2743,7 @@ void free_unref_folios(struct folio_batch *folios) /* * Free isolated pages directly to the - * allocator, see comment in free_unref_page. + * allocator, see comment in free_frozen_pages. 
*/ if (is_migrate_isolate(migratetype)) { free_one_page(zone, &folio->page, pfn, @@ -2774,7 +2774,7 @@ void free_unref_folios(struct folio_batch *folios) migratetype = MIGRATE_MOVABLE; trace_mm_page_free_batched(&folio->page); - free_unref_page_commit(zone, pcp, &folio->page, migratetype, + free_frozen_page_commit(zone, pcp, &folio->page, migratetype, order); } @@ -4837,11 +4837,11 @@ void __free_pages(struct page *page, unsigned int order) struct alloc_tag *tag = pgalloc_tag_get(page); if (put_page_testzero(page)) - free_unref_page(page, order); + free_frozen_pages(page, order); else if (!head) { pgalloc_tag_sub_pages(tag, (1 << order) - 1); while (order-- > 0) - free_unref_page(page + (1 << order), order); + free_frozen_pages(page + (1 << order), order); } } EXPORT_SYMBOL(__free_pages); diff --git a/mm/page_frag_cache.c b/mm/page_frag_cache.c index 3f7a203d35c6..d2423f30577e 100644 --- a/mm/page_frag_cache.c +++ b/mm/page_frag_cache.c @@ -86,7 +86,7 @@ void __page_frag_cache_drain(struct page *page, unsigned int count) VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); if (page_ref_sub_and_test(page, count)) - free_unref_page(page, compound_order(page)); + free_frozen_pages(page, compound_order(page)); } EXPORT_SYMBOL(__page_frag_cache_drain); @@ -138,7 +138,7 @@ refill: goto refill; if (unlikely(encoded_page_decode_pfmemalloc(encoded_page))) { - free_unref_page(page, + free_frozen_pages(page, encoded_page_decode_order(encoded_page)); goto refill; } @@ -166,6 +166,6 @@ void page_frag_free(void *addr) struct page *page = virt_to_head_page(addr); if (unlikely(put_page_testzero(page))) - free_unref_page(page, compound_order(page)); + free_frozen_pages(page, compound_order(page)); } EXPORT_SYMBOL(page_frag_free); diff --git a/mm/swap.c b/mm/swap.c index 10decd9dffa1..3a01acfd5a89 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -109,7 +109,7 @@ void __folio_put(struct folio *folio) page_cache_release(folio); folio_unqueue_deferred_split(folio); mem_cgroup_uncharge(folio); - free_unref_page(&folio->page, folio_order(folio)); + free_frozen_pages(&folio->page, folio_order(folio)); } EXPORT_SYMBOL(__folio_put); From 520d3336ce5857d0e39113b1485181f175fea9cb Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:36 +0000 Subject: [PATCH 007/504] mm/page_alloc: move set_page_refcounted() to callers of post_alloc_hook() In preparation for allocating frozen pages, stop initialising the page refcount in post_alloc_hook(). Link: https://lkml.kernel.org/r/20241125210149.2976098-5-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Miaohe Lin Reviewed-by: Zi Yan Acked-by: David Hildenbrand Reviewed-by: Vlastimil Babka Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Mel Gorman Cc: Muchun Song Cc: William Kucharski Signed-off-by: Andrew Morton --- mm/compaction.c | 2 ++ mm/internal.h | 3 +-- mm/page_alloc.c | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/mm/compaction.c b/mm/compaction.c index a2b16b08cbbf..07bd22789f07 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -83,6 +83,7 @@ static inline bool is_via_compact_memory(int order) { return false; } static struct page *mark_allocated_noprof(struct page *page, unsigned int order, gfp_t gfp_flags) { post_alloc_hook(page, order, __GFP_MOVABLE); + set_page_refcounted(page); return page; } #define mark_allocated(...) 
alloc_hooks(mark_allocated_noprof(__VA_ARGS__)) @@ -1868,6 +1869,7 @@ again: dst = (struct folio *)freepage; post_alloc_hook(&dst->page, order, __GFP_MOVABLE); + set_page_refcounted(&dst->page); if (order) prep_compound_page(&dst->page, order); cc->nr_freepages -= 1 << order; diff --git a/mm/internal.h b/mm/internal.h index b650a7cb7b46..9c941af5bdb6 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -735,8 +735,7 @@ static inline void prep_compound_tail(struct page *head, int tail_idx) extern void prep_compound_page(struct page *page, unsigned int order); -extern void post_alloc_hook(struct page *page, unsigned int order, - gfp_t gfp_flags); +void post_alloc_hook(struct page *page, unsigned int order, gfp_t gfp_flags); extern bool free_pages_prepare(struct page *page, unsigned int order); extern int user_min_free_kbytes; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ddb6abb413fa..1fc74db14722 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1508,7 +1508,6 @@ inline void post_alloc_hook(struct page *page, unsigned int order, int i; set_page_private(page, 0); - set_page_refcounted(page); arch_alloc_page(page, order); debug_pagealloc_map_pages(page, 1 << order); @@ -1564,6 +1563,7 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags unsigned int alloc_flags) { post_alloc_hook(page, order, gfp_flags); + set_page_refcounted(page); if (order && (gfp_flags & __GFP_COMP)) prep_compound_page(page, order); @@ -6363,6 +6363,7 @@ static void split_free_pages(struct list_head *list) int i; post_alloc_hook(page, order, __GFP_MOVABLE); + set_page_refcounted(page); if (!order) continue; From 4a8104d21622bf1323645f78c4aa97a6553283d3 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:37 +0000 Subject: [PATCH 008/504] mm/page_alloc: move set_page_refcounted() to callers of prep_new_page() In preparation for allocating frozen pages, stop initialising the page refcount in prep_new_page(). 
Link: https://lkml.kernel.org/r/20241125210149.2976098-6-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Zi Yan Acked-by: David Hildenbrand Reviewed-by: Vlastimil Babka Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Mel Gorman Cc: Miaohe Lin Cc: Muchun Song Cc: William Kucharski Signed-off-by: Andrew Morton --- mm/page_alloc.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 1fc74db14722..db6ec34ba454 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1563,7 +1563,6 @@ static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags unsigned int alloc_flags) { post_alloc_hook(page, order, gfp_flags); - set_page_refcounted(page); if (order && (gfp_flags & __GFP_COMP)) prep_compound_page(page, order); @@ -3474,6 +3473,7 @@ try_this_zone: gfp_mask, alloc_flags, ac->migratetype); if (page) { prep_new_page(page, order, gfp_mask, alloc_flags); + set_page_refcounted(page); /* * If this is a high-order atomic allocation then check @@ -3698,8 +3698,10 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, count_vm_event(COMPACTSTALL); /* Prep a captured page if available */ - if (page) + if (page) { prep_new_page(page, order, gfp_mask, alloc_flags); + set_page_refcounted(page); + } /* Try get a page from the freelist if available */ if (!page) @@ -4678,6 +4680,7 @@ retry_this_zone: nr_account++; prep_new_page(page, 0, gfp, 0); + set_page_refcounted(page); if (page_list) list_add(&page->lru, page_list); else @@ -6503,6 +6506,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end, check_new_pages(head, order); prep_new_page(head, order, gfp_mask, 0); + set_page_refcounted(head); } else { ret = -EINVAL; WARN(true, "PFN range: requested [%lu, %lu), allocated [%lu, %lu)\n", From 5b97220e2dc6548403838130db11232b2df6c80d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:38 +0000 Subject: [PATCH 009/504] mm/page_alloc: move set_page_refcounted() to callers of get_page_from_freelist() In preparation for allocating frozen pages, stop initialising the page refcount in get_page_from_freelist(). 
Link: https://lkml.kernel.org/r/20241125210149.2976098-7-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Zi Yan Reviewed-by: Vlastimil Babka Cc: David Hildenbrand Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Mel Gorman Cc: Miaohe Lin Cc: Muchun Song Cc: William Kucharski Signed-off-by: Andrew Morton --- mm/page_alloc.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index db6ec34ba454..e2b7c623fefe 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3473,7 +3473,6 @@ try_this_zone: gfp_mask, alloc_flags, ac->migratetype); if (page) { prep_new_page(page, order, gfp_mask, alloc_flags); - set_page_refcounted(page); /* * If this is a high-order atomic allocation then check @@ -3568,6 +3567,8 @@ __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); + if (page) + set_page_refcounted(page); return page; } @@ -3606,8 +3607,10 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & ~__GFP_DIRECT_RECLAIM, order, ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); - if (page) + if (page) { + set_page_refcounted(page); goto out; + } /* Coredumps can quickly deplete all memory reserves */ if (current->flags & PF_DUMPCORE) @@ -3698,10 +3701,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, count_vm_event(COMPACTSTALL); /* Prep a captured page if available */ - if (page) { + if (page) prep_new_page(page, order, gfp_mask, alloc_flags); - set_page_refcounted(page); - } /* Try get a page from the freelist if available */ if (!page) @@ -3710,6 +3711,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, if (page) { struct zone *zone = page_zone(page); + set_page_refcounted(page); zone->compact_blockskip_flush = false; compaction_defer_reset(zone, order, true); count_vm_event(COMPACTSUCCESS); @@ -3968,6 +3970,7 @@ retry: drained = true; goto retry; } + set_page_refcounted(page); out: psi_memstall_leave(&pflags); @@ -4288,8 +4291,10 @@ restart: * that first */ page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); - if (page) + if (page) { + set_page_refcounted(page); goto got_pg; + } /* * For costly allocations, try direct compaction first, as it's likely @@ -4369,8 +4374,10 @@ retry: /* Attempt with potentially adjusted zonelist and alloc_flags */ page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); - if (page) + if (page) { + set_page_refcounted(page); goto got_pg; + } /* Caller is not willing to reclaim, we can't balance anything */ if (!can_direct_reclaim) @@ -4754,8 +4761,10 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, /* First allocation attempt */ page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); - if (likely(page)) + if (likely(page)) { + set_page_refcounted(page); goto out; + } alloc_gfp = gfp; ac.spread_dirty_pages = false; From 45e344323f7e25db5d76963369d6ef7930fc5429 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:39 +0000 Subject: [PATCH 010/504] mm/page_alloc: move set_page_refcounted() to callers of __alloc_pages_cpuset_fallback() In preparation for allocating frozen pages, stop initialising the page refcount in __alloc_pages_cpuset_fallback(). 
Link: https://lkml.kernel.org/r/20241125210149.2976098-8-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Zi Yan Reviewed-by: Vlastimil Babka Cc: David Hildenbrand Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Mel Gorman Cc: Miaohe Lin Cc: Muchun Song Cc: William Kucharski Signed-off-by: Andrew Morton --- mm/page_alloc.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e2b7c623fefe..e567289b2b38 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3566,9 +3566,6 @@ __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, if (!page) page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); - - if (page) - set_page_refcounted(page); return page; } @@ -3655,6 +3652,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, if (gfp_mask & __GFP_NOFAIL) page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_NO_WATERMARKS, ac); + if (page) + set_page_refcounted(page); } out: mutex_unlock(&oom_lock); @@ -4483,8 +4482,10 @@ nopage: * the situation worse. */ page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); - if (page) + if (page) { + set_page_refcounted(page); goto got_pg; + } cond_resched(); goto retry; From 19b8d2b3ef845c9423bd5a176e94f687553ab961 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:40 +0000 Subject: [PATCH 011/504] mm/page_alloc: move set_page_refcounted() to callers of __alloc_pages_may_oom() In preparation for allocating frozen pages, stop initialising the page refcount in __alloc_pages_may_oom(). Link: https://lkml.kernel.org/r/20241125210149.2976098-9-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Zi Yan Reviewed-by: Vlastimil Babka Cc: David Hildenbrand Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Mel Gorman Cc: Miaohe Lin Cc: Muchun Song Cc: William Kucharski Signed-off-by: Andrew Morton --- mm/page_alloc.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e567289b2b38..c0616d29f052 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3604,10 +3604,8 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & ~__GFP_DIRECT_RECLAIM, order, ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac); - if (page) { - set_page_refcounted(page); + if (page) goto out; - } /* Coredumps can quickly deplete all memory reserves */ if (current->flags & PF_DUMPCORE) @@ -3652,8 +3650,6 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, if (gfp_mask & __GFP_NOFAIL) page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_NO_WATERMARKS, ac); - if (page) - set_page_refcounted(page); } out: mutex_unlock(&oom_lock); @@ -4437,8 +4433,10 @@ retry: /* Reclaim has failed us, start killing things */ page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); - if (page) + if (page) { + set_page_refcounted(page); goto got_pg; + } /* Avoid allocations with no watermarks from looping endlessly */ if (tsk_is_oom_victim(current) && From c911afc2d448e9e9e5249dc341f150100c271648 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:41 +0000 Subject: [PATCH 012/504] mm/page_alloc: move set_page_refcounted() to callers of __alloc_pages_direct_compact() In preparation for allocating frozen pages, stop initialising the page refcount in __alloc_pages_direct_compact(). 
Link: https://lkml.kernel.org/r/20241125210149.2976098-10-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Zi Yan Reviewed-by: Vlastimil Babka Cc: David Hildenbrand Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Mel Gorman Cc: Miaohe Lin Cc: Muchun Song Cc: William Kucharski Signed-off-by: Andrew Morton --- mm/page_alloc.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index c0616d29f052..4b375dd30df0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3706,7 +3706,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, if (page) { struct zone *zone = page_zone(page); - set_page_refcounted(page); zone->compact_blockskip_flush = false; compaction_defer_reset(zone, order, true); count_vm_event(COMPACTSUCCESS); @@ -4308,8 +4307,10 @@ restart: alloc_flags, ac, INIT_COMPACT_PRIORITY, &compact_result); - if (page) + if (page) { + set_page_refcounted(page); goto got_pg; + } /* * Checks for costly allocations with __GFP_NORETRY, which @@ -4391,8 +4392,10 @@ retry: /* Try direct compaction and then allocating */ page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, compact_priority, &compact_result); - if (page) + if (page) { + set_page_refcounted(page); goto got_pg; + } /* Do not loop if specifically requested */ if (gfp_mask & __GFP_NORETRY) From cc407e9efddf09191f3ea7abc15c54316b1bd999 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:42 +0000 Subject: [PATCH 013/504] mm/page_alloc: move set_page_refcounted() to callers of __alloc_pages_direct_reclaim() In preparation for allocating frozen pages, stop initialising the page refcount in __alloc_pages_direct_reclaim(). Link: https://lkml.kernel.org/r/20241125210149.2976098-11-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Zi Yan Reviewed-by: Vlastimil Babka Cc: David Hildenbrand Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Mel Gorman Cc: Miaohe Lin Cc: Muchun Song Cc: William Kucharski Signed-off-by: Andrew Morton --- mm/page_alloc.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4b375dd30df0..b5f6225168bb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3964,7 +3964,6 @@ retry: drained = true; goto retry; } - set_page_refcounted(page); out: psi_memstall_leave(&pflags); @@ -4386,8 +4385,10 @@ retry: /* Try direct reclaim and then allocating */ page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, &did_some_progress); - if (page) + if (page) { + set_page_refcounted(page); goto got_pg; + } /* Try direct compaction and then allocating */ page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, From 8eb788ae468dc5922ef97b3b1fd4dcedd2b573ab Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:43 +0000 Subject: [PATCH 014/504] mm/page_alloc: move set_page_refcounted() to callers of __alloc_pages_slowpath() In preparation for allocating frozen pages, stop initialising the page refcount in __alloc_pages_slowpath(). 
Link: https://lkml.kernel.org/r/20241125210149.2976098-12-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Zi Yan Reviewed-by: Vlastimil Babka Cc: David Hildenbrand Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Mel Gorman Cc: Miaohe Lin Cc: Muchun Song Cc: William Kucharski Signed-off-by: Andrew Morton --- mm/page_alloc.c | 30 +++++++++--------------------- 1 file changed, 9 insertions(+), 21 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b5f6225168bb..640e8c063bcc 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4284,10 +4284,8 @@ restart: * that first */ page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); - if (page) { - set_page_refcounted(page); + if (page) goto got_pg; - } /* * For costly allocations, try direct compaction first, as it's likely @@ -4306,10 +4304,8 @@ restart: alloc_flags, ac, INIT_COMPACT_PRIORITY, &compact_result); - if (page) { - set_page_refcounted(page); + if (page) goto got_pg; - } /* * Checks for costly allocations with __GFP_NORETRY, which @@ -4369,10 +4365,8 @@ retry: /* Attempt with potentially adjusted zonelist and alloc_flags */ page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); - if (page) { - set_page_refcounted(page); + if (page) goto got_pg; - } /* Caller is not willing to reclaim, we can't balance anything */ if (!can_direct_reclaim) @@ -4385,18 +4379,14 @@ retry: /* Try direct reclaim and then allocating */ page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, &did_some_progress); - if (page) { - set_page_refcounted(page); + if (page) goto got_pg; - } /* Try direct compaction and then allocating */ page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, compact_priority, &compact_result); - if (page) { - set_page_refcounted(page); + if (page) goto got_pg; - } /* Do not loop if specifically requested */ if (gfp_mask & __GFP_NORETRY) @@ -4437,10 +4427,8 @@ retry: /* Reclaim has failed us, start killing things */ page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); - if (page) { - set_page_refcounted(page); + if (page) goto got_pg; - } /* Avoid allocations with no watermarks from looping endlessly */ if (tsk_is_oom_victim(current) && @@ -4484,10 +4472,8 @@ nopage: * the situation worse. */ page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); - if (page) { - set_page_refcounted(page); + if (page) goto got_pg; - } cond_resched(); goto retry; @@ -4779,6 +4765,8 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, ac.nodemask = nodemask; page = __alloc_pages_slowpath(alloc_gfp, order, &ac); + if (page) + set_page_refcounted(page); out: if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && From 4d9d1429f6deb91c66591074bbd8ca6aa4cba4dc Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:44 +0000 Subject: [PATCH 015/504] mm/page_alloc: move set_page_refcounted() to end of __alloc_pages() Remove some code duplication by calling set_page_refcounted() at the end of __alloc_pages() instead of after each call that can allocate a page. That means that we free a frozen page if we've exceeded the allowed memcg memory. 
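Schematically, the tail of the allocation path converges on the shape below (user-space sketch with placeholder names, not the kernel functions): the refcount is set in exactly one place, so a failed memcg charge only ever hands back a frozen page.

  #include <stdbool.h>
  #include <stdlib.h>

  struct page { int refcount; };

  static struct page *alloc_frozen(void)
  {
          return calloc(1, sizeof(struct page));
  }

  static void free_frozen(struct page *page)
  {
          free(page);
  }

  static bool charge(struct page *page)
  {
          return page != NULL;    /* stand-in for the memcg charge; pretend it can fail */
  }

  static struct page *alloc_counted(void)
  {
          struct page *page = alloc_frozen();

          if (page && !charge(page)) {
                  free_frozen(page);      /* no refcount was ever set */
                  page = NULL;
          }
          if (page)
                  page->refcount = 1;     /* the single place the count is set */
          return page;
  }

  int main(void)
  {
          free(alloc_counted());
          return 0;
  }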
Link: https://lkml.kernel.org/r/20241125210149.2976098-13-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Zi Yan Reviewed-by: Vlastimil Babka Cc: David Hildenbrand Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Mel Gorman Cc: Miaohe Lin Cc: Muchun Song Cc: William Kucharski Signed-off-by: Andrew Morton --- mm/page_alloc.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 640e8c063bcc..411faecd0205 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4750,10 +4750,8 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, /* First allocation attempt */ page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); - if (likely(page)) { - set_page_refcounted(page); + if (likely(page)) goto out; - } alloc_gfp = gfp; ac.spread_dirty_pages = false; @@ -4765,15 +4763,15 @@ struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, ac.nodemask = nodemask; page = __alloc_pages_slowpath(alloc_gfp, order, &ac); - if (page) - set_page_refcounted(page); out: if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { - __free_pages(page, order); + free_frozen_pages(page, order); page = NULL; } + if (page) + set_page_refcounted(page); trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); kmsan_alloc_page(page, order, alloc_gfp); From 13ea0dd2430a36227256c619e48c886d39d8d4ac Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:45 +0000 Subject: [PATCH 016/504] mm/page_alloc: add __alloc_frozen_pages() Defer the initialisation of the page refcount to the new __alloc_pages() wrapper and turn the old __alloc_pages() into __alloc_frozen_pages(). Link: https://lkml.kernel.org/r/20241125210149.2976098-14-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: Zi Yan Acked-by: David Hildenbrand Reviewed-by: Vlastimil Babka Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Mel Gorman Cc: Miaohe Lin Cc: Muchun Song Cc: William Kucharski Signed-off-by: Andrew Morton --- mm/internal.h | 4 ++++ mm/page_alloc.c | 18 ++++++++++++++---- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index 9c941af5bdb6..b831688a71e8 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -740,6 +740,10 @@ extern bool free_pages_prepare(struct page *page, unsigned int order); extern int user_min_free_kbytes; +struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid, + nodemask_t *); +#define __alloc_frozen_pages(...) \ + alloc_hooks(__alloc_frozen_pages_noprof(__VA_ARGS__)) void free_frozen_pages(struct page *page, unsigned int order); void free_unref_folios(struct folio_batch *fbatch); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 411faecd0205..55d77fb6a1a3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4713,8 +4713,8 @@ EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof); /* * This is the 'heart' of the zoned buddy allocator. 
*/ -struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, - int preferred_nid, nodemask_t *nodemask) +struct page *__alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order, + int preferred_nid, nodemask_t *nodemask) { struct page *page; unsigned int alloc_flags = ALLOC_WMARK_LOW; @@ -4770,14 +4770,24 @@ out: free_frozen_pages(page, order); page = NULL; } - if (page) - set_page_refcounted(page); trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); kmsan_alloc_page(page, order, alloc_gfp); return page; } +EXPORT_SYMBOL(__alloc_frozen_pages_noprof); + +struct page *__alloc_pages_noprof(gfp_t gfp, unsigned int order, + int preferred_nid, nodemask_t *nodemask) +{ + struct page *page; + + page = __alloc_frozen_pages_noprof(gfp, order, preferred_nid, nodemask); + if (page) + set_page_refcounted(page); + return page; +} EXPORT_SYMBOL(__alloc_pages_noprof); struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_nid, From 1533a6c4d2dd410d313df35702e5c3a5d0c6904c Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:46 +0000 Subject: [PATCH 017/504] mm/mempolicy: add alloc_frozen_pages() Provide an interface to allocate pages from the page allocator without incrementing their refcount. This saves an atomic operation on free, which may be beneficial to some users (eg slab). Link: https://lkml.kernel.org/r/20241125210149.2976098-15-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: William Kucharski Reviewed-by: Vlastimil Babka Cc: David Hildenbrand Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Mel Gorman Cc: Miaohe Lin Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/internal.h | 12 ++++++++++++ mm/mempolicy.c | 49 ++++++++++++++++++++++++++++++++----------------- 2 files changed, 44 insertions(+), 17 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index b831688a71e8..6f6585e98c6f 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -747,6 +747,18 @@ struct page *__alloc_frozen_pages_noprof(gfp_t, unsigned int order, int nid, void free_frozen_pages(struct page *page, unsigned int order); void free_unref_folios(struct folio_batch *fbatch); +#ifdef CONFIG_NUMA +struct page *alloc_frozen_pages_noprof(gfp_t, unsigned int order); +#else +static inline struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned int order) +{ + return __alloc_frozen_pages_noprof(gfp, order, numa_node_id(), NULL); +} +#endif + +#define alloc_frozen_pages(...) \ + alloc_hooks(alloc_frozen_pages_noprof(__VA_ARGS__)) + extern void zone_pcp_reset(struct zone *zone); extern void zone_pcp_disable(struct zone *zone); extern void zone_pcp_enable(struct zone *zone); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index e092aff55e2d..305aa3012173 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2205,9 +2205,9 @@ static struct page *alloc_pages_preferred_many(gfp_t gfp, unsigned int order, */ preferred_gfp = gfp | __GFP_NOWARN; preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); - page = __alloc_pages_noprof(preferred_gfp, order, nid, nodemask); + page = __alloc_frozen_pages_noprof(preferred_gfp, order, nid, nodemask); if (!page) - page = __alloc_pages_noprof(gfp, order, nid, NULL); + page = __alloc_frozen_pages_noprof(gfp, order, nid, NULL); return page; } @@ -2253,8 +2253,9 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, * First, try to allocate THP only on local node, but * don't reclaim unnecessarily, just compact. 
*/ - page = __alloc_pages_node_noprof(nid, - gfp | __GFP_THISNODE | __GFP_NORETRY, order); + page = __alloc_frozen_pages_noprof( + gfp | __GFP_THISNODE | __GFP_NORETRY, order, + nid, NULL); if (page || !(gfp & __GFP_DIRECT_RECLAIM)) return page; /* @@ -2266,7 +2267,7 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, } } - page = __alloc_pages_noprof(gfp, order, nid, nodemask); + page = __alloc_frozen_pages_noprof(gfp, order, nid, nodemask); if (unlikely(pol->mode == MPOL_INTERLEAVE || pol->mode == MPOL_WEIGHTED_INTERLEAVE) && page) { @@ -2285,8 +2286,13 @@ static struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order, struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *pol, pgoff_t ilx, int nid) { - return page_rmappable_folio(alloc_pages_mpol(gfp | __GFP_COMP, - order, pol, ilx, nid)); + struct page *page = alloc_pages_mpol(gfp | __GFP_COMP, order, pol, + ilx, nid); + if (!page) + return NULL; + + set_page_refcounted(page); + return page_rmappable_folio(page); } /** @@ -2321,6 +2327,21 @@ struct folio *vma_alloc_folio_noprof(gfp_t gfp, int order, struct vm_area_struct } EXPORT_SYMBOL(vma_alloc_folio_noprof); +struct page *alloc_frozen_pages_noprof(gfp_t gfp, unsigned order) +{ + struct mempolicy *pol = &default_policy; + + /* + * No reference counting needed for current->mempolicy + * nor system default_policy + */ + if (!in_interrupt() && !(gfp & __GFP_THISNODE)) + pol = get_task_policy(current); + + return alloc_pages_mpol(gfp, order, pol, NO_INTERLEAVE_INDEX, + numa_node_id()); +} + /** * alloc_pages - Allocate pages. * @gfp: GFP flags. @@ -2337,17 +2358,11 @@ EXPORT_SYMBOL(vma_alloc_folio_noprof); */ struct page *alloc_pages_noprof(gfp_t gfp, unsigned int order) { - struct mempolicy *pol = &default_policy; + struct page *page = alloc_frozen_pages_noprof(gfp, order); - /* - * No reference counting needed for current->mempolicy - * nor system default_policy - */ - if (!in_interrupt() && !(gfp & __GFP_THISNODE)) - pol = get_task_policy(current); - - return alloc_pages_mpol(gfp, order, pol, NO_INTERLEAVE_INDEX, - numa_node_id()); + if (page) + set_page_refcounted(page); + return page; } EXPORT_SYMBOL(alloc_pages_noprof); From 8232a40ac539c21560e2a9aa824432a2c927e8e3 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Mon, 25 Nov 2024 21:01:47 +0000 Subject: [PATCH 018/504] slab: allocate frozen pages Since slab does not use the page refcount, it can allocate and free frozen pages, saving one atomic operation per free. 
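The saving is the usual dec-and-test on the final put; a small user-space analogy (fake_page and both helpers are made up, loosely mirroring put_page_testzero() behind __free_pages() versus free_frozen_pages()):

  #include <stdatomic.h>
  #include <stdlib.h>

  struct fake_page {
          atomic_int refcount;    /* unused by the frozen variant */
          char data[64];
  };

  /* Refcounted free: an atomic dec-and-test on every call. */
  static void put_refcounted(struct fake_page *p)
  {
          if (atomic_fetch_sub(&p->refcount, 1) == 1)
                  free(p);
  }

  /* Frozen free: the owner knows it holds the only reference, so no
   * atomic operation is needed. */
  static void put_frozen(struct fake_page *p)
  {
          free(p);
  }

  int main(void)
  {
          struct fake_page *a = calloc(1, sizeof(*a));
          struct fake_page *b = calloc(1, sizeof(*b));

          if (!a || !b)
                  return 1;
          atomic_store(&a->refcount, 1);
          put_refcounted(a);
          put_frozen(b);
          return 0;
  }

Slab is always the sole owner of its backing pages, so the counted variant buys it nothing.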
Link: https://lkml.kernel.org/r/20241125210149.2976098-16-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: William Kucharski Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Vlastimil Babka Cc: David Hildenbrand Cc: Mel Gorman Cc: Miaohe Lin Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/slub.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index c2151c9fee22..cef25d9a476a 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2420,9 +2420,9 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node, unsigned int order = oo_order(oo); if (node == NUMA_NO_NODE) - folio = (struct folio *)alloc_pages(flags, order); + folio = (struct folio *)alloc_frozen_pages(flags, order); else - folio = (struct folio *)__alloc_pages_node(node, flags, order); + folio = (struct folio *)__alloc_frozen_pages(flags, order, node, NULL); if (!folio) return NULL; @@ -2656,7 +2656,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab) __folio_clear_slab(folio); mm_account_reclaimed_pages(pages); unaccount_slab(slab, order, s); - __free_pages(&folio->page, order); + free_frozen_pages(&folio->page, order); } static void rcu_free_slab(struct rcu_head *h) From 6cf76e5fdc0da007609297fd827604eb847a47a1 Mon Sep 17 00:00:00 2001 From: Honggyu Kim Date: Sat, 11 Jan 2025 23:18:26 -0800 Subject: [PATCH 019/504] mm/damon/core: remove duplicate list_empty quota->goals check damos_set_effective_quota() checks quota contidions but there are some duplicate checks for quota->goals inside. This patch reduces one of if statement to simplify the esz calculation logic by setting esz as ULONG_MAX by default. Link: https://lkml.kernel.org/r/20241125184307.41746-1-sj@kernel.org Signed-off-by: Honggyu Kim Reviewed-by: SeongJae Park Signed-off-by: Andrew Morton --- mm/damon/core.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index 0776452a1abb..5192ee29f6cf 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1550,7 +1550,7 @@ static unsigned long damos_quota_score(struct damos_quota *quota) static void damos_set_effective_quota(struct damos_quota *quota) { unsigned long throughput; - unsigned long esz; + unsigned long esz = ULONG_MAX; if (!quota->ms && list_empty("a->goals)) { quota->esz = quota->sz; @@ -1572,10 +1572,7 @@ static void damos_set_effective_quota(struct damos_quota *quota) quota->total_charged_ns; else throughput = PAGE_SIZE * 1024; - if (!list_empty("a->goals)) - esz = min(throughput * quota->ms, esz); - else - esz = throughput * quota->ms; + esz = min(throughput * quota->ms, esz); } if (quota->sz && quota->sz < esz) From 8045d3d88da75fa2cf016bc8372780dd89f534d6 Mon Sep 17 00:00:00 2001 From: Shakeel Butt Date: Mon, 25 Nov 2024 09:16:17 -0800 Subject: [PATCH 020/504] mm: mmap_lock: optimize mmap_lock tracepoints We are starting to deploy mmap_lock tracepoint monitoring across our fleet and the early results showed that these tracepoints are consuming significant amount of CPUs in kernfs_path_from_node when enabled. It seems like the kernel is trying to resolve the cgroup path in the fast path of the locking code path when the tracepoints are enabled. In addition for some application their metrics are regressing when monitoring is enabled. The cgroup path resolution can be slow and should not be done in the fast path. 
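The numeric id is enough for offline resolution: user space can walk /sys/fs/cgroup once, record the 64-bit id of every directory, and match the traced values against that table. A sketch of reading the id for one cgroup directory with name_to_handle_at() (assumes cgroup v2 and the conventional encoding of the id at the start of the file handle):

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <inttypes.h>
  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  int main(int argc, char **argv)
  {
          struct file_handle *fh;
          int mount_id;
          uint64_t cgid;

          if (argc != 2) {
                  fprintf(stderr, "usage: %s /sys/fs/cgroup/<group>\n", argv[0]);
                  return 1;
          }

          fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
          if (!fh)
                  return 1;
          fh->handle_bytes = MAX_HANDLE_SZ;
          if (name_to_handle_at(AT_FDCWD, argv[1], fh, &mount_id, 0)) {
                  perror("name_to_handle_at");
                  return 1;
          }
          memcpy(&cgid, fh->f_handle, sizeof(cgid));      /* id is the first 8 bytes */
          printf("%s -> cgroup id %" PRIu64 "\n", argv[1], cgid);
          free(fh);
          return 0;
  }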
Most userspace tools, like bpftrace, provides functionality to get the cgroup path from cgroup id, so let's just trace the cgroup id and the users can use better tools to get the path in the slow path. Link: https://lkml.kernel.org/r/20241125171617.113892-1-shakeel.butt@linux.dev Signed-off-by: Shakeel Butt Reviewed-by: Yosry Ahmed Acked-by: Vlastimil Babka Acked-by: Roman Gushchin Reviewed-by: Axel Rasmussen Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Muchun Song Cc: Steven Rostedt Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 22 ++++++++++++++ include/trace/events/mmap_lock.h | 32 ++++++++++---------- mm/mmap_lock.c | 50 ++------------------------------ 3 files changed, 40 insertions(+), 64 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 5502aa8e138e..b28180269e75 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1046,6 +1046,23 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm, void split_page_memcg(struct page *head, int old_order, int new_order); +static inline u64 cgroup_id_from_mm(struct mm_struct *mm) +{ + struct mem_cgroup *memcg; + u64 id; + + if (mem_cgroup_disabled()) + return 0; + + rcu_read_lock(); + memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); + if (!memcg) + memcg = root_mem_cgroup; + id = cgroup_id(memcg->css.cgroup); + rcu_read_unlock(); + return id; +} + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 @@ -1466,6 +1483,11 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) static inline void split_page_memcg(struct page *head, int old_order, int new_order) { } + +static inline u64 cgroup_id_from_mm(struct mm_struct *mm) +{ + return 0; +} #endif /* CONFIG_MEMCG */ /* diff --git a/include/trace/events/mmap_lock.h b/include/trace/events/mmap_lock.h index bc2e3ad787b3..cf9f9faf8914 100644 --- a/include/trace/events/mmap_lock.h +++ b/include/trace/events/mmap_lock.h @@ -5,6 +5,7 @@ #if !defined(_TRACE_MMAP_LOCK_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_MMAP_LOCK_H +#include #include #include @@ -12,64 +13,61 @@ struct mm_struct; DECLARE_EVENT_CLASS(mmap_lock, - TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write), + TP_PROTO(struct mm_struct *mm, bool write), - TP_ARGS(mm, memcg_path, write), + TP_ARGS(mm, write), TP_STRUCT__entry( __field(struct mm_struct *, mm) - __string(memcg_path, memcg_path) + __field(u64, memcg_id) __field(bool, write) ), TP_fast_assign( __entry->mm = mm; - __assign_str(memcg_path); + __entry->memcg_id = cgroup_id_from_mm(mm); __entry->write = write; ), TP_printk( - "mm=%p memcg_path=%s write=%s", - __entry->mm, - __get_str(memcg_path), + "mm=%p memcg_id=%llu write=%s", + __entry->mm, __entry->memcg_id, __entry->write ? 
"true" : "false" ) ); #define DEFINE_MMAP_LOCK_EVENT(name) \ DEFINE_EVENT(mmap_lock, name, \ - TP_PROTO(struct mm_struct *mm, const char *memcg_path, \ - bool write), \ - TP_ARGS(mm, memcg_path, write)) + TP_PROTO(struct mm_struct *mm, bool write), \ + TP_ARGS(mm, write)) DEFINE_MMAP_LOCK_EVENT(mmap_lock_start_locking); DEFINE_MMAP_LOCK_EVENT(mmap_lock_released); TRACE_EVENT(mmap_lock_acquire_returned, - TP_PROTO(struct mm_struct *mm, const char *memcg_path, bool write, - bool success), + TP_PROTO(struct mm_struct *mm, bool write, bool success), - TP_ARGS(mm, memcg_path, write, success), + TP_ARGS(mm, write, success), TP_STRUCT__entry( __field(struct mm_struct *, mm) - __string(memcg_path, memcg_path) + __field(u64, memcg_id) __field(bool, write) __field(bool, success) ), TP_fast_assign( __entry->mm = mm; - __assign_str(memcg_path); + __entry->memcg_id = cgroup_id_from_mm(mm); __entry->write = write; __entry->success = success; ), TP_printk( - "mm=%p memcg_path=%s write=%s success=%s", + "mm=%p memcg_id=%llu write=%s success=%s", __entry->mm, - __get_str(memcg_path), + __entry->memcg_id, __entry->write ? "true" : "false", __entry->success ? "true" : "false" ) diff --git a/mm/mmap_lock.c b/mm/mmap_lock.c index f186d57df2c6..e7dbaf96aa17 100644 --- a/mm/mmap_lock.c +++ b/mm/mmap_lock.c @@ -17,51 +17,7 @@ EXPORT_TRACEPOINT_SYMBOL(mmap_lock_start_locking); EXPORT_TRACEPOINT_SYMBOL(mmap_lock_acquire_returned); EXPORT_TRACEPOINT_SYMBOL(mmap_lock_released); -#ifdef CONFIG_MEMCG - -/* - * Size of the buffer for memcg path names. Ignoring stack trace support, - * trace_events_hist.c uses MAX_FILTER_STR_VAL for this, so we also use it. - */ -#define MEMCG_PATH_BUF_SIZE MAX_FILTER_STR_VAL - -#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \ - do { \ - if (trace_mmap_lock_##type##_enabled()) { \ - char buf[MEMCG_PATH_BUF_SIZE]; \ - get_mm_memcg_path(mm, buf, sizeof(buf)); \ - trace_mmap_lock_##type(mm, buf, ##__VA_ARGS__); \ - } \ - } while (0) - -#else /* !CONFIG_MEMCG */ - -#define TRACE_MMAP_LOCK_EVENT(type, mm, ...) \ - trace_mmap_lock_##type(mm, "", ##__VA_ARGS__) - -#endif /* CONFIG_MEMCG */ - #ifdef CONFIG_TRACING -#ifdef CONFIG_MEMCG -/* - * Write the given mm_struct's memcg path to a buffer. If the path cannot be - * determined, empty string is written. - */ -static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen) -{ - struct mem_cgroup *memcg; - - buf[0] = '\0'; - memcg = get_mem_cgroup_from_mm(mm); - if (memcg == NULL) - return; - if (memcg->css.cgroup) - cgroup_path(memcg->css.cgroup, buf, buflen); - css_put(&memcg->css); -} - -#endif /* CONFIG_MEMCG */ - /* * Trace calls must be in a separate file, as otherwise there's a circular * dependency between linux/mmap_lock.h and trace/events/mmap_lock.h. 
@@ -69,20 +25,20 @@ static void get_mm_memcg_path(struct mm_struct *mm, char *buf, size_t buflen) void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write) { - TRACE_MMAP_LOCK_EVENT(start_locking, mm, write); + trace_mmap_lock_start_locking(mm, write); } EXPORT_SYMBOL(__mmap_lock_do_trace_start_locking); void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write, bool success) { - TRACE_MMAP_LOCK_EVENT(acquire_returned, mm, write, success); + trace_mmap_lock_acquire_returned(mm, write, success); } EXPORT_SYMBOL(__mmap_lock_do_trace_acquire_returned); void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write) { - TRACE_MMAP_LOCK_EVENT(released, mm, write); + trace_mmap_lock_released(mm, write); } EXPORT_SYMBOL(__mmap_lock_do_trace_released); #endif /* CONFIG_TRACING */ From e674c5ecf77c7177fcd63c4b51edb4e3fc16bc4b Mon Sep 17 00:00:00 2001 From: Pintu Kumar Date: Fri, 22 Nov 2024 23:05:58 +0530 Subject: [PATCH 021/504] mm/hugetlb_cgroup: avoid useless return in void function The return statement at the end of void function is unnecessary. Just remove it as part of cleanup. Link: https://lkml.kernel.org/r/20241122173558.20670-1-quic_pintu@quicinc.com Signed-off-by: Pintu Kumar Cc: Pintu Agarwal Cc: Shuah Khan Signed-off-by: Andrew Morton --- mm/hugetlb_cgroup.c | 1 - 1 file changed, 1 deletion(-) diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index e716c4671a15..89a8ad45a533 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -917,7 +917,6 @@ void hugetlb_cgroup_migrate(struct folio *old_folio, struct folio *new_folio) set_hugetlb_cgroup_rsvd(new_folio, h_cg_rsvd); list_move(&new_folio->lru, &h->hugepage_activelist); spin_unlock_irq(&hugetlb_lock); - return; } static struct cftype hugetlb_files[] = { From 741cbf7c5ec5b0e0a54ea29723ce35e9b64fba96 Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Mon, 25 Nov 2024 14:40:36 +0800 Subject: [PATCH 022/504] selftests/mm: add a few missing gitignore files Compiled binary files should be added to .gitignore 'git status' complains: Untracked files: (use "git add ..." to include in what will be committed) mm/hugetlb_dio mm/pkey_sighandler_tests_32 mm/pkey_sighandler_tests_64 Link: https://lkml.kernel.org/r/20241125064036.413536-1-lizhijian@fujitsu.com Signed-off-by: Li Zhijian Reviewed-by: John Hubbard Cc: Donet Tom Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/.gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore index 8f01f4da1c0d..085b06750bf4 100644 --- a/tools/testing/selftests/mm/.gitignore +++ b/tools/testing/selftests/mm/.gitignore @@ -36,6 +36,9 @@ map_fixed_noreplace write_to_hugetlbfs hmm-tests memfd_secret +hugetlb_dio +pkey_sighandler_tests_32 +pkey_sighandler_tests_64 soft-dirty split_huge_page_test ksm_tests From c1bc8fd460ebce85d7a768d8226861438e28bd53 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Fri, 22 Nov 2024 15:36:52 +0800 Subject: [PATCH 023/504] mm: pgtable: make ptep_clear() non-atomic In the generic ptep_get_and_clear() implementation, it is just a simple combination of ptep_get() and pte_clear(). But for some architectures (such as x86 and arm64, etc), the hardware will modify the A/D bits of the page table entry, so the ptep_get_and_clear() needs to be overwritten and implemented as an atomic operation to avoid contention, which has a performance cost. 
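For illustration, a user-space rendering of that race (PRESENT and DIRTY are made-up bit values, not real PTE encodings): a plain read followed by a store throws away a dirty bit the hardware set in between, while a single atomic exchange cannot lose it.

  #include <stdatomic.h>
  #include <stdio.h>

  #define PRESENT 0x1UL
  #define DIRTY   0x40UL

  int main(void)
  {
          atomic_ulong pte = PRESENT;
          unsigned long old;

          /* Non-atomic get + clear, with the "hardware" dirtying the
           * entry in between: the caller never sees the dirty bit. */
          old = atomic_load(&pte);                /* ptep_get()  */
          atomic_fetch_or(&pte, DIRTY);           /* MMU sets D  */
          atomic_store(&pte, 0);                  /* pte_clear() */
          printf("non-atomic path saw dirty: %s\n", (old & DIRTY) ? "yes" : "no");

          /* Atomic get-and-clear either runs entirely before the hardware
           * update or captures it; nothing is lost. */
          atomic_store(&pte, PRESENT | DIRTY);
          old = atomic_exchange(&pte, 0);         /* ptep_get_and_clear() */
          printf("atomic path saw dirty: %s\n", (old & DIRTY) ? "yes" : "no");

          return 0;
  }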
The commit d283d422c6c4 ("x86: mm: add x86_64 support for page table check") adds the ptep_clear() on the x86, and makes it call ptep_get_and_clear() when CONFIG_PAGE_TABLE_CHECK is enabled. The page table check feature does not actually care about the A/D bits, so only ptep_get() + pte_clear() should be called. But considering that the page table check is a debug option, this should not have much of an impact. But then the commit de8c8e52836d ("mm: page_table_check: add hooks to public helpers") changed ptep_clear() to unconditionally call ptep_get_and_clear(), so that the CONFIG_PAGE_TABLE_CHECK check can be put into the page table check stubs (in include/linux/page_table_check.h). This also cause performance loss to the kernel without CONFIG_PAGE_TABLE_CHECK enabled, which doesn't make sense. Currently ptep_clear() is only used in debug code and in khugepaged collapse paths, which are fairly expensive. So the cost of an extra atomic RMW operation does not matter. But this may be used for other paths in the future. After all, for the present pte entry, we need to call ptep_clear() instead of pte_clear() to ensure that PAGE_TABLE_CHECK works properly. So to be more precise, just calling ptep_get() and pte_clear() in the ptep_clear(). Link: https://lkml.kernel.org/r/20241122073652.54030-1-zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Reviewed-by: Pasha Tatashin Reviewed-by: Jann Horn Reviewed-by: Muchun Song Acked-by: David Hildenbrand Cc: Jason Gunthorpe Cc: Lorenzo Stoakes Cc: Peter Xu Cc: Ryan Roberts Cc: Tong Tiangen Signed-off-by: Andrew Morton --- include/linux/pgtable.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index adef9d6e9b1b..94d267d02372 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -533,7 +533,14 @@ static inline void clear_young_dirty_ptes(struct vm_area_struct *vma, static inline void ptep_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - ptep_get_and_clear(mm, addr, ptep); + pte_t pte = ptep_get(ptep); + + pte_clear(mm, addr, ptep); + /* + * No need for ptep_get_and_clear(): page table check doesn't care about + * any bits that could have been set by HW concurrently. + */ + page_table_check_pte_clear(mm, pte); } #ifdef CONFIG_GUP_GET_PXX_LOW_HIGH From fcd31b7c35949323434d50416f896bc881a5c397 Mon Sep 17 00:00:00 2001 From: Jim Zhao Date: Thu, 21 Nov 2024 18:05:39 +0800 Subject: [PATCH 024/504] mm/page-writeback: consolidate wb_thresh bumping logic into __wb_calc_thresh Address the feedback from 39ac99852fca ("mm/page-writeback: raise wb_thresh to prevent write blocking with strictlimit)". The wb_thresh bumping logic is scattered across wb_position_ratio, __wb_calc_thresh, and wb_update_dirty_ratelimit. For consistency, consolidate all wb_thresh bumping logic into __wb_calc_thresh. 
Link: https://lkml.kernel.org/r/20241121100539.605818-1-jimzhao.ai@gmail.com Signed-off-by: Jim Zhao Reviewed-by: Jan Kara Cc: Matthew Wilcox Cc: Kemeng Shi Signed-off-by: Andrew Morton --- mm/page-writeback.c | 55 ++++++++++++++------------------------------- 1 file changed, 17 insertions(+), 38 deletions(-) diff --git a/mm/page-writeback.c b/mm/page-writeback.c index d9861e42b2bd..4f5970723cf2 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -942,26 +942,25 @@ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc, wb_min_max_ratio(wb, &wb_min_ratio, &wb_max_ratio); wb_thresh += (thresh * wb_min_ratio) / (100 * BDI_RATIO_SCALE); + + /* + * It's very possible that wb_thresh is close to 0 not because the + * device is slow, but that it has remained inactive for long time. + * Honour such devices a reasonable good (hopefully IO efficient) + * threshold, so that the occasional writes won't be blocked and active + * writes can rampup the threshold quickly. + */ + if (thresh > dtc->dirty) { + if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) + wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 100); + else + wb_thresh = max(wb_thresh, (thresh - dtc->dirty) / 8); + } + wb_max_thresh = thresh * wb_max_ratio / (100 * BDI_RATIO_SCALE); if (wb_thresh > wb_max_thresh) wb_thresh = wb_max_thresh; - /* - * With strictlimit flag, the wb_thresh is treated as - * a hard limit in balance_dirty_pages() and wb_position_ratio(). - * It's possible that wb_thresh is close to zero, not because - * the device is slow, but because it has been inactive. - * To prevent occasional writes from being blocked, we raise wb_thresh. - */ - if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { - unsigned long limit = hard_dirty_limit(dom, dtc->thresh); - u64 wb_scale_thresh = 0; - - if (limit > dtc->dirty) - wb_scale_thresh = (limit - dtc->dirty) / 100; - wb_thresh = max(wb_thresh, min(wb_scale_thresh, wb_max_thresh / 4)); - } - return wb_thresh; } @@ -969,6 +968,7 @@ unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh) { struct dirty_throttle_control gdtc = { GDTC_INIT(wb) }; + domain_dirty_avail(&gdtc, true); return __wb_calc_thresh(&gdtc, thresh); } @@ -1145,12 +1145,6 @@ static void wb_position_ratio(struct dirty_throttle_control *dtc) if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { long long wb_pos_ratio; - if (dtc->wb_dirty < 8) { - dtc->pos_ratio = min_t(long long, pos_ratio * 2, - 2 << RATELIMIT_CALC_SHIFT); - return; - } - if (dtc->wb_dirty >= wb_thresh) return; @@ -1221,14 +1215,6 @@ static void wb_position_ratio(struct dirty_throttle_control *dtc) */ if (unlikely(wb_thresh > dtc->thresh)) wb_thresh = dtc->thresh; - /* - * It's very possible that wb_thresh is close to 0 not because the - * device is slow, but that it has remained inactive for long time. - * Honour such devices a reasonable good (hopefully IO efficient) - * threshold, so that the occasional writes won't be blocked and active - * writes can rampup the threshold quickly. - */ - wb_thresh = max(wb_thresh, (limit - dtc->dirty) / 8); /* * scale global setpoint to wb's: * wb_setpoint = setpoint * wb_thresh / thresh @@ -1484,17 +1470,10 @@ static void wb_update_dirty_ratelimit(struct dirty_throttle_control *dtc, * balanced_dirty_ratelimit = task_ratelimit * write_bw / dirty_rate). * Hence, to calculate "step" properly, we have to use wb_dirty as * "dirty" and wb_setpoint as "setpoint". 
- * - * We rampup dirty_ratelimit forcibly if wb_dirty is low because - * it's possible that wb_thresh is close to zero due to inactivity - * of backing device. */ if (unlikely(wb->bdi->capabilities & BDI_CAP_STRICTLIMIT)) { dirty = dtc->wb_dirty; - if (dtc->wb_dirty < 8) - setpoint = dtc->wb_dirty + 1; - else - setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2; + setpoint = (dtc->wb_thresh + dtc->wb_bg_thresh) / 2; } if (dirty < setpoint) { From a82412684eaeda1ef8201472107de6a40843beec Mon Sep 17 00:00:00 2001 From: Jiale Yang <295107659@qq.com> Date: Wed, 20 Nov 2024 11:01:35 +0000 Subject: [PATCH 025/504] mm: change type of cma_area_count to unsigned int Prefer 'unsigned int' over plain 'unsigned'. Also make it consistent with mm/cma.c Link: https://lkml.kernel.org/r/tencent_1E5E3AA25C261196D8C1F7097F130E382008@qq.com Signed-off-by: Jiale Yang <295107659@qq.com> Signed-off-by: Andrew Morton --- mm/cma.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/cma.h b/mm/cma.h index ad61cc6dd439..8485ef893e99 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -36,7 +36,7 @@ struct cma { }; extern struct cma cma_areas[MAX_CMA_AREAS]; -extern unsigned cma_area_count; +extern unsigned int cma_area_count; static inline unsigned long cma_bitmap_maxno(struct cma *cma) { From 67b5aec6b30c7bb0d8260c0ccbf42df392807112 Mon Sep 17 00:00:00 2001 From: Chin Yik Ming Date: Wed, 20 Nov 2024 18:50:41 +0800 Subject: [PATCH 026/504] mm/memory: fix a comment typo in lock_mm_and_find_vma() s/equivalend/equivalent/ Link: https://lkml.kernel.org/r/20241120105041.2394283-1-yikming2222@gmail.com Signed-off-by: Chin Yik Ming Signed-off-by: Andrew Morton --- mm/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index 78b6741ae593..f49eb4d4be75 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6185,7 +6185,7 @@ static inline bool upgrade_mmap_lock_carefully(struct mm_struct *mm, struct pt_r /* * Helper for page fault handling. * - * This is kind of equivalend to "mmap_read_lock()" followed + * This is kind of equivalent to "mmap_read_lock()" followed * by "find_extend_vma()", except it's a lot more careful about * the locking (and will drop the lock on failure). * From f8db55561f1b5c70ba2dd260206f295ffee9d1c9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 22 Nov 2024 16:54:51 +0100 Subject: [PATCH 027/504] kasan: make kasan_record_aux_stack_noalloc() the default behaviour kasan_record_aux_stack_noalloc() was introduced to record a stack trace without allocating memory in the process. It has been added to callers which were invoked while a raw_spinlock_t was held. More and more callers were identified and changed over time. Is it a good thing to have this while functions try their best to do a locklessly setup? The only downside of having kasan_record_aux_stack() not allocate any memory is that we end up without a stacktrace if stackdepot runs out of memory and at the same stacktrace was not recorded before To quote Marco Elver from https://lore.kernel.org/all/CANpmjNPmQYJ7pv1N3cuU8cP18u7PP_uoZD8YxwZd4jtbof9nVQ@mail.gmail.com/ | I'd be in favor, it simplifies things. And stack depot should be | able to replenish its pool sufficiently in the "non-aux" cases | i.e. regular allocations. Worst case we fail to record some | aux stacks, but I think that's only really bad if there's a bug | around one of these allocations. In general the probabilities | of this being a regression are extremely small [...] 
Make the kasan_record_aux_stack_noalloc() behaviour default as kasan_record_aux_stack(). [bigeasy@linutronix.de: dressed the diff as patch] Link: https://lkml.kernel.org/r/20241122155451.Mb2pmeyJ@linutronix.de Fixes: 7cb3007ce2da ("kasan: generic: introduce kasan_record_aux_stack_noalloc()") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Sebastian Andrzej Siewior Reported-by: syzbot+39f85d612b7c20d8db48@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/67275485.050a0220.3c8d68.0a37.GAE@google.com Reviewed-by: Andrey Konovalov Reviewed-by: Marco Elver Reviewed-by: Waiman Long Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Ben Segall Cc: Boqun Feng Cc: Christoph Lameter Cc: David Rientjes Cc: Dietmar Eggemann Cc: Dmitry Vyukov Cc: Frederic Weisbecker Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Ingo Molnar Cc: Jann Horn Cc: Joel Fernandes (Google) Cc: Joonsoo Kim Cc: Josh Triplett Cc: Juri Lelli Cc: Cc: Lai Jiangshan Cc: Liam R. Howlett Cc: Lorenzo Stoakes Cc: Mathieu Desnoyers Cc: Mel Gorman Cc: Neeraj Upadhyay Cc: Paul E. McKenney Cc: Pekka Enberg Cc: Roman Gushchin Cc: Steven Rostedt Cc: syzkaller-bugs@googlegroups.com Cc: Tejun Heo Cc: Thomas Gleixner Cc: Uladzislau Rezki (Sony) Cc: Valentin Schneider Cc: Vincent Guittot Cc: Vincenzo Frascino Cc: Vlastimil Babka Cc: Zqiang Signed-off-by: Andrew Morton --- include/linux/kasan.h | 2 -- include/linux/task_work.h | 3 --- kernel/irq_work.c | 2 +- kernel/rcu/tiny.c | 2 +- kernel/rcu/tree.c | 4 ++-- kernel/sched/core.c | 2 +- kernel/task_work.c | 14 +------------- kernel/workqueue.c | 2 +- mm/kasan/generic.c | 18 ++++++------------ mm/slub.c | 2 +- 10 files changed, 14 insertions(+), 37 deletions(-) diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 6bbfc8aa42e8..1c1b3d39e7b6 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -491,7 +491,6 @@ void kasan_cache_create(struct kmem_cache *cache, unsigned int *size, void kasan_cache_shrink(struct kmem_cache *cache); void kasan_cache_shutdown(struct kmem_cache *cache); void kasan_record_aux_stack(void *ptr); -void kasan_record_aux_stack_noalloc(void *ptr); #else /* CONFIG_KASAN_GENERIC */ @@ -509,7 +508,6 @@ static inline void kasan_cache_create(struct kmem_cache *cache, static inline void kasan_cache_shrink(struct kmem_cache *cache) {} static inline void kasan_cache_shutdown(struct kmem_cache *cache) {} static inline void kasan_record_aux_stack(void *ptr) {} -static inline void kasan_record_aux_stack_noalloc(void *ptr) {} #endif /* CONFIG_KASAN_GENERIC */ diff --git a/include/linux/task_work.h b/include/linux/task_work.h index 2964171856e0..0646804860ff 100644 --- a/include/linux/task_work.h +++ b/include/linux/task_work.h @@ -19,9 +19,6 @@ enum task_work_notify_mode { TWA_SIGNAL, TWA_SIGNAL_NO_IPI, TWA_NMI_CURRENT, - - TWA_FLAGS = 0xff00, - TWAF_NO_ALLOC = 0x0100, }; static inline bool task_work_pending(struct task_struct *task) diff --git a/kernel/irq_work.c b/kernel/irq_work.c index 2f4fb336dda1..73f7e1fd4ab4 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c @@ -147,7 +147,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) if (!irq_work_claim(work)) return false; - kasan_record_aux_stack_noalloc(work); + kasan_record_aux_stack(work); preempt_disable(); if (cpu != smp_processor_id()) { diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c index b3b3ce34df63..4b3f31911465 100644 --- a/kernel/rcu/tiny.c +++ b/kernel/rcu/tiny.c @@ -250,7 +250,7 @@ EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu); void kvfree_call_rcu(struct rcu_head *head, void 
*ptr) { if (head) - kasan_record_aux_stack_noalloc(ptr); + kasan_record_aux_stack(ptr); __kvfree_call_rcu(head, ptr); } diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index ff98233d4aa5..3885aae5f9cb 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -3083,7 +3083,7 @@ __call_rcu_common(struct rcu_head *head, rcu_callback_t func, bool lazy_in) } head->func = func; head->next = NULL; - kasan_record_aux_stack_noalloc(head); + kasan_record_aux_stack(head); local_irq_save(flags); rdp = this_cpu_ptr(&rcu_data); lazy = lazy_in && !rcu_async_should_hurry(); @@ -3817,7 +3817,7 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr) return; } - kasan_record_aux_stack_noalloc(ptr); + kasan_record_aux_stack(ptr); success = add_ptr_to_bulk_krc_lock(&krcp, &flags, ptr, !head); if (!success) { run_page_cache_worker(krcp); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3e5a6bf587f9..755ae4659b64 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -10590,7 +10590,7 @@ void task_tick_mm_cid(struct rq *rq, struct task_struct *curr) return; /* No page allocation under rq lock */ - task_work_add(curr, work, TWA_RESUME | TWAF_NO_ALLOC); + task_work_add(curr, work, TWA_RESUME); } void sched_mm_cid_exit_signals(struct task_struct *t) diff --git a/kernel/task_work.c b/kernel/task_work.c index c969f1f26be5..d1efec571a4a 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c @@ -55,26 +55,14 @@ int task_work_add(struct task_struct *task, struct callback_head *work, enum task_work_notify_mode notify) { struct callback_head *head; - int flags = notify & TWA_FLAGS; - notify &= ~TWA_FLAGS; if (notify == TWA_NMI_CURRENT) { if (WARN_ON_ONCE(task != current)) return -EINVAL; if (!IS_ENABLED(CONFIG_IRQ_WORK)) return -EINVAL; } else { - /* - * Record the work call stack in order to print it in KASAN - * reports. - * - * Note that stack allocation can fail if TWAF_NO_ALLOC flag - * is set and new page is needed to expand the stack buffer. - */ - if (flags & TWAF_NO_ALLOC) - kasan_record_aux_stack_noalloc(work); - else - kasan_record_aux_stack(work); + kasan_record_aux_stack(work); } head = READ_ONCE(task->task_works); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f7d8fc204579..77d8f672e175 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2180,7 +2180,7 @@ static void insert_work(struct pool_workqueue *pwq, struct work_struct *work, debug_work_activate(work); /* record the work call stack in order to print it in KASAN reports */ - kasan_record_aux_stack_noalloc(work); + kasan_record_aux_stack(work); /* we own @work, set data and link */ set_work_pwq(work, pwq, extra_flags); diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 8b9e348113b1..d54e89f8c3e7 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -524,7 +524,11 @@ size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object) sizeof(struct kasan_free_meta) : 0); } -static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags) +/* + * This function avoids dynamic memory allocations and thus can be called from + * contexts that do not allow allocating memory. 
+ */ +void kasan_record_aux_stack(void *addr) { struct slab *slab = kasan_addr_to_slab(addr); struct kmem_cache *cache; @@ -541,17 +545,7 @@ static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags) return; alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0]; - alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags); -} - -void kasan_record_aux_stack(void *addr) -{ - return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC); -} - -void kasan_record_aux_stack_noalloc(void *addr) -{ - return __kasan_record_aux_stack(addr, 0); + alloc_meta->aux_stack[0] = kasan_save_stack(0, 0); } void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) diff --git a/mm/slub.c b/mm/slub.c index cef25d9a476a..a8e9b5106f4c 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2311,7 +2311,7 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init, * We have to do this manually because the rcu_head is * not located inside the object. */ - kasan_record_aux_stack_noalloc(x); + kasan_record_aux_stack(x); delayed_free->object = x; call_rcu(&delayed_free->head, slab_free_after_rcu_debug); From 9ecac47cabd0decb694b6666cc253d4b6a25da06 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 28 Nov 2024 15:40:39 +0800 Subject: [PATCH 028/504] mm: factor out the order calculation into a new helper Patch series "Support large folios for tmpfs", v3. Traditionally, tmpfs only supported PMD-sized large folios. However nowadays with other file systems supporting any sized large folios, and extending anonymous to support mTHP, we should not restrict tmpfs to allocating only PMD-sized large folios, making it more special. Instead, we should allow tmpfs can allocate any sized large folios. Considering that tmpfs already has the 'huge=' option to control the PMD-sized large folios allocation, we can extend the 'huge=' option to allow any sized large folios. The semantics of the 'huge=' mount option are: huge=never: no any sized large folios huge=always: any sized large folios huge=within_size: like 'always' but respect the i_size huge=advise: like 'always' if requested with madvise() Note: for tmpfs mmap() faults, due to the lack of a write size hint, still allocate the PMD-sized large folios if huge=always/within_size/advise is set. Moreover, the 'deny' and 'force' testing options controlled by '/sys/kernel/mm/transparent_hugepage/shmem_enabled', still retain the same semantics. The 'deny' can disable any sized large folios for tmpfs, while the 'force' can enable PMD sized large folios for tmpfs. This patch (of 6): Factor out the order calculation into a new helper, which can be reused by shmem in the following patch. 
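As a quick sanity check of what the helper computes, a standalone userspace re-statement (assuming 4K base pages; illustrative only, not kernel code) behaves like this:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumption: 4K base pages */

    /* Mirrors the new helper: ilog2(size) minus PAGE_SHIFT, clamped at 0. */
    static unsigned int filemap_get_order(size_t size)
    {
            unsigned int shift = 63 - __builtin_clzll(size);        /* ilog2() */

            return shift <= PAGE_SHIFT ? 0 : shift - PAGE_SHIFT;
    }

    int main(void)
    {
            /* 4K -> 0, 64K -> 4, 2M -> 9 (the PMD order with 4K pages) */
            printf("%u %u %u\n", filemap_get_order(4096),
                   filemap_get_order(65536), filemap_get_order(2UL << 20));
            return 0;
    }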
Link: https://lkml.kernel.org/r/cover.1732779148.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/5505f9ea50942820c1924d1803bfdd3a524e54f6.1732779148.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Suggested-by: Matthew Wilcox Reviewed-by: Barry Song Reviewed-by: David Hildenbrand Reviewed-by: Daniel Gomez Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Ryan Roberts Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index bcf0865a38ae..d796c8a33647 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -727,6 +727,16 @@ typedef unsigned int __bitwise fgf_t; #define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE) +static inline unsigned int filemap_get_order(size_t size) +{ + unsigned int shift = ilog2(size); + + if (shift <= PAGE_SHIFT) + return 0; + + return shift - PAGE_SHIFT; +} + /** * fgf_set_order - Encode a length in the fgf_t flags. * @size: The suggested size of the folio to create. @@ -740,11 +750,11 @@ typedef unsigned int __bitwise fgf_t; */ static inline fgf_t fgf_set_order(size_t size) { - unsigned int shift = ilog2(size); + unsigned int order = filemap_get_order(size); - if (shift <= PAGE_SHIFT) + if (!order) return 0; - return (__force fgf_t)((shift - PAGE_SHIFT) << 26); + return (__force fgf_t)(order << 26); } void *filemap_get_entry(struct address_space *mapping, pgoff_t index); From 43a571671cd938c7a5fb68e6963d25e1d6c051af Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 28 Nov 2024 15:40:40 +0800 Subject: [PATCH 029/504] mm: shmem: change shmem_huge_global_enabled() to return huge order bitmap Change the shmem_huge_global_enabled() to return the suitable huge order bitmap, and return 0 if huge pages are not allowed. This is a preparation for supporting various huge orders allocation of tmpfs in the following patches. No functional changes. 
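To make the return-value change concrete: each bit N set in the returned mask means order-N folios are allowed. Two hedged examples of the encoding (the second form is what the follow-up tmpfs patch starts returning):

    unsigned int pmd_only = BIT(HPAGE_PMD_ORDER);   /* only the PMD order      */
    unsigned int up_to_4  = BIT(4 + 1) - 1;         /* every order from 0 to 4 */

Callers can keep testing "non-zero means huge allowed", while later patches may hand back more than one allowable order.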
Link: https://lkml.kernel.org/r/9dce1cfad3e9c1587cf1a0ea782ddbebd0e92984.1732779148.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: David Hildenbrand Cc: Barry Song Cc: Daniel Gomez Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox Cc: Ryan Roberts Signed-off-by: Andrew Morton --- mm/shmem.c | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index fdb5afa1cfe9..9a05588e48b2 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -554,37 +554,37 @@ static bool shmem_confirm_swap(struct address_space *mapping, static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER; -static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, - loff_t write_end, bool shmem_huge_force, - unsigned long vm_flags) +static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index, + loff_t write_end, bool shmem_huge_force, + unsigned long vm_flags) { loff_t i_size; if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER) - return false; + return 0; if (!S_ISREG(inode->i_mode)) - return false; + return 0; if (shmem_huge == SHMEM_HUGE_DENY) - return false; + return 0; if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE) - return true; + return BIT(HPAGE_PMD_ORDER); switch (SHMEM_SB(inode->i_sb)->huge) { case SHMEM_HUGE_ALWAYS: - return true; + return BIT(HPAGE_PMD_ORDER); case SHMEM_HUGE_WITHIN_SIZE: index = round_up(index + 1, HPAGE_PMD_NR); i_size = max(write_end, i_size_read(inode)); i_size = round_up(i_size, PAGE_SIZE); if (i_size >> PAGE_SHIFT >= index) - return true; + return BIT(HPAGE_PMD_ORDER); fallthrough; case SHMEM_HUGE_ADVISE: if (vm_flags & VM_HUGEPAGE) - return true; + return BIT(HPAGE_PMD_ORDER); fallthrough; default: - return false; + return 0; } } @@ -779,11 +779,11 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, return 0; } -static bool shmem_huge_global_enabled(struct inode *inode, pgoff_t index, - loff_t write_end, bool shmem_huge_force, - unsigned long vm_flags) +static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index, + loff_t write_end, bool shmem_huge_force, + unsigned long vm_flags) { - return false; + return 0; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -1690,21 +1690,21 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode, unsigned long within_size_orders = READ_ONCE(huge_shmem_orders_within_size); unsigned long vm_flags = vma ? vma->vm_flags : 0; pgoff_t aligned_index; - bool global_huge; + unsigned int global_orders; loff_t i_size; int order; if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags))) return 0; - global_huge = shmem_huge_global_enabled(inode, index, write_end, - shmem_huge_force, vm_flags); + global_orders = shmem_huge_global_enabled(inode, index, write_end, + shmem_huge_force, vm_flags); if (!vma || !vma_is_anon_shmem(vma)) { /* * For tmpfs, we now only support PMD sized THP if huge page * is enabled, otherwise fallback to order 0. */ - return global_huge ? 
BIT(HPAGE_PMD_ORDER) : 0; + return global_orders; } /* @@ -1737,7 +1737,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode, if (vm_flags & VM_HUGEPAGE) mask |= READ_ONCE(huge_shmem_orders_madvise); - if (global_huge) + if (global_orders > 0) mask |= READ_ONCE(huge_shmem_orders_inherit); return THP_ORDERS_ALL_FILE_DEFAULT & mask; From e7d5079e510a47df3629221eee1ff210b10e221c Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 28 Nov 2024 15:40:41 +0800 Subject: [PATCH 030/504] mm: shmem: add large folio support for tmpfs Add large folio support for tmpfs write and fallocate paths matching the same high order preference mechanism used in the iomap buffered IO path as used in __filemap_get_folio(). Add shmem_mapping_size_orders() to get a hint for the orders of the folio based on the file size which takes care of the mapping requirements. Traditionally, tmpfs only supported PMD-sized large folios. However nowadays with other file systems supporting any sized large folios, and extending anonymous to support mTHP, we should not restrict tmpfs to allocating only PMD-sized large folios, making it more special. Instead, we should allow tmpfs can allocate any sized large folios. Considering that tmpfs already has the 'huge=' option to control the PMD-sized large folios allocation, we can extend the 'huge=' option to allow any sized large folios. The semantics of the 'huge=' mount option are: huge=never: no any sized large folios huge=always: any sized large folios huge=within_size: like 'always' but respect the i_size huge=advise: like 'always' if requested with madvise() Note: for tmpfs mmap() faults, due to the lack of a write size hint, still allocate the PMD-sized huge folios if huge=always/within_size/advise is set. Moreover, the 'deny' and 'force' testing options controlled by '/sys/kernel/mm/transparent_hugepage/shmem_enabled', still retain the same semantics. The 'deny' can disable any sized large folios for tmpfs, while the 'force' can enable PMD sized large folios for tmpfs. Link: https://lkml.kernel.org/r/035bf55fbdebeff65f5cb2cdb9907b7d632c3228.1732779148.git.baolin.wang@linux.alibaba.com Co-developed-by: Daniel Gomez Signed-off-by: Daniel Gomez Signed-off-by: Baolin Wang Cc: Barry Song Cc: David Hildenbrand Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox Cc: Ryan Roberts Signed-off-by: Andrew Morton --- mm/shmem.c | 99 ++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 81 insertions(+), 18 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index 9a05588e48b2..d39b1c2a3597 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -554,34 +554,100 @@ static bool shmem_confirm_swap(struct address_space *mapping, static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER; +/** + * shmem_mapping_size_orders - Get allowable folio orders for the given file size. + * @mapping: Target address_space. + * @index: The page index. + * @write_end: end of a write, could extend inode size. + * + * This returns huge orders for folios (when supported) based on the file size + * which the mapping currently allows at the given index. The index is relevant + * due to alignment considerations the mapping might have. The returned order + * may be less than the size passed. + * + * Return: The orders. 
+ */ +static inline unsigned int +shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t write_end) +{ + unsigned int order; + size_t size; + + if (!mapping_large_folio_support(mapping) || !write_end) + return 0; + + /* Calculate the write size based on the write_end */ + size = write_end - (index << PAGE_SHIFT); + order = filemap_get_order(size); + if (!order) + return 0; + + /* If we're not aligned, allocate a smaller folio */ + if (index & ((1UL << order) - 1)) + order = __ffs(index); + + order = min_t(size_t, order, MAX_PAGECACHE_ORDER); + return order > 0 ? BIT(order + 1) - 1 : 0; +} + static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index, loff_t write_end, bool shmem_huge_force, + struct vm_area_struct *vma, unsigned long vm_flags) { + unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ? + 0 : BIT(HPAGE_PMD_ORDER); + unsigned long within_size_orders; + unsigned int order; + pgoff_t aligned_index; loff_t i_size; - if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER) - return 0; if (!S_ISREG(inode->i_mode)) return 0; if (shmem_huge == SHMEM_HUGE_DENY) return 0; if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE) - return BIT(HPAGE_PMD_ORDER); + return maybe_pmd_order; + /* + * The huge order allocation for anon shmem is controlled through + * the mTHP interface, so we still use PMD-sized huge order to + * check whether global control is enabled. + * + * For tmpfs mmap()'s huge order, we still use PMD-sized order to + * allocate huge pages due to lack of a write size hint. + * + * Otherwise, tmpfs will allow getting a highest order hint based on + * the size of write and fallocate paths, then will try each allowable + * huge orders. + */ switch (SHMEM_SB(inode->i_sb)->huge) { case SHMEM_HUGE_ALWAYS: - return BIT(HPAGE_PMD_ORDER); + if (vma) + return maybe_pmd_order; + + return shmem_mapping_size_orders(inode->i_mapping, index, write_end); case SHMEM_HUGE_WITHIN_SIZE: - index = round_up(index + 1, HPAGE_PMD_NR); - i_size = max(write_end, i_size_read(inode)); - i_size = round_up(i_size, PAGE_SIZE); - if (i_size >> PAGE_SHIFT >= index) - return BIT(HPAGE_PMD_ORDER); + if (vma) + within_size_orders = maybe_pmd_order; + else + within_size_orders = shmem_mapping_size_orders(inode->i_mapping, + index, write_end); + + order = highest_order(within_size_orders); + while (within_size_orders) { + aligned_index = round_up(index + 1, 1 << order); + i_size = max(write_end, i_size_read(inode)); + i_size = round_up(i_size, PAGE_SIZE); + if (i_size >> PAGE_SHIFT >= aligned_index) + return within_size_orders; + + order = next_order(&within_size_orders, order); + } fallthrough; case SHMEM_HUGE_ADVISE: if (vm_flags & VM_HUGEPAGE) - return BIT(HPAGE_PMD_ORDER); + return maybe_pmd_order; fallthrough; default: return 0; @@ -781,6 +847,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo, static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index, loff_t write_end, bool shmem_huge_force, + struct vm_area_struct *vma, unsigned long vm_flags) { return 0; @@ -1180,7 +1247,7 @@ static int shmem_getattr(struct mnt_idmap *idmap, STATX_ATTR_NODUMP); generic_fillattr(idmap, request_mask, inode, stat); - if (shmem_huge_global_enabled(inode, 0, 0, false, 0)) + if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0)) stat->blksize = HPAGE_PMD_SIZE; if (request_mask & STATX_BTIME) { @@ -1698,14 +1765,10 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode, return 0; global_orders = 
shmem_huge_global_enabled(inode, index, write_end, - shmem_huge_force, vm_flags); - if (!vma || !vma_is_anon_shmem(vma)) { - /* - * For tmpfs, we now only support PMD sized THP if huge page - * is enabled, otherwise fallback to order 0. - */ + shmem_huge_force, vma, vm_flags); + /* Tmpfs huge pages allocation */ + if (!vma || !vma_is_anon_shmem(vma)) return global_orders; - } /* * Following the 'deny' semantics of the top level, force the huge From 930ba4eb5117c85288ac47dcb506d99879d94454 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 28 Nov 2024 15:40:42 +0800 Subject: [PATCH 031/504] mm: shmem: add a kernel command line to change the default huge policy for tmpfs Now the tmpfs can allow to allocate any sized large folios, and the default huge policy is still preferred to be 'never'. Due to tmpfs not behaving like other file systems in some cases as previously explained by David[1]: : I think I raised this in the past, but tmpfs/shmem is just like any : other file system .. except it sometimes really isn't and behaves much : more like (swappable) anonymous memory. (or mlocked files) : : There are many systems out there that run without swap enabled, or with : extremely minimal swap (IIRC until recently kubernetes was completely : incompatible with swapping). Swap can even be disabled today for shmem : using a mount option. : : That's a big difference to all other file systems where you are : guaranteed to have backend storage where you can simply evict under : memory pressure (might temporarily fail, of course). : : I *think* that's the reason why we have the "huge=" parameter that also : controls the THP allocations during page faults (IOW possible memory : over-allocation). Maybe also because it was a new feature, and we only : had a single THP size. Thus adding a new command line to change the default huge policy will be helpful to use the large folios for tmpfs, which is similar to the 'transparent_hugepage_shmem' cmdline for shmem. [1] https://lore.kernel.org/all/cbadd5fe-69d5-4c21-8eb8-3344ed36c721@redhat.com/ Link: https://lkml.kernel.org/r/ff390b2656f0d39649547f8f2cbb30fcb7e7be2d.1732779148.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: Barry Song Cc: Daniel Gomez Cc: David Hildenbrand Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox Cc: Ryan Roberts Signed-off-by: Andrew Morton --- .../admin-guide/kernel-parameters.txt | 7 ++++++ Documentation/admin-guide/mm/transhuge.rst | 6 +++++ mm/shmem.c | 23 ++++++++++++++++++- 3 files changed, 35 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 3872bc6ec49d..c79691eee54f 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -6992,6 +6992,13 @@ See Documentation/admin-guide/mm/transhuge.rst for more details. + transparent_hugepage_tmpfs= [KNL] + Format: [always|within_size|advise|never] + Can be used to control the default hugepage allocation policy + for the tmpfs mount. + See Documentation/admin-guide/mm/transhuge.rst + for more details. 
+ trusted.source= [KEYS] Format: This parameter identifies the trust source as a backend diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 8872203df088..96b5b3b53b71 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -332,6 +332,12 @@ allocation policy for the internal shmem mount by using the kernel parameter seven valid policies for shmem (``always``, ``within_size``, ``advise``, ``never``, ``deny``, and ``force``). +Similarly to ``transparent_hugepage_shmem``, you can control the default +hugepage allocation policy for the tmpfs mount by using the kernel parameter +``transparent_hugepage_tmpfs=``, where ```` is one of the +four valid policies for tmpfs (``always``, ``within_size``, ``advise``, +``never``). The tmpfs mount default policy is ``never``. + In the same manner as ``thp_anon`` controls each supported anonymous THP size, ``thp_shmem`` controls each supported shmem THP size. ``thp_shmem`` has the same format as ``thp_anon``, but also supports the policy diff --git a/mm/shmem.c b/mm/shmem.c index d39b1c2a3597..bdc2df0b6cf7 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -553,6 +553,7 @@ static bool shmem_confirm_swap(struct address_space *mapping, /* ifdef here to avoid bloating shmem.o when not necessary */ static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER; +static int tmpfs_huge __read_mostly = SHMEM_HUGE_NEVER; /** * shmem_mapping_size_orders - Get allowable folio orders for the given file size. @@ -4954,7 +4955,12 @@ static int shmem_fill_super(struct super_block *sb, struct fs_context *fc) sbinfo->gid = ctx->gid; sbinfo->full_inums = ctx->full_inums; sbinfo->mode = ctx->mode; - sbinfo->huge = ctx->huge; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (ctx->seen & SHMEM_SEEN_HUGE) + sbinfo->huge = ctx->huge; + else + sbinfo->huge = tmpfs_huge; +#endif sbinfo->mpol = ctx->mpol; ctx->mpol = NULL; @@ -5505,6 +5511,21 @@ static int __init setup_transparent_hugepage_shmem(char *str) } __setup("transparent_hugepage_shmem=", setup_transparent_hugepage_shmem); +static int __init setup_transparent_hugepage_tmpfs(char *str) +{ + int huge; + + huge = shmem_parse_huge(str); + if (huge < 0) { + pr_warn("transparent_hugepage_tmpfs= cannot parse, ignored\n"); + return huge; + } + + tmpfs_huge = huge; + return 1; +} +__setup("transparent_hugepage_tmpfs=", setup_transparent_hugepage_tmpfs); + static char str_dup[PAGE_SIZE] __initdata; static int __init setup_thp_shmem(char *str) { From 6976848d304ef960e5ac5032611cfd8a9b1b6b01 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 28 Nov 2024 15:40:43 +0800 Subject: [PATCH 032/504] docs: tmpfs: update the large folios policy for tmpfs and shmem Update the large folios policy for tmpfs and shmem. Link: https://lkml.kernel.org/r/9b7418af30e300d1eb05721b81d79074d0bb0ec9.1732779148.git.baolin.wang@linux.alibaba.com Signed-off-by: David Hildenbrand Signed-off-by: Baolin Wang Cc: Barry Song Cc: Daniel Gomez Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox Cc: Ryan Roberts Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/transhuge.rst | 58 +++++++++++++++------- 1 file changed, 41 insertions(+), 17 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 96b5b3b53b71..506c0e6cd713 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -358,8 +358,21 @@ default to ``never``. 
Hugepages in tmpfs/shmem ======================== -You can control hugepage allocation policy in tmpfs with mount option -``huge=``. It can have following values: +Traditionally, tmpfs only supported a single huge page size ("PMD"). Today, +it also supports smaller sizes just like anonymous memory, often referred +to as "multi-size THP" (mTHP). Huge pages of any size are commonly +represented in the kernel as "large folios". + +While there is fine control over the huge page sizes to use for the internal +shmem mount (see below), ordinary tmpfs mounts will make use of all available +huge page sizes without any control over the exact sizes, behaving more like +other file systems. + +tmpfs mounts +------------ + +The THP allocation policy for tmpfs mounts can be adjusted using the mount +option: ``huge=``. It can have following values: always Attempt to allocate huge pages every time we need a new page; @@ -374,19 +387,19 @@ within_size advise Only allocate huge pages if requested with fadvise()/madvise(); -The default policy is ``never``. +Remember, that the kernel may use huge pages of all available sizes, and +that no fine control as for the internal tmpfs mount is available. + +The default policy in the past was ``never``, but it can now be adjusted +using the kernel parameter ``transparent_hugepage_tmpfs=``. ``mount -o remount,huge= /mountpoint`` works fine after mount: remounting ``huge=never`` will not attempt to break up huge pages at all, just stop more from being allocated. -There's also sysfs knob to control hugepage allocation policy for internal -shmem mount: /sys/kernel/mm/transparent_hugepage/shmem_enabled. The mount -is used for SysV SHM, memfds, shared anonymous mmaps (of /dev/zero or -MAP_ANONYMOUS), GPU drivers' DRM objects, Ashmem. - -In addition to policies listed above, shmem_enabled allows two further -values: +In addition to policies listed above, the sysfs knob +/sys/kernel/mm/transparent_hugepage/shmem_enabled will affect the +allocation policy of tmpfs mounts, when set to the following values: deny For use in emergencies, to force the huge option off from @@ -394,13 +407,24 @@ deny force Force the huge option on for all - very useful for testing; -Shmem can also use "multi-size THP" (mTHP) by adding a new sysfs knob to -control mTHP allocation: -'/sys/kernel/mm/transparent_hugepage/hugepages-kB/shmem_enabled', -and its value for each mTHP is essentially consistent with the global -setting. An 'inherit' option is added to ensure compatibility with these -global settings. Conversely, the options 'force' and 'deny' are dropped, -which are rather testing artifacts from the old ages. +shmem / internal tmpfs +---------------------- +The mount internal tmpfs mount is used for SysV SHM, memfds, shared anonymous +mmaps (of /dev/zero or MAP_ANONYMOUS), GPU drivers' DRM objects, Ashmem. + +To control the THP allocation policy for this internal tmpfs mount, the +sysfs knob /sys/kernel/mm/transparent_hugepage/shmem_enabled and the knobs +per THP size in +'/sys/kernel/mm/transparent_hugepage/hugepages-kB/shmem_enabled' +can be used. + +The global knob has the same semantics as the ``huge=`` mount options +for tmpfs mounts, except that the different huge page sizes can be controlled +individually, and will only use the setting of the global knob when the +per-size knob is set to 'inherit'. + +The options 'force' and 'deny' are dropped for the individual sizes, which +are rather testing artifacts from the old ages. 
always Attempt to allocate huge pages every time we need a new page; From 231998c32144c7d6716e612db4d7b8342b7f433b Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 28 Nov 2024 15:40:44 +0800 Subject: [PATCH 033/504] docs: tmpfs: drop 'fadvise()' from the documentation Drop 'fadvise()' from the doc, since fadvise() has no HUGEPAGE advise currently. Link: https://lkml.kernel.org/r/3a10bb49832f6d9827dc2c76aec0bf43a892876b.1732779148.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: Barry Song Acked-by: David Hildenbrand Cc: Daniel Gomez Cc: Hugh Dickins Cc: Kefeng Wang Cc: Lance Yang Cc: Matthew Wilcox Cc: Ryan Roberts Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/transhuge.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index 506c0e6cd713..d870f83775bc 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -382,10 +382,10 @@ never within_size Only allocate huge page if it will be fully within i_size. - Also respect fadvise()/madvise() hints; + Also respect madvise() hints; advise - Only allocate huge pages if requested with fadvise()/madvise(); + Only allocate huge pages if requested with madvise(); Remember, that the kernel may use huge pages of all available sizes, and that no fine control as for the internal tmpfs mount is available. @@ -438,10 +438,10 @@ never within_size Only allocate huge page if it will be fully within i_size. - Also respect fadvise()/madvise() hints; + Also respect madvise() hints; advise - Only allocate huge pages if requested with fadvise()/madvise(); + Only allocate huge pages if requested with madvise(); Need of application restart =========================== From 90ec6e1252c6adbeeabbbaa0ebacc35f96f3a427 Mon Sep 17 00:00:00 2001 From: Petr Tesarik Date: Tue, 19 Nov 2024 12:37:38 +0100 Subject: [PATCH 034/504] mm/rodata_test: use READ_ONCE() to read const variable Patch series "Fix mm/rodata_test", v2. Make sure that the test actually reads the read-only memory location. Verify that the variable contains the expected value rather than any non-zero value. This patch (of 2): The C compiler may optimize away the memory read of a const variable if its value is known at compile time. In particular, GCC14 with -O2 generates no code at all for test 1, and it generates the following x86_64 instructions for test 3: cmpl $195, 4(%rsp) je .L14 That is, it replaces the read of rodata_test_data with an immediate value and compares it to the value of the local variable "zero". Use READ_ONCE() to undo any such compiler optimizations and enforce a memory read. 
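The effect is easy to reproduce outside the kernel. A minimal userspace illustration (not kernel code; the macro below is a simplified stand-in for the kernel's READ_ONCE()):

    #include <stdio.h>

    /* Simplified stand-in: a volatile access forces a real load even when
     * the compiler could otherwise fold the const's value at compile time. */
    #define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

    static const int rodata_test_data = 0xC3;

    int main(void)
    {
            if (READ_ONCE(rodata_test_data) != 0xC3)
                    printf("test data was changed\n");
            else
                    printf("read-only data intact\n");
            return 0;
    }

Built with gcc -O2, a plain comparison against rodata_test_data compiles to an immediate compare (or is elided entirely), while the READ_ONCE() version keeps the memory load that the test relies on.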
Link: https://lkml.kernel.org/r/cover.1732016064.git.ptesarik@suse.com Link: https://lkml.kernel.org/r/2a66dee010151b25cb143efb39091ef7530aa00a.1732016064.git.ptesarik@suse.com Fixes: 2959a5f726f6 ("mm: add arch-independent testcases for RODATA") Signed-off-by: Petr Tesarik Reviewed-by: Kees Cook Cc: Jinbum Park Signed-off-by: Andrew Morton --- mm/rodata_test.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/rodata_test.c b/mm/rodata_test.c index 6d783436951f..3b60425d80fe 100644 --- a/mm/rodata_test.c +++ b/mm/rodata_test.c @@ -20,7 +20,7 @@ void rodata_test(void) /* test 1: read the value */ /* If this test fails, some previous testrun has clobbered the state */ - if (!rodata_test_data) { + if (!READ_ONCE(rodata_test_data)) { pr_err("test 1 fails (start data)\n"); return; } @@ -33,7 +33,7 @@ void rodata_test(void) } /* test 3: check the value hasn't changed */ - if (rodata_test_data == zero) { + if (READ_ONCE(rodata_test_data) == zero) { pr_err("test data was changed\n"); return; } From 2507368b6175ba3340b74926884e292997674e7a Mon Sep 17 00:00:00 2001 From: Petr Tesarik Date: Tue, 19 Nov 2024 12:37:39 +0100 Subject: [PATCH 035/504] mm/rodata_test: verify test data is unchanged, rather than non-zero Verify that the test variable holds the initialization value, rather than any non-zero value. Link: https://lkml.kernel.org/r/386ffda192eb4a26f68c526c496afd48a5cd87ce.1732016064.git.ptesarik@suse.com Signed-off-by: Petr Tesarik Reviewed-by: Kees Cook Cc: Jinbum Park Signed-off-by: Andrew Morton --- mm/rodata_test.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/mm/rodata_test.c b/mm/rodata_test.c index 3b60425d80fe..e7173fcd210c 100644 --- a/mm/rodata_test.c +++ b/mm/rodata_test.c @@ -12,7 +12,8 @@ #include #include -static const int rodata_test_data = 0xC3; +#define TEST_VALUE 0xC3 +static const int rodata_test_data = TEST_VALUE; void rodata_test(void) { @@ -20,7 +21,7 @@ void rodata_test(void) /* test 1: read the value */ /* If this test fails, some previous testrun has clobbered the state */ - if (!READ_ONCE(rodata_test_data)) { + if (unlikely(READ_ONCE(rodata_test_data) != TEST_VALUE)) { pr_err("test 1 fails (start data)\n"); return; } @@ -33,7 +34,7 @@ void rodata_test(void) } /* test 3: check the value hasn't changed */ - if (READ_ONCE(rodata_test_data) == zero) { + if (unlikely(READ_ONCE(rodata_test_data) != TEST_VALUE)) { pr_err("test data was changed\n"); return; } From a8355f086c19ab11b68e61e9804d1d4387cee1bb Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Fri, 29 Nov 2024 14:58:25 +0000 Subject: [PATCH 036/504] list_lru: expand list_lru_add() docs with info about sublists The documentation for list_lru_add() and list_lru_del() has not been updated since lru lists were originally introduced by commit a38e40824844 ("list: add a new LRU list type"). Back then, list_lru stored all of the items in a single list, but the implementation has since been expanded to use many sublists internally. Thus, update the docs to mention that the requirements about not using the item with several lists at the same time also applies not using different sublists. Also mention that list_lru items are reparented when the memcg is deleted as discussed on the LKML [1]. Also fix incorrect use of 'Return value:' which should be 'Return:'. 
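A hedged usage sketch of the rule being documented (the object type and the way nid/memcg are obtained are made up for the example; the point is only that add and del must name the same sublist):

    struct my_object {
            struct list_head lru;   /* linkage handed to list_lru */
            /* ... */
    };

    /* Both calls must resolve to the same sublist, i.e. the same nid + memcg. */
    list_lru_add(&my_lru, &obj->lru, nid, memcg);
    /* ... later, with the same nid/memcg resolution ... */
    list_lru_del(&my_lru, &obj->lru, nid, memcg);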
Link: https://lore.kernel.org/all/Z0eXrllVhRI9Ag5b@dread.disaster.area/ [1] Link: https://lkml.kernel.org/r/20241129-list_lru_memcg_docs-v2-1-e285ff1c481b@google.com Signed-off-by: Alice Ryhl Reviewed-by: Dave Chinner Acked-by: Muchun Song Reviewed-by: Nhat Pham Cc: Johannes Weiner Cc: Michal Hocko Cc: Qi Zheng Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- include/linux/list_lru.h | 44 ++++++++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 11 deletions(-) diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h index 05c166811f6b..fe739d35a864 100644 --- a/include/linux/list_lru.h +++ b/include/linux/list_lru.h @@ -91,13 +91,24 @@ void memcg_reparent_list_lrus(struct mem_cgroup *memcg, struct mem_cgroup *paren * @memcg: the cgroup of the sublist to add the item to. * * If the element is already part of a list, this function returns doing - * nothing. Therefore the caller does not need to keep state about whether or - * not the element already belongs in the list and is allowed to lazy update - * it. Note however that this is valid for *a* list, not *this* list. If - * the caller organize itself in a way that elements can be in more than - * one type of list, it is up to the caller to fully remove the item from - * the previous list (with list_lru_del() for instance) before moving it - * to @lru. + * nothing. This means that it is not necessary to keep state about whether or + * not the element already belongs in the list. That said, this logic only + * works if the item is in *this* list. If the item might be in some other + * list, then you cannot rely on this check and you must remove it from the + * other list before trying to insert it. + * + * The lru list consists of many sublists internally; the @nid and @memcg + * parameters are used to determine which sublist to insert the item into. + * It's important to use the right value of @nid and @memcg when deleting the + * item, since it might otherwise get deleted from the wrong sublist. + * + * This also applies when attempting to insert the item multiple times - if + * the item is currently in one sublist and you call list_lru_add() again, you + * must pass the right @nid and @memcg parameters so that the same sublist is + * used. + * + * You must ensure that the memcg is not freed during this call (e.g., with + * rcu or by taking a css refcnt). * * Return: true if the list was updated, false otherwise */ @@ -113,7 +124,7 @@ bool list_lru_add(struct list_lru *lru, struct list_head *item, int nid, * memcg of the sublist is determined by @item list_head. This assumption is * valid for slab objects LRU such as dentries, inodes, etc. * - * Return value: true if the list was updated, false otherwise + * Return: true if the list was updated, false otherwise */ bool list_lru_add_obj(struct list_lru *lru, struct list_head *item); @@ -125,8 +136,19 @@ bool list_lru_add_obj(struct list_lru *lru, struct list_head *item); * @memcg: the cgroup of the sublist to delete the item from. * * This function works analogously as list_lru_add() in terms of list - * manipulation. The comments about an element already pertaining to - * a list are also valid for list_lru_del(). + * manipulation. + * + * The comments in list_lru_add() about an element already being in a list are + * also valid for list_lru_del(), that is, you can delete an item that has + * already been removed or never been added. 
However, if the item is in a + * list, it must be in *this* list, and you must pass the right value of @nid + * and @memcg so that the right sublist is used. + * + * You must ensure that the memcg is not freed during this call (e.g., with + * rcu or by taking a css refcnt). When a memcg is deleted, list_lru entries + * are automatically moved to the parent memcg. This is done in a race-free + * way, so during deletion of an memcg both the old and new memcg will resolve + * to the same sublist internally. * * Return: true if the list was updated, false otherwise */ @@ -142,7 +164,7 @@ bool list_lru_del(struct list_lru *lru, struct list_head *item, int nid, * memcg of the sublist is determined by @item list_head. This assumption is * valid for slab objects LRU such as dentries, inodes, etc. * - * Return value: true if the list was updated, false otherwise. + * Return: true if the list was updated, false otherwise. */ bool list_lru_del_obj(struct list_lru *lru, struct list_head *item); From 75f6dc5ee1c620c98c7ebe29d9d1217ac9cd2fbe Mon Sep 17 00:00:00 2001 From: guanjing Date: Sun, 17 Nov 2024 15:12:31 +0800 Subject: [PATCH 037/504] selftests: mm: fix conversion specifiers in transact_test() Lots of incorrect conversion specifiers. Fix them. Link: https://lkml.kernel.org/r/20241117071231.177864-1-guanjing@cmss.chinamobile.com Fixes: 46fd75d4a3c9 ("selftests: mm: add pagemap ioctl tests") Signed-off-by: guanjing Reviewed-by: Muhammad Usama Anjum Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pagemap_ioctl.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c index bcc73b4e805c..fdafce0654e9 100644 --- a/tools/testing/selftests/mm/pagemap_ioctl.c +++ b/tools/testing/selftests/mm/pagemap_ioctl.c @@ -1405,9 +1405,9 @@ static void transact_test(int page_size) memset(mem, 0, 0x1000 * nthreads * pages_per_thread); count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size); - ksft_test_result(count > 0, "%s count %d\n", __func__, count); + ksft_test_result(count > 0, "%s count %u\n", __func__, count); count = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size); - ksft_test_result(count == 0, "%s count %d\n", __func__, count); + ksft_test_result(count == 0, "%s count %u\n", __func__, count); finish = 0; for (i = 0; i < nthreads; ++i) @@ -1429,7 +1429,7 @@ static void transact_test(int page_size) ksft_exit_fail_msg("pthread_barrier_wait\n"); if (count > nthreads * access_per_thread) - ksft_exit_fail_msg("Too big count %d expected %d, iter %d\n", + ksft_exit_fail_msg("Too big count %u expected %u, iter %u\n", count, nthreads * access_per_thread, i); c = get_dirty_pages_reset(mem, nthreads * pages_per_thread, 1, page_size); @@ -1454,7 +1454,7 @@ static void transact_test(int page_size) * access and application gets page fault again for the same write. 
*/ if (count < nthreads * access_per_thread) { - ksft_test_result_fail("Lost update, iter %d, %d vs %d.\n", i, count, + ksft_test_result_fail("Lost update, iter %u, %u vs %u.\n", i, count, nthreads * access_per_thread); return; } @@ -1467,7 +1467,7 @@ static void transact_test(int page_size) finish = 1; pthread_barrier_wait(&end_barrier); - ksft_test_result_pass("%s Extra pages %u (%.1lf%%), extra thread faults %d.\n", __func__, + ksft_test_result_pass("%s Extra pages %u (%.1lf%%), extra thread faults %u.\n", __func__, extra_pages, 100.0 * extra_pages / (iter_count * nthreads * access_per_thread), extra_thread_faults); From d475c57535c8a55deb2ea2a3abc1dc89db887354 Mon Sep 17 00:00:00 2001 From: "Dr. David Alan Gilbert" Date: Sat, 16 Nov 2024 15:14:46 +0000 Subject: [PATCH 038/504] filemap: remove unused folio_add_wait_queue folio_add_wait_queue() has been unused since 2021's commit 850cba069c26 ("cachefiles: Delete the cachefiles driver pending rewrite") Remove it. Link: https://lkml.kernel.org/r/20241116151446.95555-1-linux@treblig.org Signed-off-by: Dr. David Alan Gilbert Reviewed-by: David Hildenbrand Reviewed-by: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 5 ----- mm/filemap.c | 19 ------------------- 2 files changed, 24 deletions(-) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index d796c8a33647..fc2e1319c7bb 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1280,11 +1280,6 @@ void folio_end_private_2(struct folio *folio); void folio_wait_private_2(struct folio *folio); int folio_wait_private_2_killable(struct folio *folio); -/* - * Add an arbitrary waiter to a page's wait queue - */ -void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter); - /* * Fault in userspace address range. */ diff --git a/mm/filemap.c b/mm/filemap.c index 4f476411a9a2..b6494d2d3bc2 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1463,25 +1463,6 @@ static int folio_put_wait_locked(struct folio *folio, int state) return folio_wait_bit_common(folio, PG_locked, state, DROP); } -/** - * folio_add_wait_queue - Add an arbitrary waiter to a folio's wait queue - * @folio: Folio defining the wait queue of interest - * @waiter: Waiter to add to the queue - * - * Add an arbitrary @waiter to the wait queue for the nominated @folio. - */ -void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter) -{ - wait_queue_head_t *q = folio_waitqueue(folio); - unsigned long flags; - - spin_lock_irqsave(&q->lock, flags); - __add_wait_queue_entry_tail(q, waiter); - folio_set_waiters(folio); - spin_unlock_irqrestore(&q->lock, flags); -} -EXPORT_SYMBOL_GPL(folio_add_wait_queue); - /** * folio_unlock - Unlock a locked folio. * @folio: The folio. From 672e5ad505bdb7f1bfc8076873facb40dce98474 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Sat, 16 Nov 2024 01:48:03 +0000 Subject: [PATCH 039/504] maple_tree: index has been checked to be smaller than pivot Patch series "mas_anode_descend() related cleanup". Some cleanup related to mas_anode_descend(). This patch (of 3): At the beginning of loop, it has checked the range is in lower bounds. Link: https://lkml.kernel.org/r/20241116014805.11547-1-richard.weiyang@gmail.com Link: https://lkml.kernel.org/r/20241116014805.11547-2-richard.weiyang@gmail.com Signed-off-by: Wei Yang Reviewed-by: Liam R. 
Howlett Cc: Lorenzo Stoakes Cc: Sidhartha Kumar Signed-off-by: Andrew Morton --- lib/maple_tree.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 36e603645a30..57603524e2dd 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -4882,13 +4882,12 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) found = true; goto done; } - if (mas->index <= pivot) { - mas->node = mas_slot(mas, slots, offset); - mas->min = min; - mas->max = pivot; - offset = 0; - break; - } + + mas->node = mas_slot(mas, slots, offset); + mas->min = min; + mas->max = pivot; + offset = 0; + break; } next_slot: min = pivot + 1; From c1f6da2598b3cef806652c9450aebc2542ae2e7b Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Sat, 16 Nov 2024 01:48:04 +0000 Subject: [PATCH 040/504] maple_tree: not possible to be a root node after loop Empty tree and single entry tree is handled else whether, so the maple tree here must be a tree with nodes. If the height is 1 and we found the gap, it will jump to *done* since it is also a leaf. If the height is more than one, and there may be an available range, we will descend the tree, which is not root anymore. If there is no available range, we will set error and return. This means the check for root node here is not necessary. Link: https://lkml.kernel.org/r/20241116014805.11547-3-richard.weiyang@gmail.com Signed-off-by: Wei Yang Reviewed-by: Liam R. Howlett Cc: Lorenzo Stoakes Cc: Sidhartha Kumar Signed-off-by: Andrew Morton --- lib/maple_tree.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 57603524e2dd..3174234d77cb 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -4880,7 +4880,7 @@ static inline bool mas_anode_descend(struct ma_state *mas, unsigned long size) if (gap >= size) { if (ma_is_leaf(type)) { found = true; - goto done; + break; } mas->node = mas_slot(mas, slots, offset); @@ -4897,9 +4897,6 @@ next_slot: } } - if (mte_is_root(mas->node)) - found = true; -done: mas->offset = offset; return found; } From acb751ac74438a5c38873560af217437043d4ebb Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Sat, 16 Nov 2024 01:48:05 +0000 Subject: [PATCH 041/504] maple_tree: we don't set offset to MAPLE_NODE_SLOTS on error When mas_anode_descend() not find gap, it sets -EBUSY instead of setting offset to MAPLE_NODE_SLOTS. Link: https://lkml.kernel.org/r/20241116014805.11547-4-richard.weiyang@gmail.com Signed-off-by: Wei Yang Reviewed-by: Liam R. Howlett Cc: Lorenzo Stoakes Cc: Sidhartha Kumar Signed-off-by: Andrew Morton --- lib/maple_tree.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 3174234d77cb..fe7f9e1f5bbb 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -5000,8 +5000,8 @@ static inline void mas_awalk(struct ma_state *mas, unsigned long size) * There are 4 options: * go to child (descend) * go back to parent (ascend) - * no gap found. (return, slot == MAPLE_NODE_SLOTS) - * found the gap. (return, slot != MAPLE_NODE_SLOTS) + * no gap found. (return, error == -EBUSY) + * found the gap. 
(return) */ while (!mas_is_err(mas) && !mas_anode_descend(mas, size)) { if (last == mas->node) @@ -5086,9 +5086,6 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, return xa_err(mas->node); offset = mas->offset; - if (unlikely(offset == MAPLE_NODE_SLOTS)) - return -EBUSY; - node = mas_mn(mas); mt = mte_node_type(mas->node); pivots = ma_pivots(node, mt); From dac95052cd8b352ebfd2c15094bfa7062b05ec1f Mon Sep 17 00:00:00 2001 From: Jeff Xu Date: Sat, 16 Nov 2024 00:50:58 +0000 Subject: [PATCH 042/504] selftest/mm: remove seal_elf Remove seal_elf, which is a demo of mseal, we no longer need this. Link: https://lkml.kernel.org/r/20241116005058.69091-1-jeffxu@chromium.org Signed-off-by: Jeff Xu Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/.gitignore | 1 - tools/testing/selftests/mm/Makefile | 1 - tools/testing/selftests/mm/seal_elf.c | 137 -------------------------- 3 files changed, 139 deletions(-) delete mode 100644 tools/testing/selftests/mm/seal_elf.c diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore index 085b06750bf4..a51a947b2d1d 100644 --- a/tools/testing/selftests/mm/.gitignore +++ b/tools/testing/selftests/mm/.gitignore @@ -52,7 +52,6 @@ va_high_addr_switch hugetlb_fault_after_madv hugetlb_madv_vs_map mseal_test -seal_elf droppable hugetlb_dio pkey_sighandler_tests_32 diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index 3de23ea4663f..f2db43c64f83 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -75,7 +75,6 @@ TEST_GEN_FILES += mrelease_test TEST_GEN_FILES += mremap_dontunmap TEST_GEN_FILES += mremap_test TEST_GEN_FILES += mseal_test -TEST_GEN_FILES += seal_elf TEST_GEN_FILES += on-fault-limit TEST_GEN_FILES += pagemap_ioctl TEST_GEN_FILES += thuge-gen diff --git a/tools/testing/selftests/mm/seal_elf.c b/tools/testing/selftests/mm/seal_elf.c deleted file mode 100644 index d9f8ba8d5050..000000000000 --- a/tools/testing/selftests/mm/seal_elf.c +++ /dev/null @@ -1,137 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include "../kselftest.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include "mseal_helpers.h" - -/* - * define sys_xyx to call syscall directly. 
- */ -static int sys_mseal(void *start, size_t len) -{ - int sret; - - errno = 0; - sret = syscall(__NR_mseal, start, len, 0); - return sret; -} - -static inline int sys_mprotect(void *ptr, size_t size, unsigned long prot) -{ - int sret; - - errno = 0; - sret = syscall(__NR_mprotect, ptr, size, prot); - return sret; -} - -static bool seal_support(void) -{ - int ret; - void *ptr; - unsigned long page_size = getpagesize(); - - ptr = mmap(NULL, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); - if (ptr == (void *) -1) - return false; - - ret = sys_mseal(ptr, page_size); - if (ret < 0) - return false; - - return true; -} - -const char somestr[4096] = {"READONLY"}; - -static void test_seal_elf(void) -{ - int ret; - FILE *maps; - char line[512]; - uintptr_t addr_start, addr_end; - char prot[5]; - char filename[256]; - unsigned long page_size = getpagesize(); - unsigned long long ptr = (unsigned long long) somestr; - char *somestr2 = (char *)somestr; - - /* - * Modify the protection of readonly somestr - */ - if (((unsigned long long)ptr % page_size) != 0) - ptr = (unsigned long long)ptr & ~(page_size - 1); - - ksft_print_msg("somestr = %s\n", somestr); - ksft_print_msg("change protection to rw\n"); - ret = sys_mprotect((void *)ptr, page_size, PROT_READ|PROT_WRITE); - FAIL_TEST_IF_FALSE(!ret); - *somestr2 = 'A'; - ksft_print_msg("somestr is modified to: %s\n", somestr); - ret = sys_mprotect((void *)ptr, page_size, PROT_READ); - FAIL_TEST_IF_FALSE(!ret); - - maps = fopen("/proc/self/maps", "r"); - FAIL_TEST_IF_FALSE(maps); - - /* - * apply sealing to elf binary - */ - while (fgets(line, sizeof(line), maps)) { - if (sscanf(line, "%lx-%lx %4s %*x %*x:%*x %*u %255[^\n]", - &addr_start, &addr_end, prot, filename) == 4) { - if (strlen(filename)) { - /* - * seal the mapping if read only. - */ - if (strstr(prot, "r-")) { - ret = sys_mseal((void *)addr_start, addr_end - addr_start); - FAIL_TEST_IF_FALSE(!ret); - ksft_print_msg("sealed: %lx-%lx %s %s\n", - addr_start, addr_end, prot, filename); - if ((uintptr_t) somestr >= addr_start && - (uintptr_t) somestr <= addr_end) - ksft_print_msg("mapping for somestr found\n"); - } - } - } - } - fclose(maps); - - ret = sys_mprotect((void *)ptr, page_size, PROT_READ | PROT_WRITE); - FAIL_TEST_IF_FALSE(ret < 0); - ksft_print_msg("somestr is sealed, mprotect is rejected\n"); - - REPORT_TEST_PASS(); -} - -int main(int argc, char **argv) -{ - bool test_seal = seal_support(); - - ksft_print_header(); - ksft_print_msg("pid=%d\n", getpid()); - - if (!test_seal) - ksft_exit_skip("sealing not supported, check CONFIG_64BIT\n"); - - ksft_set_plan(1); - - test_seal_elf(); - - ksft_finished(); -} From 58adb659c969ac70071846f50200bda04da1c7e4 Mon Sep 17 00:00:00 2001 From: Keren Sun Date: Fri, 15 Nov 2024 15:57:42 -0800 Subject: [PATCH 043/504] mm: prefer 'unsigned int' to bare use of 'unsigned' Patch series "mm: fix format issues and param types" Change the param 'mode' from type 'unsigned' to 'unsigned int' in memcg_event_wake() and memcg_oom_wake_function(), and for the param 'nid' in VM_BUG_ON(). 
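As a minimal illustration of the preferred spelling (a hypothetical declaration, not taken from this patch), checkpatch warns "Prefer 'unsigned int' to bare use of 'unsigned'" for the first form:

	/* bare 'unsigned' - discouraged */
	static int example_wake(wait_queue_entry_t *wait, unsigned mode, int sync, void *key);

	/* explicit 'unsigned int' - identical semantics, preferred style */
	static int example_wake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key);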
Link: https://lkml.kernel.org/r/20241115235744.1419580-2-kerensun@google.com Signed-off-by: Keren Sun Acked-by: Shakeel Butt Reviewed-by: Roman Gushchin Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/memcontrol-v1.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c index a071fa43d479..914fbd5b65c8 100644 --- a/mm/memcontrol-v1.c +++ b/mm/memcontrol-v1.c @@ -899,7 +899,7 @@ static void memcg_event_remove(struct work_struct *work) * * Called with wqh->lock held and interrupts disabled. */ -static int memcg_event_wake(wait_queue_entry_t *wait, unsigned mode, +static int memcg_event_wake(wait_queue_entry_t *wait, unsigned int mode, int sync, void *key) { struct mem_cgroup_event *event = @@ -1202,7 +1202,7 @@ struct oom_wait_info { }; static int memcg_oom_wake_function(wait_queue_entry_t *wait, - unsigned mode, int sync, void *arg) + unsigned int mode, int sync, void *arg) { struct mem_cgroup *wake_memcg = (struct mem_cgroup *)arg; struct mem_cgroup *oom_wait_memcg; @@ -1644,7 +1644,7 @@ static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, unsigned long nr = 0; enum lru_list lru; - VM_BUG_ON((unsigned)nid >= nr_node_ids); + VM_BUG_ON((unsigned int)nid >= nr_node_ids); for_each_lru(lru) { if (!(BIT(lru) & lru_mask)) From c0f6fa1ec55e0f7d6cd0194b4b5c9520867fa791 Mon Sep 17 00:00:00 2001 From: Keren Sun Date: Fri, 15 Nov 2024 15:57:43 -0800 Subject: [PATCH 044/504] mm: remove unnecessary whitespace before a quoted newline Remove whitespaces before newlines for strings in pr_warn_once() Link: https://lkml.kernel.org/r/20241115235744.1419580-3-kerensun@google.com Signed-off-by: Keren Sun Reviewed-by: Roman Gushchin Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/memcontrol-v1.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c index 914fbd5b65c8..c9fe524d341a 100644 --- a/mm/memcontrol-v1.c +++ b/mm/memcontrol-v1.c @@ -1040,13 +1040,13 @@ static ssize_t memcg_write_event_control(struct kernfs_open_file *of, } else if (!strcmp(name, "memory.oom_control")) { pr_warn_once("oom_control is deprecated and will be removed. " "Please report your usecase to linux-mm-@kvack.org" - " if you depend on this functionality. \n"); + " if you depend on this functionality.\n"); event->register_event = mem_cgroup_oom_register_event; event->unregister_event = mem_cgroup_oom_unregister_event; } else if (!strcmp(name, "memory.pressure_level")) { pr_warn_once("pressure_level is deprecated and will be removed. " "Please report your usecase to linux-mm-@kvack.org " - "if you depend on this functionality. \n"); + "if you depend on this functionality.\n"); event->register_event = vmpressure_register_event; event->unregister_event = vmpressure_unregister_event; } else if (!strcmp(name, "memory.memsw.usage_in_bytes")) { @@ -1881,7 +1881,7 @@ static int mem_cgroup_oom_control_write(struct cgroup_subsys_state *css, pr_warn_once("oom_control is deprecated and will be removed. " "Please report your usecase to linux-mm-@kvack.org if you " - "depend on this functionality. 
\n"); + "depend on this functionality.\n"); /* cannot set to root cgroup and only 0 and 1 are allowed */ if (mem_cgroup_is_root(memcg) || !((val == 0) || (val == 1))) From a33315bbb30f3a8d72bd866aec4b76ef2c42d723 Mon Sep 17 00:00:00 2001 From: Keren Sun Date: Fri, 15 Nov 2024 15:57:44 -0800 Subject: [PATCH 045/504] mm: remove the non-useful else after a break in a if statement Remove the else block since there is already a break in the statement of if (iter->oom_lock), just set iter->oom_lock true after the if block ends. Link: https://lkml.kernel.org/r/20241115235744.1419580-4-kerensun@google.com Signed-off-by: Keren Sun Reviewed-by: Roman Gushchin Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/memcontrol-v1.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/memcontrol-v1.c b/mm/memcontrol-v1.c index c9fe524d341a..2be6b9112808 100644 --- a/mm/memcontrol-v1.c +++ b/mm/memcontrol-v1.c @@ -1134,8 +1134,8 @@ static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg) failed = iter; mem_cgroup_iter_break(memcg, iter); break; - } else - iter->oom_lock = true; + } + iter->oom_lock = true; } if (failed) { From fc935b90b0011bd1b5700a03dcc089812810d43f Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 15 Nov 2024 19:02:28 +0000 Subject: [PATCH 046/504] mm: swap_cgroup: allocate swap_cgroup map using vcalloc() Currently swap_cgroup's map is constructed as a vmalloc()'s-based array of pointers to individual struct pages. This brings an unnecessary complexity into the code. This patch turns the swap_cgroup's map into a single space allocated by vcalloc(). Link: https://lkml.kernel.org/r/20241115190229.676440-1-roman.gushchin@linux.dev Signed-off-by: Roman Gushchin Acked-by: Shakeel Butt Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/swap_cgroup.c | 83 ++++++++---------------------------------------- 1 file changed, 13 insertions(+), 70 deletions(-) diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index da1278f0563b..18de498c84a4 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -6,17 +6,18 @@ #include /* depends on mm.h include */ static DEFINE_MUTEX(swap_cgroup_mutex); + +struct swap_cgroup { + unsigned short id; +}; + struct swap_cgroup_ctrl { - struct page **map; - unsigned long length; + struct swap_cgroup *map; spinlock_t lock; }; static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; -struct swap_cgroup { - unsigned short id; -}; #define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup)) /* @@ -33,44 +34,10 @@ struct swap_cgroup { * TODO: we can push these buffers out to HIGHMEM. */ -/* - * allocate buffer for swap_cgroup. 
- */ -static int swap_cgroup_prepare(int type) -{ - struct page *page; - struct swap_cgroup_ctrl *ctrl; - unsigned long idx, max; - - ctrl = &swap_cgroup_ctrl[type]; - - for (idx = 0; idx < ctrl->length; idx++) { - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) - goto not_enough_page; - ctrl->map[idx] = page; - - if (!(idx % SWAP_CLUSTER_MAX)) - cond_resched(); - } - return 0; -not_enough_page: - max = idx; - for (idx = 0; idx < max; idx++) - __free_page(ctrl->map[idx]); - - return -ENOMEM; -} - static struct swap_cgroup *__lookup_swap_cgroup(struct swap_cgroup_ctrl *ctrl, pgoff_t offset) { - struct page *mappage; - struct swap_cgroup *sc; - - mappage = ctrl->map[offset / SC_PER_PAGE]; - sc = page_address(mappage); - return sc + offset % SC_PER_PAGE; + return &ctrl->map[offset]; } static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, @@ -168,32 +135,20 @@ unsigned short lookup_swap_cgroup_id(swp_entry_t ent) int swap_cgroup_swapon(int type, unsigned long max_pages) { - void *array; - unsigned long length; + struct swap_cgroup *map; struct swap_cgroup_ctrl *ctrl; if (mem_cgroup_disabled()) return 0; - length = DIV_ROUND_UP(max_pages, SC_PER_PAGE); - - array = vcalloc(length, sizeof(void *)); - if (!array) + map = vcalloc(max_pages, sizeof(struct swap_cgroup)); + if (!map) goto nomem; ctrl = &swap_cgroup_ctrl[type]; mutex_lock(&swap_cgroup_mutex); - ctrl->length = length; - ctrl->map = array; + ctrl->map = map; spin_lock_init(&ctrl->lock); - if (swap_cgroup_prepare(type)) { - /* memory shortage */ - ctrl->map = NULL; - ctrl->length = 0; - mutex_unlock(&swap_cgroup_mutex); - vfree(array); - goto nomem; - } mutex_unlock(&swap_cgroup_mutex); return 0; @@ -205,8 +160,7 @@ nomem: void swap_cgroup_swapoff(int type) { - struct page **map; - unsigned long i, length; + struct swap_cgroup *map; struct swap_cgroup_ctrl *ctrl; if (mem_cgroup_disabled()) @@ -215,19 +169,8 @@ void swap_cgroup_swapoff(int type) mutex_lock(&swap_cgroup_mutex); ctrl = &swap_cgroup_ctrl[type]; map = ctrl->map; - length = ctrl->length; ctrl->map = NULL; - ctrl->length = 0; mutex_unlock(&swap_cgroup_mutex); - if (map) { - for (i = 0; i < length; i++) { - struct page *page = map[i]; - if (page) - __free_page(page); - if (!(i % SWAP_CLUSTER_MAX)) - cond_resched(); - } - vfree(map); - } + kvfree(map); } From 8099b5a42f10ccef3de9a7054acf6a6624b50b21 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Fri, 15 Nov 2024 19:02:29 +0000 Subject: [PATCH 047/504] mm: swap_cgroup: get rid of __lookup_swap_cgroup() Because swap_cgroup map is now virtually contiguous, swap_cgroup_record() can be simplified, which eliminates a need to use __lookup_swap_cgroup(). Now as __lookup_swap_cgroup() is really trivial and is used only once, it can be inlined. Link: https://lkml.kernel.org/r/20241115190229.676440-2-roman.gushchin@linux.dev Signed-off-by: Roman Gushchin Acked-by: Shakeel Butt Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/swap_cgroup.c | 18 ++---------------- 1 file changed, 2 insertions(+), 16 deletions(-) diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index 18de498c84a4..0db907308c94 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -33,13 +33,6 @@ static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; * * TODO: we can push these buffers out to HIGHMEM. 
*/ - -static struct swap_cgroup *__lookup_swap_cgroup(struct swap_cgroup_ctrl *ctrl, - pgoff_t offset) -{ - return &ctrl->map[offset]; -} - static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, struct swap_cgroup_ctrl **ctrlp) { @@ -49,7 +42,7 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, ctrl = &swap_cgroup_ctrl[swp_type(ent)]; if (ctrlp) *ctrlp = ctrl; - return __lookup_swap_cgroup(ctrl, offset); + return &ctrl->map[offset]; } /** @@ -104,16 +97,9 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, spin_lock_irqsave(&ctrl->lock, flags); old = sc->id; - for (;;) { + for (; offset < end; offset++, sc++) { VM_BUG_ON(sc->id != old); sc->id = id; - offset++; - if (offset == end) - break; - if (offset % SC_PER_PAGE) - sc++; - else - sc = __lookup_swap_cgroup(ctrl, offset); } spin_unlock_irqrestore(&ctrl->lock, flags); From 1f3715d127b15a9e6d2a5af8241555145ccb7ab2 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 29 Nov 2024 20:09:32 -0800 Subject: [PATCH 048/504] mm-swap_cgroup-allocate-swap_cgroup-map-using-vcalloc-fix s/vfree/kvfree/, per Shakeel Cc: Johannes Weiner Cc: Michal Hocko Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/swap_cgroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index 0db907308c94..f63d1aa072a1 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -158,5 +158,5 @@ void swap_cgroup_swapoff(int type) ctrl->map = NULL; mutex_unlock(&swap_cgroup_mutex); - kvfree(map); + vfree(map); } From b86414802dff5ecb6ac810d1a0ceea11124eefd7 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Wed, 13 Nov 2024 03:16:14 +0000 Subject: [PATCH 049/504] maple_tree: simplify split calculation Patch series "simplify split calculation", v3. This patch (of 3): The current calculation for splitting nodes tries to enforce a minimum span on the leaf nodes. This code is complex and never worked correctly to begin with, due to the min value being passed as 0 for all leaves. The calculation should just split the data as equally as possible between the new nodes. Note that b_end will be one more than the data, so the left side is still favoured in the calculation. The current code may also lead to a deficient node by not leaving enough data for the right side of the split. This issue is also addressed with the split calculation change. [Liam.Howlett@Oracle.com: rephrase the change log] Link: https://lkml.kernel.org/r/20241113031616.10530-1-richard.weiyang@gmail.com Link: https://lkml.kernel.org/r/20241113031616.10530-2-richard.weiyang@gmail.com Fixes: 54a611b60590 ("Maple Tree: add new data structure") Signed-off-by: Wei Yang Reviewed-by: Liam R. Howlett Cc: Sidhartha Kumar Cc: Lorenzo Stoakes Cc: Signed-off-by: Andrew Morton --- lib/maple_tree.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index fe7f9e1f5bbb..ca8ae1e1cc0a 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -1863,11 +1863,11 @@ static inline int mab_no_null_split(struct maple_big_node *b_node, * Return: The first split location. The middle split is set in @mid_split. */ static inline int mab_calc_split(struct ma_state *mas, - struct maple_big_node *bn, unsigned char *mid_split, unsigned long min) + struct maple_big_node *bn, unsigned char *mid_split) { unsigned char b_end = bn->b_end; int split = b_end / 2; /* Assume equal split. 
*/ - unsigned char slot_min, slot_count = mt_slots[bn->type]; + unsigned char slot_count = mt_slots[bn->type]; /* * To support gap tracking, all NULL entries are kept together and a node cannot @@ -1900,18 +1900,7 @@ static inline int mab_calc_split(struct ma_state *mas, split = b_end / 3; *mid_split = split * 2; } else { - slot_min = mt_min_slots[bn->type]; - *mid_split = 0; - /* - * Avoid having a range less than the slot count unless it - * causes one node to be deficient. - * NOTE: mt_min_slots is 1 based, b_end and split are zero. - */ - while ((split < slot_count - 1) && - ((bn->pivot[split] - min) < slot_count - 1) && - (b_end - split > slot_min)) - split++; } /* Avoid ending a node on a NULL entry */ @@ -2377,7 +2366,7 @@ static inline struct maple_enode static inline unsigned char mas_mab_to_node(struct ma_state *mas, struct maple_big_node *b_node, struct maple_enode **left, struct maple_enode **right, struct maple_enode **middle, - unsigned char *mid_split, unsigned long min) + unsigned char *mid_split) { unsigned char split = 0; unsigned char slot_count = mt_slots[b_node->type]; @@ -2390,7 +2379,7 @@ static inline unsigned char mas_mab_to_node(struct ma_state *mas, if (b_node->b_end < slot_count) { split = b_node->b_end; } else { - split = mab_calc_split(mas, b_node, mid_split, min); + split = mab_calc_split(mas, b_node, mid_split); *right = mas_new_ma_node(mas, b_node); } @@ -2877,7 +2866,7 @@ static void mas_spanning_rebalance(struct ma_state *mas, mast->bn->b_end--; mast->bn->type = mte_node_type(mast->orig_l->node); split = mas_mab_to_node(mas, mast->bn, &left, &right, &middle, - &mid_split, mast->orig_l->min); + &mid_split); mast_set_split_parents(mast, left, middle, right, split, mid_split); mast_cp_to_nodes(mast, left, middle, right, split, mid_split); @@ -3365,7 +3354,7 @@ static void mas_split(struct ma_state *mas, struct maple_big_node *b_node) if (mas_push_data(mas, height, &mast, false)) break; - split = mab_calc_split(mas, b_node, &mid_split, prev_l_mas.min); + split = mab_calc_split(mas, b_node, &mid_split); mast_split_data(&mast, mas, split); /* * Usually correct, mab_mas_cp in the above call overwrites From e1d4306b0549b1d33688280103c772e6ebf40bd7 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Wed, 13 Nov 2024 03:16:15 +0000 Subject: [PATCH 050/504] maple_tree: add a test check deficient node Add a test to assert when resulting a deficient node on splitting. We can achieve this by build a tree with two nodes. With the left node with consecutive data from 0 and leave some room for the final insert to locate in left node. And the right node a full node to force the split happens on the left node. Link: https://lkml.kernel.org/r/20241113031616.10530-3-richard.weiyang@gmail.com Signed-off-by: Wei Yang Reviewed-by: Liam R. 
Howlett Cc: Sidhartha Kumar Cc: Lorenzo Stoakes Signed-off-by: Andrew Morton --- lib/test_maple_tree.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 704cb1093ae8..72bda304b595 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -1563,6 +1563,30 @@ static noinline void __init check_root_expand(struct maple_tree *mt) mas_unlock(&mas); } +static noinline void __init check_deficient_node(struct maple_tree *mt) +{ + MA_STATE(mas, mt, 0, 0); + int count; + + mas_lock(&mas); + for (count = 0; count < 10; count++) { + mas_set(&mas, count); + mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL); + } + + for (count = 20; count < 39; count++) { + mas_set(&mas, count); + mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL); + } + + for (count = 10; count < 12; count++) { + mas_set(&mas, count); + mas_store_gfp(&mas, xa_mk_value(count), GFP_KERNEL); + } + mas_unlock(&mas); + mt_validate(mt); +} + static noinline void __init check_gap_combining(struct maple_tree *mt) { struct maple_enode *mn1, *mn2; @@ -3796,6 +3820,10 @@ static int __init maple_tree_seed(void) goto skip; #endif + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_deficient_node(&tree); + mtree_destroy(&tree); + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); check_store_null(&tree); mtree_destroy(&tree); From 539c2974950033fe05212c5d104a2796d948ada2 Mon Sep 17 00:00:00 2001 From: Wei Yang Date: Wed, 13 Nov 2024 03:16:16 +0000 Subject: [PATCH 051/504] maple_tree: only root node could be deficient Each level's rightmost node should have (max == ULONG_MAX). This means current validation skips the right most node on each level. Only the root node may be below the minimum data threshold. Link: https://lkml.kernel.org/r/20241113031616.10530-4-richard.weiyang@gmail.com Signed-off-by: Wei Yang Reviewed-by: Liam R. Howlett Cc: Sidhartha Kumar Cc: Lorenzo Stoakes Signed-off-by: Andrew Morton --- lib/maple_tree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/maple_tree.c b/lib/maple_tree.c index ca8ae1e1cc0a..f7153ade1be5 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -7556,7 +7556,7 @@ void mt_validate(struct maple_tree *mt) MAS_WARN_ON(&mas, mte_dead_node(mas.node)); end = mas_data_end(&mas); if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) && - (mas.max != ULONG_MAX))) { + (!mte_is_root(mas.node)))) { pr_err("Invalid size %u of " PTR_FMT "\n", end, mas_mn(&mas)); } From e90bf8507ab7f873ca1d3747948c68cb487cb555 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Mon, 4 Nov 2024 11:23:18 -0300 Subject: [PATCH 052/504] lazy tlb: fix hotplug exit race with MMU_LAZY_TLB_SHOOTDOWN CPU unplug first calls __cpu_disable(), and that's where powerpc calls cleanup_cpu_mmu_context(), which clears this CPU from mm_cpumask() of all mms in the system. However this CPU may still be using a lazy tlb mm, and its mm_cpumask bit will be cleared from it. The CPU does not switch away from the lazy tlb mm until arch_cpu_idle_dead() calls idle_task_exit(). If that user mm exits in this window, it will not be subject to the lazy tlb mm shootdown and may be freed while in use as a lazy mm by the CPU that is being unplugged. cleanup_cpu_mmu_context() could be moved later, but it looks better to move the lazy tlb mm switching earlier. 
The problem with doing the lazy mm switching in idle_task_exit() is explained in commit bf2c59fce4074 ("sched/core: Fix illegal RCU from offline CPUs"), which added a wart to switch away from the mm but leave it set in active_mm to be cleaned up later. So instead, switch away from the lazy tlb mm at sched_cpu_wait_empty(), which is the last hotplug state before teardown (CPUHP_AP_SCHED_WAIT_EMPTY). This CPU will never switch to a user thread from this point, so it has no chance to pick up a new lazy tlb mm. This removes the lazy tlb mm handling wart in CPU unplug. With this, idle_task_exit() is not needed anymore and can be cleaned up. This leaves the prototype alone, to be cleaned after this change. herton: took the suggestions from https://lore.kernel.org/all/87jzvyprsw.ffs@tglx/ and made adjustments on the initial patch proposed by Nicholas. Link: https://lkml.kernel.org/r/20230524060455.147699-1-npiggin@gmail.com Link: https://lore.kernel.org/all/20230525205253.E2FAEC433EF@smtp.kernel.org/ Link: https://lkml.kernel.org/r/20241104142318.3295663-1-herton@redhat.com Fixes: 2655421ae69f ("lazy tlb: shoot lazies, non-refcounting lazy tlb mm reference handling scheme") Signed-off-by: Nicholas Piggin Signed-off-by: Herton R. Krzesinski Suggested-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Michael Ellerman Signed-off-by: Andrew Morton --- include/linux/sched/hotplug.h | 4 ---- kernel/cpu.c | 9 +++++---- kernel/sched/core.c | 22 +++++++++++++++------- 3 files changed, 20 insertions(+), 15 deletions(-) diff --git a/include/linux/sched/hotplug.h b/include/linux/sched/hotplug.h index 412cdaba33eb..17e04859b9a4 100644 --- a/include/linux/sched/hotplug.h +++ b/include/linux/sched/hotplug.h @@ -18,10 +18,6 @@ extern int sched_cpu_dying(unsigned int cpu); # define sched_cpu_dying NULL #endif -#ifdef CONFIG_HOTPLUG_CPU -extern void idle_task_exit(void); -#else static inline void idle_task_exit(void) {} -#endif #endif /* _LINUX_SCHED_HOTPLUG_H */ diff --git a/kernel/cpu.c b/kernel/cpu.c index b605334f8ee6..7f3bf759cbdf 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -905,12 +905,13 @@ static int finish_cpu(unsigned int cpu) struct mm_struct *mm = idle->active_mm; /* - * idle_task_exit() will have switched to &init_mm, now - * clean up any remaining active_mm state. + * sched_force_init_mm() ensured the use of &init_mm, + * drop that refcount now that the CPU has stopped. */ - if (mm != &init_mm) - idle->active_mm = &init_mm; + WARN_ON(mm != &init_mm); + idle->active_mm = NULL; mmdrop_lazy_tlb(mm); + return 0; } diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 755ae4659b64..6fbf4c373a50 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7930,19 +7930,26 @@ void sched_setnuma(struct task_struct *p, int nid) #ifdef CONFIG_HOTPLUG_CPU /* - * Ensure that the idle task is using init_mm right before its CPU goes - * offline. + * Invoked on the outgoing CPU in context of the CPU hotplug thread + * after ensuring that there are no user space tasks left on the CPU. + * + * If there is a lazy mm in use on the hotplug thread, drop it and + * switch to init_mm. + * + * The reference count on init_mm is dropped in finish_cpu(). 
*/ -void idle_task_exit(void) +static void sched_force_init_mm(void) { struct mm_struct *mm = current->active_mm; - BUG_ON(cpu_online(smp_processor_id())); - BUG_ON(current != this_rq()->idle); - if (mm != &init_mm) { - switch_mm(mm, &init_mm, current); + mmgrab_lazy_tlb(&init_mm); + local_irq_disable(); + current->active_mm = &init_mm; + switch_mm_irqs_off(mm, &init_mm, current); + local_irq_enable(); finish_arch_post_lock_switch(); + mmdrop_lazy_tlb(mm); } /* finish_cpu(), as ran on the BP, will clean up the active_mm state */ @@ -8344,6 +8351,7 @@ int sched_cpu_starting(unsigned int cpu) int sched_cpu_wait_empty(unsigned int cpu) { balance_hotplug_wait(); + sched_force_init_mm(); return 0; } From 53a2c8301ebaa92b435efd07ce2b347aa2cd5e4d Mon Sep 17 00:00:00 2001 From: Nihar Chaithanya Date: Fri, 11 Oct 2024 17:15:38 +0530 Subject: [PATCH 053/504] mm:kasan: fix sparse warnings: Should it be static? Yes, when making the global variables kasan_ptr_result and kasan_int_result as static volatile, the warnings are removed and the variable and assignments are retained, but when just static is used I understand that it might be optimized. Add a fix making the global varaibles - static volatile, removing the warnings: mm/kasan/kasan_test.c:36:6: warning: symbol 'kasan_ptr_result' was not declared. Should it be static? mm/kasan/kasan_test.c:37:5: warning: symbol 'kasan_int_result' was not declared. Should it be static? Link: https://lkml.kernel.org/r/20241011114537.35664-1-niharchaithanya@gmail.com Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202312261010.o0lRiI9b-lkp@intel.com/ Signed-off-by: Nihar Chaithanya Reviewed-by: Dmitry Vyukov Reviewed-by: Andrey Konovalov Cc: Andrey Ryabinin Cc: Shuah Khan Signed-off-by: Andrew Morton --- mm/kasan/kasan_test_c.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/kasan/kasan_test_c.c b/mm/kasan/kasan_test_c.c index 99d4ff0ed57a..59d673400085 100644 --- a/mm/kasan/kasan_test_c.c +++ b/mm/kasan/kasan_test_c.c @@ -47,8 +47,8 @@ static struct { * Some tests use these global variables to store return values from function * calls that could otherwise be eliminated by the compiler as dead code. */ -void *kasan_ptr_result; -int kasan_int_result; +static volatile void *kasan_ptr_result; +static volatile int kasan_int_result; /* Probe for console output: obtains test_status lines of interest. */ static void probe_console(void *ignore, const char *buf, size_t len) From d9ce99c94db3cb300da38d85fa23de1efd8eb149 Mon Sep 17 00:00:00 2001 From: gaoxiang17 Date: Fri, 20 Sep 2024 20:20:30 +0800 Subject: [PATCH 054/504] mm/page_alloc: add some detailed comments in can_steal_fallback Link: https://lkml.kernel.org/r/20240920122030.159751-1-gxxa03070307@gmail.com Signed-off-by: gaoxiang17 Signed-off-by: Andrew Morton --- mm/page_alloc.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 55d77fb6a1a3..9dc991c00dcb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1855,6 +1855,13 @@ static bool can_steal_fallback(unsigned int order, int start_mt) if (order >= pageblock_order) return true; + /* + * movable pages won't cause permanent fragmentation, so when you alloc small pages, + * you just need to temporarily steal unmovable or reclaimable pages that are closest + * to the request size. After a while, memory compact may occur to form large contiguous + * pages, and the next movable allocation may not need to steal. 
Unmovable and reclaimable + * allocation need to actually steal pages. + */ if (order >= pageblock_order / 2 || start_mt == MIGRATE_RECLAIMABLE || start_mt == MIGRATE_UNMOVABLE || From 2fabc02ba8bb18242fbe66076acf19bae4b3c761 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 29 Nov 2024 21:23:51 -0800 Subject: [PATCH 055/504] mm-page_alloc-add-some-detailed-comments-in-can_steal_fallback-fix tweak grammer, fit to 80 cols Cc: gaoxiang17 Signed-off-by: Andrew Morton --- mm/page_alloc.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 9dc991c00dcb..685d491451ff 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1856,11 +1856,12 @@ static bool can_steal_fallback(unsigned int order, int start_mt) return true; /* - * movable pages won't cause permanent fragmentation, so when you alloc small pages, - * you just need to temporarily steal unmovable or reclaimable pages that are closest - * to the request size. After a while, memory compact may occur to form large contiguous - * pages, and the next movable allocation may not need to steal. Unmovable and reclaimable - * allocation need to actually steal pages. + * Movable pages won't cause permanent fragmentation, so when you alloc + * small pages, you just need to temporarily steal unmovable or + * reclaimable pages that are closest to the request size. After a + * while, memory compaction may occur to form large contiguous pages, + * and the next movable allocation may not need to steal. Unmovable and + * reclaimable allocations need to actually steal pages. */ if (order >= pageblock_order / 2 || start_mt == MIGRATE_RECLAIMABLE || From 06f2afd434e429339b94b561cdff49847ab41a4f Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Tue, 3 Dec 2024 18:05:08 +0000 Subject: [PATCH 056/504] mm/vma: move brk() internals to mm/vma.c Patch series "mm/vma: make more mmap logic userland testable". This series carries on the work started in previous series and continued in commit 52956b0d7fb9 ("mm: isolate mmap internal logic to mm/vma.c"), moving the remainder of memory mapping implementation details logic into mm/vma.c allowing the bulk of the mapping logic to be unit tested. It is highly useful to do so, as this means we can both fundamentally test this core logic, and introduce regression tests to ensure any issues previously resolved do not recur. Vitally, this includes the do_brk_flags() function, meaning we have both core means of userland mapping memory now testable. Performance testing was performed after this change given the brk() system call's sensitivity to change, and no performance regression was observed. The stack expansion logic is also moved into mm/vma.c, which necessitates a change in the API exposed to the exec code, removing the invocation of the expand_downwards() function used in get_arg_page() and instead adding mmap_read_lock_maybe_expand() to wrap this. This patch (of 5): Now we have moved mmap_region() internals to mm/vma.c, making it available to userland testing, it makes sense to do the same with brk(). This continues the pattern of VMA heavy lifting being done in mm/vma.c in an environment where it can be subject to straightforward unit and regression testing, with other VMA-adjacent files becoming wrappers around this functionality. 
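For context, a minimal illustrative userland program (not part of this series) that drives the now-testable path; on growth, brk()/sbrk() end up in do_brk_flags():

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		void *old = sbrk(0);		/* current program break */

		if (sbrk(4096) == (void *)-1)	/* grow the brk VMA by one page */
			return 1;
		printf("break moved from %p to %p\n", old, sbrk(0));
		return brk(old);		/* shrink back to the old break */
	}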
Link: https://lkml.kernel.org/r/cover.1733248985.git.lorenzo.stoakes@oracle.com Link: https://lkml.kernel.org/r/3d24b9e67bb0261539ca921d1188a10a1b4d4357.1733248985.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Cc: Al Viro Cc: Christian Brauner Cc: Eric W. Biederman Cc: Jan Kara Cc: Jann Horn Cc: Kees Cook Cc: Liam R. Howlett Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/mmap.c | 85 +------------------------------- mm/vma.c | 82 ++++++++++++++++++++++++++++++ mm/vma.h | 3 ++ tools/testing/vma/vma_internal.h | 22 +++++++++ 4 files changed, 108 insertions(+), 84 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index aec208f90337..775db706b822 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -111,8 +111,7 @@ static int check_brk_limits(unsigned long addr, unsigned long len) return mlock_future_ok(current->mm, current->mm->def_flags, len) ? 0 : -EAGAIN; } -static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma, - unsigned long addr, unsigned long request, unsigned long flags); + SYSCALL_DEFINE1(brk, unsigned long, brk) { unsigned long newbrk, oldbrk, origbrk; @@ -1512,88 +1511,6 @@ out: return ret; } -/* - * do_brk_flags() - Increase the brk vma if the flags match. - * @vmi: The vma iterator - * @addr: The start address - * @len: The length of the increase - * @vma: The vma, - * @flags: The VMA Flags - * - * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags - * do not match then create a new anonymous VMA. Eventually we may be able to - * do some brk-specific accounting here. - */ -static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, - unsigned long addr, unsigned long len, unsigned long flags) -{ - struct mm_struct *mm = current->mm; - - /* - * Check against address space limits by the changed size - * Note: This happens *after* clearing old mappings in some code paths. - */ - flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; - if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) - return -ENOMEM; - - if (mm->map_count > sysctl_max_map_count) - return -ENOMEM; - - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) - return -ENOMEM; - - /* - * Expand the existing vma if possible; Note that singular lists do not - * occur after forking, so the expand will only happen on new VMAs. - */ - if (vma && vma->vm_end == addr) { - VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr)); - - vmg.prev = vma; - /* vmi is positioned at prev, which this mode expects. 
*/ - vmg.merge_flags = VMG_FLAG_JUST_EXPAND; - - if (vma_merge_new_range(&vmg)) - goto out; - else if (vmg_nomem(&vmg)) - goto unacct_fail; - } - - if (vma) - vma_iter_next_range(vmi); - /* create a vma struct for an anonymous mapping */ - vma = vm_area_alloc(mm); - if (!vma) - goto unacct_fail; - - vma_set_anonymous(vma); - vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); - vm_flags_init(vma, flags); - vma->vm_page_prot = vm_get_page_prot(flags); - vma_start_write(vma); - if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) - goto mas_store_fail; - - mm->map_count++; - validate_mm(mm); - ksm_add_vma(vma); -out: - perf_event_mmap(vma); - mm->total_vm += len >> PAGE_SHIFT; - mm->data_vm += len >> PAGE_SHIFT; - if (flags & VM_LOCKED) - mm->locked_vm += (len >> PAGE_SHIFT); - vm_flags_set(vma, VM_SOFTDIRTY); - return 0; - -mas_store_fail: - vm_area_free(vma); -unacct_fail: - vm_unacct_memory(len >> PAGE_SHIFT); - return -ENOMEM; -} - int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags) { struct mm_struct *mm = current->mm; diff --git a/mm/vma.c b/mm/vma.c index bb2119e5a0d0..7cd174daeeec 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -2481,3 +2481,85 @@ abort_munmap: vms_abort_munmap_vmas(&map.vms, &map.mas_detach); return error; } + +/* + * do_brk_flags() - Increase the brk vma if the flags match. + * @vmi: The vma iterator + * @addr: The start address + * @len: The length of the increase + * @vma: The vma, + * @flags: The VMA Flags + * + * Extend the brk VMA from addr to addr + len. If the VMA is NULL or the flags + * do not match then create a new anonymous VMA. Eventually we may be able to + * do some brk-specific accounting here. + */ +int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, + unsigned long addr, unsigned long len, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + + /* + * Check against address space limits by the changed size + * Note: This happens *after* clearing old mappings in some code paths. + */ + flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; + if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT)) + return -ENOMEM; + + if (mm->map_count > sysctl_max_map_count) + return -ENOMEM; + + if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT)) + return -ENOMEM; + + /* + * Expand the existing vma if possible; Note that singular lists do not + * occur after forking, so the expand will only happen on new VMAs. + */ + if (vma && vma->vm_end == addr) { + VMG_STATE(vmg, mm, vmi, addr, addr + len, flags, PHYS_PFN(addr)); + + vmg.prev = vma; + /* vmi is positioned at prev, which this mode expects. 
*/ + vmg.merge_flags = VMG_FLAG_JUST_EXPAND; + + if (vma_merge_new_range(&vmg)) + goto out; + else if (vmg_nomem(&vmg)) + goto unacct_fail; + } + + if (vma) + vma_iter_next_range(vmi); + /* create a vma struct for an anonymous mapping */ + vma = vm_area_alloc(mm); + if (!vma) + goto unacct_fail; + + vma_set_anonymous(vma); + vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT); + vm_flags_init(vma, flags); + vma->vm_page_prot = vm_get_page_prot(flags); + vma_start_write(vma); + if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) + goto mas_store_fail; + + mm->map_count++; + validate_mm(mm); + ksm_add_vma(vma); +out: + perf_event_mmap(vma); + mm->total_vm += len >> PAGE_SHIFT; + mm->data_vm += len >> PAGE_SHIFT; + if (flags & VM_LOCKED) + mm->locked_vm += (len >> PAGE_SHIFT); + vm_flags_set(vma, VM_SOFTDIRTY); + return 0; + +mas_store_fail: + vm_area_free(vma); +unacct_fail: + vm_unacct_memory(len >> PAGE_SHIFT); + return -ENOMEM; +} diff --git a/mm/vma.h b/mm/vma.h index 388d34748674..83a15d3a8285 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -247,6 +247,9 @@ unsigned long __mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, struct list_head *uf); +int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma, + unsigned long addr, unsigned long request, unsigned long flags); + static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma) { /* diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index e76ff579e1fd..7c3c15135c5b 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -39,6 +39,7 @@ #define VM_SHARED 0x00000008 #define VM_MAYREAD 0x00000010 #define VM_MAYWRITE 0x00000020 +#define VM_MAYEXEC 0x00000040 #define VM_GROWSDOWN 0x00000100 #define VM_PFNMAP 0x00000400 #define VM_LOCKED 0x00002000 @@ -58,6 +59,13 @@ /* This mask represents all the VMA flag bits used by mlock */ #define VM_LOCKED_MASK (VM_LOCKED | VM_LOCKONFAULT) +#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) + +#define VM_DATA_FLAGS_TSK_EXEC (VM_READ | VM_WRITE | TASK_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC + #ifdef CONFIG_64BIT /* VM is sealed, in vm_flags */ #define VM_SEALED _BITUL(63) @@ -122,10 +130,22 @@ enum { TASK_COMM_LEN = 16, }; +/* + * Flags for bug emulation. + * + * These occupy the top three bytes. + */ +enum { + READ_IMPLIES_EXEC = 0x0400000, +}; + struct task_struct { char comm[TASK_COMM_LEN]; pid_t pid; struct mm_struct *mm; + + /* Used for emulating ABI behavior of previous Linux versions: */ + unsigned int personality; }; struct task_struct *get_current(void); @@ -186,6 +206,8 @@ struct mm_struct { unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ unsigned long stack_vm; /* VM_STACK */ + + unsigned long def_flags; }; struct vma_lock { From 16fe59184b7e3c656398c680c5c0cc73b04348b2 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 4 Dec 2024 12:00:35 +0000 Subject: [PATCH 057/504] mm/vma: add missing personality header import Some architectures have different header dependency chains, we incorrectly failed to important linux/personality.h which broke MIPS. Fix this. 
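For reference, the dependency this satisfies is most likely VM_DATA_DEFAULT_FLAGS: on architectures such as MIPS it expands through TASK_EXEC, which reads current->personality, and READ_IMPLIES_EXEC comes from <linux/personality.h>. The definitions below mirror include/linux/mm.h (quoted here only for illustration):

	#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)

	#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)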
Link: https://lkml.kernel.org/r/2a717265-985f-45eb-9257-8b2857088ed4@lucifer.local Signed-off-by: Lorenzo Stoakes Signed-off-by: Andrew Morton --- mm/vma_internal.h | 1 + 1 file changed, 1 insertion(+) diff --git a/mm/vma_internal.h b/mm/vma_internal.h index fc5f172a36bd..2f05735ff190 100644 --- a/mm/vma_internal.h +++ b/mm/vma_internal.h @@ -35,6 +35,7 @@ #include #include #include +#include #include #include #include From 3a73569900ebe38f7af007879b2c952de751b29d Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Tue, 3 Dec 2024 18:05:09 +0000 Subject: [PATCH 058/504] mm/vma: move unmapped_area() internals to mm/vma.c We want to be able to unit test the unmapped area logic, so move it to mm/vma.c. The wrappers which invoke this remain in place in mm/mmap.c. In addition, naturally, update the existing test code to enable this to be compiled in userland. Link: https://lkml.kernel.org/r/53a57a52a64ea54e9d129d2e2abca3a538022379.1733248985.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Cc: Al Viro Cc: Christian Brauner Cc: Eric W. Biederman Cc: Jan Kara Cc: Jann Horn Cc: Kees Cook Cc: Liam R. Howlett Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/mmap.c | 109 ------------------------------- mm/vma.c | 109 +++++++++++++++++++++++++++++++ mm/vma.h | 3 + tools/testing/vma/vma.c | 6 ++ tools/testing/vma/vma_internal.h | 59 +++++++++++++++++ 5 files changed, 177 insertions(+), 109 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index 775db706b822..7aa372a75326 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -580,115 +580,6 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) } #endif /* __ARCH_WANT_SYS_OLD_MMAP */ -/** - * unmapped_area() - Find an area between the low_limit and the high_limit with - * the correct alignment and offset, all from @info. Note: current->mm is used - * for the search. - * - * @info: The unmapped area information including the range [low_limit - - * high_limit), the alignment offset and mask. - * - * Return: A memory address or -ENOMEM. - */ -static unsigned long unmapped_area(struct vm_unmapped_area_info *info) -{ - unsigned long length, gap; - unsigned long low_limit, high_limit; - struct vm_area_struct *tmp; - VMA_ITERATOR(vmi, current->mm, 0); - - /* Adjust search length to account for worst case alignment overhead */ - length = info->length + info->align_mask + info->start_gap; - if (length < info->length) - return -ENOMEM; - - low_limit = info->low_limit; - if (low_limit < mmap_min_addr) - low_limit = mmap_min_addr; - high_limit = info->high_limit; -retry: - if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length)) - return -ENOMEM; - - /* - * Adjust for the gap first so it doesn't interfere with the - * later alignment. The first step is the minimum needed to - * fulill the start gap, the next steps is the minimum to align - * that. It is the minimum needed to fulill both. 
- */ - gap = vma_iter_addr(&vmi) + info->start_gap; - gap += (info->align_offset - gap) & info->align_mask; - tmp = vma_next(&vmi); - if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ - if (vm_start_gap(tmp) < gap + length - 1) { - low_limit = tmp->vm_end; - vma_iter_reset(&vmi); - goto retry; - } - } else { - tmp = vma_prev(&vmi); - if (tmp && vm_end_gap(tmp) > gap) { - low_limit = vm_end_gap(tmp); - vma_iter_reset(&vmi); - goto retry; - } - } - - return gap; -} - -/** - * unmapped_area_topdown() - Find an area between the low_limit and the - * high_limit with the correct alignment and offset at the highest available - * address, all from @info. Note: current->mm is used for the search. - * - * @info: The unmapped area information including the range [low_limit - - * high_limit), the alignment offset and mask. - * - * Return: A memory address or -ENOMEM. - */ -static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) -{ - unsigned long length, gap, gap_end; - unsigned long low_limit, high_limit; - struct vm_area_struct *tmp; - VMA_ITERATOR(vmi, current->mm, 0); - - /* Adjust search length to account for worst case alignment overhead */ - length = info->length + info->align_mask + info->start_gap; - if (length < info->length) - return -ENOMEM; - - low_limit = info->low_limit; - if (low_limit < mmap_min_addr) - low_limit = mmap_min_addr; - high_limit = info->high_limit; -retry: - if (vma_iter_area_highest(&vmi, low_limit, high_limit, length)) - return -ENOMEM; - - gap = vma_iter_end(&vmi) - info->length; - gap -= (gap - info->align_offset) & info->align_mask; - gap_end = vma_iter_end(&vmi); - tmp = vma_next(&vmi); - if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ - if (vm_start_gap(tmp) < gap_end) { - high_limit = vm_start_gap(tmp); - vma_iter_reset(&vmi); - goto retry; - } - } else { - tmp = vma_prev(&vmi); - if (tmp && vm_end_gap(tmp) > gap) { - high_limit = tmp->vm_start; - vma_iter_reset(&vmi); - goto retry; - } - } - - return gap; -} - /* * Determine if the allocation needs to ensure that there is no * existing mapping within it's guard gaps, for use as start_gap. diff --git a/mm/vma.c b/mm/vma.c index 7cd174daeeec..3972376176e7 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -2563,3 +2563,112 @@ unacct_fail: vm_unacct_memory(len >> PAGE_SHIFT); return -ENOMEM; } + +/** + * unmapped_area() - Find an area between the low_limit and the high_limit with + * the correct alignment and offset, all from @info. Note: current->mm is used + * for the search. + * + * @info: The unmapped area information including the range [low_limit - + * high_limit), the alignment offset and mask. + * + * Return: A memory address or -ENOMEM. + */ +unsigned long unmapped_area(struct vm_unmapped_area_info *info) +{ + unsigned long length, gap; + unsigned long low_limit, high_limit; + struct vm_area_struct *tmp; + VMA_ITERATOR(vmi, current->mm, 0); + + /* Adjust search length to account for worst case alignment overhead */ + length = info->length + info->align_mask + info->start_gap; + if (length < info->length) + return -ENOMEM; + + low_limit = info->low_limit; + if (low_limit < mmap_min_addr) + low_limit = mmap_min_addr; + high_limit = info->high_limit; +retry: + if (vma_iter_area_lowest(&vmi, low_limit, high_limit, length)) + return -ENOMEM; + + /* + * Adjust for the gap first so it doesn't interfere with the + * later alignment. 
The first step is the minimum needed to + * fulill the start gap, the next steps is the minimum to align + * that. It is the minimum needed to fulill both. + */ + gap = vma_iter_addr(&vmi) + info->start_gap; + gap += (info->align_offset - gap) & info->align_mask; + tmp = vma_next(&vmi); + if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ + if (vm_start_gap(tmp) < gap + length - 1) { + low_limit = tmp->vm_end; + vma_iter_reset(&vmi); + goto retry; + } + } else { + tmp = vma_prev(&vmi); + if (tmp && vm_end_gap(tmp) > gap) { + low_limit = vm_end_gap(tmp); + vma_iter_reset(&vmi); + goto retry; + } + } + + return gap; +} + +/** + * unmapped_area_topdown() - Find an area between the low_limit and the + * high_limit with the correct alignment and offset at the highest available + * address, all from @info. Note: current->mm is used for the search. + * + * @info: The unmapped area information including the range [low_limit - + * high_limit), the alignment offset and mask. + * + * Return: A memory address or -ENOMEM. + */ +unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info) +{ + unsigned long length, gap, gap_end; + unsigned long low_limit, high_limit; + struct vm_area_struct *tmp; + VMA_ITERATOR(vmi, current->mm, 0); + + /* Adjust search length to account for worst case alignment overhead */ + length = info->length + info->align_mask + info->start_gap; + if (length < info->length) + return -ENOMEM; + + low_limit = info->low_limit; + if (low_limit < mmap_min_addr) + low_limit = mmap_min_addr; + high_limit = info->high_limit; +retry: + if (vma_iter_area_highest(&vmi, low_limit, high_limit, length)) + return -ENOMEM; + + gap = vma_iter_end(&vmi) - info->length; + gap -= (gap - info->align_offset) & info->align_mask; + gap_end = vma_iter_end(&vmi); + tmp = vma_next(&vmi); + if (tmp && (tmp->vm_flags & VM_STARTGAP_FLAGS)) { /* Avoid prev check if possible */ + if (vm_start_gap(tmp) < gap_end) { + high_limit = vm_start_gap(tmp); + vma_iter_reset(&vmi); + goto retry; + } + } else { + tmp = vma_prev(&vmi); + if (tmp && vm_end_gap(tmp) > gap) { + high_limit = tmp->vm_start; + vma_iter_reset(&vmi); + goto retry; + } + } + + return gap; +} diff --git a/mm/vma.h b/mm/vma.h index 83a15d3a8285..c60f37d89eb1 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -250,6 +250,9 @@ unsigned long __mmap_region(struct file *file, unsigned long addr, int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma, unsigned long addr, unsigned long request, unsigned long flags); +unsigned long unmapped_area(struct vm_unmapped_area_info *info); +unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info); + static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma) { /* diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c index 8fab5e13c7c3..39ee61e55634 100644 --- a/tools/testing/vma/vma.c +++ b/tools/testing/vma/vma.c @@ -18,6 +18,12 @@ static bool fail_prealloc; #define vma_iter_prealloc(vmi, vma) \ (fail_prealloc ? 
-ENOMEM : mas_preallocate(&(vmi)->mas, (vma), GFP_KERNEL)) +#define CONFIG_DEFAULT_MMAP_MIN_ADDR 65536 + +unsigned long mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; +unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR; +unsigned long stack_guard_gap = 256UL< #include +extern unsigned long stack_guard_gap; +#ifdef CONFIG_MMU +extern unsigned long mmap_min_addr; +extern unsigned long dac_mmap_min_addr; +#else +#define mmap_min_addr 0UL +#define dac_mmap_min_addr 0UL +#endif + #define VM_WARN_ON(_expr) (WARN_ON(_expr)) #define VM_WARN_ON_ONCE(_expr) (WARN_ON_ONCE(_expr)) #define VM_BUG_ON(_expr) (BUG_ON(_expr)) @@ -52,6 +61,8 @@ #define VM_STACK VM_GROWSDOWN #define VM_SHADOW_STACK VM_NONE #define VM_SOFTDIRTY 0 +#define VM_ARCH_1 0x01000000 /* Architecture-specific flag */ +#define VM_GROWSUP VM_NONE #define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC) #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP) @@ -66,6 +77,8 @@ #define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_TSK_EXEC +#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK) + #ifdef CONFIG_64BIT /* VM is sealed, in vm_flags */ #define VM_SEALED _BITUL(63) @@ -395,6 +408,17 @@ struct vm_operations_struct { unsigned long addr); }; +struct vm_unmapped_area_info { +#define VM_UNMAPPED_AREA_TOPDOWN 1 + unsigned long flags; + unsigned long length; + unsigned long low_limit; + unsigned long high_limit; + unsigned long align_mask; + unsigned long align_offset; + unsigned long start_gap; +}; + static inline void vma_iter_invalidate(struct vma_iterator *vmi) { mas_pause(&vmi->mas); @@ -1055,4 +1079,39 @@ static inline int mmap_file(struct file *, struct vm_area_struct *) return 0; } +static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma) +{ + if (vma->vm_flags & VM_GROWSDOWN) + return stack_guard_gap; + + /* See reasoning around the VM_SHADOW_STACK definition */ + if (vma->vm_flags & VM_SHADOW_STACK) + return PAGE_SIZE; + + return 0; +} + +static inline unsigned long vm_start_gap(struct vm_area_struct *vma) +{ + unsigned long gap = stack_guard_start_gap(vma); + unsigned long vm_start = vma->vm_start; + + vm_start -= gap; + if (vm_start > vma->vm_start) + vm_start = 0; + return vm_start; +} + +static inline unsigned long vm_end_gap(struct vm_area_struct *vma) +{ + unsigned long vm_end = vma->vm_end; + + if (vma->vm_flags & VM_GROWSUP) { + vm_end += stack_guard_gap; + if (vm_end < vma->vm_end) + vm_end = -PAGE_SIZE; + } + return vm_end; +} + #endif /* __MM_VMA_INTERNAL_H */ From 0d3af7d660bc6b4d0513ad3b215753f8f6916a8a Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Tue, 3 Dec 2024 18:05:10 +0000 Subject: [PATCH 059/504] mm: abstract get_arg_page() stack expansion and mmap read lock Right now fs/exec.c invokes expand_downwards(), an otherwise internal implementation detail of the VMA logic in order to ensure that an arg page can be obtained by get_user_pages_remote(). In order to be able to move the stack expansion logic into mm/vma.c to make it available to userland testing we need to find an alternative approach here. We do so by providing the mmap_read_lock_maybe_expand() function which also helpfully documents what get_arg_page() is doing here and adds an additional check against VM_GROWSDOWN to make explicit that the stack expansion logic is only invoked when the VMA is indeed a downward-growing stack. This allows expand_downwards() to become a static function. 
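Condensed from the fs/exec.c hunk below, the call-site shape after this change is simply (error handling and unrelated detail elided):

	/* get_arg_page(): vma is the new, not yet user-visible bprm->vma */
	if (!mmap_read_lock_maybe_expand(mm, vma, pos, write))
		return NULL;
	/* ... get_user_pages_remote() runs under the read lock ... */
	mmap_read_unlock(mm);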
Importantly, the VMA referenced by mmap_read_maybe_expand() must NOT be currently user-visible in any way, that is place within an rmap or VMA tree. It must be a newly allocated VMA. This is the case when exec invokes this function. Link: https://lkml.kernel.org/r/5295d1c70c58e6aa63d14be68d4e1de9fa1c8e6d.1733248985.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Cc: Al Viro Cc: Christian Brauner Cc: Eric W. Biederman Cc: Jan Kara Cc: Jann Horn Cc: Kees Cook Cc: Liam R. Howlett Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- fs/exec.c | 14 +++--------- include/linux/mm.h | 5 ++--- mm/mmap.c | 54 +++++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 58 insertions(+), 15 deletions(-) diff --git a/fs/exec.c b/fs/exec.c index 98cb7ba9983c..1e1f79c514de 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -205,18 +205,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, /* * Avoid relying on expanding the stack down in GUP (which * does not work for STACK_GROWSUP anyway), and just do it - * by hand ahead of time. + * ahead of time. */ - if (write && pos < vma->vm_start) { - mmap_write_lock(mm); - ret = expand_downwards(vma, pos); - if (unlikely(ret < 0)) { - mmap_write_unlock(mm); - return NULL; - } - mmap_write_downgrade(mm); - } else - mmap_read_lock(mm); + if (!mmap_read_lock_maybe_expand(mm, vma, pos, write)) + return NULL; /* * We are doing an exec(). 'current' is the process diff --git a/include/linux/mm.h b/include/linux/mm.h index b1c3db9cf355..2e5ef71b8629 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3324,6 +3324,8 @@ extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admi extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); extern void exit_mmap(struct mm_struct *); int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift); +bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, bool write); static inline int check_data_rlimit(unsigned long rlim, unsigned long new, @@ -3437,9 +3439,6 @@ extern unsigned long stack_guard_gap; int expand_stack_locked(struct vm_area_struct *vma, unsigned long address); struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr); -/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */ -int expand_downwards(struct vm_area_struct *vma, unsigned long address); - /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr); extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, diff --git a/mm/mmap.c b/mm/mmap.c index 7aa372a75326..b29728df7f10 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1009,7 +1009,7 @@ static int expand_upwards(struct vm_area_struct *vma, unsigned long address) * vma is the first one with address < vma->vm_start. Have to extend vma. * mmap_lock held for writing. 
*/ -int expand_downwards(struct vm_area_struct *vma, unsigned long address) +static int expand_downwards(struct vm_area_struct *vma, unsigned long address) { struct mm_struct *mm = vma->vm_mm; struct vm_area_struct *prev; @@ -1940,3 +1940,55 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) /* Shrink the vma to just the new range */ return vma_shrink(&vmi, vma, new_start, new_end, vma->vm_pgoff); } + +#ifdef CONFIG_MMU +/* + * Obtain a read lock on mm->mmap_lock, if the specified address is below the + * start of the VMA, the intent is to perform a write, and it is a + * downward-growing stack, then attempt to expand the stack to contain it. + * + * This function is intended only for obtaining an argument page from an ELF + * image, and is almost certainly NOT what you want to use for any other + * purpose. + * + * IMPORTANT - VMA fields are accessed without an mmap lock being held, so the + * VMA referenced must not be linked in any user-visible tree, i.e. it must be a + * new VMA being mapped. + * + * The function assumes that addr is either contained within the VMA or below + * it, and makes no attempt to validate this value beyond that. + * + * Returns true if the read lock was obtained and a stack was perhaps expanded, + * false if the stack expansion failed. + * + * On stack expansion the function temporarily acquires an mmap write lock + * before downgrading it. + */ +bool mmap_read_lock_maybe_expand(struct mm_struct *mm, + struct vm_area_struct *new_vma, + unsigned long addr, bool write) +{ + if (!write || addr >= new_vma->vm_start) { + mmap_read_lock(mm); + return true; + } + + if (!(new_vma->vm_flags & VM_GROWSDOWN)) + return false; + + mmap_write_lock(mm); + if (expand_downwards(new_vma, addr)) { + mmap_write_unlock(mm); + return false; + } + + mmap_write_downgrade(mm); + return true; +} +#else +bool mmap_read_lock_maybe_expand(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, bool write) +{ + return false; +} +#endif From c56f4c3fe628813cf9583aac72100f34276214fb Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Tue, 3 Dec 2024 18:05:11 +0000 Subject: [PATCH 060/504] mm/vma: move stack expansion logic to mm/vma.c We build on previous work making expand_downwards() an entirely internal function. This logic is subtle and so it is highly useful to get it into vma.c so we can then userland unit test. We must additionally move acct_stack_growth() to vma.c as it is a helper function used by both expand_downwards() and expand_upwards(). We are also then able to mark anon_vma_interval_tree_pre_update_vma() and anon_vma_interval_tree_post_update_vma() static as these are no longer used by anything else. Link: https://lkml.kernel.org/r/0feb104eff85922019d4fb29280f3afb130c5204.1733248985.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Cc: Al Viro Cc: Christian Brauner Cc: Eric W. Biederman Cc: Jan Kara Cc: Jann Horn Cc: Kees Cook Cc: Liam R. Howlett Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/mmap.c | 205 ----------------------- mm/vma.c | 269 +++++++++++++++++++++++++++---- mm/vma.h | 12 +- tools/testing/vma/vma.c | 5 + tools/testing/vma/vma_internal.h | 62 +++++++ 5 files changed, 310 insertions(+), 243 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index b29728df7f10..cea10c88cf0f 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -879,211 +879,6 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr, return vma; } -/* - * Verify that the stack growth is acceptable and - * update accounting. 
This is shared with both the - * grow-up and grow-down cases. - */ -static int acct_stack_growth(struct vm_area_struct *vma, - unsigned long size, unsigned long grow) -{ - struct mm_struct *mm = vma->vm_mm; - unsigned long new_start; - - /* address space limit tests */ - if (!may_expand_vm(mm, vma->vm_flags, grow)) - return -ENOMEM; - - /* Stack limit test */ - if (size > rlimit(RLIMIT_STACK)) - return -ENOMEM; - - /* mlock limit tests */ - if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) - return -ENOMEM; - - /* Check to ensure the stack will not grow into a hugetlb-only region */ - new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : - vma->vm_end - size; - if (is_hugepage_only_range(vma->vm_mm, new_start, size)) - return -EFAULT; - - /* - * Overcommit.. This must be the final test, as it will - * update security statistics. - */ - if (security_vm_enough_memory_mm(mm, grow)) - return -ENOMEM; - - return 0; -} - -#if defined(CONFIG_STACK_GROWSUP) -/* - * PA-RISC uses this for its stack. - * vma is the last one with address > vma->vm_end. Have to extend vma. - */ -static int expand_upwards(struct vm_area_struct *vma, unsigned long address) -{ - struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct *next; - unsigned long gap_addr; - int error = 0; - VMA_ITERATOR(vmi, mm, vma->vm_start); - - if (!(vma->vm_flags & VM_GROWSUP)) - return -EFAULT; - - mmap_assert_write_locked(mm); - - /* Guard against exceeding limits of the address space. */ - address &= PAGE_MASK; - if (address >= (TASK_SIZE & PAGE_MASK)) - return -ENOMEM; - address += PAGE_SIZE; - - /* Enforce stack_guard_gap */ - gap_addr = address + stack_guard_gap; - - /* Guard against overflow */ - if (gap_addr < address || gap_addr > TASK_SIZE) - gap_addr = TASK_SIZE; - - next = find_vma_intersection(mm, vma->vm_end, gap_addr); - if (next && vma_is_accessible(next)) { - if (!(next->vm_flags & VM_GROWSUP)) - return -ENOMEM; - /* Check that both stack segments have the same anon_vma? */ - } - - if (next) - vma_iter_prev_range_limit(&vmi, address); - - vma_iter_config(&vmi, vma->vm_start, address); - if (vma_iter_prealloc(&vmi, vma)) - return -ENOMEM; - - /* We must make sure the anon_vma is allocated. */ - if (unlikely(anon_vma_prepare(vma))) { - vma_iter_free(&vmi); - return -ENOMEM; - } - - /* Lock the VMA before expanding to prevent concurrent page faults */ - vma_start_write(vma); - /* We update the anon VMA tree. */ - anon_vma_lock_write(vma->anon_vma); - - /* Somebody else might have raced and expanded it already */ - if (address > vma->vm_end) { - unsigned long size, grow; - - size = address - vma->vm_start; - grow = (address - vma->vm_end) >> PAGE_SHIFT; - - error = -ENOMEM; - if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { - error = acct_stack_growth(vma, size, grow); - if (!error) { - if (vma->vm_flags & VM_LOCKED) - mm->locked_vm += grow; - vm_stat_account(mm, vma->vm_flags, grow); - anon_vma_interval_tree_pre_update_vma(vma); - vma->vm_end = address; - /* Overwrite old entry in mtree. */ - vma_iter_store(&vmi, vma); - anon_vma_interval_tree_post_update_vma(vma); - - perf_event_mmap(vma); - } - } - } - anon_vma_unlock_write(vma->anon_vma); - vma_iter_free(&vmi); - validate_mm(mm); - return error; -} -#endif /* CONFIG_STACK_GROWSUP */ - -/* - * vma is the first one with address < vma->vm_start. Have to extend vma. - * mmap_lock held for writing. 
- */ -static int expand_downwards(struct vm_area_struct *vma, unsigned long address) -{ - struct mm_struct *mm = vma->vm_mm; - struct vm_area_struct *prev; - int error = 0; - VMA_ITERATOR(vmi, mm, vma->vm_start); - - if (!(vma->vm_flags & VM_GROWSDOWN)) - return -EFAULT; - - mmap_assert_write_locked(mm); - - address &= PAGE_MASK; - if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) - return -EPERM; - - /* Enforce stack_guard_gap */ - prev = vma_prev(&vmi); - /* Check that both stack segments have the same anon_vma? */ - if (prev) { - if (!(prev->vm_flags & VM_GROWSDOWN) && - vma_is_accessible(prev) && - (address - prev->vm_end < stack_guard_gap)) - return -ENOMEM; - } - - if (prev) - vma_iter_next_range_limit(&vmi, vma->vm_start); - - vma_iter_config(&vmi, address, vma->vm_end); - if (vma_iter_prealloc(&vmi, vma)) - return -ENOMEM; - - /* We must make sure the anon_vma is allocated. */ - if (unlikely(anon_vma_prepare(vma))) { - vma_iter_free(&vmi); - return -ENOMEM; - } - - /* Lock the VMA before expanding to prevent concurrent page faults */ - vma_start_write(vma); - /* We update the anon VMA tree. */ - anon_vma_lock_write(vma->anon_vma); - - /* Somebody else might have raced and expanded it already */ - if (address < vma->vm_start) { - unsigned long size, grow; - - size = vma->vm_end - address; - grow = (vma->vm_start - address) >> PAGE_SHIFT; - - error = -ENOMEM; - if (grow <= vma->vm_pgoff) { - error = acct_stack_growth(vma, size, grow); - if (!error) { - if (vma->vm_flags & VM_LOCKED) - mm->locked_vm += grow; - vm_stat_account(mm, vma->vm_flags, grow); - anon_vma_interval_tree_pre_update_vma(vma); - vma->vm_start = address; - vma->vm_pgoff -= grow; - /* Overwrite old entry in mtree. */ - vma_iter_store(&vmi, vma); - anon_vma_interval_tree_post_update_vma(vma); - - perf_event_mmap(vma); - } - } - } - anon_vma_unlock_write(vma->anon_vma); - vma_iter_free(&vmi); - validate_mm(mm); - return error; -} - /* enforced gap between the expanding stack and other mappings. */ unsigned long stack_guard_gap = 256UL<anon_vma_chain, same_vma) + anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); +} + +static void +anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) +{ + struct anon_vma_chain *avc; + + list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) + anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); +} + /* * vma_prepare() - Helper function for handling locking VMAs prior to altering * @vp: The initialized vma_prepare struct @@ -510,38 +542,6 @@ static int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, return __split_vma(vmi, vma, addr, new_below); } -/* - * vma has some anon_vma assigned, and is already inserted on that - * anon_vma's interval trees. - * - * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the - * vma must be removed from the anon_vma's interval trees using - * anon_vma_interval_tree_pre_update_vma(). - * - * After the update, the vma will be reinserted using - * anon_vma_interval_tree_post_update_vma(). - * - * The entire update must be protected by exclusive mmap_lock and by - * the root anon_vma's mutex. 
- */ -void -anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) -{ - struct anon_vma_chain *avc; - - list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) - anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root); -} - -void -anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) -{ - struct anon_vma_chain *avc; - - list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) - anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root); -} - /* * dup_anon_vma() - Helper function to duplicate anon_vma * @dst: The destination VMA @@ -2672,3 +2672,208 @@ retry: return gap; } + +/* + * Verify that the stack growth is acceptable and + * update accounting. This is shared with both the + * grow-up and grow-down cases. + */ +static int acct_stack_growth(struct vm_area_struct *vma, + unsigned long size, unsigned long grow) +{ + struct mm_struct *mm = vma->vm_mm; + unsigned long new_start; + + /* address space limit tests */ + if (!may_expand_vm(mm, vma->vm_flags, grow)) + return -ENOMEM; + + /* Stack limit test */ + if (size > rlimit(RLIMIT_STACK)) + return -ENOMEM; + + /* mlock limit tests */ + if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) + return -ENOMEM; + + /* Check to ensure the stack will not grow into a hugetlb-only region */ + new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : + vma->vm_end - size; + if (is_hugepage_only_range(vma->vm_mm, new_start, size)) + return -EFAULT; + + /* + * Overcommit.. This must be the final test, as it will + * update security statistics. + */ + if (security_vm_enough_memory_mm(mm, grow)) + return -ENOMEM; + + return 0; +} + +#if defined(CONFIG_STACK_GROWSUP) +/* + * PA-RISC uses this for its stack. + * vma is the last one with address > vma->vm_end. Have to extend vma. + */ +int expand_upwards(struct vm_area_struct *vma, unsigned long address) +{ + struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct *next; + unsigned long gap_addr; + int error = 0; + VMA_ITERATOR(vmi, mm, vma->vm_start); + + if (!(vma->vm_flags & VM_GROWSUP)) + return -EFAULT; + + mmap_assert_write_locked(mm); + + /* Guard against exceeding limits of the address space. */ + address &= PAGE_MASK; + if (address >= (TASK_SIZE & PAGE_MASK)) + return -ENOMEM; + address += PAGE_SIZE; + + /* Enforce stack_guard_gap */ + gap_addr = address + stack_guard_gap; + + /* Guard against overflow */ + if (gap_addr < address || gap_addr > TASK_SIZE) + gap_addr = TASK_SIZE; + + next = find_vma_intersection(mm, vma->vm_end, gap_addr); + if (next && vma_is_accessible(next)) { + if (!(next->vm_flags & VM_GROWSUP)) + return -ENOMEM; + /* Check that both stack segments have the same anon_vma? */ + } + + if (next) + vma_iter_prev_range_limit(&vmi, address); + + vma_iter_config(&vmi, vma->vm_start, address); + if (vma_iter_prealloc(&vmi, vma)) + return -ENOMEM; + + /* We must make sure the anon_vma is allocated. */ + if (unlikely(anon_vma_prepare(vma))) { + vma_iter_free(&vmi); + return -ENOMEM; + } + + /* Lock the VMA before expanding to prevent concurrent page faults */ + vma_start_write(vma); + /* We update the anon VMA tree. 
*/ + anon_vma_lock_write(vma->anon_vma); + + /* Somebody else might have raced and expanded it already */ + if (address > vma->vm_end) { + unsigned long size, grow; + + size = address - vma->vm_start; + grow = (address - vma->vm_end) >> PAGE_SHIFT; + + error = -ENOMEM; + if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { + error = acct_stack_growth(vma, size, grow); + if (!error) { + if (vma->vm_flags & VM_LOCKED) + mm->locked_vm += grow; + vm_stat_account(mm, vma->vm_flags, grow); + anon_vma_interval_tree_pre_update_vma(vma); + vma->vm_end = address; + /* Overwrite old entry in mtree. */ + vma_iter_store(&vmi, vma); + anon_vma_interval_tree_post_update_vma(vma); + + perf_event_mmap(vma); + } + } + } + anon_vma_unlock_write(vma->anon_vma); + vma_iter_free(&vmi); + validate_mm(mm); + return error; +} +#endif /* CONFIG_STACK_GROWSUP */ + +/* + * vma is the first one with address < vma->vm_start. Have to extend vma. + * mmap_lock held for writing. + */ +int expand_downwards(struct vm_area_struct *vma, unsigned long address) +{ + struct mm_struct *mm = vma->vm_mm; + struct vm_area_struct *prev; + int error = 0; + VMA_ITERATOR(vmi, mm, vma->vm_start); + + if (!(vma->vm_flags & VM_GROWSDOWN)) + return -EFAULT; + + mmap_assert_write_locked(mm); + + address &= PAGE_MASK; + if (address < mmap_min_addr || address < FIRST_USER_ADDRESS) + return -EPERM; + + /* Enforce stack_guard_gap */ + prev = vma_prev(&vmi); + /* Check that both stack segments have the same anon_vma? */ + if (prev) { + if (!(prev->vm_flags & VM_GROWSDOWN) && + vma_is_accessible(prev) && + (address - prev->vm_end < stack_guard_gap)) + return -ENOMEM; + } + + if (prev) + vma_iter_next_range_limit(&vmi, vma->vm_start); + + vma_iter_config(&vmi, address, vma->vm_end); + if (vma_iter_prealloc(&vmi, vma)) + return -ENOMEM; + + /* We must make sure the anon_vma is allocated. */ + if (unlikely(anon_vma_prepare(vma))) { + vma_iter_free(&vmi); + return -ENOMEM; + } + + /* Lock the VMA before expanding to prevent concurrent page faults */ + vma_start_write(vma); + /* We update the anon VMA tree. */ + anon_vma_lock_write(vma->anon_vma); + + /* Somebody else might have raced and expanded it already */ + if (address < vma->vm_start) { + unsigned long size, grow; + + size = vma->vm_end - address; + grow = (vma->vm_start - address) >> PAGE_SHIFT; + + error = -ENOMEM; + if (grow <= vma->vm_pgoff) { + error = acct_stack_growth(vma, size, grow); + if (!error) { + if (vma->vm_flags & VM_LOCKED) + mm->locked_vm += grow; + vm_stat_account(mm, vma->vm_flags, grow); + anon_vma_interval_tree_pre_update_vma(vma); + vma->vm_start = address; + vma->vm_pgoff -= grow; + /* Overwrite old entry in mtree. */ + vma_iter_store(&vmi, vma); + anon_vma_interval_tree_post_update_vma(vma); + + perf_event_mmap(vma); + } + } + } + anon_vma_unlock_write(vma->anon_vma); + vma_iter_free(&vmi); + validate_mm(mm); + return error; +} diff --git a/mm/vma.h b/mm/vma.h index c60f37d89eb1..6c460a120f82 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -139,12 +139,6 @@ void validate_mm(struct mm_struct *mm); #define validate_mm(mm) do { } while (0) #endif -/* Required for expand_downwards(). */ -void anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma); - -/* Required for expand_downwards(). 
*/ -void anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma); - int vma_expand(struct vma_merge_struct *vmg); int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start, unsigned long end, pgoff_t pgoff); @@ -478,4 +472,10 @@ static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior) #endif +#if defined(CONFIG_STACK_GROWSUP) +int expand_upwards(struct vm_area_struct *vma, unsigned long address); +#endif + +int expand_downwards(struct vm_area_struct *vma, unsigned long address); + #endif /* __MM_VMA_H */ diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c index 39ee61e55634..891d87a9ad6b 100644 --- a/tools/testing/vma/vma.c +++ b/tools/testing/vma/vma.c @@ -53,6 +53,11 @@ struct task_struct *get_current(void) return &__current; } +unsigned long rlimit(unsigned int limit) +{ + return (unsigned long)-1; +} + /* Helper function to simply allocate a VMA. */ static struct vm_area_struct *alloc_vma(struct mm_struct *mm, unsigned long start, diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index 6ad8bd8edaad..fab3f3bdf2f0 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -79,6 +79,11 @@ extern unsigned long dac_mmap_min_addr; #define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK) +#define RLIMIT_STACK 3 /* max stack size */ +#define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */ + +#define CAP_IPC_LOCK 14 + #ifdef CONFIG_64BIT /* VM is sealed, in vm_flags */ #define VM_SEALED _BITUL(63) @@ -478,6 +483,8 @@ static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) extern const struct vm_operations_struct vma_dummy_vm_ops; +extern unsigned long rlimit(unsigned int limit); + static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) { memset(vma, 0, sizeof(*vma)); @@ -1114,4 +1121,59 @@ static inline unsigned long vm_end_gap(struct vm_area_struct *vma) return vm_end; } +static inline int is_hugepage_only_range(struct mm_struct *mm, + unsigned long addr, unsigned long len) +{ + return 0; +} + +static inline bool vma_is_accessible(struct vm_area_struct *vma) +{ + return vma->vm_flags & VM_ACCESS_FLAGS; +} + +static inline bool capable(int cap) +{ + return true; +} + +static inline bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, + unsigned long bytes) +{ + unsigned long locked_pages, limit_pages; + + if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) + return true; + + locked_pages = bytes >> PAGE_SHIFT; + locked_pages += mm->locked_vm; + + limit_pages = rlimit(RLIMIT_MEMLOCK); + limit_pages >>= PAGE_SHIFT; + + return locked_pages <= limit_pages; +} + +static inline int __anon_vma_prepare(struct vm_area_struct *vma) +{ + struct anon_vma *anon_vma = calloc(1, sizeof(struct anon_vma)); + + if (!anon_vma) + return -ENOMEM; + + anon_vma->root = anon_vma; + vma->anon_vma = anon_vma; + + return 0; +} + +static inline int anon_vma_prepare(struct vm_area_struct *vma) +{ + if (likely(vma->anon_vma)) + return 0; + + return __anon_vma_prepare(vma); +} + + #endif /* __MM_VMA_INTERNAL_H */ From cacdd43c065a5ac258555cd2359dc56547b7207b Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Tue, 3 Dec 2024 18:05:12 +0000 Subject: [PATCH 061/504] mm/vma: move __vm_munmap() to mm/vma.c This was arbitrarily left in mmap.c it makes no sense being there, move it to vma.c to render it testable. 
Link: https://lkml.kernel.org/r/5e5e81807c54dfbe363edb2d431eb3d7a37fcdba.1733248985.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Cc: Al Viro Cc: Christian Brauner Cc: Eric W. Biederman Cc: Jan Kara Cc: Jann Horn Cc: Kees Cook Cc: Liam R. Howlett Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/mmap.c | 18 ------------------ mm/vma.c | 18 ++++++++++++++++++ mm/vma.h | 2 ++ tools/testing/vma/vma_internal.h | 9 +++++++++ 4 files changed, 29 insertions(+), 18 deletions(-) diff --git a/mm/mmap.c b/mm/mmap.c index cea10c88cf0f..b373486bd1c6 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1044,24 +1044,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr, return ret; } -static int __vm_munmap(unsigned long start, size_t len, bool unlock) -{ - int ret; - struct mm_struct *mm = current->mm; - LIST_HEAD(uf); - VMA_ITERATOR(vmi, mm, start); - - if (mmap_write_lock_killable(mm)) - return -EINTR; - - ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); - if (ret || !unlock) - mmap_write_unlock(mm); - - userfaultfd_unmap_complete(mm, &uf); - return ret; -} - int vm_munmap(unsigned long start, size_t len) { return __vm_munmap(start, len, false); diff --git a/mm/vma.c b/mm/vma.c index e270efc927fa..06554a732bce 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -2877,3 +2877,21 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) validate_mm(mm); return error; } + +int __vm_munmap(unsigned long start, size_t len, bool unlock) +{ + int ret; + struct mm_struct *mm = current->mm; + LIST_HEAD(uf); + VMA_ITERATOR(vmi, mm, start); + + if (mmap_write_lock_killable(mm)) + return -EINTR; + + ret = do_vmi_munmap(&vmi, mm, start, len, &uf, unlock); + if (ret || !unlock) + mmap_write_unlock(mm); + + userfaultfd_unmap_complete(mm, &uf); + return ret; +} diff --git a/mm/vma.h b/mm/vma.h index 6c460a120f82..295d44ea54db 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -478,4 +478,6 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address); int expand_downwards(struct vm_area_struct *vma, unsigned long address); +int __vm_munmap(unsigned long start, size_t len, bool unlock); + #endif /* __MM_VMA_H */ diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index fab3f3bdf2f0..a7de59a0d694 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -906,6 +906,11 @@ static inline void mmap_write_unlock(struct mm_struct *) { } +static inline int mmap_write_lock_killable(struct mm_struct *) +{ + return 0; +} + static inline bool can_modify_mm(struct mm_struct *mm, unsigned long start, unsigned long end) @@ -1175,5 +1180,9 @@ static inline int anon_vma_prepare(struct vm_area_struct *vma) return __anon_vma_prepare(vma); } +static inline void userfaultfd_unmap_complete(struct mm_struct *mm, + struct list_head *uf) +{ +} #endif /* __MM_VMA_INTERNAL_H */ From 261aae7e4097827579904734f08cd66b3382cd6e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 3 Dec 2024 11:20:50 +0100 Subject: [PATCH 062/504] mm/memory_hotplug: move debug_pagealloc_map_pages() into online_pages_range() In the near future, we want to have a single way to handover PageOffline pages to the buddy, whereby they could have: (a) Never been exposed to the buddy before: kept PageOffline when onlining the memory block. 
(b) Been allocated from the buddy, for example using alloc_contig_range() to then be set PageOffline, Let's start by making generic_online_page()->__free_pages_core() less special compared to ordinary page freeing (e.g., free_contig_range()), and perform the debug_pagealloc_map_pages() call unconditionally, even when the online callback might decide to keep the pages offline. All pages are already initialized with PageOffline, so nobody touches them either way. Link: https://lkml.kernel.org/r/20241203102050.223318-1-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Oscar Salvador Signed-off-by: Andrew Morton --- mm/memory_hotplug.c | 10 +++++++++- mm/page_alloc.c | 6 ------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index c43b4e7fb298..20af14e695c7 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -650,6 +650,7 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages) * this and the first chunk to online will be pageblock_nr_pages. */ for (pfn = start_pfn; pfn < end_pfn;) { + struct page *page = pfn_to_page(pfn); int order; /* @@ -664,7 +665,14 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages) else order = MAX_PAGE_ORDER; - (*online_page_callback)(pfn_to_page(pfn), order); + /* + * Exposing the page to the buddy by freeing can cause + * issues with debug_pagealloc enabled: some archs don't + * like double-unmappings. So treat them like any pages that + * were allocated from the buddy. + */ + debug_pagealloc_map_pages(page, 1 << order); + (*online_page_callback)(page, order); pfn += (1UL << order); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 685d491451ff..7a2529d95fb0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1295,12 +1295,6 @@ void __meminit __free_pages_core(struct page *page, unsigned int order, set_page_count(p, 0); } - /* - * Freeing the page with debug_pagealloc enabled will try to - * unmap it; some archs don't like double-unmappings, so - * map it first. - */ - debug_pagealloc_map_pages(page, nr_pages); adjust_managed_page_count(page, nr_pages); } else { for (loop = 0; loop < nr_pages; loop++, p++) { From 3ea0fe314773d47da9af5272eccedf4deb3a0193 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 3 Dec 2024 10:47:27 +0100 Subject: [PATCH 063/504] mm/page_isolation: don't pass gfp flags to isolate_single_pageblock() Patch series "mm/page_alloc: gfp flags cleanups for alloc_contig_*()", v2. Let's clean up the gfp flags handling, and support __GFP_ZERO, such that we can finally remove the TODO in memtrace code. This patch (of 6): The flags are no longer used, we can stop passing them to isolate_single_pageblock(). Link: https://lkml.kernel.org/r/20241203094732.200195-1-david@redhat.com Link: https://lkml.kernel.org/r/20241203094732.200195-2-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Zi Yan Reviewed-by: Vlastimil Babka Reviewed-by: Oscar Salvador Reviewed-by: Vishal Moola (Oracle) Cc: Christophe Leroy Cc: David Hildenbrand Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Naveen N Rao Cc: Nicholas Piggin Signed-off-by: Andrew Morton --- mm/page_isolation.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/mm/page_isolation.c b/mm/page_isolation.c index 7e04047977cf..e680d40d96de 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -286,7 +286,6 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages) * within a free or in-use page. 
* @boundary_pfn: pageblock-aligned pfn that a page might cross * @flags: isolation flags - * @gfp_flags: GFP flags used for migrating pages * @isolate_before: isolate the pageblock before the boundary_pfn * @skip_isolation: the flag to skip the pageblock isolation in second * isolate_single_pageblock() @@ -306,8 +305,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages) * the in-use page then splitting the free page. */ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags, - gfp_t gfp_flags, bool isolate_before, bool skip_isolation, - int migratetype) + bool isolate_before, bool skip_isolation, int migratetype) { unsigned long start_pfn; unsigned long isolate_pageblock; @@ -489,7 +487,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, bool skip_isolation = false; /* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */ - ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false, + ret = isolate_single_pageblock(isolate_start, flags, false, skip_isolation, migratetype); if (ret) return ret; @@ -498,7 +496,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, skip_isolation = true; /* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */ - ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true, + ret = isolate_single_pageblock(isolate_end, flags, true, skip_isolation, migratetype); if (ret) { unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype); From d18cde53e771a8d76225b0482f4793248b1db477 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 3 Dec 2024 10:47:28 +0100 Subject: [PATCH 064/504] mm/page_isolation: don't pass gfp flags to start_isolate_page_range() The parameter is unused, so let's stop passing it. 
Link: https://lkml.kernel.org/r/20241203094732.200195-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Zi Yan Reviewed-by: Vlastimil Babka Reviewed-by: Oscar Salvador Reviewed-by: Vishal Moola (Oracle) Cc: Christophe Leroy Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Naveen N Rao Cc: Nicholas Piggin Signed-off-by: Andrew Morton --- include/linux/page-isolation.h | 2 +- mm/memory_hotplug.c | 3 +-- mm/page_alloc.c | 2 +- mm/page_isolation.c | 4 +--- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 73dc2c1841ec..898bb788243b 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -31,7 +31,7 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page, int migratetype); int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, - int migratetype, int flags, gfp_t gfp_flags); + int migratetype, int flags); void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int migratetype); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 20af14e695c7..5f497ccf473d 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -2000,8 +2000,7 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages, /* set above range as isolated */ ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE, - MEMORY_OFFLINE | REPORT_FAILURE, - GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL); + MEMORY_OFFLINE | REPORT_FAILURE); if (ret) { reason = "failure to isolate range"; goto failed_removal_pcplists_disabled; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7a2529d95fb0..e334f3618c31 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6450,7 +6450,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end, * put back to page allocator so that buddy can use them. */ - ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask); + ret = start_isolate_page_range(start, end, migratetype, 0); if (ret) goto done; diff --git a/mm/page_isolation.c b/mm/page_isolation.c index e680d40d96de..c608e9d72865 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -442,8 +442,6 @@ failed: * and PageOffline() pages. * REPORT_FAILURE - report details about the failure to * isolate the range - * @gfp_flags: GFP flags used for migrating pages that sit across the - * range boundaries. * * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in * the range will never be allocated. Any free pages and pages freed in the @@ -476,7 +474,7 @@ failed: * Return: 0 on success and -EBUSY if any part of range cannot be isolated. */ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, - int migratetype, int flags, gfp_t gfp_flags) + int migratetype, int flags) { unsigned long pfn; struct page *page; From f9c37965ac02cd9a355c1df0c3a0adf1e3b75e75 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 3 Dec 2024 10:47:29 +0100 Subject: [PATCH 065/504] mm/page_alloc: make __alloc_contig_migrate_range() static The single user is in page_alloc.c. 
Link: https://lkml.kernel.org/r/20241203094732.200195-4-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Zi Yan Reviewed-by: Vlastimil Babka Reviewed-by: Oscar Salvador Reviewed-by: Vishal Moola (Oracle) Cc: Christophe Leroy Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Naveen N Rao Cc: Nicholas Piggin Signed-off-by: Andrew Morton --- mm/internal.h | 4 ---- mm/page_alloc.c | 5 ++--- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index 6f6585e98c6f..02890b29da5f 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -839,10 +839,6 @@ int isolate_migratepages_range(struct compact_control *cc, unsigned long low_pfn, unsigned long end_pfn); -int __alloc_contig_migrate_range(struct compact_control *cc, - unsigned long start, unsigned long end, - int migratetype); - /* Free whole pageblock and set its migration type to MIGRATE_CMA. */ void init_cma_reserved_pageblock(struct page *page); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e334f3618c31..7cd8087cd006 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6283,9 +6283,8 @@ static void alloc_contig_dump_pages(struct list_head *page_list) * @migratetype: using migratetype to filter the type of migration in * trace_mm_alloc_contig_migrate_range_info. */ -int __alloc_contig_migrate_range(struct compact_control *cc, - unsigned long start, unsigned long end, - int migratetype) +static int __alloc_contig_migrate_range(struct compact_control *cc, + unsigned long start, unsigned long end, int migratetype) { /* This function is based on compact_zone() from compaction.c. */ unsigned int nr_reclaimed; From ee86c6416df8eae500792802dfd1714250f33f5a Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 3 Dec 2024 10:47:30 +0100 Subject: [PATCH 066/504] mm/page_alloc: sort out the alloc_contig_range() gfp flags mess It's all a bit complicated for alloc_contig_range(). For example, we don't support many flags, so let's start bailing out on unsupported ones -- ignoring the placement hints, as we are already given the range to allocate. While we currently set cc.gfp_mask, in __alloc_contig_migrate_range() we simply create yet another GFP mask whereby we ignore the reclaim flags specify by the caller. That looks very inconsistent. Let's clean it up, constructing the gfp flags used for compaction/migration exactly once. Update the documentation of the gfp_mask parameter for alloc_contig_range() and alloc_contig_pages(). 
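As an illustration of what remains meaningful to callers after this cleanup (a hypothetical caller, not taken from this patch): node, zone and placement hints are dropped because the PFN range is given explicitly, so only reclaim and action modifiers matter.

	/* Hypothetical caller sketch: only reclaim/action modifiers such as
	 * __GFP_NOWARN or __GFP_RETRY_MAYFAIL still influence behaviour. */
	int ret = alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				     GFP_KERNEL | __GFP_NOWARN);
	if (ret)
		pr_debug("contig allocation of [%lx, %lx) failed: %d\n",
			 start_pfn, end_pfn, ret);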
Link: https://lkml.kernel.org/r/20241203094732.200195-5-david@redhat.com Signed-off-by: David Hildenbrand Acked-by: Zi Yan Reviewed-by: Vlastimil Babka Reviewed-by: Oscar Salvador Cc: Christophe Leroy Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Naveen N Rao Cc: Nicholas Piggin Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/page_alloc.c | 48 ++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 44 insertions(+), 4 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7cd8087cd006..ee7519cad3af 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6293,7 +6293,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, int ret = 0; struct migration_target_control mtc = { .nid = zone_to_nid(cc->zone), - .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, + .gfp_mask = cc->gfp_mask, .reason = MR_CONTIG_RANGE, }; struct page *page; @@ -6389,6 +6389,39 @@ static void split_free_pages(struct list_head *list) } } +static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask) +{ + const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM; + const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; + const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN; + + /* + * We are given the range to allocate; node, mobility and placement + * hints are irrelevant at this point. We'll simply ignore them. + */ + gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE | + __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE); + + /* + * We only support most reclaim flags (but not NOFAIL/NORETRY), and + * selected action flags. + */ + if (gfp_mask & ~(reclaim_mask | action_mask)) + return -EINVAL; + + /* + * Flags to control page compaction/migration/reclaim, to free up our + * page range. Migratable pages are movable, __GFP_MOVABLE is implied + * for them. + * + * Traditionally we always had __GFP_HARDWALL|__GFP_RETRY_MAYFAIL set, + * keep doing that to not degrade callers. + */ + *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) | + __GFP_HARDWALL | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; + return 0; +} + /** * alloc_contig_range() -- tries to allocate given range of pages * @start: start PFN to allocate @@ -6397,7 +6430,9 @@ static void split_free_pages(struct list_head *list) * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks * in range must have the same migratetype and it must * be either of the two. - * @gfp_mask: GFP mask to use during compaction + * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some + * action and reclaim modifiers are supported. Reclaim modifiers + * control allocation behavior during compaction/migration/reclaim. * * The PFN range does not have to be pageblock aligned. The PFN range must * belong to a single zone. @@ -6423,11 +6458,14 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end, .mode = MIGRATE_SYNC, .ignore_skip_hint = true, .no_set_skip_hint = true, - .gfp_mask = current_gfp_context(gfp_mask), .alloc_contig = true, }; INIT_LIST_HEAD(&cc.migratepages); + gfp_mask = current_gfp_context(gfp_mask); + if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask)) + return -EINVAL; + /* * What we do here is we mark all pageblocks in range as * MIGRATE_ISOLATE. 
Because pageblock and max order pages may @@ -6570,7 +6608,9 @@ static bool zone_spans_last_pfn(const struct zone *zone, /** * alloc_contig_pages() -- tries to find and allocate contiguous range of pages * @nr_pages: Number of contiguous pages to allocate - * @gfp_mask: GFP mask to limit search and used during compaction + * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some + * action and reclaim modifiers are supported. Reclaim modifiers + * control allocation behavior during compaction/migration/reclaim. * @nid: Target node * @nodemask: Mask for other possible nodes * From 377b2638da36acd9b0fde85a21f2037ca057fea2 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 3 Dec 2024 10:47:31 +0100 Subject: [PATCH 067/504] mm/page_alloc: forward the gfp flags from alloc_contig_range() to post_alloc_hook() In the __GFP_COMP case, we already pass the gfp_flags to prep_new_page()->post_alloc_hook(). However, in the !__GFP_COMP case, we essentially pass only hardcoded __GFP_MOVABLE to post_alloc_hook(), preventing some action modifiers from being effective.. Let's pass our now properly adjusted gfp flags there as well. This way, we can now support __GFP_ZERO for alloc_contig_*(). As a side effect, we now also support __GFP_SKIP_ZERO and__GFP_ZEROTAGS; but we'll keep the more special stuff (KASAN, NOLOCKDEP) disabled for now. It's worth noting that with __GFP_ZERO, we might unnecessarily zero pages when we have to release part of our range using free_contig_range() again. This can be optimized in the future, if ever required; the caller we'll be converting (powernv/memtrace) next won't trigger this. Link: https://lkml.kernel.org/r/20241203094732.200195-6-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Vlastimil Babka Reviewed-by: Oscar Salvador Cc: Christophe Leroy Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Naveen N Rao Cc: Nicholas Piggin Cc: Vishal Moola (Oracle) Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/page_alloc.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ee7519cad3af..03b8938aa376 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6363,7 +6363,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc, return (ret < 0) ? 
ret : 0; } -static void split_free_pages(struct list_head *list) +static void split_free_pages(struct list_head *list, gfp_t gfp_mask) { int order; @@ -6374,7 +6374,7 @@ static void split_free_pages(struct list_head *list) list_for_each_entry_safe(page, next, &list[order], lru) { int i; - post_alloc_hook(page, order, __GFP_MOVABLE); + post_alloc_hook(page, order, gfp_mask); set_page_refcounted(page); if (!order) continue; @@ -6392,7 +6392,8 @@ static void split_free_pages(struct list_head *list) static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask) { const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM; - const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN; + const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN | + __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO; const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN; /* @@ -6540,7 +6541,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end, } if (!(gfp_mask & __GFP_COMP)) { - split_free_pages(cc.freepages); + split_free_pages(cc.freepages, gfp_mask); /* Free head and tail (if any) */ if (start != outer_start) From 2b9ed3db0a8dcf15ad191fee758fde276117bb94 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Tue, 3 Dec 2024 10:47:32 +0100 Subject: [PATCH 068/504] powernv/memtrace: use __GFP_ZERO with alloc_contig_pages() alloc_contig_pages()->alloc_contig_range() now supports __GFP_ZERO, so let's use that instead to resolve our TODO. Link: https://lkml.kernel.org/r/20241203094732.200195-7-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Vlastimil Babka Cc: Christophe Leroy Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Naveen N Rao Cc: Nicholas Piggin Cc: Oscar Salvador Cc: Vishal Moola (Oracle) Cc: Zi Yan Signed-off-by: Andrew Morton --- arch/powerpc/platforms/powernv/memtrace.c | 31 +++++------------------ 1 file changed, 6 insertions(+), 25 deletions(-) diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c index 877720c64515..4ac9808e55a4 100644 --- a/arch/powerpc/platforms/powernv/memtrace.c +++ b/arch/powerpc/platforms/powernv/memtrace.c @@ -88,26 +88,6 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop, } } -static void memtrace_clear_range(unsigned long start_pfn, - unsigned long nr_pages) -{ - unsigned long pfn; - - /* As HIGHMEM does not apply, use clear_page() directly. */ - for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) { - if (IS_ALIGNED(pfn, PAGES_PER_SECTION)) - cond_resched(); - clear_page(__va(PFN_PHYS(pfn))); - } - /* - * Before we go ahead and use this range as cache inhibited range - * flush the cache. - */ - flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn), - (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), - FLUSH_CHUNK_SIZE); -} - static u64 memtrace_alloc_node(u32 nid, u64 size) { const unsigned long nr_pages = PHYS_PFN(size); @@ -119,17 +99,18 @@ static u64 memtrace_alloc_node(u32 nid, u64 size) * by alloc_contig_pages(). */ page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE | - __GFP_NOWARN, nid, NULL); + __GFP_NOWARN | __GFP_ZERO, nid, NULL); if (!page) return 0; start_pfn = page_to_pfn(page); /* - * Clear the range while we still have a linear mapping. - * - * TODO: use __GFP_ZERO with alloc_contig_pages() once supported. + * Before we go ahead and use this range as cache inhibited range + * flush the cache. 
*/ - memtrace_clear_range(start_pfn, nr_pages); + flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn), + (unsigned long)pfn_to_kaddr(start_pfn + nr_pages), + FLUSH_CHUNK_SIZE); /* * Set pages PageOffline(), to indicate that nobody (e.g., hibernation, From 9bbc3a1deaf1b6f628833589f9edd6f15811f834 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 4 Dec 2024 19:10:15 +0100 Subject: [PATCH 069/504] readahead: don't shorten readahead window in read_pages() Patch series "readahead: Reintroduce fix for improper RA window sizing". This small patch series reintroduces a fix of readahead window confusion (and thus read throughput reduction) when page_cache_ra_order() ends up failing due to folios already present in the page cache. After thinking about this for a while I have ended up with a dumb fix that just rechecks if we have something to read before calling do_page_cache_ra(). This fixes the problem reported in [1]. I still think it doesn't make much sense to update readahead window size in read_pages() so patch 1 removes that but the real fix in patch 2 does not depend on it. [1] https://lore.kernel.org/all/49648605-d800-4859-be49-624bbe60519d@gmail.com This patch (of 2): When ->readahead callback doesn't read all requested pages, read_pages() shortens the readahead window (ra->size). However we don't know why pages were not read and what appropriate window size is. So don't try to secondguess the filesystem. If it needs different readahead window, it should set it manually similarly as during expansion the filesystem can use readahead_expand(). Link: https://lkml.kernel.org/r/20241204181016.15273-1-jack@suse.cz Link: https://lkml.kernel.org/r/20241204181016.15273-2-jack@suse.cz Signed-off-by: Jan Kara Cc: Matthew Wilcox Signed-off-by: Andrew Morton --- mm/readahead.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/mm/readahead.c b/mm/readahead.c index e151f4b13ca4..50a64d8e06b0 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -158,20 +158,10 @@ static void read_pages(struct readahead_control *rac) if (aops->readahead) { aops->readahead(rac); - /* - * Clean up the remaining folios. The sizes in ->ra - * may be used to size the next readahead, so make sure - * they accurately reflect what happened. - */ + /* Clean up the remaining folios. */ while ((folio = readahead_folio(rac)) != NULL) { - unsigned long nr = folio_nr_pages(folio); - folio_get(folio); - rac->ra->size -= nr; - if (rac->ra->async_size >= nr) { - rac->ra->async_size -= nr; - filemap_remove_folio(folio); - } + filemap_remove_folio(folio); folio_unlock(folio); folio_put(folio); } From 08d5648d6da95620b8fa1e7ba25adb40a97e14df Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 4 Dec 2024 19:10:16 +0100 Subject: [PATCH 070/504] readahead: properly shorten readahead when falling back to do_page_cache_ra() When we succeed in creating some folios in page_cache_ra_order() but then need to fallback to single page folios, we don't shorten the amount to read passed to do_page_cache_ra() by the amount we've already read. This then results in reading more and also in placing another readahead mark in the middle of the readahead window which confuses readahead code. Fix the problem by properly reducing number of pages to read. Unlike previous attempt at this fix (commit 7c877586da31) which had to be reverted, we are now careful to check there is indeed something to read so that we don't submit negative-sized readahead. 
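As a concrete example (numbers are illustrative): if ra->size is 64 pages and page_cache_ra_order() already created large folios covering the first 48 of them (index - start == 48), the fallback now submits a 16-page read instead of re-reading all 64, and is skipped entirely when nothing is left:

	/* From the hunk below: only read what large folios did not cover. */
	if (ra->size > index - start)
		do_page_cache_ra(ractl, ra->size - (index - start),
				 ra->async_size);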
Link: https://lkml.kernel.org/r/20241204181016.15273-3-jack@suse.cz Signed-off-by: Jan Kara Cc: Matthew Wilcox Signed-off-by: Andrew Morton --- mm/readahead.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/mm/readahead.c b/mm/readahead.c index 50a64d8e06b0..2bc3abf07828 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -448,7 +448,8 @@ void page_cache_ra_order(struct readahead_control *ractl, struct file_ra_state *ra, unsigned int new_order) { struct address_space *mapping = ractl->mapping; - pgoff_t index = readahead_index(ractl); + pgoff_t start = readahead_index(ractl); + pgoff_t index = start; unsigned int min_order = mapping_min_folio_order(mapping); pgoff_t limit = (i_size_read(mapping->host) - 1) >> PAGE_SHIFT; pgoff_t mark = index + ra->size - ra->async_size; @@ -506,12 +507,18 @@ void page_cache_ra_order(struct readahead_control *ractl, /* * If there were already pages in the page cache, then we may have * left some gaps. Let the regular readahead code take care of this - * situation. + * situation below. */ if (!err) return; fallback: - do_page_cache_ra(ractl, ra->size, ra->async_size); + /* + * ->readahead() may have updated readahead window size so we have to + * check there's still something to read. + */ + if (ra->size > index - start) + do_page_cache_ra(ractl, ra->size - (index - start), + ra->async_size); } static unsigned long ractl_max_pages(struct readahead_control *ractl, From 42200af35a6fcbb9bff60b28b523f9f575efad08 Mon Sep 17 00:00:00 2001 From: Koichiro Den Date: Thu, 5 Dec 2024 01:55:03 +0900 Subject: [PATCH 071/504] hugetlb: prioritize surplus allocation from current node Previously, surplus allocations triggered by mmap were typically made from the node where the process was running. On a page fault, the area was reliably dequeued from the hugepage_freelists for that node. However, since commit 003af997c8a9 ("hugetlb: force allocating surplus hugepages on mempolicy allowed nodes"), dequeue_hugetlb_folio_vma() may fall back to other nodes unnecessarily even if there is no MPOL_BIND policy, causing folios to be dequeued from nodes other than the current one. Also, allocating from the node where the current process is running is likely to result in a performance win, as mmap-ing processes often touch the area not so long after allocation. This change minimizes surprises for users relying on the previous behavior while maintaining the benefit introduced by the commit. So, prioritize the node the current process is running on when possible. 
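The mmap-triggered case can be exercised from userspace roughly as follows (an illustrative sketch, assuming hugepage overcommit is enabled via /proc/sys/vm/nr_overcommit_hugepages so the mapping is backed by a surplus page; error handling is trimmed):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 2UL << 20;		/* one 2 MiB hugepage */
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		/* mmap() reserves a surplus hugepage; the first touch dequeues it,
		 * now preferentially from the node this task is running on. */
		memset(p, 0, len);
		munmap(p, len);
		return 0;
	}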
Link: https://lkml.kernel.org/r/20241204165503.628784-1-koichiro.den@canonical.com Signed-off-by: Koichiro Den Acked-by: Aristeu Rozanski Cc: Aristeu Rozanski Cc: David Hildenbrand Cc: Muchun Song Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/hugetlb.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index eaaec19caa7c..ac275a8864e0 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2463,7 +2463,13 @@ static int gather_surplus_pages(struct hstate *h, long delta) long needed, allocated; bool alloc_ok = true; int node; - nodemask_t *mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); + nodemask_t *mbind_nodemask, alloc_nodemask; + + mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h)); + if (mbind_nodemask) + nodes_and(alloc_nodemask, *mbind_nodemask, cpuset_current_mems_allowed); + else + alloc_nodemask = cpuset_current_mems_allowed; lockdep_assert_held(&hugetlb_lock); needed = (h->resv_huge_pages + delta) - h->free_huge_pages; @@ -2479,8 +2485,16 @@ retry: spin_unlock_irq(&hugetlb_lock); for (i = 0; i < needed; i++) { folio = NULL; - for_each_node_mask(node, cpuset_current_mems_allowed) { - if (!mbind_nodemask || node_isset(node, *mbind_nodemask)) { + + /* Prioritize current node */ + if (node_isset(numa_mem_id(), alloc_nodemask)) + folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), + numa_mem_id(), NULL); + + if (!folio) { + for_each_node_mask(node, alloc_nodemask) { + if (node == numa_mem_id()) + continue; folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h), node, NULL); if (folio) From c06564559629b2ca195accd5472e36cd65dcd63d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Wed, 4 Dec 2024 16:31:00 +0100 Subject: [PATCH 072/504] mm/hugetlb: don't map folios writable without VM_WRITE when copying during fork() If we have to trigger a hugetlb folio copy during fork() because the anon folio might be pinned, we currently unconditionally create a writable PTE. However, the VMA might not have write permissions (VM_WRITE) at that point. Fix it by checking the VMA for VM_WRITE. Make the code less error prone by moving checking for VM_WRITE into make_huge_pte(), and letting callers only specify whether we should try making it writable. A simple reproducer that longterm-pins the folios using liburing to then mprotect(PROT_READ) the folios befor fork() [1] results in: Before: [FAIL] access should not have worked After: [PASS] access did not work as expected [1] https://gitlab.com/davidhildenbrand/scratchspace/-/raw/main/reproducers/hugetlb-mkwrite-fork.c This is rather a corner case, so stable might not be warranted. 
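The linked reproducer boils down to roughly this sequence (a simplified sketch, not the actual reproducer; liburing is assumed for the longterm pin and error handling is omitted):

	#include <liburing.h>
	#include <sys/mman.h>
	#include <sys/uio.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 2UL << 20;
		struct io_uring ring;
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
		struct iovec iov = { .iov_base = p, .iov_len = len };

		p[0] = 1;					/* allocate the anon hugetlb folio */
		io_uring_queue_init(1, &ring, 0);
		io_uring_register_buffers(&ring, &iov, 1);	/* longterm-pin it */
		mprotect(p, len, PROT_READ);			/* the VMA loses VM_WRITE */
		if (fork() == 0)
			_exit(0);	/* the child's copied PTE must stay read-only */
		return 0;
	}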
Link: https://lkml.kernel.org/r/20241204153100.1967364-1-david@redhat.com Fixes: 4eae4efa2c29 ("hugetlb: do early cow when page pinned on src mm") Signed-off-by: David Hildenbrand Acked-by: Peter Xu Cc: Muchun Song Cc: Guillaume Morin Signed-off-by: Andrew Morton --- mm/hugetlb.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index ac275a8864e0..c9d8c6a1c03c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5155,12 +5155,12 @@ const struct vm_operations_struct hugetlb_vm_ops = { }; static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page, - int writable) + bool try_mkwrite) { pte_t entry; unsigned int shift = huge_page_shift(hstate_vma(vma)); - if (writable) { + if (try_mkwrite && (vma->vm_flags & VM_WRITE)) { entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page, vma->vm_page_prot))); } else { @@ -5213,7 +5213,7 @@ static void hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, struct folio *new_folio, pte_t old, unsigned long sz) { - pte_t newpte = make_huge_pte(vma, &new_folio->page, 1); + pte_t newpte = make_huge_pte(vma, &new_folio->page, true); __folio_mark_uptodate(new_folio); hugetlb_add_new_anon_rmap(new_folio, vma, addr); @@ -6249,8 +6249,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping, hugetlb_add_new_anon_rmap(folio, vma, vmf->address); else hugetlb_add_file_rmap(folio); - new_pte = make_huge_pte(vma, &folio->page, ((vma->vm_flags & VM_WRITE) - && (vma->vm_flags & VM_SHARED))); + new_pte = make_huge_pte(vma, &folio->page, vma->vm_flags & VM_SHARED); /* * If this pte was previously wr-protected, keep it wr-protected even * if populated. @@ -6582,7 +6581,6 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, spinlock_t *ptl; int ret = -ENOMEM; struct folio *folio; - int writable; bool folio_in_pagecache = false; if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) { @@ -6736,12 +6734,8 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY * with wp flag set, don't set pte write bit. */ - if (wp_enabled || (is_continue && !vm_shared)) - writable = 0; - else - writable = dst_vma->vm_flags & VM_WRITE; - - _dst_pte = make_huge_pte(dst_vma, &folio->page, writable); + _dst_pte = make_huge_pte(dst_vma, &folio->page, + !wp_enabled && !(is_continue && !vm_shared)); /* * Always mark UFFDIO_COPY page dirty; note that this may not be * extremely important for hugetlbfs for now since swapping is not From 06887a136c14d1b740bf3a32d411017cd46dec71 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 4 Dec 2024 19:09:41 +0800 Subject: [PATCH 073/504] mm: khugepaged: recheck pmd state in retract_page_tables() Patch series "synchronously scan and reclaim empty user PTE pages", v4. Previously, we tried to use a completely asynchronous method to reclaim empty user PTE pages [1]. After discussing with David Hildenbrand, we decided to implement synchronous reclaimation in the case of madvise(MADV_DONTNEED) as the first step. So this series aims to synchronously free the empty PTE pages in madvise(MADV_DONTNEED) case. We will detect and free empty PTE pages in zap_pte_range(), and will add zap_details.reclaim_pt to exclude cases other than madvise(MADV_DONTNEED). In zap_pte_range(), mmu_gather is used to perform batch tlb flushing and page freeing operations. Therefore, if we want to free the empty PTE page in this path, the most natural way is to add it to mmu_gather as well. 
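From userspace, the targeted case looks roughly like this (an illustrative sketch; on x86_64 a PMD-aligned 2 MiB range is covered by a single PTE table, and error handling is omitted):

	#include <stddef.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t len = 2UL << 20;	/* roughly one PTE table worth of 4 KiB pages */
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

		for (size_t i = 0; i < len; i += 4096)
			p[i] = 1;	/* populate the PTEs */

		/* Zap the range: with this series the emptied PTE page(s) can be
		 * reclaimed synchronously, not just the data pages. */
		madvise(p, len, MADV_DONTNEED);
		return 0;
	}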
Now, if CONFIG_MMU_GATHER_RCU_TABLE_FREE is selected, mmu_gather will free page table pages by semi RCU: - batch table freeing: asynchronous free by RCU - single table freeing: IPI + synchronous free But this is not enough to free the empty PTE page table pages in paths other that munmap and exit_mmap path, because IPI cannot be synchronized with rcu_read_lock() in pte_offset_map{_lock}(). So we should let single table also be freed by RCU like batch table freeing. As a first step, we supported this feature on x86_64 and selectd the newly introduced CONFIG_ARCH_SUPPORTS_PT_RECLAIM. For other cases such as madvise(MADV_FREE), consider scanning and freeing empty PTE pages asynchronously in the future. Note: issues related to TLB flushing are not new to this series and are tracked in the separate RFC patch [3]. And more context please refer to this thread [4]. [1]. https://lore.kernel.org/lkml/cover.1718267194.git.zhengqi.arch@bytedance.com/ [2]. https://lore.kernel.org/lkml/cover.1727332572.git.zhengqi.arch@bytedance.com/ [3]. https://lore.kernel.org/lkml/20240815120715.14516-1-zhengqi.arch@bytedance.com/ [4]. https://lore.kernel.org/lkml/6f38cb19-9847-4f70-bbe7-06881bb016be@bytedance.com/ This patch (of 12): In retract_page_tables(), the lock of new_folio is still held, we will be blocked in the page fault path, which prevents the pte entries from being set again. So even though the old empty PTE page may be concurrently freed and a new PTE page is filled into the pmd entry, it is still empty and can be removed. So just refactor the retract_page_tables() a little bit and recheck the pmd state after holding the pmd lock. Link: https://lkml.kernel.org/r/cover.1733305182.git.zhengqi.arch@bytedance.com Link: https://lkml.kernel.org/r/70a51804cd19d44ccaf031825d9fb6eaf92f2bad.1733305182.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Suggested-by: Jann Horn Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Peter Zijlstra Cc: Will Deacon Cc: Zach O'Keefe Cc: Dan Carpenter Signed-off-by: Andrew Morton --- Documentation/mm/process_addrs.rst | 4 +++ mm/khugepaged.c | 45 ++++++++++++++++++++---------- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/Documentation/mm/process_addrs.rst b/Documentation/mm/process_addrs.rst index 1d416658d7f5..81417fa2ed20 100644 --- a/Documentation/mm/process_addrs.rst +++ b/Documentation/mm/process_addrs.rst @@ -531,6 +531,10 @@ are extra requirements for accessing them: new page table has been installed in the same location and filled with entries. Writers normally need to take the PTE lock and revalidate that the PMD entry still refers to the same PTE-level page table. + If the writer does not care whether it is the same PTE-level page table, it + can take the PMD lock and revalidate that the contents of pmd entry still meet + the requirements. In particular, this also happens in :c:func:`!retract_page_tables` + when handling :c:macro:`!MADV_COLLAPSE`. To access PTE-level page tables, a helper like :c:func:`!pte_offset_map_lock` or :c:func:`!pte_offset_map` can be used depending on stability requirements. 
diff --git a/mm/khugepaged.c b/mm/khugepaged.c index bad1e130eda8..5f0be134141e 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -948,17 +948,10 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, return SCAN_SUCCEED; } -static int find_pmd_or_thp_or_none(struct mm_struct *mm, - unsigned long address, - pmd_t **pmd) +static inline int check_pmd_state(pmd_t *pmd) { - pmd_t pmde; + pmd_t pmde = pmdp_get_lockless(pmd); - *pmd = mm_find_pmd(mm, address); - if (!*pmd) - return SCAN_PMD_NULL; - - pmde = pmdp_get_lockless(*pmd); if (pmd_none(pmde)) return SCAN_PMD_NONE; if (!pmd_present(pmde)) @@ -972,6 +965,17 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm, return SCAN_SUCCEED; } +static int find_pmd_or_thp_or_none(struct mm_struct *mm, + unsigned long address, + pmd_t **pmd) +{ + *pmd = mm_find_pmd(mm, address); + if (!*pmd) + return SCAN_PMD_NULL; + + return check_pmd_state(*pmd); +} + static int check_pmd_still_valid(struct mm_struct *mm, unsigned long address, pmd_t *pmd) @@ -1721,7 +1725,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) pmd_t *pmd, pgt_pmd; spinlock_t *pml; spinlock_t *ptl; - bool skipped_uffd = false; + bool success = false; /* * Check vma->anon_vma to exclude MAP_PRIVATE mappings that @@ -1758,6 +1762,19 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) mmu_notifier_invalidate_range_start(&range); pml = pmd_lock(mm, pmd); + /* + * The lock of new_folio is still held, we will be blocked in + * the page fault path, which prevents the pte entries from + * being set again. So even though the old empty PTE page may be + * concurrently freed and a new PTE page is filled into the pmd + * entry, it is still empty and can be removed. + * + * So here we only need to recheck if the state of pmd entry + * still meets our requirements, rather than checking pmd_same() + * like elsewhere. + */ + if (check_pmd_state(pmd) != SCAN_SUCCEED) + goto drop_pml; ptl = pte_lockptr(mm, pmd); if (ptl != pml) spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); @@ -1771,20 +1788,20 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff) * repeating the anon_vma check protects from one category, * and repeating the userfaultfd_wp() check from another. */ - if (unlikely(vma->anon_vma || userfaultfd_wp(vma))) { - skipped_uffd = true; - } else { + if (likely(!vma->anon_vma && !userfaultfd_wp(vma))) { pgt_pmd = pmdp_collapse_flush(vma, addr, pmd); pmdp_get_lockless_sync(); + success = true; } if (ptl != pml) spin_unlock(ptl); +drop_pml: spin_unlock(pml); mmu_notifier_invalidate_range_end(&range); - if (!skipped_uffd) { + if (success) { mm_dec_nr_ptes(mm); page_table_check_pte_clear_range(mm, addr, pgt_pmd); pte_free_defer(mm, pmd_pgtable(pgt_pmd)); From 2cda52889b899f6e4643a59b5b96799f2087bf36 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 4 Dec 2024 19:09:42 +0800 Subject: [PATCH 074/504] mm: userfaultfd: recheck dst_pmd entry in move_pages_pte() In move_pages_pte(), since dst_pte needs to be none, the subsequent pte_same() check cannot prevent the dst_pte page from being freed concurrently, so we also need to abtain dst_pmdval and recheck pmd_same(). Otherwise, once we support empty PTE page reclaimation for anonymous pages, it may result in moving the src_pte page into the dts_pte page that is about to be freed by RCU. 
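A hypothetical interleaving that the added pmd_same() check closes (a sketch; it assumes the empty PTE page reclaim introduced later in this series):

	CPU 0 (move_pages_pte)                     CPU 1
	dst_pte = pte_offset_map_rw_nolock()
	  (orig_dst_pte is pte_none)
	                                           madvise(MADV_DONTNEED) finds the
	                                           PTE page empty, clears the pmd
	                                           and frees the PTE page via RCU
	                                           a later fault installs a new
	                                           PTE page under the same pmd
	pte_same(ptep_get(dst_pte), orig_dst_pte)
	  passes, but dst_pte still points into
	  the stale, about-to-be-freed PTE page
	pmd_same(dst_pmdval, pmdp_get_lockless(dst_pmd))
	  fails, so we return -EAGAIN and retry
	  against the new PTE page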
Link: https://lkml.kernel.org/r/8108c262757fc492626f3a2ffc44b775f2710e16.1733305182.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Peter Zijlstra Cc: Will Deacon Cc: Zach O'Keefe Cc: Dan Carpenter Signed-off-by: Andrew Morton --- mm/userfaultfd.c | 51 +++++++++++++++++++++++++++++++----------------- 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 60a0be33766f..8e16dc290ddf 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -1020,6 +1020,14 @@ void double_pt_unlock(spinlock_t *ptl1, __release(ptl2); } +static inline bool is_pte_pages_stable(pte_t *dst_pte, pte_t *src_pte, + pte_t orig_dst_pte, pte_t orig_src_pte, + pmd_t *dst_pmd, pmd_t dst_pmdval) +{ + return pte_same(ptep_get(src_pte), orig_src_pte) && + pte_same(ptep_get(dst_pte), orig_dst_pte) && + pmd_same(dst_pmdval, pmdp_get_lockless(dst_pmd)); +} static int move_present_pte(struct mm_struct *mm, struct vm_area_struct *dst_vma, @@ -1027,6 +1035,7 @@ static int move_present_pte(struct mm_struct *mm, unsigned long dst_addr, unsigned long src_addr, pte_t *dst_pte, pte_t *src_pte, pte_t orig_dst_pte, pte_t orig_src_pte, + pmd_t *dst_pmd, pmd_t dst_pmdval, spinlock_t *dst_ptl, spinlock_t *src_ptl, struct folio *src_folio) { @@ -1034,8 +1043,8 @@ static int move_present_pte(struct mm_struct *mm, double_pt_lock(dst_ptl, src_ptl); - if (!pte_same(ptep_get(src_pte), orig_src_pte) || - !pte_same(ptep_get(dst_pte), orig_dst_pte)) { + if (!is_pte_pages_stable(dst_pte, src_pte, orig_dst_pte, orig_src_pte, + dst_pmd, dst_pmdval)) { err = -EAGAIN; goto out; } @@ -1071,6 +1080,7 @@ static int move_swap_pte(struct mm_struct *mm, unsigned long dst_addr, unsigned long src_addr, pte_t *dst_pte, pte_t *src_pte, pte_t orig_dst_pte, pte_t orig_src_pte, + pmd_t *dst_pmd, pmd_t dst_pmdval, spinlock_t *dst_ptl, spinlock_t *src_ptl) { if (!pte_swp_exclusive(orig_src_pte)) @@ -1078,8 +1088,8 @@ static int move_swap_pte(struct mm_struct *mm, double_pt_lock(dst_ptl, src_ptl); - if (!pte_same(ptep_get(src_pte), orig_src_pte) || - !pte_same(ptep_get(dst_pte), orig_dst_pte)) { + if (!is_pte_pages_stable(dst_pte, src_pte, orig_dst_pte, orig_src_pte, + dst_pmd, dst_pmdval)) { double_pt_unlock(dst_ptl, src_ptl); return -EAGAIN; } @@ -1097,13 +1107,14 @@ static int move_zeropage_pte(struct mm_struct *mm, unsigned long dst_addr, unsigned long src_addr, pte_t *dst_pte, pte_t *src_pte, pte_t orig_dst_pte, pte_t orig_src_pte, + pmd_t *dst_pmd, pmd_t dst_pmdval, spinlock_t *dst_ptl, spinlock_t *src_ptl) { pte_t zero_pte; double_pt_lock(dst_ptl, src_ptl); - if (!pte_same(ptep_get(src_pte), orig_src_pte) || - !pte_same(ptep_get(dst_pte), orig_dst_pte)) { + if (!is_pte_pages_stable(dst_pte, src_pte, orig_dst_pte, orig_src_pte, + dst_pmd, dst_pmdval)) { double_pt_unlock(dst_ptl, src_ptl); return -EAGAIN; } @@ -1136,6 +1147,7 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pte_t *src_pte = NULL; pte_t *dst_pte = NULL; pmd_t dummy_pmdval; + pmd_t dst_pmdval; struct folio *src_folio = NULL; struct anon_vma *src_anon_vma = NULL; struct mmu_notifier_range range; @@ -1148,11 +1160,11 @@ static int move_pages_pte(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, retry: /* * Use the maywrite version to indicate that dst_pte will be modified, - * but since we will use 
pte_same() to detect the change of the pte - * entry, there is no need to get pmdval, so just pass a dummy variable - * to it. + * since dst_pte needs to be none, the subsequent pte_same() check + * cannot prevent the dst_pte page from being freed concurrently, so we + * also need to abtain dst_pmdval and recheck pmd_same() later. */ - dst_pte = pte_offset_map_rw_nolock(mm, dst_pmd, dst_addr, &dummy_pmdval, + dst_pte = pte_offset_map_rw_nolock(mm, dst_pmd, dst_addr, &dst_pmdval, &dst_ptl); /* Retry if a huge pmd materialized from under us */ @@ -1161,7 +1173,11 @@ retry: goto out; } - /* same as dst_pte */ + /* + * Unlike dst_pte, the subsequent pte_same() check can ensure the + * stability of the src_pte page, so there is no need to get pmdval, + * just pass a dummy variable to it. + */ src_pte = pte_offset_map_rw_nolock(mm, src_pmd, src_addr, &dummy_pmdval, &src_ptl); @@ -1213,7 +1229,7 @@ retry: err = move_zeropage_pte(mm, dst_vma, src_vma, dst_addr, src_addr, dst_pte, src_pte, orig_dst_pte, orig_src_pte, - dst_ptl, src_ptl); + dst_pmd, dst_pmdval, dst_ptl, src_ptl); goto out; } @@ -1303,8 +1319,8 @@ retry: err = move_present_pte(mm, dst_vma, src_vma, dst_addr, src_addr, dst_pte, src_pte, - orig_dst_pte, orig_src_pte, - dst_ptl, src_ptl, src_folio); + orig_dst_pte, orig_src_pte, dst_pmd, + dst_pmdval, dst_ptl, src_ptl, src_folio); } else { entry = pte_to_swp_entry(orig_src_pte); if (non_swap_entry(entry)) { @@ -1319,10 +1335,9 @@ retry: goto out; } - err = move_swap_pte(mm, dst_addr, src_addr, - dst_pte, src_pte, - orig_dst_pte, orig_src_pte, - dst_ptl, src_ptl); + err = move_swap_pte(mm, dst_addr, src_addr, dst_pte, src_pte, + orig_dst_pte, orig_src_pte, dst_pmd, + dst_pmdval, dst_ptl, src_ptl); } out: From 5d8d2742363c965f2517d46dc4e66bc1173c9d22 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Tue, 10 Dec 2024 16:41:56 +0800 Subject: [PATCH 075/504] mm-userfaultfd-recheck-dst_pmd-entry-in-move_pages_pte-fix The following WARN_ON_ONCE()s can also be expected to be triggered, so remove them as well. if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd)) Link: https://lkml.kernel.org/r/20241210084156.89877-1-zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Peter Zijlstra Cc: Will Deacon Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/userfaultfd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 8e16dc290ddf..af3dfc3633db 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -1193,8 +1193,8 @@ retry: } /* Sanity checks before the operation */ - if (WARN_ON_ONCE(pmd_none(*dst_pmd)) || WARN_ON_ONCE(pmd_none(*src_pmd)) || - WARN_ON_ONCE(pmd_trans_huge(*dst_pmd)) || WARN_ON_ONCE(pmd_trans_huge(*src_pmd))) { + if (pmd_none(*dst_pmd) || pmd_none(*src_pmd) || + pmd_trans_huge(*dst_pmd) || pmd_trans_huge(*src_pmd)) { err = -EINVAL; goto out; } From 59df900b965d4d36acf7c50faa1cc7b46297cd2f Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 4 Dec 2024 19:09:43 +0800 Subject: [PATCH 076/504] mm: introduce zap_nonpresent_ptes() Similar to zap_present_ptes(), let's introduce zap_nonpresent_ptes() to handle non-present ptes, which can improve code readability. No functional change. 
Link: https://lkml.kernel.org/r/009ca882036d9c7a9f815489cfeafe0bdb79d62d.1733305182.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Reviewed-by: Jann Horn Acked-by: David Hildenbrand Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Dave Hansen Cc: David Rientjes Cc: Hugh Dickins Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Peter Zijlstra Cc: Will Deacon Cc: Zach O'Keefe Cc: Dan Carpenter Signed-off-by: Andrew Morton --- mm/memory.c | 136 ++++++++++++++++++++++++++++------------------------ 1 file changed, 73 insertions(+), 63 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index f49eb4d4be75..bed55f8873d5 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1587,6 +1587,76 @@ static inline int zap_present_ptes(struct mmu_gather *tlb, return 1; } +static inline int zap_nonpresent_ptes(struct mmu_gather *tlb, + struct vm_area_struct *vma, pte_t *pte, pte_t ptent, + unsigned int max_nr, unsigned long addr, + struct zap_details *details, int *rss) +{ + swp_entry_t entry; + int nr = 1; + + entry = pte_to_swp_entry(ptent); + if (is_device_private_entry(entry) || + is_device_exclusive_entry(entry)) { + struct page *page = pfn_swap_entry_to_page(entry); + struct folio *folio = page_folio(page); + + if (unlikely(!should_zap_folio(details, folio))) + return 1; + /* + * Both device private/exclusive mappings should only + * work with anonymous page so far, so we don't need to + * consider uffd-wp bit when zap. For more information, + * see zap_install_uffd_wp_if_needed(). + */ + WARN_ON_ONCE(!vma_is_anonymous(vma)); + rss[mm_counter(folio)]--; + if (is_device_private_entry(entry)) + folio_remove_rmap_pte(folio, page, vma); + folio_put(folio); + } else if (!non_swap_entry(entry)) { + /* Genuine swap entries, hence a private anon pages */ + if (!should_zap_cows(details)) + return 1; + + nr = swap_pte_batch(pte, max_nr, ptent); + rss[MM_SWAPENTS] -= nr; + free_swap_and_cache_nr(entry, nr); + } else if (is_migration_entry(entry)) { + struct folio *folio = pfn_swap_entry_folio(entry); + + if (!should_zap_folio(details, folio)) + return 1; + rss[mm_counter(folio)]--; + } else if (pte_marker_entry_uffd_wp(entry)) { + /* + * For anon: always drop the marker; for file: only + * drop the marker if explicitly requested. + */ + if (!vma_is_anonymous(vma) && !zap_drop_markers(details)) + return 1; + } else if (is_guard_swp_entry(entry)) { + /* + * Ordinary zapping should not remove guard PTE + * markers. Only do so if we should remove PTE markers + * in general. 
+ */ + if (!zap_drop_markers(details)) + return 1; + } else if (is_hwpoison_entry(entry) || is_poisoned_swp_entry(entry)) { + if (!should_zap_cows(details)) + return 1; + } else { + /* We should have covered all the swap entry types */ + pr_alert("unrecognized swap entry 0x%lx\n", entry.val); + WARN_ON_ONCE(1); + } + clear_not_present_full_ptes(vma->vm_mm, addr, pte, nr, tlb->fullmm); + zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent); + + return nr; +} + static unsigned long zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, @@ -1598,7 +1668,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, spinlock_t *ptl; pte_t *start_pte; pte_t *pte; - swp_entry_t entry; int nr; tlb_change_page_size(tlb, PAGE_SIZE); @@ -1611,8 +1680,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, arch_enter_lazy_mmu_mode(); do { pte_t ptent = ptep_get(pte); - struct folio *folio; - struct page *page; int max_nr; nr = 1; @@ -1622,8 +1689,8 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, if (need_resched()) break; + max_nr = (end - addr) / PAGE_SIZE; if (pte_present(ptent)) { - max_nr = (end - addr) / PAGE_SIZE; nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr, details, rss, &force_flush, &force_break); @@ -1631,67 +1698,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, addr += nr * PAGE_SIZE; break; } - continue; - } - - entry = pte_to_swp_entry(ptent); - if (is_device_private_entry(entry) || - is_device_exclusive_entry(entry)) { - page = pfn_swap_entry_to_page(entry); - folio = page_folio(page); - if (unlikely(!should_zap_folio(details, folio))) - continue; - /* - * Both device private/exclusive mappings should only - * work with anonymous page so far, so we don't need to - * consider uffd-wp bit when zap. For more information, - * see zap_install_uffd_wp_if_needed(). - */ - WARN_ON_ONCE(!vma_is_anonymous(vma)); - rss[mm_counter(folio)]--; - if (is_device_private_entry(entry)) - folio_remove_rmap_pte(folio, page, vma); - folio_put(folio); - } else if (!non_swap_entry(entry)) { - max_nr = (end - addr) / PAGE_SIZE; - nr = swap_pte_batch(pte, max_nr, ptent); - /* Genuine swap entries, hence a private anon pages */ - if (!should_zap_cows(details)) - continue; - rss[MM_SWAPENTS] -= nr; - free_swap_and_cache_nr(entry, nr); - } else if (is_migration_entry(entry)) { - folio = pfn_swap_entry_folio(entry); - if (!should_zap_folio(details, folio)) - continue; - rss[mm_counter(folio)]--; - } else if (pte_marker_entry_uffd_wp(entry)) { - /* - * For anon: always drop the marker; for file: only - * drop the marker if explicitly requested. - */ - if (!vma_is_anonymous(vma) && - !zap_drop_markers(details)) - continue; - } else if (is_guard_swp_entry(entry)) { - /* - * Ordinary zapping should not remove guard PTE - * markers. Only do so if we should remove PTE markers - * in general. 
- */ - if (!zap_drop_markers(details)) - continue; - } else if (is_hwpoison_entry(entry) || - is_poisoned_swp_entry(entry)) { - if (!should_zap_cows(details)) - continue; } else { - /* We should have covered all the swap entry types */ - pr_alert("unrecognized swap entry 0x%lx\n", entry.val); - WARN_ON_ONCE(1); + nr = zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, + addr, details, rss); } - clear_not_present_full_ptes(mm, addr, pte, nr, tlb->fullmm); - zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent); } while (pte += nr, addr += PAGE_SIZE * nr, addr != end); add_mm_rss_vec(mm, rss); From b2c6d723f93b412c57c17053d1c210e0a4c32283 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 4 Dec 2024 19:09:44 +0800 Subject: [PATCH 077/504] mm: introduce do_zap_pte_range() This commit introduces do_zap_pte_range() to actually zap the PTEs, which will help improve code readability and facilitate secondary checking of the processed PTEs in the future. No functional change. Link: https://lkml.kernel.org/r/c3fd16807f83bb7d7a376cc6de023a9f5ead17da.1733305182.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Reviewed-by: Jann Horn Acked-by: David Hildenbrand Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Dave Hansen Cc: David Rientjes Cc: Hugh Dickins Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Peter Zijlstra Cc: Will Deacon Cc: Zach O'Keefe Cc: Dan Carpenter Signed-off-by: Andrew Morton --- mm/memory.c | 45 ++++++++++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index bed55f8873d5..378576e6b745 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1657,6 +1657,27 @@ static inline int zap_nonpresent_ptes(struct mmu_gather *tlb, return nr; } +static inline int do_zap_pte_range(struct mmu_gather *tlb, + struct vm_area_struct *vma, pte_t *pte, + unsigned long addr, unsigned long end, + struct zap_details *details, int *rss, + bool *force_flush, bool *force_break) +{ + pte_t ptent = ptep_get(pte); + int max_nr = (end - addr) / PAGE_SIZE; + + if (pte_none(ptent)) + return 1; + + if (pte_present(ptent)) + return zap_present_ptes(tlb, vma, pte, ptent, max_nr, + addr, details, rss, force_flush, + force_break); + + return zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr, + details, rss); +} + static unsigned long zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, @@ -1679,28 +1700,14 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); do { - pte_t ptent = ptep_get(pte); - int max_nr; - - nr = 1; - if (pte_none(ptent)) - continue; - if (need_resched()) break; - max_nr = (end - addr) / PAGE_SIZE; - if (pte_present(ptent)) { - nr = zap_present_ptes(tlb, vma, pte, ptent, max_nr, - addr, details, rss, &force_flush, - &force_break); - if (unlikely(force_break)) { - addr += nr * PAGE_SIZE; - break; - } - } else { - nr = zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, - addr, details, rss); + nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss, + &force_flush, &force_break); + if (unlikely(force_break)) { + addr += nr * PAGE_SIZE; + break; } } while (pte += nr, addr += PAGE_SIZE * nr, addr != end); From 97a2293a4aed066cd7b779bf81bf1f3306f7751b Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 4 Dec 2024 19:09:45 +0800 Subject: [PATCH 078/504] mm: skip over all consecutive none ptes in do_zap_pte_range() Skip over all consecutive none ptes 
in do_zap_pte_range(), which helps optimize away need_resched() + force_break + incremental pte/addr increments etc. Link: https://lkml.kernel.org/r/8ecffbf990afd1c8ccc195a2ec321d55f0923908.1733305182.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Suggested-by: David Hildenbrand Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Dave Hansen Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Peter Zijlstra Cc: Will Deacon Cc: Zach O'Keefe Cc: Dan Carpenter Signed-off-by: Andrew Morton --- mm/memory.c | 27 ++++++++++++++++++++------- 1 file changed, 20 insertions(+), 7 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 378576e6b745..576eb9c4b57c 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1665,17 +1665,30 @@ static inline int do_zap_pte_range(struct mmu_gather *tlb, { pte_t ptent = ptep_get(pte); int max_nr = (end - addr) / PAGE_SIZE; + int nr = 0; - if (pte_none(ptent)) - return 1; + /* Skip all consecutive none ptes */ + if (pte_none(ptent)) { + for (nr = 1; nr < max_nr; nr++) { + ptent = ptep_get(pte + nr); + if (!pte_none(ptent)) + break; + } + max_nr -= nr; + if (!max_nr) + return nr; + pte += nr; + addr += nr * PAGE_SIZE; + } if (pte_present(ptent)) - return zap_present_ptes(tlb, vma, pte, ptent, max_nr, - addr, details, rss, force_flush, - force_break); + nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr, + details, rss, force_flush, force_break); + else + nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr, + details, rss); - return zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr, - details, rss); + return nr; } static unsigned long zap_pte_range(struct mmu_gather *tlb, From 84350e954d48f3e30fc7a13fa8fa8ec652ae8f60 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 4 Dec 2024 19:09:46 +0800 Subject: [PATCH 079/504] mm: zap_install_uffd_wp_if_needed: return whether uffd-wp pte has been re-installed In some cases, we'll replace the none pte with an uffd-wp swap special pte marker when necessary. Let's expose this information to the caller through the return value, so that subsequent commits can use this information to detect whether the PTE page is empty. Link: https://lkml.kernel.org/r/9d4516554724eda87d6576468042a1741c475413.1733305182.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Peter Zijlstra Cc: Will Deacon Cc: Zach O'Keefe Cc: Dan Carpenter Signed-off-by: Andrew Morton --- include/linux/mm_inline.h | 11 +++++++---- mm/memory.c | 16 ++++++++++++---- 2 files changed, 19 insertions(+), 8 deletions(-) diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 1b6a917fffa4..34e5097182a0 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -564,9 +564,9 @@ static inline pte_marker copy_pte_marker( * Must be called with pgtable lock held so that no thread will see the none * pte, and if they see it, they'll fault and serialize at the pgtable lock. * - * This function is a no-op if PTE_MARKER_UFFD_WP is not enabled. + * Returns true if an uffd-wp pte was installed, false otherwise. 
*/ -static inline void +static inline bool pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, pte_t *pte, pte_t pteval) { @@ -583,7 +583,7 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, * with a swap pte. There's no way of leaking the bit. */ if (vma_is_anonymous(vma) || !userfaultfd_wp(vma)) - return; + return false; /* A uffd-wp wr-protected normal pte */ if (unlikely(pte_present(pteval) && pte_uffd_wp(pteval))) @@ -596,10 +596,13 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, if (unlikely(pte_swp_uffd_wp_any(pteval))) arm_uffd_pte = true; - if (unlikely(arm_uffd_pte)) + if (unlikely(arm_uffd_pte)) { set_pte_at(vma->vm_mm, addr, pte, make_pte_marker(PTE_MARKER_UFFD_WP)); + return true; + } #endif + return false; } static inline bool vma_has_recency(struct vm_area_struct *vma) diff --git a/mm/memory.c b/mm/memory.c index 576eb9c4b57c..67406cb662ea 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1466,27 +1466,35 @@ static inline bool zap_drop_markers(struct zap_details *details) /* * This function makes sure that we'll replace the none pte with an uffd-wp * swap special pte marker when necessary. Must be with the pgtable lock held. + * + * Returns true if uffd-wp ptes was installed, false otherwise. */ -static inline void +static inline bool zap_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, pte_t *pte, int nr, struct zap_details *details, pte_t pteval) { + bool was_installed = false; + +#ifdef CONFIG_PTE_MARKER_UFFD_WP /* Zap on anonymous always means dropping everything */ if (vma_is_anonymous(vma)) - return; + return false; if (zap_drop_markers(details)) - return; + return false; for (;;) { /* the PFN in the PTE is irrelevant. */ - pte_install_uffd_wp_if_needed(vma, addr, pte, pteval); + if (pte_install_uffd_wp_if_needed(vma, addr, pte, pteval)) + was_installed = true; if (--nr == 0) break; pte++; addr += PAGE_SIZE; } +#endif + return was_installed; } static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb, From ceb621488bacaf1d4356153461e3b9396eeca426 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 4 Dec 2024 19:09:47 +0800 Subject: [PATCH 080/504] mm: do_zap_pte_range: return any_skipped information to the caller Let the caller of do_zap_pte_range() know whether we skip zap ptes or reinstall uffd-wp ptes through any_skipped parameter, so that subsequent commits can use this information in zap_pte_range() to detect whether the PTE page can be reclaimed. 
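For reference, the "mm: pgtable: reclaim empty PTE page in madvise(MADV_DONTNEED)" patch later in this series consumes the flag roughly as follows:

	nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss,
			      &force_flush, &force_break, &any_skipped);
	if (any_skipped)
		can_reclaim_pt = false;	/* a pte was skipped or a uffd-wp marker
					   was (re)installed, so the PTE page
					   cannot be assumed to be empty */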
Link: https://lkml.kernel.org/r/59f33ec9f74e9f058ed319b0bfadd76b0f7adf9b.1733305182.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Peter Zijlstra Cc: Will Deacon Cc: Zach O'Keefe Cc: Dan Carpenter Signed-off-by: Andrew Morton --- mm/memory.c | 36 +++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 15 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index 67406cb662ea..694156ecaf95 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1501,7 +1501,7 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb, struct vm_area_struct *vma, struct folio *folio, struct page *page, pte_t *pte, pte_t ptent, unsigned int nr, unsigned long addr, struct zap_details *details, int *rss, - bool *force_flush, bool *force_break) + bool *force_flush, bool *force_break, bool *any_skipped) { struct mm_struct *mm = tlb->mm; bool delay_rmap = false; @@ -1527,8 +1527,8 @@ static __always_inline void zap_present_folio_ptes(struct mmu_gather *tlb, arch_check_zapped_pte(vma, ptent); tlb_remove_tlb_entries(tlb, pte, nr, addr); if (unlikely(userfaultfd_pte_wp(vma, ptent))) - zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, - ptent); + *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, + nr, details, ptent); if (!delay_rmap) { folio_remove_rmap_ptes(folio, page, nr, vma); @@ -1552,7 +1552,7 @@ static inline int zap_present_ptes(struct mmu_gather *tlb, struct vm_area_struct *vma, pte_t *pte, pte_t ptent, unsigned int max_nr, unsigned long addr, struct zap_details *details, int *rss, bool *force_flush, - bool *force_break) + bool *force_break, bool *any_skipped) { const fpb_t fpb_flags = FPB_IGNORE_DIRTY | FPB_IGNORE_SOFT_DIRTY; struct mm_struct *mm = tlb->mm; @@ -1567,15 +1567,17 @@ static inline int zap_present_ptes(struct mmu_gather *tlb, arch_check_zapped_pte(vma, ptent); tlb_remove_tlb_entry(tlb, pte, addr); if (userfaultfd_pte_wp(vma, ptent)) - zap_install_uffd_wp_if_needed(vma, addr, pte, 1, - details, ptent); + *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, + pte, 1, details, ptent); ksm_might_unmap_zero_page(mm, ptent); return 1; } folio = page_folio(page); - if (unlikely(!should_zap_folio(details, folio))) + if (unlikely(!should_zap_folio(details, folio))) { + *any_skipped = true; return 1; + } /* * Make sure that the common "small folio" case is as fast as possible @@ -1587,22 +1589,23 @@ static inline int zap_present_ptes(struct mmu_gather *tlb, zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, nr, addr, details, rss, force_flush, - force_break); + force_break, any_skipped); return nr; } zap_present_folio_ptes(tlb, vma, folio, page, pte, ptent, 1, addr, - details, rss, force_flush, force_break); + details, rss, force_flush, force_break, any_skipped); return 1; } static inline int zap_nonpresent_ptes(struct mmu_gather *tlb, struct vm_area_struct *vma, pte_t *pte, pte_t ptent, unsigned int max_nr, unsigned long addr, - struct zap_details *details, int *rss) + struct zap_details *details, int *rss, bool *any_skipped) { swp_entry_t entry; int nr = 1; + *any_skipped = true; entry = pte_to_swp_entry(ptent); if (is_device_private_entry(entry) || is_device_exclusive_entry(entry)) { @@ -1660,7 +1663,7 @@ static inline int zap_nonpresent_ptes(struct mmu_gather *tlb, WARN_ON_ONCE(1); } clear_not_present_full_ptes(vma->vm_mm, addr, 
pte, nr, tlb->fullmm); - zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent); + *any_skipped = zap_install_uffd_wp_if_needed(vma, addr, pte, nr, details, ptent); return nr; } @@ -1669,7 +1672,8 @@ static inline int do_zap_pte_range(struct mmu_gather *tlb, struct vm_area_struct *vma, pte_t *pte, unsigned long addr, unsigned long end, struct zap_details *details, int *rss, - bool *force_flush, bool *force_break) + bool *force_flush, bool *force_break, + bool *any_skipped) { pte_t ptent = ptep_get(pte); int max_nr = (end - addr) / PAGE_SIZE; @@ -1691,10 +1695,11 @@ static inline int do_zap_pte_range(struct mmu_gather *tlb, if (pte_present(ptent)) nr += zap_present_ptes(tlb, vma, pte, ptent, max_nr, addr, - details, rss, force_flush, force_break); + details, rss, force_flush, force_break, + any_skipped); else nr += zap_nonpresent_ptes(tlb, vma, pte, ptent, max_nr, addr, - details, rss); + details, rss, any_skipped); return nr; } @@ -1705,6 +1710,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, struct zap_details *details) { bool force_flush = false, force_break = false; + bool any_skipped = false; struct mm_struct *mm = tlb->mm; int rss[NR_MM_COUNTERS]; spinlock_t *ptl; @@ -1725,7 +1731,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, break; nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss, - &force_flush, &force_break); + &force_flush, &force_break, &any_skipped); if (unlikely(force_break)) { addr += nr * PAGE_SIZE; break; From 439a505cbe8742ec534e74bd1775ccbeb1de661c Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 4 Dec 2024 19:09:48 +0800 Subject: [PATCH 081/504] mm: make zap_pte_range() handle full within-PMD range In preparation for reclaiming empty PTE pages, this commit first makes zap_pte_range() to handle the full within-PMD range, so that we can more easily detect and free PTE pages in this function in subsequent commits. Link: https://lkml.kernel.org/r/76c95ee641da7808cd66d642ab95841df4048295.1733305182.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Reviewed-by: Jann Horn Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Peter Zijlstra Cc: Will Deacon Cc: Zach O'Keefe Cc: Dan Carpenter Signed-off-by: Andrew Morton --- mm/memory.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/mm/memory.c b/mm/memory.c index 694156ecaf95..d4d5bd7046e7 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1718,6 +1718,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, pte_t *pte; int nr; +retry: tlb_change_page_size(tlb, PAGE_SIZE); init_rss_vec(rss); start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); @@ -1757,6 +1758,13 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, if (force_flush) tlb_flush_mmu(tlb); + if (addr != end) { + cond_resched(); + force_flush = false; + force_break = false; + goto retry; + } + return addr; } From bedcda218e0bab857fbe2ad2ddf24c1ddeb8deb4 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 4 Dec 2024 19:09:49 +0800 Subject: [PATCH 082/504] mm: pgtable: reclaim empty PTE page in madvise(MADV_DONTNEED) Now in order to pursue high performance, applications mostly use some high-performance user-mode memory allocators, such as jemalloc or tcmalloc. 
These memory allocators use madvise(MADV_DONTNEED or MADV_FREE) to release physical memory, but neither MADV_DONTNEED nor MADV_FREE will release page table memory, which may cause huge page table memory usage. The following are a memory usage snapshot of one process which actually happened on our server: VIRT: 55t RES: 590g VmPTE: 110g In this case, most of the page table entries are empty. For such a PTE page where all entries are empty, we can actually free it back to the system for others to use. As a first step, this commit aims to synchronously free the empty PTE pages in madvise(MADV_DONTNEED) case. We will detect and free empty PTE pages in zap_pte_range(), and will add zap_details.reclaim_pt to exclude cases other than madvise(MADV_DONTNEED). Once an empty PTE is detected, we first try to hold the pmd lock within the pte lock. If successful, we clear the pmd entry directly (fast path). Otherwise, we wait until the pte lock is released, then re-hold the pmd and pte locks and loop PTRS_PER_PTE times to check pte_none() to re-detect whether the PTE page is empty and free it (slow path). For other cases such as madvise(MADV_FREE), consider scanning and freeing empty PTE pages asynchronously in the future. The following code snippet can show the effect of optimization: mmap 50G while (1) { for (; i < 1024 * 25; i++) { touch 2M memory madvise MADV_DONTNEED 2M } } As we can see, the memory usage of VmPTE is reduced: before after VIRT 50.0 GB 50.0 GB RES 3.1 MB 3.1 MB VmPTE 102640 KB 240 KB Link: https://lkml.kernel.org/r/92aba2b319a734913f18ba41e7d86a265f0b84e2.1733305182.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Peter Zijlstra Cc: Will Deacon Cc: Zach O'Keefe Cc: Dan Carpenter Signed-off-by: Andrew Morton --- include/linux/mm.h | 1 + mm/Kconfig | 15 ++++++++++ mm/Makefile | 1 + mm/internal.h | 19 +++++++++++++ mm/madvise.c | 7 ++++- mm/memory.c | 21 ++++++++++++-- mm/pt_reclaim.c | 71 ++++++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 132 insertions(+), 3 deletions(-) create mode 100644 mm/pt_reclaim.c diff --git a/include/linux/mm.h b/include/linux/mm.h index 2e5ef71b8629..9372bc058b43 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2320,6 +2320,7 @@ extern void pagefault_out_of_memory(void); struct zap_details { struct folio *single_folio; /* Locked folio to be unmapped */ bool even_cows; /* Zap COWed private pages too? */ + bool reclaim_pt; /* Need reclaim page tables? */ zap_flags_t zap_flags; /* Extra flags for zapping */ }; diff --git a/mm/Kconfig b/mm/Kconfig index 84000b016808..7949ab121070 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -1301,6 +1301,21 @@ config ARCH_HAS_USER_SHADOW_STACK The architecture has hardware support for userspace shadow call stacks (eg, x86 CET, arm64 GCS or RISC-V Zicfiss). +config ARCH_SUPPORTS_PT_RECLAIM + def_bool n + +config PT_RECLAIM + bool "reclaim empty user page table pages" + default y + depends on ARCH_SUPPORTS_PT_RECLAIM && MMU && SMP + select MMU_GATHER_RCU_TABLE_FREE + help + Try to reclaim empty user page table pages in paths other than munmap + and exit_mmap path. + + Note: now only empty user PTE page table pages will be reclaimed. 
+ + source "mm/damon/Kconfig" endmenu diff --git a/mm/Makefile b/mm/Makefile index dba52bb0da8a..850386a67b3e 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -146,3 +146,4 @@ obj-$(CONFIG_GENERIC_IOREMAP) += ioremap.o obj-$(CONFIG_SHRINKER_DEBUG) += shrinker_debug.o obj-$(CONFIG_EXECMEM) += execmem.o obj-$(CONFIG_TMPFS_QUOTA) += shmem_quota.o +obj-$(CONFIG_PT_RECLAIM) += pt_reclaim.o diff --git a/mm/internal.h b/mm/internal.h index 02890b29da5f..b438d35045de 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1541,4 +1541,23 @@ int walk_page_range_mm(struct mm_struct *mm, unsigned long start, unsigned long end, const struct mm_walk_ops *ops, void *private); +/* pt_reclaim.c */ +bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval); +void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb, + pmd_t pmdval); +void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, + struct mmu_gather *tlb); + +#ifdef CONFIG_PT_RECLAIM +bool reclaim_pt_is_enabled(unsigned long start, unsigned long end, + struct zap_details *details); +#else +static inline bool reclaim_pt_is_enabled(unsigned long start, unsigned long end, + struct zap_details *details) +{ + return false; +} +#endif /* CONFIG_PT_RECLAIM */ + + #endif /* __MM_INTERNAL_H */ diff --git a/mm/madvise.c b/mm/madvise.c index 0ceae57da7da..49f3a75046f6 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -851,7 +851,12 @@ static int madvise_free_single_vma(struct vm_area_struct *vma, static long madvise_dontneed_single_vma(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - zap_page_range_single(vma, start, end - start, NULL); + struct zap_details details = { + .reclaim_pt = true, + .even_cows = true, + }; + + zap_page_range_single(vma, start, end - start, &details); return 0; } diff --git a/mm/memory.c b/mm/memory.c index d4d5bd7046e7..560520e20ead 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1436,7 +1436,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma) static inline bool should_zap_cows(struct zap_details *details) { /* By default, zap all pages */ - if (!details) + if (!details || details->reclaim_pt) return true; /* Or, we zap COWed pages only if the caller wants to */ @@ -1710,12 +1710,15 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, struct zap_details *details) { bool force_flush = false, force_break = false; - bool any_skipped = false; struct mm_struct *mm = tlb->mm; int rss[NR_MM_COUNTERS]; spinlock_t *ptl; pte_t *start_pte; pte_t *pte; + pmd_t pmdval; + unsigned long start = addr; + bool can_reclaim_pt = reclaim_pt_is_enabled(start, end, details); + bool direct_reclaim = false; int nr; retry: @@ -1728,17 +1731,24 @@ retry: flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); do { + bool any_skipped = false; + if (need_resched()) break; nr = do_zap_pte_range(tlb, vma, pte, addr, end, details, rss, &force_flush, &force_break, &any_skipped); + if (any_skipped) + can_reclaim_pt = false; if (unlikely(force_break)) { addr += nr * PAGE_SIZE; break; } } while (pte += nr, addr += PAGE_SIZE * nr, addr != end); + if (can_reclaim_pt && addr == end) + direct_reclaim = try_get_and_clear_pmd(mm, pmd, &pmdval); + add_mm_rss_vec(mm, rss); arch_leave_lazy_mmu_mode(); @@ -1765,6 +1775,13 @@ retry: goto retry; } + if (can_reclaim_pt) { + if (direct_reclaim) + free_pte(mm, start, tlb, pmdval); + else + try_to_free_pte(mm, pmd, start, tlb); + } + return addr; } diff --git a/mm/pt_reclaim.c b/mm/pt_reclaim.c new file mode 100644 
index 000000000000..6540a3115dde --- /dev/null +++ b/mm/pt_reclaim.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include "internal.h" + +bool reclaim_pt_is_enabled(unsigned long start, unsigned long end, + struct zap_details *details) +{ + return details && details->reclaim_pt && (end - start >= PMD_SIZE); +} + +bool try_get_and_clear_pmd(struct mm_struct *mm, pmd_t *pmd, pmd_t *pmdval) +{ + spinlock_t *pml = pmd_lockptr(mm, pmd); + + if (!spin_trylock(pml)) + return false; + + *pmdval = pmdp_get_lockless(pmd); + pmd_clear(pmd); + spin_unlock(pml); + + return true; +} + +void free_pte(struct mm_struct *mm, unsigned long addr, struct mmu_gather *tlb, + pmd_t pmdval) +{ + pte_free_tlb(tlb, pmd_pgtable(pmdval), addr); + mm_dec_nr_ptes(mm); +} + +void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, + struct mmu_gather *tlb) +{ + pmd_t pmdval; + spinlock_t *pml, *ptl; + pte_t *start_pte, *pte; + int i; + + pml = pmd_lock(mm, pmd); + start_pte = pte_offset_map_rw_nolock(mm, pmd, addr, &pmdval, &ptl); + if (!start_pte) + goto out_ptl; + if (ptl != pml) + spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); + + /* Check if it is empty PTE page */ + for (i = 0, pte = start_pte; i < PTRS_PER_PTE; i++, pte++) { + if (!pte_none(ptep_get(pte))) + goto out_ptl; + } + pte_unmap(start_pte); + + pmd_clear(pmd); + + if (ptl != pml) + spin_unlock(ptl); + spin_unlock(pml); + + free_pte(mm, addr, tlb, pmdval); + + return; +out_ptl: + if (start_pte) + pte_unmap_unlock(start_pte, ptl); + if (ptl != pml) + spin_unlock(pml); +} From e1d6e5a74bd1f7a17a4e0eb98889abda097a2e27 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Fri, 6 Dec 2024 19:23:48 +0800 Subject: [PATCH 083/504] mm-pgtable-reclaim-empty-pte-page-in-madvisemadv_dontneed-fix Dan Carpenter reported the following warning: Commit e3aafd2d3551 ("mm: pgtable: reclaim empty PTE page in madvise(MADV_DONTNEED)") from Dec 4, 2024 (linux-next), leads to the following Smatch static checker warning: mm/pt_reclaim.c:69 try_to_free_pte() error: uninitialized symbol 'ptl'. To fix it, assign an initial value of NULL to the ptl. 
Link: https://lkml.kernel.org/r/20241206112348.51570-1-zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Reported-by: Dan Carpenter Closes: https://lore.kernel.org/linux-mm/224e6a4e-43b5-4080-bdd8-b0a6fb2f0853@stanley.mountain/ Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Peter Zijlstra Cc: Will Deacon Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- mm/pt_reclaim.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/pt_reclaim.c b/mm/pt_reclaim.c index 6540a3115dde..7e9455a18aae 100644 --- a/mm/pt_reclaim.c +++ b/mm/pt_reclaim.c @@ -36,7 +36,7 @@ void try_to_free_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, struct mmu_gather *tlb) { pmd_t pmdval; - spinlock_t *pml, *ptl; + spinlock_t *pml, *ptl = NULL; pte_t *start_pte, *pte; int i; From 8f19e4c0b1c64c02fc54b139fdaec28a9ea7f9fc Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 4 Dec 2024 19:09:50 +0800 Subject: [PATCH 084/504] x86: mm: free page table pages by RCU instead of semi RCU Now, if CONFIG_MMU_GATHER_RCU_TABLE_FREE is selected, the page table pages will be freed by semi RCU, that is: - batch table freeing: asynchronous free by RCU - single table freeing: IPI + synchronous free In this way, the page table can be lockless traversed by disabling IRQ in paths such as fast GUP. But this is not enough to free the empty PTE page table pages in paths other that munmap and exit_mmap path, because IPI cannot be synchronized with rcu_read_lock() in pte_offset_map{_lock}(). In preparation for supporting empty PTE page table pages reclaimation, let single table also be freed by RCU like batch table freeing. Then we can also use pte_offset_map() etc to prevent PTE page from being freed. Like pte_free_defer(), we can also safely use ptdesc->pt_rcu_head to free the page table pages: - The pt_rcu_head is unioned with pt_list and pmd_huge_pte. - For pt_list, it is used to manage the PGD page in x86. Fortunately tlb_remove_table() will not be used for free PGD pages, so it is safe to use pt_rcu_head. - For pmd_huge_pte, it is used for THPs, so it is safe. 
After applying this patch, if CONFIG_PT_RECLAIM is enabled, the function call of free_pte() is as follows: free_pte pte_free_tlb __pte_free_tlb ___pte_free_tlb paravirt_tlb_remove_table tlb_remove_table [!CONFIG_PARAVIRT, Xen PV, Hyper-V, KVM] [no-free-memory slowpath:] tlb_table_invalidate tlb_remove_table_one __tlb_remove_table_one [frees via RCU] [fastpath:] tlb_table_flush tlb_remove_table_free [frees via RCU] native_tlb_remove_table [CONFIG_PARAVIRT on native] tlb_remove_table [see above] Link: https://lkml.kernel.org/r/0287d442a973150b0e1019cc406e6322d148277a.1733305182.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Cc: Dave Hansen Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Catalin Marinas Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Will Deacon Cc: Zach O'Keefe Cc: Dan Carpenter Signed-off-by: Andrew Morton --- arch/x86/include/asm/tlb.h | 20 ++++++++++++++++++++ arch/x86/kernel/paravirt.c | 7 +++++++ arch/x86/mm/pgtable.c | 10 +++++++++- include/linux/mm_types.h | 4 +++- mm/mmu_gather.c | 9 ++++++++- 5 files changed, 47 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index 4d3c9d00d6b6..73f0786181cc 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h @@ -34,8 +34,28 @@ static inline void __tlb_remove_table(void *table) free_page_and_swap_cache(table); } +#ifdef CONFIG_PT_RECLAIM +static inline void __tlb_remove_table_one_rcu(struct rcu_head *head) +{ + struct page *page; + + page = container_of(head, struct page, rcu_head); + put_page(page); +} + +static inline void __tlb_remove_table_one(void *table) +{ + struct page *page; + + page = table; + call_rcu(&page->rcu_head, __tlb_remove_table_one_rcu); +} +#define __tlb_remove_table_one __tlb_remove_table_one +#endif /* CONFIG_PT_RECLAIM */ + static inline void invlpg(unsigned long addr) { asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); } + #endif /* _ASM_X86_TLB_H */ diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index fec381533555..89688921ea62 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -59,10 +59,17 @@ void __init native_pv_lock_init(void) static_branch_enable(&virt_spin_lock_key); } +#ifndef CONFIG_PT_RECLAIM static void native_tlb_remove_table(struct mmu_gather *tlb, void *table) { tlb_remove_page(tlb, table); } +#else +static void native_tlb_remove_table(struct mmu_gather *tlb, void *table) +{ + tlb_remove_table(tlb, table); +} +#endif struct static_key paravirt_steal_enabled; struct static_key paravirt_steal_rq_enabled; diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 5745a354a241..69a357b15974 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -19,12 +19,20 @@ EXPORT_SYMBOL(physical_mask); #endif #ifndef CONFIG_PARAVIRT +#ifndef CONFIG_PT_RECLAIM static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table) { tlb_remove_page(tlb, table); } -#endif +#else +static inline +void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table) +{ + tlb_remove_table(tlb, table); +} +#endif /* !CONFIG_PT_RECLAIM */ +#endif /* !CONFIG_PARAVIRT */ gfp_t __userpte_alloc_gfp = GFP_PGTABLE_USER | PGTABLE_HIGHMEM; diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 332cee285662..7490d84af310 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -438,7 +438,9 @@ FOLIO_MATCH(compound_head, _head_2a); * struct 
ptdesc - Memory descriptor for page tables. * @__page_flags: Same as page flags. Powerpc only. * @pt_rcu_head: For freeing page table pages. - * @pt_list: List of used page tables. Used for s390 and x86. + * @pt_list: List of used page tables. Used for s390 gmap shadow pages + * (which are not linked into the user page tables) and x86 + * pgds. * @_pt_pad_1: Padding that aliases with page's compound head. * @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs. * @__page_mapping: Aliases with page->mapping. Unused for page tables. diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index 99b3e9408aa0..1e21022bcf33 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -311,11 +311,18 @@ static inline void tlb_table_invalidate(struct mmu_gather *tlb) } } -static void tlb_remove_table_one(void *table) +#ifndef __tlb_remove_table_one +static inline void __tlb_remove_table_one(void *table) { tlb_remove_table_sync_one(); __tlb_remove_table(table); } +#endif + +static void tlb_remove_table_one(void *table) +{ + __tlb_remove_table_one(table); +} static void tlb_table_flush(struct mmu_gather *tlb) { From 3857d1d1bb776643241a9748dcee49aaa9d33923 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Tue, 10 Dec 2024 16:44:31 +0800 Subject: [PATCH 085/504] mm: pgtable: make ptlock be freed by RCU If ALLOC_SPLIT_PTLOCKS is enabled, the ptdesc->ptl will be a pointer and a ptlock will be allocated for it, and it will be freed immediately before the PTE page is freed. Once we support empty PTE page reclaimation, it may result in the following use-after-free problem: CPU 0 CPU 1 pte_offset_map_rw_nolock(&ptlock) --> rcu_read_lock() madvise(MADV_DONTNEED) --> ptlock_free (free ptlock immediately!) free PTE page via RCU /* UAF!! */ spin_lock(ptlock) To avoid this problem, make ptlock also be freed by RCU. Link: https://lkml.kernel.org/r/20241210084431.91414-1-zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Reported-by: syzbot+1c58afed1cfd2f57efee@syzkaller.appspotmail.com Tested-by: syzbot+1c58afed1cfd2f57efee@syzkaller.appspotmail.com Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Dan Carpenter Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Peter Zijlstra Cc: Will Deacon Cc: Zach O'Keefe Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 +- include/linux/mm_types.h | 9 ++++++++- mm/memory.c | 22 ++++++++++++++++------ 3 files changed, 25 insertions(+), 8 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 9372bc058b43..c1c7ab0e4ac7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2925,7 +2925,7 @@ void ptlock_free(struct ptdesc *ptdesc); static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc) { - return ptdesc->ptl; + return &(ptdesc->ptl->ptl); } #else /* ALLOC_SPLIT_PTLOCKS */ static inline void ptlock_cache_init(void) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 7490d84af310..eca583aae868 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -434,6 +434,13 @@ FOLIO_MATCH(flags, _flags_2a); FOLIO_MATCH(compound_head, _head_2a); #undef FOLIO_MATCH +#if ALLOC_SPLIT_PTLOCKS +struct pt_lock { + spinlock_t ptl; + struct rcu_head rcu; +}; +#endif + /** * struct ptdesc - Memory descriptor for page tables. * @__page_flags: Same as page flags. Powerpc only. 
@@ -482,7 +489,7 @@ struct ptdesc { union { unsigned long _pt_pad_2; #if ALLOC_SPLIT_PTLOCKS - spinlock_t *ptl; + struct pt_lock *ptl; #else spinlock_t ptl; #endif diff --git a/mm/memory.c b/mm/memory.c index 560520e20ead..b7537bd54abe 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -7013,24 +7013,34 @@ static struct kmem_cache *page_ptl_cachep; void __init ptlock_cache_init(void) { - page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0, + page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(struct pt_lock), 0, SLAB_PANIC, NULL); } bool ptlock_alloc(struct ptdesc *ptdesc) { - spinlock_t *ptl; + struct pt_lock *pt_lock; - ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); - if (!ptl) + pt_lock = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); + if (!pt_lock) return false; - ptdesc->ptl = ptl; + ptdesc->ptl = pt_lock; return true; } +static void ptlock_free_rcu(struct rcu_head *head) +{ + struct pt_lock *pt_lock; + + pt_lock = container_of(head, struct pt_lock, rcu); + kmem_cache_free(page_ptl_cachep, pt_lock); +} + void ptlock_free(struct ptdesc *ptdesc) { - kmem_cache_free(page_ptl_cachep, ptdesc->ptl); + struct pt_lock *pt_lock = ptdesc->ptl; + + call_rcu(&pt_lock->rcu, ptlock_free_rcu); } #endif From 2c9a41712426711cbddf4458cfda2d1b2f96e6d5 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 4 Dec 2024 19:09:51 +0800 Subject: [PATCH 086/504] x86: select ARCH_SUPPORTS_PT_RECLAIM if X86_64 Now, x86 has fully supported the CONFIG_PT_RECLAIM feature, and reclaiming PTE pages is profitable only on 64-bit systems, so select ARCH_SUPPORTS_PT_RECLAIM if X86_64. Link: https://lkml.kernel.org/r/841c1f35478d5354872d307888979c9e20de9c09.1733305182.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Cc: Dave Hansen Cc: Andy Lutomirski Cc: Peter Zijlstra Cc: Catalin Marinas Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Muchun Song Cc: Peter Xu Cc: Will Deacon Cc: Zach O'Keefe Cc: Dan Carpenter Signed-off-by: Andrew Morton --- arch/x86/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9d7bd0ae48c4..2e1a3e4386de 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -322,6 +322,7 @@ config X86 select FUNCTION_ALIGNMENT_4B imply IMA_SECURE_AND_OR_TRUSTED_BOOT if EFI select HAVE_DYNAMIC_FTRACE_NO_PATCHABLE + select ARCH_SUPPORTS_PT_RECLAIM if X86_64 config INSTRUCTION_DECODER def_bool y From 7f27b8acc932aa7816f86524869c48c463db7a62 Mon Sep 17 00:00:00 2001 From: Wenchao Hao Date: Mon, 2 Dec 2024 20:47:30 +0800 Subject: [PATCH 087/504] mm: add per-order mTHP swap-in fallback/fallback_charge counters Currently, large folio swap-in is supported, but we lack a method to analyze their success ratio. Similar to anon_fault_fallback, we introduce per-order mTHP swpin_fallback and swpin_fallback_charge counters for calculating their success ratio. 
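With these, the per-order swap-in success ratio can be estimated as roughly swpin / (swpin + swpin_fallback), and swpin_fallback_charge indicates how many of those fallbacks happened only because the memcg charge failed even though the allocation itself succeeded.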
The new counters are located at: /sys/kernel/mm/transparent_hugepage/hugepages-/stats/ swpin_fallback swpin_fallback_charge Link: https://lkml.kernel.org/r/20241202124730.2407037-1-haowenchao22@gmail.com Signed-off-by: Wenchao Hao Reviewed-by: Barry Song Reviewed-by: Lance Yang Cc: Baolin Wang Cc: David Hildenbrand Cc: Jonathan Corbet Cc: Matthew Wilcox Cc: Peter Xu Cc: Ryan Roberts Cc: Usama Arif Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/transhuge.rst | 10 ++++++++++ include/linux/huge_mm.h | 2 ++ mm/huge_memory.c | 6 ++++++ mm/memory.c | 2 ++ 4 files changed, 20 insertions(+) diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst index d870f83775bc..dff8d5985f0f 100644 --- a/Documentation/admin-guide/mm/transhuge.rst +++ b/Documentation/admin-guide/mm/transhuge.rst @@ -591,6 +591,16 @@ swpin is incremented every time a huge page is swapped in from a non-zswap swap device in one piece. +swpin_fallback + is incremented if swapin fails to allocate or charge a huge page + and instead falls back to using huge pages with lower orders or + small pages. + +swpin_fallback_charge + is incremented if swapin fails to charge a huge page and instead + falls back to using huge pages with lower orders or small pages + even though the allocation was successful. + swpout is incremented every time a huge page is swapped out to a non-zswap swap device in one piece without splitting. diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index b94c2e8ee918..93e509b6c00e 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -121,6 +121,8 @@ enum mthp_stat_item { MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, MTHP_STAT_ZSWPOUT, MTHP_STAT_SWPIN, + MTHP_STAT_SWPIN_FALLBACK, + MTHP_STAT_SWPIN_FALLBACK_CHARGE, MTHP_STAT_SWPOUT, MTHP_STAT_SWPOUT_FALLBACK, MTHP_STAT_SHMEM_ALLOC, diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 45901dc6710c..6d87db53db33 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -617,6 +617,8 @@ DEFINE_MTHP_STAT_ATTR(anon_fault_fallback, MTHP_STAT_ANON_FAULT_FALLBACK); DEFINE_MTHP_STAT_ATTR(anon_fault_fallback_charge, MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE); DEFINE_MTHP_STAT_ATTR(zswpout, MTHP_STAT_ZSWPOUT); DEFINE_MTHP_STAT_ATTR(swpin, MTHP_STAT_SWPIN); +DEFINE_MTHP_STAT_ATTR(swpin_fallback, MTHP_STAT_SWPIN_FALLBACK); +DEFINE_MTHP_STAT_ATTR(swpin_fallback_charge, MTHP_STAT_SWPIN_FALLBACK_CHARGE); DEFINE_MTHP_STAT_ATTR(swpout, MTHP_STAT_SWPOUT); DEFINE_MTHP_STAT_ATTR(swpout_fallback, MTHP_STAT_SWPOUT_FALLBACK); #ifdef CONFIG_SHMEM @@ -637,6 +639,8 @@ static struct attribute *anon_stats_attrs[] = { #ifndef CONFIG_SHMEM &zswpout_attr.attr, &swpin_attr.attr, + &swpin_fallback_attr.attr, + &swpin_fallback_charge_attr.attr, &swpout_attr.attr, &swpout_fallback_attr.attr, #endif @@ -669,6 +673,8 @@ static struct attribute *any_stats_attrs[] = { #ifdef CONFIG_SHMEM &zswpout_attr.attr, &swpin_attr.attr, + &swpin_fallback_attr.attr, + &swpin_fallback_charge_attr.attr, &swpout_attr.attr, &swpout_fallback_attr.attr, #endif diff --git a/mm/memory.c b/mm/memory.c index b7537bd54abe..69ab5bb6db75 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4258,8 +4258,10 @@ static struct folio *alloc_swap_folio(struct vm_fault *vmf) if (!mem_cgroup_swapin_charge_folio(folio, vma->vm_mm, gfp, entry)) return folio; + count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK_CHARGE); folio_put(folio); } + count_mthp_stat(order, MTHP_STAT_SWPIN_FALLBACK); order = next_order(&orders, order); } From fccdd1fff114871b61e3bab5dc4e829080d92e23 Mon 
Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 5 Dec 2024 19:07:48 +0000 Subject: [PATCH 088/504] selftests/mm: add fork CoW guard page test When we fork anonymous pages, apply a guard page then remove it, the previous CoW mapping is cleared. This might not be obvious to an outside observer without taking some time to think about how the overall process functions, so document that this is the case through a test, which also usefully asserts that the behaviour is as we expect. This is grouped with other, more important, fork tests that ensure that guard pages are correctly propagated on fork. Fix a typo in a nearby comment at the same time. Link: https://lkml.kernel.org/r/20241205190748.115656-1-lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Liam R. Howlett Cc: Jann Horn Cc: Shuah Khan Cc: Vlastimil Babka Cc: Ryan Roberts Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/guard-pages.c | 73 +++++++++++++++++++++++- 1 file changed, 72 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/guard-pages.c b/tools/testing/selftests/mm/guard-pages.c index 7cdf815d0d63..d8f8dee9ebbd 100644 --- a/tools/testing/selftests/mm/guard-pages.c +++ b/tools/testing/selftests/mm/guard-pages.c @@ -990,7 +990,7 @@ TEST_F(guard_pages, fork) MAP_ANON | MAP_PRIVATE, -1, 0); ASSERT_NE(ptr, MAP_FAILED); - /* Establish guard apges in the first 5 pages. */ + /* Establish guard pages in the first 5 pages. */ ASSERT_EQ(madvise(ptr, 5 * page_size, MADV_GUARD_INSTALL), 0); pid = fork(); @@ -1029,6 +1029,77 @@ TEST_F(guard_pages, fork) ASSERT_EQ(munmap(ptr, 10 * page_size), 0); } +/* + * Assert expected behaviour after we fork populated ranges of anonymous memory + * and then guard and unguard the range. + */ +TEST_F(guard_pages, fork_cow) +{ + const unsigned long page_size = self->page_size; + char *ptr; + pid_t pid; + int i; + + /* Map 10 pages. */ + ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, + MAP_ANON | MAP_PRIVATE, -1, 0); + ASSERT_NE(ptr, MAP_FAILED); + + /* Populate range. */ + for (i = 0; i < 10 * page_size; i++) { + char chr = 'a' + (i % 26); + + ptr[i] = chr; + } + + pid = fork(); + ASSERT_NE(pid, -1); + if (!pid) { + /* This is the child process now. */ + + /* Ensure the range is as expected. */ + for (i = 0; i < 10 * page_size; i++) { + char expected = 'a' + (i % 26); + char actual = ptr[i]; + + ASSERT_EQ(actual, expected); + } + + /* Establish guard pages across the whole range. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_INSTALL), 0); + /* Remove it. */ + ASSERT_EQ(madvise(ptr, 10 * page_size, MADV_GUARD_REMOVE), 0); + + /* + * By removing the guard pages, the page tables will be + * cleared. Assert that we are looking at the zero page now. + */ + for (i = 0; i < 10 * page_size; i++) { + char actual = ptr[i]; + + ASSERT_EQ(actual, '\0'); + } + + exit(0); + } + + /* Parent process. */ + + /* Parent simply waits on child. */ + waitpid(pid, NULL, 0); + + /* Ensure the range is unchanged in parent anon range. */ + for (i = 0; i < 10 * page_size; i++) { + char expected = 'a' + (i % 26); + char actual = ptr[i]; + + ASSERT_EQ(actual, expected); + } + + /* Cleanup. */ + ASSERT_EQ(munmap(ptr, 10 * page_size), 0); +} + /* * Assert that forking a process with VMAs that do have VM_WIPEONFORK set * behave as expected. 
From 25168440aedb1c95328d62f2caf6035613e55617 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Tue, 7 Jan 2025 14:29:35 +0000 Subject: [PATCH 089/504] selftests/mm: static process_madvise() wrapper for guard-pages The recently introduced guard-pages mm selftest uses the process_madvise() syscall, a wrapper for which was added to glibc v2.36. For those of us stuck with older distributions this causes a compile error when compiling the mm selftests. For example Ubuntu 22.04 uses glibc 2.35, which does not have the wrapper. To workaround the issue, let's introduce our own static process_madvise() wrapper that uses glibc's syscall() helper. While we are at it, add the guard-page test suite to run_vmtests.sh so that it can be automatically run by CI systems. Link: https://lkml.kernel.org/r/20250107142937.1870478-1-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Reviewed-by: Lorenzo Stoakes Cc: Jann Horn Cc: Liam R. Howlett Cc: Shuah Khan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/guard-pages.c | 10 ++++++++-- tools/testing/selftests/mm/run_vmtests.sh | 5 +++++ 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/mm/guard-pages.c b/tools/testing/selftests/mm/guard-pages.c index d8f8dee9ebbd..ece37212a8a2 100644 --- a/tools/testing/selftests/mm/guard-pages.c +++ b/tools/testing/selftests/mm/guard-pages.c @@ -55,6 +55,12 @@ static int pidfd_open(pid_t pid, unsigned int flags) return syscall(SYS_pidfd_open, pid, flags); } +static ssize_t sys_process_madvise(int pidfd, const struct iovec *iovec, + size_t n, int advice, unsigned int flags) +{ + return syscall(__NR_process_madvise, pidfd, iovec, n, advice, flags); +} + /* * Enable our signal catcher and try to read/write the specified buffer. The * return value indicates whether the read/write succeeds without a fatal @@ -419,7 +425,7 @@ TEST_F(guard_pages, process_madvise) ASSERT_EQ(munmap(&ptr_region[99 * page_size], page_size), 0); /* Now guard in one step. */ - count = process_madvise(pidfd, vec, 6, MADV_GUARD_INSTALL, 0); + count = sys_process_madvise(pidfd, vec, 6, MADV_GUARD_INSTALL, 0); /* OK we don't have permission to do this, skip. */ if (count == -1 && errno == EPERM) @@ -440,7 +446,7 @@ TEST_F(guard_pages, process_madvise) ASSERT_FALSE(try_read_write_buf(&ptr3[19 * page_size])); /* Now do the same with unguard... */ - count = process_madvise(pidfd, vec, 6, MADV_GUARD_REMOVE, 0); + count = sys_process_madvise(pidfd, vec, 6, MADV_GUARD_REMOVE, 0); /* ...and everything should now succeed. 
*/ diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 2fc290d9430c..00c3f07ea100 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -45,6 +45,8 @@ separated by spaces: vmalloc smoke tests - hmm hmm smoke tests +- madv_guard + test madvise(2) MADV_GUARD_INSTALL and MADV_GUARD_REMOVE options - madv_populate test memadvise(2) MADV_POPULATE_{READ,WRITE} options - memfd_secret @@ -375,6 +377,9 @@ CATEGORY="mremap" run_test ./mremap_dontunmap CATEGORY="hmm" run_test bash ./test_hmm.sh smoke +# MADV_GUARD_INSTALL and MADV_GUARD_REMOVE tests +CATEGORY="madv_guard" run_test ./guard-pages + # MADV_POPULATE_READ and MADV_POPULATE_WRITE tests CATEGORY="madv_populate" run_test ./madv_populate From b79da689498155f8463a7b5ac33e3960b5d8c225 Mon Sep 17 00:00:00 2001 From: Guo Weikang Date: Thu, 5 Dec 2024 17:45:21 +0800 Subject: [PATCH 090/504] mm/shmem: refactor to reuse vfs_parse_monolithic_sep for option parsing shmem_parse_options() is refactored to use vfs_parse_monolithic_sep() with a custom separator function, shmem_next_opt(). This eliminates redundant logic for parsing comma-separated options and ensures consistency with other kernel code that uses the same interface. The vfs_parse_monolithic_sep() helper was introduced in commit e001d1447cd4 ("fs: factor out vfs_parse_monolithic_sep() helper"). Link: https://lkml.kernel.org/r/20241205094521.1244678-1-guoweikang.kernel@gmail.com Signed-off-by: Guo Weikang Cc: Amir Goldstein Cc: Hugh Dickins Signed-off-by: Andrew Morton --- mm/shmem.c | 65 +++++++++++++++++++++++------------------------------- 1 file changed, 27 insertions(+), 38 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index bdc2df0b6cf7..b562a040d4e3 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -4647,48 +4647,37 @@ bad_value: return invalfc(fc, "Bad value for '%s'", param->key); } -static int shmem_parse_options(struct fs_context *fc, void *data) +static char *shmem_next_opt(char **s) { - char *options = data; + char *sbegin = *s; + char *p; - if (options) { - int err = security_sb_eat_lsm_opts(options, &fc->security); - if (err) - return err; - } + if (sbegin == NULL) + return NULL; - while (options != NULL) { - char *this_char = options; - for (;;) { - /* - * NUL-terminate this option: unfortunately, - * mount options form a comma-separated list, - * but mpol's nodelist may also contain commas. - */ - options = strchr(options, ','); - if (options == NULL) - break; - options++; - if (!isdigit(*options)) { - options[-1] = '\0'; - break; - } - } - if (*this_char) { - char *value = strchr(this_char, '='); - size_t len = 0; - int err; - - if (value) { - *value++ = '\0'; - len = strlen(value); - } - err = vfs_parse_fs_string(fc, this_char, value, len); - if (err < 0) - return err; + /* + * NUL-terminate this option: unfortunately, + * mount options form a comma-separated list, + * but mpol's nodelist may also contain commas. 
+ */ + for (;;) { + p = strchr(*s, ','); + if (p == NULL) + break; + *s = p + 1; + if (!isdigit(*(p+1))) { + *p = '\0'; + return sbegin; } } - return 0; + + *s = NULL; + return sbegin; +} + +static int shmem_parse_monolithic(struct fs_context *fc, void *data) +{ + return vfs_parse_monolithic_sep(fc, data, shmem_next_opt); } /* @@ -5038,7 +5027,7 @@ static const struct fs_context_operations shmem_fs_context_ops = { .free = shmem_free_fc, .get_tree = shmem_get_tree, #ifdef CONFIG_TMPFS - .parse_monolithic = shmem_parse_options, + .parse_monolithic = shmem_parse_monolithic, .parse_param = shmem_parse_one, .reconfigure = shmem_reconfigure, #endif From f7b933402c684a127aa869df241c3fa5769f322d Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 22 Nov 2024 09:44:14 -0800 Subject: [PATCH 091/504] seqlock: add raw_seqcount_try_begin Add raw_seqcount_try_begin() to opens a read critical section of the given seqcount_t if the counter is even. This enables eliding the critical section entirely if the counter is odd, instead of doing the speculation knowing it will fail. Link: https://lkml.kernel.org/r/20241122174416.1367052-1-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: David Hildenbrand Reviewed-by: Liam R. Howlett Suggested-by: Peter Zijlstra Cc: Christian Brauner Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Paul E. McKenney Cc: Peter Xu Cc: Shakeel Butt Cc: Sourav Panda Cc: Vlastimil Babka Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/seqlock.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 5298765d6ca4..22c2c48b4265 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -318,6 +318,28 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) __seq; \ }) +/** + * raw_seqcount_try_begin() - begin a seqcount_t read critical section + * w/o lockdep and w/o counter stabilization + * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants + * + * Similar to raw_seqcount_begin(), except it enables eliding the critical + * section entirely if odd, instead of doing the speculation knowing it will + * fail. + * + * Useful when counter stabilization is more or less equivalent to taking + * the lock and there is a slowpath that does that. + * + * If true, start will be set to the (even) sequence count read. + * + * Return: true when a read critical section is started. + */ +#define raw_seqcount_try_begin(s, start) \ +({ \ + start = raw_read_seqcount(s); \ + !(start & 1); \ +}) + /** * raw_seqcount_begin() - begin a seqcount_t read critical section w/o * lockdep and w/o counter stabilization From 4e4620002da7b6f090266773369d23ee233ba57f Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 22 Nov 2024 09:44:15 -0800 Subject: [PATCH 092/504] mm: convert mm_lock_seq to a proper seqcount Convert mm_lock_seq to be seqcount_t and change all mmap_write_lock variants to increment it, in-line with the usual seqcount usage pattern. This lets us check whether the mmap_lock is write-locked by checking mm_lock_seq.sequence counter (odd=locked, even=unlocked). This will be used when implementing mmap_lock speculation functions. As a result vm_lock_seq is also change to be unsigned to match the type of mm_lock_seq.sequence. 
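To picture the odd/even convention, here is a small standalone model (illustrative only, not kernel code; it omits the ACQUIRE/RELEASE ordering and lockdep coverage that the real seqcount_t primitives provide):

/* Toy model of "odd = write-locked, even = unlocked" speculation. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int mm_lock_seq;	/* stands in for mm_lock_seq.sequence */

static void write_lock(void)   { mm_lock_seq++; }	/* even -> odd  */
static void write_unlock(void) { mm_lock_seq++; }	/* odd  -> even */

/* Analogous to raw_seqcount_try_begin(): only speculate when even. */
static bool speculate_try_begin(unsigned int *seq)
{
	*seq = mm_lock_seq;
	return !(*seq & 1);
}

/* Analogous to read_seqcount_retry(): any change means a writer ran. */
static bool speculate_retry(unsigned int seq)
{
	return mm_lock_seq != seq;
}

int main(void)
{
	unsigned int seq;

	if (speculate_try_begin(&seq))
		printf("unlocked (seq=%u): speculation allowed\n", seq);

	write_lock();
	if (!speculate_try_begin(&seq))
		printf("write-locked: speculation skipped, take the lock instead\n");
	write_unlock();

	/* A speculation begun before the write section must now retry. */
	printf("stale reader retries: %s\n", speculate_retry(0) ? "yes" : "no");
	return 0;
}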
Link: https://lkml.kernel.org/r/20241122174416.1367052-2-surenb@google.com Suggested-by: Peter Zijlstra Signed-off-by: Suren Baghdasaryan Reviewed-by: Liam R. Howlett Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Paul E. McKenney Cc: Peter Xu Cc: Shakeel Butt Cc: Sourav Panda Cc: Vlastimil Babka Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mm.h | 12 ++-- include/linux/mm_types.h | 7 ++- include/linux/mmap_lock.h | 97 +++++++++++++++++++------------- kernel/fork.c | 5 +- mm/init-mm.c | 2 +- tools/testing/vma/vma.c | 4 +- tools/testing/vma/vma_internal.h | 4 +- 7 files changed, 74 insertions(+), 57 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index c1c7ab0e4ac7..b75eed7f5def 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -711,7 +711,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * we don't rely on for anything - the mm_lock_seq read against which we * need ordering is below. */ - if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq)) + if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence)) return false; if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0)) @@ -728,7 +728,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * after it has been unlocked. * This pairs with RELEASE semantics in vma_end_write_all(). */ - if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) { + if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) { up_read(&vma->vm_lock->lock); return false; } @@ -743,7 +743,7 @@ static inline void vma_end_read(struct vm_area_struct *vma) } /* WARNING! Can only be used if mmap_lock is expected to be write-locked */ -static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) +static bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_lock_seq) { mmap_assert_write_locked(vma->vm_mm); @@ -751,7 +751,7 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) * current task is holding mmap_write_lock, both vma->vm_lock_seq and * mm->mm_lock_seq can't be concurrently modified. */ - *mm_lock_seq = vma->vm_mm->mm_lock_seq; + *mm_lock_seq = vma->vm_mm->mm_lock_seq.sequence; return (vma->vm_lock_seq == *mm_lock_seq); } @@ -762,7 +762,7 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq) */ static inline void vma_start_write(struct vm_area_struct *vma) { - int mm_lock_seq; + unsigned int mm_lock_seq; if (__is_vma_write_locked(vma, &mm_lock_seq)) return; @@ -780,7 +780,7 @@ static inline void vma_start_write(struct vm_area_struct *vma) static inline void vma_assert_write_locked(struct vm_area_struct *vma) { - int mm_lock_seq; + unsigned int mm_lock_seq; VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma); } diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index eca583aae868..c668a60a1dc3 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -736,7 +736,7 @@ struct vm_area_struct { * counter reuse can only lead to occasional unnecessary use of the * slowpath. */ - int vm_lock_seq; + unsigned int vm_lock_seq; /* Unstable RCU readers are allowed to read this. 
*/ struct vma_lock *vm_lock; #endif @@ -930,6 +930,9 @@ struct mm_struct { * Roughly speaking, incrementing the sequence number is * equivalent to releasing locks on VMAs; reading the sequence * number can be part of taking a read lock on a VMA. + * Incremented every time mmap_lock is write-locked/unlocked. + * Initialized to 0, therefore odd values indicate mmap_lock + * is write-locked and even values that it's released. * * Can be modified under write mmap_lock using RELEASE * semantics. @@ -938,7 +941,7 @@ struct mm_struct { * Can be read with ACQUIRE semantics if not holding write * mmap_lock. */ - int mm_lock_seq; + seqcount_t mm_lock_seq; #endif diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index de9dc20b01ba..9715326f5a85 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -71,6 +71,62 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm) } #ifdef CONFIG_PER_VMA_LOCK +static inline void mm_lock_seqcount_init(struct mm_struct *mm) +{ + seqcount_init(&mm->mm_lock_seq); +} + +static inline void mm_lock_seqcount_begin(struct mm_struct *mm) +{ + do_raw_write_seqcount_begin(&mm->mm_lock_seq); +} + +static inline void mm_lock_seqcount_end(struct mm_struct *mm) +{ + ASSERT_EXCLUSIVE_WRITER(mm->mm_lock_seq); + do_raw_write_seqcount_end(&mm->mm_lock_seq); +} + +#else +static inline void mm_lock_seqcount_init(struct mm_struct *mm) {} +static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {} +static inline void mm_lock_seqcount_end(struct mm_struct *mm) {} +#endif + +static inline void mmap_init_lock(struct mm_struct *mm) +{ + init_rwsem(&mm->mmap_lock); + mm_lock_seqcount_init(mm); +} + +static inline void mmap_write_lock(struct mm_struct *mm) +{ + __mmap_lock_trace_start_locking(mm, true); + down_write(&mm->mmap_lock); + mm_lock_seqcount_begin(mm); + __mmap_lock_trace_acquire_returned(mm, true, true); +} + +static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass) +{ + __mmap_lock_trace_start_locking(mm, true); + down_write_nested(&mm->mmap_lock, subclass); + mm_lock_seqcount_begin(mm); + __mmap_lock_trace_acquire_returned(mm, true, true); +} + +static inline int mmap_write_lock_killable(struct mm_struct *mm) +{ + int ret; + + __mmap_lock_trace_start_locking(mm, true); + ret = down_write_killable(&mm->mmap_lock); + if (!ret) + mm_lock_seqcount_begin(mm); + __mmap_lock_trace_acquire_returned(mm, true, ret == 0); + return ret; +} + /* * Drop all currently-held per-VMA locks. * This is called from the mmap_lock implementation directly before releasing @@ -82,46 +138,7 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm) static inline void vma_end_write_all(struct mm_struct *mm) { mmap_assert_write_locked(mm); - /* - * Nobody can concurrently modify mm->mm_lock_seq due to exclusive - * mmap_lock being held. - * We need RELEASE semantics here to ensure that preceding stores into - * the VMA take effect before we unlock it with this store. - * Pairs with ACQUIRE semantics in vma_start_read(). 
- */ - smp_store_release(&mm->mm_lock_seq, mm->mm_lock_seq + 1); -} -#else -static inline void vma_end_write_all(struct mm_struct *mm) {} -#endif - -static inline void mmap_init_lock(struct mm_struct *mm) -{ - init_rwsem(&mm->mmap_lock); -} - -static inline void mmap_write_lock(struct mm_struct *mm) -{ - __mmap_lock_trace_start_locking(mm, true); - down_write(&mm->mmap_lock); - __mmap_lock_trace_acquire_returned(mm, true, true); -} - -static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass) -{ - __mmap_lock_trace_start_locking(mm, true); - down_write_nested(&mm->mmap_lock, subclass); - __mmap_lock_trace_acquire_returned(mm, true, true); -} - -static inline int mmap_write_lock_killable(struct mm_struct *mm) -{ - int ret; - - __mmap_lock_trace_start_locking(mm, true); - ret = down_write_killable(&mm->mmap_lock); - __mmap_lock_trace_acquire_returned(mm, true, ret == 0); - return ret; + mm_lock_seqcount_end(mm); } static inline void mmap_write_unlock(struct mm_struct *mm) diff --git a/kernel/fork.c b/kernel/fork.c index 9b301180fd41..ded49f18cd95 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -448,7 +448,7 @@ static bool vma_lock_alloc(struct vm_area_struct *vma) return false; init_rwsem(&vma->vm_lock->lock); - vma->vm_lock_seq = -1; + vma->vm_lock_seq = UINT_MAX; return true; } @@ -1262,9 +1262,6 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, seqcount_init(&mm->write_protect_seq); mmap_init_lock(mm); INIT_LIST_HEAD(&mm->mmlist); -#ifdef CONFIG_PER_VMA_LOCK - mm->mm_lock_seq = 0; -#endif mm_pgtables_bytes_init(mm); mm->map_count = 0; mm->locked_vm = 0; diff --git a/mm/init-mm.c b/mm/init-mm.c index 24c809379274..6af3ad675930 100644 --- a/mm/init-mm.c +++ b/mm/init-mm.c @@ -40,7 +40,7 @@ struct mm_struct init_mm = { .arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock), .mmlist = LIST_HEAD_INIT(init_mm.mmlist), #ifdef CONFIG_PER_VMA_LOCK - .mm_lock_seq = 0, + .mm_lock_seq = SEQCNT_ZERO(init_mm.mm_lock_seq), #endif .user_ns = &init_user_ns, .cpu_bitmap = CPU_BITS_NONE, diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c index 891d87a9ad6b..920fba58884e 100644 --- a/tools/testing/vma/vma.c +++ b/tools/testing/vma/vma.c @@ -100,7 +100,7 @@ static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, * begun. Linking to the tree will have caused this to be incremented, * which means we will get a false positive otherwise. */ - vma->vm_lock_seq = -1; + vma->vm_lock_seq = UINT_MAX; return vma; } @@ -225,7 +225,7 @@ static bool vma_write_started(struct vm_area_struct *vma) int seq = vma->vm_lock_seq; /* We reset after each check. */ - vma->vm_lock_seq = -1; + vma->vm_lock_seq = UINT_MAX; /* The vma_start_write() stub simply increments this value. */ return seq > -1; diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index a7de59a0d694..b973b3e41c83 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -281,7 +281,7 @@ struct vm_area_struct { * counter reuse can only lead to occasional unnecessary use of the * slowpath. 
*/ - int vm_lock_seq; + unsigned int vm_lock_seq; struct vma_lock *vm_lock; #endif @@ -467,7 +467,7 @@ static inline bool vma_lock_alloc(struct vm_area_struct *vma) return false; init_rwsem(&vma->vm_lock->lock); - vma->vm_lock_seq = -1; + vma->vm_lock_seq = UINT_MAX; return true; } From fdc570588b7d3ba88a3bab4dd5f73af4493fd96d Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 22 Nov 2024 09:44:16 -0800 Subject: [PATCH 093/504] mm: introduce mmap_lock_speculate_{try_begin|retry} Add helper functions to speculatively perform operations without read-locking mmap_lock, expecting that mmap_lock will not be write-locked and mm is not modified from under us. Link: https://lkml.kernel.org/r/20241122174416.1367052-3-surenb@google.com Suggested-by: Peter Zijlstra Signed-off-by: Suren Baghdasaryan Reviewed-by: Liam R. Howlett Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Paul E. McKenney Cc: Peter Xu Cc: Shakeel Butt Cc: Sourav Panda Cc: Vlastimil Babka Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mmap_lock.h | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 9715326f5a85..8ac3041df053 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -71,6 +71,7 @@ static inline void mmap_assert_write_locked(const struct mm_struct *mm) } #ifdef CONFIG_PER_VMA_LOCK + static inline void mm_lock_seqcount_init(struct mm_struct *mm) { seqcount_init(&mm->mm_lock_seq); @@ -87,11 +88,39 @@ static inline void mm_lock_seqcount_end(struct mm_struct *mm) do_raw_write_seqcount_end(&mm->mm_lock_seq); } -#else +static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq) +{ + /* + * Since mmap_lock is a sleeping lock, and waiting for it to become + * unlocked is more or less equivalent with taking it ourselves, don't + * bother with the speculative path if mmap_lock is already write-locked + * and take the slow path, which takes the lock. + */ + return raw_seqcount_try_begin(&mm->mm_lock_seq, *seq); +} + +static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq) +{ + return do_read_seqcount_retry(&mm->mm_lock_seq, seq); +} + +#else /* CONFIG_PER_VMA_LOCK */ + static inline void mm_lock_seqcount_init(struct mm_struct *mm) {} static inline void mm_lock_seqcount_begin(struct mm_struct *mm) {} static inline void mm_lock_seqcount_end(struct mm_struct *mm) {} -#endif + +static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned int *seq) +{ + return false; +} + +static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq) +{ + return true; +} + +#endif /* CONFIG_PER_VMA_LOCK */ static inline void mmap_init_lock(struct mm_struct *mm) { From c9111a1994c034e19f2270af30148f76cb733650 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 6 Dec 2024 20:24:17 -0800 Subject: [PATCH 094/504] mm-introduce-mmap_lock_speculate_try_beginretry-fix use read_seqcount_retry() in mmap_lock_speculate_retry(), per Wei Yang Cc: Liam R. 
Howlett Cc: Peter Zijlstra Cc: Suren Baghdasaryan Cc: Wei Yang Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickins Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: Paul E. McKenney Cc: Peter Xu Cc: Shakeel Butt Cc: Sourav Panda Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- include/linux/mmap_lock.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 8ac3041df053..45a21faa3ff6 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -101,7 +101,7 @@ static inline bool mmap_lock_speculate_try_begin(struct mm_struct *mm, unsigned static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int seq) { - return do_read_seqcount_retry(&mm->mm_lock_seq, seq); + return read_seqcount_retry(&mm->mm_lock_seq, seq); } #else /* CONFIG_PER_VMA_LOCK */ From 228cf93b3a16e35fcc7f585008d31452be9675ab Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 9 Dec 2024 18:20:01 -0800 Subject: [PATCH 095/504] mm/damon/tests/vaddr-kunit.h: reduce stack consumption After "mm: move per-vma lock into vm_area_struct" we're hitting mm/damon/tests/vaddr-kunit.h: In function 'damon_test_three_regions_in_vmas': mm/damon/tests/vaddr-kunit.h:92:1: error: the frame size of 3280 bytes is larger than 2048 bytes [-Werror=frame-larger-than=] Fix by moving all those vmas off the stack. Closes: https://lkml.kernel.org/r/20241209170829.11311e70@canb.auug.org.au Reported-by: Stephen Rothwell Reviewed-by: SeongJae Park Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- mm/damon/tests/vaddr-kunit.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/damon/tests/vaddr-kunit.h b/mm/damon/tests/vaddr-kunit.h index b9fe3bc8472b..f9a6ee9a9f0c 100644 --- a/mm/damon/tests/vaddr-kunit.h +++ b/mm/damon/tests/vaddr-kunit.h @@ -68,7 +68,7 @@ static void damon_test_three_regions_in_vmas(struct kunit *test) static struct mm_struct mm; struct damon_addr_range regions[3] = {0}; /* 10-20-25, 200-210-220, 300-305, 307-330 */ - struct vm_area_struct vmas[] = { + static const struct vm_area_struct vmas[] = { (struct vm_area_struct) {.vm_start = 10, .vm_end = 20}, (struct vm_area_struct) {.vm_start = 20, .vm_end = 25}, (struct vm_area_struct) {.vm_start = 200, .vm_end = 210}, From 90fe99813be379f1c06b7d9a5f3d1e32dcc12423 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 9 Dec 2024 21:22:25 -0800 Subject: [PATCH 096/504] mm-damon-tests-vaddr-kunith-reduce-stack-consumption-fix fix build Cc: SeongJae Park Cc: Stephen Rothwell Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- mm/damon/tests/vaddr-kunit.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/damon/tests/vaddr-kunit.h b/mm/damon/tests/vaddr-kunit.h index f9a6ee9a9f0c..7cd944266a92 100644 --- a/mm/damon/tests/vaddr-kunit.h +++ b/mm/damon/tests/vaddr-kunit.h @@ -68,7 +68,7 @@ static void damon_test_three_regions_in_vmas(struct kunit *test) static struct mm_struct mm; struct damon_addr_range regions[3] = {0}; /* 10-20-25, 200-210-220, 300-305, 307-330 */ - static const struct vm_area_struct vmas[] = { + static struct vm_area_struct vmas[] = { (struct vm_area_struct) {.vm_start = 10, .vm_end = 20}, (struct vm_area_struct) {.vm_start = 20, .vm_end = 25}, (struct vm_area_struct) {.vm_start = 200, .vm_end = 210}, From 
74ae9a9a94b01ced6a555be85f21505a4bf5379a Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 6 Dec 2024 22:50:36 +0000 Subject: [PATCH 097/504] mm: enforce __must_check on VMA merge and split It is of critical importance to check the return results on VMA merge (and split), failure to do so can result in use-after-free's. This bug has recurred, so have the compiler enforce this check to prevent any future repetition. Link: https://lkml.kernel.org/r/20241206225036.273103-1-lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Liam R. Howlett Acked-by: Vlastimil Babka Cc: Jann Horn Signed-off-by: Andrew Morton --- mm/vma.c | 8 +++++--- mm/vma.h | 26 +++++++++++++++----------- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/mm/vma.c b/mm/vma.c index 06554a732bce..6fa240e5b0c5 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -447,8 +447,9 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, * has already been checked or doesn't make sense to fail. * VMA Iterator will point to the original VMA. */ -static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, - unsigned long addr, int new_below) +static __must_check int +__split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, + unsigned long addr, int new_below) { struct vma_prepare vp; struct vm_area_struct *new; @@ -710,7 +711,8 @@ static bool can_merge_remove_vma(struct vm_area_struct *vma) * - The caller must hold a WRITE lock on the mm_struct->mmap_lock. * - vmi must be positioned within [@vmg->vma->vm_start, @vmg->vma->vm_end). */ -static struct vm_area_struct *vma_merge_existing_range(struct vma_merge_struct *vmg) +static __must_check struct vm_area_struct *vma_merge_existing_range( + struct vma_merge_struct *vmg) { struct vm_area_struct *vma = vmg->vma; struct vm_area_struct *prev = vmg->prev; diff --git a/mm/vma.h b/mm/vma.h index 295d44ea54db..61ed044b6145 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -139,9 +139,10 @@ void validate_mm(struct mm_struct *mm); #define validate_mm(mm) do { } while (0) #endif -int vma_expand(struct vma_merge_struct *vmg); -int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, - unsigned long start, unsigned long end, pgoff_t pgoff); +__must_check int vma_expand(struct vma_merge_struct *vmg); +__must_check int vma_shrink(struct vma_iterator *vmi, + struct vm_area_struct *vma, + unsigned long start, unsigned long end, pgoff_t pgoff); static inline int vma_iter_store_gfp(struct vma_iterator *vmi, struct vm_area_struct *vma, gfp_t gfp) @@ -174,13 +175,14 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, struct vm_area_struct *prev, struct vm_area_struct *next); /* We are about to modify the VMA's flags. */ -struct vm_area_struct *vma_modify_flags(struct vma_iterator *vmi, +__must_check struct vm_area_struct +*vma_modify_flags(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, unsigned long start, unsigned long end, unsigned long new_flags); /* We are about to modify the VMA's flags and/or anon_name. */ -struct vm_area_struct +__must_check struct vm_area_struct *vma_modify_flags_name(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, @@ -190,7 +192,7 @@ struct vm_area_struct struct anon_vma_name *new_name); /* We are about to modify the VMA's memory policy. 
*/ -struct vm_area_struct +__must_check struct vm_area_struct *vma_modify_policy(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, @@ -198,7 +200,7 @@ struct vm_area_struct struct mempolicy *new_pol); /* We are about to modify the VMA's flags and/or uffd context. */ -struct vm_area_struct +__must_check struct vm_area_struct *vma_modify_flags_uffd(struct vma_iterator *vmi, struct vm_area_struct *prev, struct vm_area_struct *vma, @@ -206,11 +208,13 @@ struct vm_area_struct unsigned long new_flags, struct vm_userfaultfd_ctx new_ctx); -struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg); +__must_check struct vm_area_struct +*vma_merge_new_range(struct vma_merge_struct *vmg); -struct vm_area_struct *vma_merge_extend(struct vma_iterator *vmi, - struct vm_area_struct *vma, - unsigned long delta); +__must_check struct vm_area_struct +*vma_merge_extend(struct vma_iterator *vmi, + struct vm_area_struct *vma, + unsigned long delta); void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb); From db532628e27e0b3542ed1959c03f2f0f36d41c66 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 6 Dec 2024 21:28:46 +0000 Subject: [PATCH 098/504] mm: perform all memfd seal checks in a single place We no longer actually need to perform these checks in the f_op->mmap() hook any longer. We already moved the operation which clears VM_MAYWRITE on a read-only mapping of a write-sealed memfd in order to work around the restrictions imposed by commit 5de195060b2e ("mm: resolve faulty mmap_region() error path behaviour"). There is no reason for us not to simply go ahead and additionally check to see if any pre-existing seals are in place here rather than defer this to the f_op->mmap() hook. By doing this we remove more logic from shmem_mmap() which doesn't belong there, as well as doing the same for hugetlbfs_file_mmap(). We also remove dubious shared logic in mm.h which simply does not belong there either. It makes sense to do these checks at the earliest opportunity, we know these are shmem (or hugetlbfs) mappings whose relevant VMA flags will not change from the invoking do_mmap() so there is simply no need to wait. This also means the implementation of further memfd seal flags can be done within mm/memfd.c and also have the opportunity to modify VMA flags as necessary early in the mapping logic. Link: https://lkml.kernel.org/r/20241206212846.210835-1-lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Tested-by: Isaac J. Manjarres Cc: Hugh Dickins Cc: Jann Horn Cc: Kalesh Singh Cc: Liam R. 
Howlett Cc: Muchun Song Cc: Vlastimil Babka Cc: Jeff Xu Signed-off-by: Andrew Morton --- fs/hugetlbfs/inode.c | 5 ---- include/linux/memfd.h | 22 ++++++++--------- include/linux/mm.h | 55 ------------------------------------------- mm/memfd.c | 44 +++++++++++++++++++++++++++++++++- mm/mmap.c | 12 +++++++--- mm/shmem.c | 6 ----- 6 files changed, 62 insertions(+), 82 deletions(-) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index fc1ae5132127..62fb0cbc93ab 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -99,7 +99,6 @@ static const struct fs_parameter_spec hugetlb_fs_parameters[] = { static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file_inode(file); - struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode); loff_t len, vma_len; int ret; struct hstate *h = hstate_file(file); @@ -116,10 +115,6 @@ static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma) vm_flags_set(vma, VM_HUGETLB | VM_DONTEXPAND); vma->vm_ops = &hugetlb_vm_ops; - ret = seal_check_write(info->seals, vma); - if (ret) - return ret; - /* * page based offset in vm_pgoff could be sufficiently large to * overflow a loff_t when converted to byte offset. This can diff --git a/include/linux/memfd.h b/include/linux/memfd.h index d437e3070850..d53408b0bd31 100644 --- a/include/linux/memfd.h +++ b/include/linux/memfd.h @@ -7,7 +7,14 @@ #ifdef CONFIG_MEMFD_CREATE extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned int arg); struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx); -unsigned int *memfd_file_seals_ptr(struct file *file); +/* + * Check for any existing seals on mmap, return an error if access is denied due + * to sealing, or 0 otherwise. + * + * We also update VMA flags if appropriate by manipulating the VMA flags pointed + * to by vm_flags_ptr. + */ +int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags_ptr); #else static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a) { @@ -17,19 +24,10 @@ static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx) { return ERR_PTR(-EINVAL); } - -static inline unsigned int *memfd_file_seals_ptr(struct file *file) +int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags) { - return NULL; + return 0; } #endif -/* Retrieve memfd seals associated with the file, if any. */ -static inline unsigned int memfd_file_seals(struct file *file) -{ - unsigned int *sealsp = memfd_file_seals_ptr(file); - - return sealsp ? *sealsp : 0; -} - #endif /* __LINUX_MEMFD_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index b75eed7f5def..33c8bd1ffeb1 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -4102,61 +4102,6 @@ void mem_dump_obj(void *object); static inline void mem_dump_obj(void *object) {} #endif -static inline bool is_write_sealed(int seals) -{ - return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE); -} - -/** - * is_readonly_sealed - Checks whether write-sealed but mapped read-only, - * in which case writes should be disallowing moving - * forwards. - * @seals: the seals to check - * @vm_flags: the VMA flags to check - * - * Returns whether readonly sealed, in which case writess should be disallowed - * going forward. - */ -static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags) -{ - /* - * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as - * MAP_SHARED and read-only, take care to not allow mprotect to - * revert protections on such mappings. 
Do this only for shared - * mappings. For private mappings, don't need to mask - * VM_MAYWRITE as we still want them to be COW-writable. - */ - if (is_write_sealed(seals) && - ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED)) - return true; - - return false; -} - -/** - * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and - * handle them. - * @seals: the seals to check - * @vma: the vma to operate on - * - * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper - * check/handling on the vma flags. Return 0 if check pass, or <0 for errors. - */ -static inline int seal_check_write(int seals, struct vm_area_struct *vma) -{ - if (!is_write_sealed(seals)) - return 0; - - /* - * New PROT_WRITE and MAP_SHARED mmaps are not allowed when - * write seals are active. - */ - if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE)) - return -EPERM; - - return 0; -} - #ifdef CONFIG_ANON_VMA_NAME int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, unsigned long len_in, diff --git a/mm/memfd.c b/mm/memfd.c index 35a370d75c9a..5f5a23c9051d 100644 --- a/mm/memfd.c +++ b/mm/memfd.c @@ -170,7 +170,7 @@ static int memfd_wait_for_pins(struct address_space *mapping) return error; } -unsigned int *memfd_file_seals_ptr(struct file *file) +static unsigned int *memfd_file_seals_ptr(struct file *file) { if (shmem_file(file)) return &SHMEM_I(file_inode(file))->seals; @@ -327,6 +327,48 @@ static int check_sysctl_memfd_noexec(unsigned int *flags) return 0; } +static inline bool is_write_sealed(unsigned int seals) +{ + return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE); +} + +static int check_write_seal(unsigned long *vm_flags_ptr) +{ + unsigned long vm_flags = *vm_flags_ptr; + unsigned long mask = vm_flags & (VM_SHARED | VM_WRITE); + + /* If a private matting then writability is irrelevant. */ + if (!(mask & VM_SHARED)) + return 0; + + /* + * New PROT_WRITE and MAP_SHARED mmaps are not allowed when + * write seals are active. + */ + if (mask & VM_WRITE) + return -EPERM; + + /* + * This is a read-only mapping, disallow mprotect() from making a + * write-sealed mapping writable in future. + */ + *vm_flags_ptr &= ~VM_MAYWRITE; + + return 0; +} + +int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags_ptr) +{ + int err = 0; + unsigned int *seals_ptr = memfd_file_seals_ptr(file); + unsigned int seals = seals_ptr ? *seals_ptr : 0; + + if (is_write_sealed(seals)) + err = check_write_seal(vm_flags_ptr); + + return err; +} + SYSCALL_DEFINE2(memfd_create, const char __user *, uname, unsigned int, flags) diff --git a/mm/mmap.c b/mm/mmap.c index b373486bd1c6..df9154b15ef9 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -368,8 +368,8 @@ unsigned long do_mmap(struct file *file, unsigned long addr, if (file) { struct inode *inode = file_inode(file); - unsigned int seals = memfd_file_seals(file); unsigned long flags_mask; + int err; if (!file_mmap_ok(file, inode, pgoff, len)) return -EOVERFLOW; @@ -409,8 +409,6 @@ unsigned long do_mmap(struct file *file, unsigned long addr, vm_flags |= VM_SHARED | VM_MAYSHARE; if (!(file->f_mode & FMODE_WRITE)) vm_flags &= ~(VM_MAYWRITE | VM_SHARED); - else if (is_readonly_sealed(seals, vm_flags)) - vm_flags &= ~VM_MAYWRITE; fallthrough; case MAP_PRIVATE: if (!(file->f_mode & FMODE_READ)) @@ -430,6 +428,14 @@ unsigned long do_mmap(struct file *file, unsigned long addr, default: return -EINVAL; } + + /* + * Check to see if we are violating any seals and update VMA + * flags if necessary to avoid future seal violations. 
+ */ + err = memfd_check_seals_mmap(file, &vm_flags); + if (err) + return (unsigned long)err; } else { switch (flags & MAP_TYPE) { case MAP_SHARED: diff --git a/mm/shmem.c b/mm/shmem.c index b562a040d4e3..a687ed3404ff 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2816,12 +2816,6 @@ out_nomem: static int shmem_mmap(struct file *file, struct vm_area_struct *vma) { struct inode *inode = file_inode(file); - struct shmem_inode_info *info = SHMEM_I(inode); - int ret; - - ret = seal_check_write(info->seals, vma); - if (ret) - return ret; file_accessed(file); /* This is anonymous shared memory if it is unlinked at the time of mmap */ From a540dc30bae59e96428bc91b25f1a0c9dc3372ab Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 9 Dec 2024 11:04:08 +0000 Subject: [PATCH 099/504] mm: fix typos in !memfd inline stub I typo'd the declaration of memfd_check_seals_mmap() in the case where CONFIG_MEMFD_CREATE is not defined, resulting in build failures. Fix this, and correct the misspelling of vm_flags which should be vm_flags_ptr at the same time. Link: https://lkml.kernel.org/r/7dee6c5d-480b-4c24-b98e-6fa47dbd8a23@lucifer.local Signed-off-by: Lorenzo Stoakes Signed-off-by: Andrew Morton --- include/linux/memfd.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/include/linux/memfd.h b/include/linux/memfd.h index d53408b0bd31..246daadbfde8 100644 --- a/include/linux/memfd.h +++ b/include/linux/memfd.h @@ -24,7 +24,8 @@ static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx) { return ERR_PTR(-EINVAL); } -int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags) +static inline int memfd_check_seals_mmap(struct file *file, + unsigned long *vm_flags_ptr) { return 0; } From 09a3762697e810e311af23d10d79587da440e9dd Mon Sep 17 00:00:00 2001 From: Guillaume Morin Date: Fri, 6 Dec 2024 22:28:36 +0100 Subject: [PATCH 100/504] mm/hugetlb: support FOLL_FORCE|FOLL_WRITE Eric reported that PTRACE_POKETEXT fails when applications use hugetlb for mapping text using huge pages. Before commit 1d8d14641fd9 ("mm/hugetlb: support write-faults in shared mappings"), PTRACE_POKETEXT worked by accident, but it was buggy and silently ended up mapping pages writable into the page tables even though VM_WRITE was not set. In general, FOLL_FORCE|FOLL_WRITE does currently not work with hugetlb. Let's implement FOLL_FORCE|FOLL_WRITE properly for hugetlb, such that what used to work in the past by accident now properly works, allowing applications using hugetlb for text etc. to get properly debugged. This change might also be required to implement uprobes support for hugetlb [1]. [1] https://lore.kernel.org/lkml/ZiK50qob9yl5e0Xz@bender.morinfr.org/ Link: https://lkml.kernel.org/r/Z1NshNfWuzUCPebA@bender.morinfr.org Signed-off-by: Guillaume Morin Cc: Muchun Song Cc: Andrew Morton Cc: Peter Xu Cc: David Hildenbrand Cc: Eric Hagberg Signed-off-by: Andrew Morton --- mm/gup.c | 91 +++++++++++++++++++++++++--------------------------- mm/hugetlb.c | 17 +++++----- 2 files changed, 53 insertions(+), 55 deletions(-) diff --git a/mm/gup.c b/mm/gup.c index 3b75e631f369..00a1269cbee0 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -596,6 +596,33 @@ static struct folio *try_grab_folio_fast(struct page *page, int refs, } #endif /* CONFIG_HAVE_GUP_FAST */ +/* Common code for can_follow_write_* */ +static inline bool can_follow_write_common(struct page *page, + struct vm_area_struct *vma, unsigned int flags) +{ + /* Maybe FOLL_FORCE is set to override it? 
*/ + if (!(flags & FOLL_FORCE)) + return false; + + /* But FOLL_FORCE has no effect on shared mappings */ + if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) + return false; + + /* ... or read-only private ones */ + if (!(vma->vm_flags & VM_MAYWRITE)) + return false; + + /* ... or already writable ones that just need to take a write fault */ + if (vma->vm_flags & VM_WRITE) + return false; + + /* + * See can_change_pte_writable(): we broke COW and could map the page + * writable if we have an exclusive anonymous page ... + */ + return page && PageAnon(page) && PageAnonExclusive(page); +} + static struct page *no_page_table(struct vm_area_struct *vma, unsigned int flags, unsigned long address) { @@ -622,6 +649,18 @@ static struct page *no_page_table(struct vm_area_struct *vma, } #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES +/* FOLL_FORCE can write to even unwritable PUDs in COW mappings. */ +static inline bool can_follow_write_pud(pud_t pud, struct page *page, + struct vm_area_struct *vma, + unsigned int flags) +{ + /* If the pud is writable, we can write to the page. */ + if (pud_write(pud)) + return true; + + return can_follow_write_common(page, vma, flags); +} + static struct page *follow_huge_pud(struct vm_area_struct *vma, unsigned long addr, pud_t *pudp, int flags, struct follow_page_context *ctx) @@ -634,10 +673,11 @@ static struct page *follow_huge_pud(struct vm_area_struct *vma, assert_spin_locked(pud_lockptr(mm, pudp)); - if ((flags & FOLL_WRITE) && !pud_write(pud)) + if (!pud_present(pud)) return NULL; - if (!pud_present(pud)) + if ((flags & FOLL_WRITE) && + !can_follow_write_pud(pud, pfn_to_page(pfn), vma, flags)) return NULL; pfn += (addr & ~PUD_MASK) >> PAGE_SHIFT; @@ -686,27 +726,7 @@ static inline bool can_follow_write_pmd(pmd_t pmd, struct page *page, if (pmd_write(pmd)) return true; - /* Maybe FOLL_FORCE is set to override it? */ - if (!(flags & FOLL_FORCE)) - return false; - - /* But FOLL_FORCE has no effect on shared mappings */ - if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) - return false; - - /* ... or read-only private ones */ - if (!(vma->vm_flags & VM_MAYWRITE)) - return false; - - /* ... or already writable ones that just need to take a write fault */ - if (vma->vm_flags & VM_WRITE) - return false; - - /* - * See can_change_pte_writable(): we broke COW and could map the page - * writable if we have an exclusive anonymous page ... - */ - if (!page || !PageAnon(page) || !PageAnonExclusive(page)) + if (!can_follow_write_common(page, vma, flags)) return false; /* ... and a write-fault isn't required for other reasons. */ @@ -807,27 +827,7 @@ static inline bool can_follow_write_pte(pte_t pte, struct page *page, if (pte_write(pte)) return true; - /* Maybe FOLL_FORCE is set to override it? */ - if (!(flags & FOLL_FORCE)) - return false; - - /* But FOLL_FORCE has no effect on shared mappings */ - if (vma->vm_flags & (VM_MAYSHARE | VM_SHARED)) - return false; - - /* ... or read-only private ones */ - if (!(vma->vm_flags & VM_MAYWRITE)) - return false; - - /* ... or already writable ones that just need to take a write fault */ - if (vma->vm_flags & VM_WRITE) - return false; - - /* - * See can_change_pte_writable(): we broke COW and could map the page - * writable if we have an exclusive anonymous page ... - */ - if (!page || !PageAnon(page) || !PageAnonExclusive(page)) + if (!can_follow_write_common(page, vma, flags)) return false; /* ... and a write-fault isn't required for other reasons. 
*/ @@ -1294,9 +1294,6 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) if (!(vm_flags & VM_WRITE) || (vm_flags & VM_SHADOW_STACK)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; - /* hugetlb does not support FOLL_FORCE|FOLL_WRITE. */ - if (is_vm_hugetlb_page(vma)) - return -EFAULT; /* * We used to let the write,force case do COW in a * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could diff --git a/mm/hugetlb.c b/mm/hugetlb.c index c9d8c6a1c03c..21de25546a25 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5183,6 +5183,13 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma, update_mmu_cache(vma, address, ptep); } +static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + if (vma->vm_flags & VM_WRITE) + set_huge_ptep_writable(vma, address, ptep); +} + bool is_hugetlb_entry_migration(pte_t pte) { swp_entry_t swp; @@ -5828,13 +5835,6 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio, if (!unshare && huge_pte_uffd_wp(pte)) return 0; - /* - * hugetlb does not support FOLL_FORCE-style write faults that keep the - * PTE mapped R/O such as maybe_mkwrite() would do. - */ - if (WARN_ON_ONCE(!unshare && !(vma->vm_flags & VM_WRITE))) - return VM_FAULT_SIGSEGV; - /* Let's take out MAP_SHARED mappings first. */ if (vma->vm_flags & VM_MAYSHARE) { set_huge_ptep_writable(vma, vmf->address, vmf->pte); @@ -5863,7 +5863,8 @@ retry_avoidcopy: SetPageAnonExclusive(&old_folio->page); } if (likely(!unshare)) - set_huge_ptep_writable(vma, vmf->address, vmf->pte); + set_huge_ptep_maybe_writable(vma, vmf->address, + vmf->pte); delayacct_wpcopy_end(); return 0; From eea14834b00df15b87719e21171bb44e802839a8 Mon Sep 17 00:00:00 2001 From: Jeff Xu Date: Fri, 6 Dec 2024 19:48:39 +0000 Subject: [PATCH 101/504] mseal: remove can_do_mseal() No code logic change. can_do_mseal() is called exclusively by mseal.c, and mseal.c is compiled only when CONFIG_64BIT flag is set in makefile. Therefore, it is unnecessary to have 32 bit stub function in the header file, remove this function and merge the logic into do_mseal(). Link: https://lkml.kernel.org/r/20241206013934.2782793-1-jeffxu@google.com Link: https://lkml.kernel.org/r/20241206194839.3030596-2-jeffxu@google.com Signed-off-by: Jeff Xu Reviewed-by: Lorenzo Stoakes Cc: Jorge Lucangeli Obes Cc: Kees Cook Cc: Liam R. 
Howlett Cc: Pedro Falcato Cc: Randy Dunlap Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/internal.h | 16 ---------------- mm/mseal.c | 6 +++--- 2 files changed, 3 insertions(+), 19 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index b438d35045de..4d4028d74e5d 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1451,22 +1451,6 @@ void __meminit __init_single_page(struct page *page, unsigned long pfn, unsigned long shrink_slab(gfp_t gfp_mask, int nid, struct mem_cgroup *memcg, int priority); -#ifdef CONFIG_64BIT -static inline int can_do_mseal(unsigned long flags) -{ - if (flags) - return -EINVAL; - - return 0; -} - -#else -static inline int can_do_mseal(unsigned long flags) -{ - return -EPERM; -} -#endif - #ifdef CONFIG_SHRINKER_DEBUG static inline __printf(2, 0) int shrinker_debugfs_name_alloc( struct shrinker *shrinker, const char *fmt, va_list ap) diff --git a/mm/mseal.c b/mm/mseal.c index 81d6e980e8a9..c27197ac04e8 100644 --- a/mm/mseal.c +++ b/mm/mseal.c @@ -217,9 +217,9 @@ int do_mseal(unsigned long start, size_t len_in, unsigned long flags) unsigned long end; struct mm_struct *mm = current->mm; - ret = can_do_mseal(flags); - if (ret) - return ret; + /* Verify flags not set. */ + if (flags) + return -EINVAL; start = untagged_addr(start); if (!PAGE_ALIGNED(start)) From 8e7ff02bc7f4806c13c83cbfc65e40df2ea5953f Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Mon, 9 Dec 2024 23:56:21 +0500 Subject: [PATCH 102/504] selftests/mm: thp_settings: remove const from return type Patch series "selftest/mm: Remove warnings found by adding compiler flags". Recently, I reviewed a patch on the mm/kselftest mailing list about a test which had an obvious type-mismatch fix in it. It was strange that this wasn't caught during development or when the patch was accepted. This led me to discover that the extra compiler options needed to catch these warnings aren't being used. When I added them, I found tens of warnings in just the mm suite. In this series, I'm fixing those warnings in a few files. More fixes will be sent later.
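For a concrete picture of the kind of warning involved, here is a hypothetical minimal reproducer (not taken from the selftests) for one of them, -Wignored-qualifiers, which -Wextra enables: a const qualifier on a by-value return type means nothing to the caller, so the compiler flags it.

/* Hypothetical reproducer; compile with: gcc -Wextra -c example.c */
const unsigned long read_num_bad(void)	/* "type qualifiers ignored on function return type" */
{
	return 42;
}

unsigned long read_num_good(void)	/* identical behaviour, no warning */
{
	return 42;
}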
This patch (of 4): Remove const from the return type as it is ignored anyway and generates the warning: warning: type qualifiers ignored on function return type [-Wignored-qualifiers] Link: https://lkml.kernel.org/r/20241209185624.2245158-1-usama.anjum@collabora.com Link: https://lkml.kernel.org/r/20241209185624.2245158-2-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: David Hildenbrand Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/thp_settings.c | 4 ++-- tools/testing/selftests/mm/thp_settings.h | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/mm/thp_settings.c b/tools/testing/selftests/mm/thp_settings.c index 577eaab6266f..ad872af1c81a 100644 --- a/tools/testing/selftests/mm/thp_settings.c +++ b/tools/testing/selftests/mm/thp_settings.c @@ -87,7 +87,7 @@ int write_file(const char *path, const char *buf, size_t buflen) return (unsigned int) numwritten; } -const unsigned long read_num(const char *path) +unsigned long read_num(const char *path) { char buf[21]; @@ -172,7 +172,7 @@ void thp_write_string(const char *name, const char *val) } } -const unsigned long thp_read_num(const char *name) +unsigned long thp_read_num(const char *name) { char path[PATH_MAX]; int ret; diff --git a/tools/testing/selftests/mm/thp_settings.h b/tools/testing/selftests/mm/thp_settings.h index 876235a23460..fc131d23d593 100644 --- a/tools/testing/selftests/mm/thp_settings.h +++ b/tools/testing/selftests/mm/thp_settings.h @@ -64,12 +64,12 @@ struct thp_settings { int read_file(const char *path, char *buf, size_t buflen); int write_file(const char *path, const char *buf, size_t buflen); -const unsigned long read_num(const char *path); +unsigned long read_num(const char *path); void write_num(const char *path, unsigned long num); int thp_read_string(const char *name, const char * const strings[]); void thp_write_string(const char *name, const char *val); -const unsigned long thp_read_num(const char *name); +unsigned long thp_read_num(const char *name); void thp_write_num(const char *name, unsigned long num); void thp_write_settings(struct thp_settings *settings); From 3517ced10f29ffec12bc21c6fda697d271421dca Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Mon, 9 Dec 2024 23:56:22 +0500 Subject: [PATCH 103/504] selftests/mm: pagemap_ioctl: Fix types mismatches shown by compiler options Fix the following warnings caught by the compiler: - There are several type mismatches among different variables (a minimal reproducer follows below). - Remove unused variable warnings.
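A hypothetical minimal reproducer for the type mismatches mentioned above (the actual fix is the int-to-long and %d-to-%ld conversions in the diff that follows):

/* Hypothetical reproducer; compile with: gcc -Wall -c example.c */
#include <stdio.h>

static void report(long ret)
{
	printf("error %d\n", ret);	/* warns: '%d' expects int, ret is long */
	printf("error %ld\n", ret);	/* matching specifier, as in the fix */
}

int main(void)
{
	report(-1);
	return 0;
}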
Link: https://lkml.kernel.org/r/20241209185624.2245158-3-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pagemap_ioctl.c | 108 +++++++++++---------- tools/testing/selftests/mm/vm_util.c | 2 +- 2 files changed, 59 insertions(+), 51 deletions(-) diff --git a/tools/testing/selftests/mm/pagemap_ioctl.c b/tools/testing/selftests/mm/pagemap_ioctl.c index fdafce0654e9..57b4bba2b45f 100644 --- a/tools/testing/selftests/mm/pagemap_ioctl.c +++ b/tools/testing/selftests/mm/pagemap_ioctl.c @@ -34,8 +34,8 @@ #define PAGEMAP "/proc/self/pagemap" int pagemap_fd; int uffd; -int page_size; -int hpage_size; +unsigned int page_size; +unsigned int hpage_size; const char *progname; #define LEN(region) ((region.end - region.start)/page_size) @@ -235,7 +235,9 @@ int get_reads(struct page_region *vec, int vec_size) int sanity_tests_sd(void) { - int mem_size, vec_size, ret, ret2, ret3, i, num_pages = 1000, total_pages = 0; + unsigned long long mem_size, vec_size, i, total_pages = 0; + long ret, ret2, ret3; + int num_pages = 1000; int total_writes, total_reads, reads, count; struct page_region *vec, *vec2; char *mem, *m[2]; @@ -321,9 +323,9 @@ int sanity_tests_sd(void) ret = pagemap_ioctl(mem, mem_size, vec, vec_size, 0, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); - ksft_test_result(ret == mem_size/(page_size * 2), + ksft_test_result((unsigned long long)ret == mem_size/(page_size * 2), "%s Repeated pattern of written and non-written pages\n", __func__); /* 4. Repeated pattern of written and non-written pages in parts */ @@ -331,21 +333,21 @@ int sanity_tests_sd(void) PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, num_pages/2 - 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ret2 = pagemap_ioctl(mem, mem_size, vec, 2, 0, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); if (ret2 < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret2, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret2, errno, strerror(errno)); ret3 = pagemap_ioctl(mem, mem_size, vec, vec_size, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); if (ret3 < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret3, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret3, errno, strerror(errno)); ksft_test_result((ret + ret3) == num_pages/2 && ret2 == 2, - "%s Repeated pattern of written and non-written pages in parts %d %d %d\n", + "%s Repeated pattern of written and non-written pages in parts %ld %ld %ld\n", __func__, ret, ret3, ret2); /* 5. 
Repeated pattern of written and non-written pages max_pages */ @@ -357,13 +359,13 @@ int sanity_tests_sd(void) PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, num_pages/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ret2 = pagemap_ioctl(mem, mem_size, vec, vec_size, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); if (ret2 < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret2, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret2, errno, strerror(errno)); ksft_test_result(ret == num_pages/2 && ret2 == 1, "%s Repeated pattern of written and non-written pages max_pages\n", @@ -378,12 +380,12 @@ int sanity_tests_sd(void) PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ret2 = pagemap_ioctl(mem, mem_size, vec2, vec_size, 0, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); if (ret2 < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret2, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret2, errno, strerror(errno)); ksft_test_result(ret == 1 && LEN(vec[0]) == 2 && vec[0].start == (uintptr_t)(mem + page_size) && @@ -416,7 +418,7 @@ int sanity_tests_sd(void) ret = pagemap_ioctl(m[1], mem_size, vec, 1, 0, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ksft_test_result(ret == 1 && LEN(vec[0]) == mem_size/page_size, "%s Two regions\n", __func__); @@ -448,7 +450,7 @@ int sanity_tests_sd(void) PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); for (i = 0; i < mem_size/page_size; i += 2) mem[i * page_size]++; @@ -457,7 +459,7 @@ int sanity_tests_sd(void) PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); total_pages += ret; @@ -465,7 +467,7 @@ int sanity_tests_sd(void) PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); total_pages += ret; @@ -473,7 +475,7 @@ int sanity_tests_sd(void) PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, mem_size/(page_size*5), PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); total_pages += ret; @@ -515,9 +517,9 @@ int sanity_tests_sd(void) vec_size, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); - if (ret > vec_size) + if ((unsigned long)ret > vec_size) break; 
reads = get_reads(vec, ret); @@ -554,63 +556,63 @@ int sanity_tests_sd(void) ret = pagemap_ioc(mem, 0, vec, vec_size, 0, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ksft_test_result(ret == 0 && walk_end == (long)mem, "Walk_end: Same start and end address\n"); ret = pagemap_ioc(mem, 0, vec, vec_size, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ksft_test_result(ret == 0 && walk_end == (long)mem, "Walk_end: Same start and end with WP\n"); ret = pagemap_ioc(mem, 0, vec, 0, PM_SCAN_WP_MATCHING | PM_SCAN_CHECK_WPASYNC, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ksft_test_result(ret == 0 && walk_end == (long)mem, "Walk_end: Same start and end with 0 output buffer\n"); ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size), "Walk_end: Big vec\n"); ret = pagemap_ioc(mem, mem_size, vec, 1, 0, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size), "Walk_end: vec of minimum length\n"); ret = pagemap_ioc(mem, mem_size, vec, 1, 0, vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size), "Walk_end: Max pages specified\n"); ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, vec_size/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size/2), "Walk_end: Half max pages\n"); ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, 1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size), "Walk_end: 1 max page\n"); ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, -1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ksft_test_result(ret == 1 && walk_end == (long)(mem + mem_size), "Walk_end: max pages\n"); @@ -621,49 +623,49 @@ int sanity_tests_sd(void) ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - 
ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); - ksft_test_result(ret == vec_size/2 && walk_end == (long)(mem + mem_size), + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); + ksft_test_result((unsigned long)ret == vec_size/2 && walk_end == (long)(mem + mem_size), "Walk_end sparse: Big vec\n"); ret = pagemap_ioc(mem, mem_size, vec, 1, 0, 0, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2), "Walk_end sparse: vec of minimum length\n"); ret = pagemap_ioc(mem, mem_size, vec, 1, 0, vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2), "Walk_end sparse: Max pages specified\n"); ret = pagemap_ioc(mem, mem_size, vec, vec_size/2, 0, vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); - ksft_test_result(ret == vec_size/2 && walk_end == (long)(mem + mem_size), + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); + ksft_test_result((unsigned long)ret == vec_size/2 && walk_end == (long)(mem + mem_size), "Walk_end sparse: Max pages specified\n"); ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, vec_size, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); - ksft_test_result(ret == vec_size/2 && walk_end == (long)(mem + mem_size), + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); + ksft_test_result((unsigned long)ret == vec_size/2 && walk_end == (long)(mem + mem_size), "Walk_end sparse: Max pages specified\n"); ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, vec_size/2, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); - ksft_test_result(ret == vec_size/2 && walk_end == (long)(mem + mem_size), + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); + ksft_test_result((unsigned long)ret == vec_size/2 && walk_end == (long)(mem + mem_size), "Walk_endsparse : Half max pages\n"); ret = pagemap_ioc(mem, mem_size, vec, vec_size, 0, 1, PAGE_IS_WRITTEN, 0, 0, PAGE_IS_WRITTEN, &walk_end); if (ret < 0) - ksft_exit_fail_msg("error %d %d %s\n", ret, errno, strerror(errno)); + ksft_exit_fail_msg("error %ld %d %s\n", ret, errno, strerror(errno)); ksft_test_result(ret == 1 && walk_end == (long)(mem + page_size * 2), "Walk_end: 1 max page\n"); @@ -674,9 +676,10 @@ int sanity_tests_sd(void) return 0; } -int base_tests(char *prefix, char *mem, int mem_size, int skip) +int base_tests(char *prefix, char *mem, unsigned long long mem_size, int skip) { - int vec_size, written; + unsigned long long vec_size; + int written; struct page_region *vec, *vec2; if (skip) { @@ -799,8 +802,8 @@ int hpage_unit_tests(void) char *map; int ret, ret2; size_t num_pages = 10; - int map_size = hpage_size * num_pages; - int vec_size = map_size/page_size; + unsigned long long map_size = hpage_size * num_pages; + unsigned long long vec_size = map_size/page_size; struct page_region *vec, *vec2; vec = malloc(sizeof(struct page_region) 
* vec_size); @@ -1047,7 +1050,8 @@ static void test_simple(void) int sanity_tests(void) { - int mem_size, vec_size, ret, fd, i, buf_size; + unsigned long long mem_size, vec_size; + int ret, fd, i, buf_size; struct page_region *vec; char *mem, *fmem; struct stat sbuf; @@ -1312,7 +1316,9 @@ static ssize_t get_dirty_pages_reset(char *mem, unsigned int count, { struct pm_scan_arg arg = {0}; struct page_region rgns[256]; - int i, j, cnt, ret; + unsigned long long i, j; + long ret; + int cnt; arg.size = sizeof(struct pm_scan_arg); arg.start = (uintptr_t)mem; @@ -1330,7 +1336,7 @@ static ssize_t get_dirty_pages_reset(char *mem, unsigned int count, ksft_exit_fail_msg("ioctl failed\n"); cnt = 0; - for (i = 0; i < ret; ++i) { + for (i = 0; i < (unsigned long)ret; ++i) { if (rgns[i].categories != PAGE_IS_WRITTEN) ksft_exit_fail_msg("wrong flags\n"); @@ -1384,9 +1390,10 @@ void *thread_proc(void *mem) static void transact_test(int page_size) { unsigned int i, count, extra_pages; + unsigned int c; pthread_t th; char *mem; - int ret, c; + int ret; if (pthread_barrier_init(&start_barrier, NULL, nthreads + 1)) ksft_exit_fail_msg("pthread_barrier_init\n"); @@ -1473,9 +1480,10 @@ static void transact_test(int page_size) extra_thread_faults); } -int main(int argc, char *argv[]) +int main(int __attribute__((unused)) argc, char *argv[]) { - int mem_size, shmid, buf_size, fd, i, ret; + int shmid, buf_size, fd, i, ret; + unsigned long long mem_size; char *mem, *map, *fmem; struct stat sbuf; diff --git a/tools/testing/selftests/mm/vm_util.c b/tools/testing/selftests/mm/vm_util.c index d8d0cf04bb57..7519c9a892f0 100644 --- a/tools/testing/selftests/mm/vm_util.c +++ b/tools/testing/selftests/mm/vm_util.c @@ -138,7 +138,7 @@ void clear_softdirty(void) ksft_exit_fail_msg("opening clear_refs failed\n"); ret = write(fd, ctrl, strlen(ctrl)); close(fd); - if (ret != strlen(ctrl)) + if (ret != (signed int)strlen(ctrl)) ksft_exit_fail_msg("writing clear_refs failed\n"); } From 4f741bfff54ea349e81c84c30252ad953310cdf5 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Mon, 9 Dec 2024 23:56:23 +0500 Subject: [PATCH 104/504] selftests/mm: mseal_test: remove unused variables Fix following warnings: - Remove unused variables and fix following warnings: Link: https://lkml.kernel.org/r/20241209185624.2245158-4-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/mseal_test.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/mm/mseal_test.c b/tools/testing/selftests/mm/mseal_test.c index 01675c412b2a..ad17005521a8 100644 --- a/tools/testing/selftests/mm/mseal_test.c +++ b/tools/testing/selftests/mm/mseal_test.c @@ -802,7 +802,7 @@ static void test_seal_mprotect_partial_mprotect_tail(bool seal) } -static void test_seal_mprotect_two_vma_with_gap(bool seal) +static void test_seal_mprotect_two_vma_with_gap(void) { void *ptr; unsigned long page_size = getpagesize(); @@ -1864,7 +1864,7 @@ static void test_seal_madvise_nodiscard(bool seal) REPORT_TEST_PASS(); } -int main(int argc, char **argv) +int main(void) { bool test_seal = seal_support(); @@ -1913,8 +1913,8 @@ int main(int argc, char **argv) test_seal_mprotect_partial_mprotect(false); test_seal_mprotect_partial_mprotect(true); - test_seal_mprotect_two_vma_with_gap(false); - test_seal_mprotect_two_vma_with_gap(true); + test_seal_mprotect_two_vma_with_gap(); + test_seal_mprotect_two_vma_with_gap(); test_seal_mprotect_merge(false); test_seal_mprotect_merge(true); 
From 680d396b5e442f64ee2cadb5b5a45ba392d9d7e7 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Mon, 9 Dec 2024 23:56:24 +0500 Subject: [PATCH 105/504] selftests/mm: mremap_test: Remove unused variable and type mismatches Remove unused variable and fix type mismatches. Link: https://lkml.kernel.org/r/20241209185624.2245158-5-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/mremap_test.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/mm/mremap_test.c b/tools/testing/selftests/mm/mremap_test.c index 5a3a9bcba640..d207a52f2b5b 100644 --- a/tools/testing/selftests/mm/mremap_test.c +++ b/tools/testing/selftests/mm/mremap_test.c @@ -34,7 +34,7 @@ struct config { unsigned long long dest_alignment; unsigned long long region_size; int overlapping; - int dest_preamble_size; + unsigned int dest_preamble_size; }; struct test { @@ -328,7 +328,7 @@ static void mremap_move_within_range(unsigned int pattern_seed, char *rand_addr) { char *test_name = "mremap mremap move within range"; void *src, *dest; - int i, success = 1; + unsigned int i, success = 1; size_t size = SIZE_MB(20); void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, @@ -569,7 +569,7 @@ static void mremap_move_1mb_from_start(unsigned int pattern_seed, { char *test_name = "mremap move 1mb from start at 1MB+256KB aligned src"; void *src = NULL, *dest = NULL; - int i, success = 1; + unsigned int i, success = 1; /* Config to reuse get_source_mapping() to do an aligned mmap. */ struct config c = { @@ -636,7 +636,7 @@ out: static void run_mremap_test_case(struct test test_case, int *failures, unsigned int threshold_mb, - unsigned int pattern_seed, char *rand_addr) + char *rand_addr) { long long remap_time = remap_region(test_case.config, threshold_mb, rand_addr); @@ -708,7 +708,8 @@ static int parse_args(int argc, char **argv, unsigned int *threshold_mb, int main(int argc, char **argv) { int failures = 0; - int i, run_perf_tests; + unsigned int i; + int run_perf_tests; unsigned int threshold_mb = VALIDATION_DEFAULT_THRESHOLD; /* hard-coded test configs */ @@ -831,7 +832,7 @@ int main(int argc, char **argv) for (i = 0; i < ARRAY_SIZE(test_cases); i++) run_mremap_test_case(test_cases[i], &failures, threshold_mb, - pattern_seed, rand_addr); + rand_addr); maps_fp = fopen("/proc/self/maps", "r"); @@ -853,7 +854,7 @@ int main(int argc, char **argv) "mremap HAVE_MOVE_PMD/PUD optimization time comparison for 1GB region:"); for (i = 0; i < ARRAY_SIZE(perf_test_cases); i++) run_mremap_test_case(perf_test_cases[i], &failures, - threshold_mb, pattern_seed, + threshold_mb, rand_addr); } From 0b54c6f18c24d829b859f754144d8a056c05ace7 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 5 Dec 2024 10:05:07 +0100 Subject: [PATCH 106/504] mm/page_alloc: don't use __GFP_HARDWALL when migrating pages via alloc_contig*() Patch series "mm: don't use __GFP_HARDWALL when migrating remote pages". __GFP_HARDWALL means that we will be respecting the cpuset of the caller when allocating a page. However, when we are migrating remote allocations (pages allocated from other context), the cpuset of the current context is irrelevant. For memory offlining + alloc_contig_*(), this is rather obvious. There might be other such page migration users, let's start with the obvious ones. 
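For readers unfamiliar with the flag, a rough sketch of the intent (illustrative only; the real masks are in the diffs below): GFP_USER is essentially GFP_KERNEL plus __GFP_HARDWALL, so the choice of mask decides whether the migration target must come from the calling task's cpuset:

  /* illustrative helper, not part of the patch: compose a migration-target
   * mask with or without cpuset enforcement (kernel context assumed) */
  static gfp_t migration_target_gfp(bool respect_callers_cpuset)
  {
          gfp_t gfp = __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;

          /* GFP_USER == GFP_KERNEL | __GFP_HARDWALL */
          return gfp | (respect_callers_cpuset ? GFP_USER : GFP_KERNEL);
  }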
This patch (of 2): We'll migrate pages allocated by other contexts; respecting the cpuset of the alloc_contig*() caller when allocating a migration target does not make sense. Drop the __GFP_HARDWALL. Note that in an ideal world, migration code could figure out the cpuset of the original context and take that into consideration. Link: https://lkml.kernel.org/r/20241205090508.2095225-1-david@redhat.com Link: https://lkml.kernel.org/r/20241205090508.2095225-2-david@redhat.com Signed-off-by: David Hildenbrand Suggested-by: Vlastimil Babka Reviewed-by: Vlastimil Babka Reviewed-by: Oscar Salvador Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/page_alloc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 03b8938aa376..681a6fa7eaa8 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6415,11 +6415,11 @@ static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask) * page range. Migratable pages are movable, __GFP_MOVABLE is implied * for them. * - * Traditionally we always had __GFP_HARDWALL|__GFP_RETRY_MAYFAIL set, - * keep doing that to not degrade callers. + * Traditionally we always had __GFP_RETRY_MAYFAIL set, keep doing that + * to not degrade callers. */ *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) | - __GFP_HARDWALL | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; + __GFP_MOVABLE | __GFP_RETRY_MAYFAIL; return 0; } From beac5a2742bb063d87eac087f7cebba5d5dcfb2b Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Thu, 5 Dec 2024 10:05:08 +0100 Subject: [PATCH 107/504] mm/memory_hotplug: don't use __GFP_HARDWALL when migrating pages via memory offlining We'll migrate pages allocated by other context; respecting the cpuset of the memory offlining context when allocating a migration target does not make sense. Drop the __GFP_HARDWALL by using GFP_KERNEL. Note that in an ideal world, migration code could figure out the cpuset of the original context and take that into consideration. Link: https://lkml.kernel.org/r/20241205090508.2095225-3-david@redhat.com Signed-off-by: David Hildenbrand Suggested-by: Vlastimil Babka Reviewed-by: Vlastimil Babka Acked-by: Oscar Salvador Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/memory_hotplug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 5f497ccf473d..3b6f93962481 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1838,7 +1838,7 @@ put_folio: nodemask_t nmask = node_states[N_MEMORY]; struct migration_target_control mtc = { .nmask = &nmask, - .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, + .gfp_mask = GFP_KERNEL | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, .reason = MR_MEMORY_HOTPLUG, }; int ret; From 0fb948f18e5fd88c87777a82b50c574573e7b3b6 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:06 +0000 Subject: [PATCH 108/504] selftests/mm: fix condition in uffd_move_test_common() Patch series "pkeys kselftests improvements". This series brings various cleanups and fixes for the mm (mostly pkeys) kselftests. The original goal was to make the pkeys tests work out of the box and without build warning - it turned out to be more involved than expected. The most important change is enabling -O2 when building all mm kselftests (patch 5). This is actually needed for the pkeys tests to run successfully (see gcc command line at the top of protection_keys.c and pkey_sighandler_tests.c), and seems to have no negative impact on the other tests. 
It certainly can't hurt performance! The following patches address a few obvious issues in the pkeys tests (unused code, bad scope for functions/variables, etc.) and finally make a couple of small improvements. There is one ugliness that this series does not fix: some functions in pkey-.h call functions that are actually defined in protection_keys.c. For instance, expect_fault_on_read_execonly_key() in pkey-x86.h calls expected_pkey_fault(). This means that other test programs that use pkey-helpers.h (namely pkey_sighandler_tests) would fail to link if they called such functions defined in pkey-.h. Fixing this would require a more comprehensive reorganisation of the pkey-* headers, which doesn't seem worth it (patch 9 adds a comment to pkey-helpers.h to clarify the situation). Some more details on the patches: - Patch 1 is an unrelated fix that was revealed by inspecting a warning. It seems fairly harmless though, so I thought I'd just post it as part of this series. - Patch 2-5 fix various warnings that come up by building the mm tests at -O2 and finally enable -O2. - Patch 6-12 are various cleanups for the pkeys tests. Patch 11 in particular enables is_pkeys_supported() to be called from outside protection_keys.c (patch 13 relies on this). - Patch 13-14 are small improvements to pkey_sighandler_tests.c. Many thanks to Ryan Roberts for checking that the mm tests still run fine on arm64 with those patches applied. I've also checked that the pkeys tests run fine on arm64 and x86. This patch (of 14): area_src and area_dst are saved at the beginning of the function if chunk_size > page_size. The intention is quite clearly to restore them at the end based on the same condition, but step_size is considered instead of chunk_size. Considering that step_size is a number of pages, the condition is likely to be false. Use the same condition as when saving so that the globals are restored as intended. Link: https://lkml.kernel.org/r/20241209095019.1732120-1-kevin.brodsky@arm.com Link: https://lkml.kernel.org/r/20241209095019.1732120-2-kevin.brodsky@arm.com Fixes: a2bf6a9ca805 ("selftests/mm: add UFFDIO_MOVE ioctl test") Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-unit-tests.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c index a2e71b1636e7..74c884713bf7 100644 --- a/tools/testing/selftests/mm/uffd-unit-tests.c +++ b/tools/testing/selftests/mm/uffd-unit-tests.c @@ -1190,7 +1190,7 @@ uffd_move_test_common(uffd_test_args_t *targs, unsigned long chunk_size, nr, count, count_verify[src_offs + nr + i]); } } - if (step_size > page_size) { + if (chunk_size > page_size) { area_src = orig_area_src; area_dst = orig_area_dst; } From 9bbcea717620cbfc04664a7fad0294028b95732e Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:07 +0000 Subject: [PATCH 109/504] selftests/mm: fix -Wmaybe-uninitialized warnings A few -Wmaybe-uninitialized warnings show up when building the mm tests with -O2. None of them looks worrying; silence them by initialising the problematic variables. 
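As a minimal illustration of the pattern (a standalone sketch, not one of the actual warning sites): a variable that is only assigned on some paths can trigger -Wmaybe-uninitialized once GCC optimises, and giving it an initial value is the simple fix applied here:

  /* build with: gcc -Wall -O2 maybe_uninit.c */
  #include <stdio.h>

  int main(int argc, char **argv)
  {
          int fd = 0;     /* initialised up front; without "= 0" GCC at -O2
                           * may warn that fd is used uninitialised */

          if (argc > 1)
                  fd = 42;

          printf("fd = %d\n", fd);
          return 0;
  }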
Link: https://lkml.kernel.org/r/20241209095019.1732120-3-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/ksm_tests.c | 2 +- tools/testing/selftests/mm/mremap_test.c | 2 +- tools/testing/selftests/mm/soft-dirty.c | 2 +- tools/testing/selftests/mm/uffd-unit-tests.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/mm/ksm_tests.c b/tools/testing/selftests/mm/ksm_tests.c index b748c48908d9..dcdd5bb20f3d 100644 --- a/tools/testing/selftests/mm/ksm_tests.c +++ b/tools/testing/selftests/mm/ksm_tests.c @@ -776,7 +776,7 @@ err_out: int main(int argc, char *argv[]) { - int ret, opt; + int ret = 0, opt; int prot = 0; int ksm_scan_limit_sec = KSM_SCAN_LIMIT_SEC_DEFAULT; int merge_type = KSM_MERGE_TYPE_DEFAULT; diff --git a/tools/testing/selftests/mm/mremap_test.c b/tools/testing/selftests/mm/mremap_test.c index d207a52f2b5b..bb84476a177f 100644 --- a/tools/testing/selftests/mm/mremap_test.c +++ b/tools/testing/selftests/mm/mremap_test.c @@ -384,7 +384,7 @@ out: static long long remap_region(struct config c, unsigned int threshold_mb, char *rand_addr) { - void *addr, *src_addr, *dest_addr, *dest_preamble_addr; + void *addr, *src_addr, *dest_addr, *dest_preamble_addr = NULL; unsigned long long t, d; struct timespec t_start = {0, 0}, t_end = {0, 0}; long long start_ns, end_ns, align_mask, ret, offset; diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c index bdfa5d085f00..8e1462ce0532 100644 --- a/tools/testing/selftests/mm/soft-dirty.c +++ b/tools/testing/selftests/mm/soft-dirty.c @@ -128,7 +128,7 @@ static void test_mprotect(int pagemap_fd, int pagesize, bool anon) { const char *type[] = {"file", "anon"}; const char *fname = "./soft-dirty-test-file"; - int test_fd; + int test_fd = 0; char *map; if (anon) { diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c index 74c884713bf7..9ff71fa1f9bf 100644 --- a/tools/testing/selftests/mm/uffd-unit-tests.c +++ b/tools/testing/selftests/mm/uffd-unit-tests.c @@ -1122,7 +1122,7 @@ uffd_move_test_common(uffd_test_args_t *targs, unsigned long chunk_size, char c; unsigned long long count; struct uffd_args args = { 0 }; - char *orig_area_src, *orig_area_dst; + char *orig_area_src = NULL, *orig_area_dst = NULL; unsigned long step_size, step_count; unsigned long src_offs = 0; unsigned long dst_offs = 0; From 673568a4aceb5baa46cdd2f6cc40ca7ee1b1d745 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:08 +0000 Subject: [PATCH 110/504] selftests/mm: fix strncpy() length GCC complains (with -O2) that the length is equal to the destination size, which is indeed invalid. Subtract 1 from the size of the array to leave room for '\0'. 
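A minimal sketch of the problem (generic example, not the hugetlbfs test itself): when the bound passed to strncpy() equals the destination size, the copy can fill the whole buffer and leave no room for the terminating NUL, and newer GCC flags exactly that; reserving one byte keeps the string terminated:

  /* build with: gcc -Wall -O2 strncpy_bound.c */
  #include <stdio.h>
  #include <string.h>

  int main(int argc, char **argv)
  {
          char path[32] = {0};    /* zero-filled, so the last byte stays '\0' */

          if (argc < 2)
                  return 1;

          /* strncpy(path, argv[1], sizeof(path)) would warn that the
           * specified bound equals the destination size */
          strncpy(path, argv[1], sizeof(path) - 1);
          printf("path: %s\n", path);
          return 0;
  }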
Link: https://lkml.kernel.org/r/20241209095019.1732120-4-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/write_to_hugetlbfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/write_to_hugetlbfs.c b/tools/testing/selftests/mm/write_to_hugetlbfs.c index 1289d311efd7..34c91f7e6128 100644 --- a/tools/testing/selftests/mm/write_to_hugetlbfs.c +++ b/tools/testing/selftests/mm/write_to_hugetlbfs.c @@ -89,7 +89,7 @@ int main(int argc, char **argv) size = atoi(optarg); break; case 'p': - strncpy(path, optarg, sizeof(path)); + strncpy(path, optarg, sizeof(path) - 1); break; case 'm': if (atoi(optarg) >= MAX_METHOD) { From 3cb647c156dad6856daa22b4dbbdded195602b53 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:09 +0000 Subject: [PATCH 111/504] selftests/mm: fix -Warray-bounds warnings in pkey_sighandler_tests GCC doesn't like dereferencing a pointer set to 0x1 (when building at -O2): pkey_sighandler_tests.c:166:9: warning: array subscript 0 is outside array bounds of 'int[0]' [-Warray-bounds=] 166 | *(int *) (0x1) = 1; | ^~~~~~~~~~~~~~ cc1: note: source object is likely at address zero Using NULL instead seems to make it happy. This should make no difference in practice (SIGSEGV with SEGV_MAPERR will be the outcome regardless), we just need to update the expected si_addr. Link: https://lkml.kernel.org/r/20241209095019.1732120-5-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Cc: kernel test robot Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pkey_sighandler_tests.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/mm/pkey_sighandler_tests.c b/tools/testing/selftests/mm/pkey_sighandler_tests.c index c593a426341c..e7b91794f184 100644 --- a/tools/testing/selftests/mm/pkey_sighandler_tests.c +++ b/tools/testing/selftests/mm/pkey_sighandler_tests.c @@ -163,7 +163,7 @@ static void *thread_segv_with_pkey0_disabled(void *ptr) __write_pkey_reg(pkey_reg_restrictive_default()); /* Segfault (with SEGV_MAPERR) */ - *(int *) (0x1) = 1; + *(int *)NULL = 1; return NULL; } @@ -179,7 +179,6 @@ static void *thread_segv_pkuerr_stack(void *ptr) static void *thread_segv_maperr_ptr(void *ptr) { stack_t *stack = ptr; - int *bad = (int *)1; u64 pkey_reg; /* @@ -195,7 +194,7 @@ static void *thread_segv_maperr_ptr(void *ptr) __write_pkey_reg(pkey_reg); /* Segfault */ - *bad = 1; + *(int *)NULL = 1; syscall_raw(SYS_exit, 0, 0, 0, 0, 0, 0); return NULL; } @@ -234,7 +233,7 @@ static void test_sigsegv_handler_with_pkey0_disabled(void) ksft_test_result(siginfo.si_signo == SIGSEGV && siginfo.si_code == SEGV_MAPERR && - siginfo.si_addr == (void *)1, + siginfo.si_addr == NULL, "%s\n", __func__); } @@ -349,7 +348,7 @@ static void test_sigsegv_handler_with_different_pkey_for_stack(void) ksft_test_result(siginfo.si_signo == SIGSEGV && siginfo.si_code == SEGV_MAPERR && - siginfo.si_addr == (void *)1, + siginfo.si_addr == NULL, "%s\n", __func__); } From 464880205d4eee452d8851d1fa7dbcb3a4ef41ee Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Wed, 18 Dec 2024 15:36:15 +0000 Subject: [PATCH 112/504] selftests-mm-fix-warray-bounds-warnings-in-pkey_sighandler_tests-fix Dereferencing a null pointer on Clang is 
not a good idea - it will entirely optimise out the dereference. Make the pointer volatile to force the access (and fault). Link: https://lkml.kernel.org/r/20241218153615.2267571-1-kevin.brodsky@arm.com Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202412140850.4TW4YBqc-lkp@intel.com/ Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pkey_sighandler_tests.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/mm/pkey_sighandler_tests.c b/tools/testing/selftests/mm/pkey_sighandler_tests.c index e7b91794f184..c6c020a2a6f5 100644 --- a/tools/testing/selftests/mm/pkey_sighandler_tests.c +++ b/tools/testing/selftests/mm/pkey_sighandler_tests.c @@ -163,7 +163,7 @@ static void *thread_segv_with_pkey0_disabled(void *ptr) __write_pkey_reg(pkey_reg_restrictive_default()); /* Segfault (with SEGV_MAPERR) */ - *(int *)NULL = 1; + *(volatile int *)NULL = 1; return NULL; } @@ -194,7 +194,7 @@ static void *thread_segv_maperr_ptr(void *ptr) __write_pkey_reg(pkey_reg); /* Segfault */ - *(int *)NULL = 1; + *(volatile int *)NULL = 1; syscall_raw(SYS_exit, 0, 0, 0, 0, 0, 0); return NULL; } From 6942ef2adb16cb6f3dbb883afd95b9a4f896c040 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:10 +0000 Subject: [PATCH 113/504] selftests/mm: build with -O2 The mm kselftests are currently built with no optimisation (-O0). It's unclear why, and besides being obviously suboptimal, this also prevents the pkeys tests from working as intended. Let's build all the tests with -O2. Link: https://lkml.kernel.org/r/20241209095019.1732120-6-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index f2db43c64f83..8aa5bdec9e7b 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -33,7 +33,7 @@ endif # LDLIBS. MAKEFLAGS += --no-builtin-rules -CFLAGS = -Wall -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES) $(TOOLS_INCLUDES) +CFLAGS = -Wall -O2 -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES) $(TOOLS_INCLUDES) LDLIBS = -lrt -lpthread -lm KDIR ?= /lib/modules/$(shell uname -r)/build From 47d314253b4c2c14f2015b735d996b5e124f0343 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Tue, 7 Jan 2025 17:01:10 +0000 Subject: [PATCH 114/504] selftests/mm: silence unused-result warnings Switching to -O2 when building the mm tests has the unexpected side effect of triggering many unused-result warnings on certain distros like Ubuntu, where GCC is configured so that -O2 implies -D_FORTIFY_SOURCE. Explicitly disable FORTIFY_SOURCE to avoid those warnings. This has no effect on upstream toolchains where FORTIFY_SOURCE is not implicitly enabled. 
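Taken together with the volatile fix above, a standalone sketch (not the test code) shows why the optimisation level matters: a plain store through a null pointer is undefined behaviour that the compiler may delete at -O2, so the deliberate SIGSEGV in the signal-handler tests is only reliable when the access is volatile:

  /* build with: cc -O2 null_fault.c ; running it is expected to crash */
  #include <stdio.h>

  int main(void)
  {
          printf("about to fault\n");
          fflush(stdout);

          /* a plain "*(int *)NULL = 1;" may be optimised out entirely;
           * the volatile access forces the store and the fault */
          *(volatile int *)NULL = 1;

          return 0;       /* never reached */
  }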
Link: https://lkml.kernel.org/r/20250107170110.2819685-1-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Suggested-by: Ryan Roberts Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/Makefile | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index 8aa5bdec9e7b..0d5d8f2f8652 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -36,6 +36,13 @@ MAKEFLAGS += --no-builtin-rules CFLAGS = -Wall -O2 -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES) $(TOOLS_INCLUDES) LDLIBS = -lrt -lpthread -lm +# Some distributions (such as Ubuntu) configure GCC so that _FORTIFY_SOURCE is +# automatically enabled at -O1 or above. This triggers various unused-result +# warnings where functions such as read() or write() are called and their +# return value is not checked. Disable _FORTIFY_SOURCE to silence those +# warnings. +CFLAGS += -U_FORTIFY_SOURCE + KDIR ?= /lib/modules/$(shell uname -r)/build ifneq (,$(wildcard $(KDIR)/Module.symvers)) ifneq (,$(wildcard $(KDIR)/include/linux/page_frag_cache.h)) From e247c719697cc48595554d750a7d253d72413124 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:11 +0000 Subject: [PATCH 115/504] selftests/mm: remove unused pkey helpers Commit 5f23f6d082a9 ("x86/pkeys: Add self-tests") introduced a number of helpers and functions that don't seem to have ever been used. Let's remove them. Link: https://lkml.kernel.org/r/20241209095019.1732120-7-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pkey-helpers.h | 34 -------------------- tools/testing/selftests/mm/protection_keys.c | 34 -------------------- 2 files changed, 68 deletions(-) diff --git a/tools/testing/selftests/mm/pkey-helpers.h b/tools/testing/selftests/mm/pkey-helpers.h index f7cfe163b0ff..472febd992eb 100644 --- a/tools/testing/selftests/mm/pkey-helpers.h +++ b/tools/testing/selftests/mm/pkey-helpers.h @@ -26,9 +26,7 @@ #ifndef DEBUG_LEVEL #define DEBUG_LEVEL 0 #endif -#define DPRINT_IN_SIGNAL_BUF_SIZE 4096 extern int dprint_in_signal; -extern char dprint_in_signal_buffer[DPRINT_IN_SIGNAL_BUF_SIZE]; extern int test_nr; extern int iteration_nr; @@ -171,38 +169,6 @@ static inline void write_pkey_reg(u64 pkey_reg) pkey_reg, __read_pkey_reg()); } -/* - * These are technically racy. since something could - * change PKEY register between the read and the write. 
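For illustration (a sketch assuming a distro toolchain that injects -D_FORTIFY_SOURCE at -O1 and above, not code from the selftests): the fortified declarations mark read()/write() warn_unused_result, so an unchecked call starts warning the moment -O2 is enabled, and -U_FORTIFY_SOURCE switches those declarations back off:

  /* on such a toolchain: gcc -O2 unused_result.c
   *   -> "ignoring return value of 'write' ..."
   * gcc -O2 -U_FORTIFY_SOURCE unused_result.c -> no warning */
  #include <unistd.h>

  int main(void)
  {
          write(STDOUT_FILENO, "hello\n", 6);     /* return value deliberately ignored */
          return 0;
  }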
- */ -static inline void __pkey_access_allow(int pkey, int do_allow) -{ - u64 pkey_reg = read_pkey_reg(); - int bit = pkey * 2; - - if (do_allow) - pkey_reg &= (1<= 0); - - ptr = mmap(0, size, prot, MAP_SHARED, fd, 0); - pkey_assert(ptr != (void *)-1); - - mprotect_pkey(ptr, size, prot, pkey); - - record_pkey_malloc(ptr, size, prot); - - dprintf1("mmap()'d for pkey %d @ %p\n", pkey, ptr); - close(fd); - return ptr; -} - void *(*pkey_malloc[])(long size, int prot, u16 pkey) = { malloc_pkey_with_mprotect, malloc_pkey_with_mprotect_subpage, malloc_pkey_anon_huge, malloc_pkey_hugetlb -/* can not do direct with the pkey_mprotect() API: - malloc_pkey_mmap_direct, - malloc_pkey_mmap_dax, -*/ }; void *malloc_pkey(long size, int prot, u16 pkey) From f7457fd3a82eb0c2c22f3c9235426e9fecdbcba8 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:12 +0000 Subject: [PATCH 116/504] selftests/mm: define types using typedef in pkey-helpers.h Using #define to define types should be avoided. Use typedef instead. Also ensure that __u* types are actually defined by including . Link: https://lkml.kernel.org/r/20241209095019.1732120-8-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pkey-helpers.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/mm/pkey-helpers.h b/tools/testing/selftests/mm/pkey-helpers.h index 472febd992eb..84376ab09545 100644 --- a/tools/testing/selftests/mm/pkey-helpers.h +++ b/tools/testing/selftests/mm/pkey-helpers.h @@ -13,13 +13,15 @@ #include #include +#include + #include "../kselftest.h" /* Define some kernel-like types */ -#define u8 __u8 -#define u16 __u16 -#define u32 __u32 -#define u64 __u64 +typedef __u8 u8; +typedef __u16 u16; +typedef __u32 u32; +typedef __u64 u64; #define PTR_ERR_ENOTSUP ((void *)-ENOTSUP) From e7d422b080a30252c7c9fbda22325e45ab4a419b Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:13 +0000 Subject: [PATCH 117/504] selftests/mm: ensure pkey-*.h define inline functions only Headers should not define non-inline functions, as this prevents them from being included more than once in a given program. pkey-helpers.h and the arch-specific headers it includes currently define multiple such non-inline functions. In most cases those functions can simply be made inline - this patch does just that. read_ptr() is an exception as it must not be inlined. Since it is only called from protection_keys.c, we just move it there. 
Link: https://lkml.kernel.org/r/20241209095019.1732120-9-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pkey-arm64.h | 4 ++-- tools/testing/selftests/mm/pkey-helpers.h | 8 +------- tools/testing/selftests/mm/pkey-powerpc.h | 4 ++-- tools/testing/selftests/mm/pkey-x86.h | 6 +++--- tools/testing/selftests/mm/protection_keys.c | 7 +++++++ 5 files changed, 15 insertions(+), 14 deletions(-) diff --git a/tools/testing/selftests/mm/pkey-arm64.h b/tools/testing/selftests/mm/pkey-arm64.h index d9d2100eafc0..9897e31f16dd 100644 --- a/tools/testing/selftests/mm/pkey-arm64.h +++ b/tools/testing/selftests/mm/pkey-arm64.h @@ -81,11 +81,11 @@ static inline int get_arch_reserved_keys(void) return NR_RESERVED_PKEYS; } -void expect_fault_on_read_execonly_key(void *p1, int pkey) +static inline void expect_fault_on_read_execonly_key(void *p1, int pkey) { } -void *malloc_pkey_with_mprotect_subpage(long size, int prot, u16 pkey) +static inline void *malloc_pkey_with_mprotect_subpage(long size, int prot, u16 pkey) { return PTR_ERR_ENOTSUP; } diff --git a/tools/testing/selftests/mm/pkey-helpers.h b/tools/testing/selftests/mm/pkey-helpers.h index 84376ab09545..bc81275a89d9 100644 --- a/tools/testing/selftests/mm/pkey-helpers.h +++ b/tools/testing/selftests/mm/pkey-helpers.h @@ -84,13 +84,7 @@ extern void abort_hooks(void); # define noinline __attribute__((noinline)) #endif -noinline int read_ptr(int *ptr) -{ - /* Keep GCC from optimizing this away somehow */ - barrier(); - return *ptr; -} - +noinline int read_ptr(int *ptr); void expected_pkey_fault(int pkey); int sys_pkey_alloc(unsigned long flags, unsigned long init_val); int sys_pkey_free(unsigned long pkey); diff --git a/tools/testing/selftests/mm/pkey-powerpc.h b/tools/testing/selftests/mm/pkey-powerpc.h index 3d0c0bdae5bc..1bad310d282a 100644 --- a/tools/testing/selftests/mm/pkey-powerpc.h +++ b/tools/testing/selftests/mm/pkey-powerpc.h @@ -91,7 +91,7 @@ static inline int get_arch_reserved_keys(void) return NR_RESERVED_PKEYS_64K_3KEYS; } -void expect_fault_on_read_execonly_key(void *p1, int pkey) +static inline void expect_fault_on_read_execonly_key(void *p1, int pkey) { /* * powerpc does not allow userspace to change permissions of exec-only @@ -105,7 +105,7 @@ void expect_fault_on_read_execonly_key(void *p1, int pkey) /* 4-byte instructions * 16384 = 64K page */ #define __page_o_noops() asm(".rept 16384 ; nop; .endr") -void *malloc_pkey_with_mprotect_subpage(long size, int prot, u16 pkey) +static inline void *malloc_pkey_with_mprotect_subpage(long size, int prot, u16 pkey) { void *ptr; int ret; diff --git a/tools/testing/selftests/mm/pkey-x86.h b/tools/testing/selftests/mm/pkey-x86.h index ac91777c8917..f7ecd335df1e 100644 --- a/tools/testing/selftests/mm/pkey-x86.h +++ b/tools/testing/selftests/mm/pkey-x86.h @@ -113,7 +113,7 @@ static inline u32 pkey_bit_position(int pkey) #define XSTATE_PKEY 0x200 #define XSTATE_BV_OFFSET 512 -int pkey_reg_xstate_offset(void) +static inline int pkey_reg_xstate_offset(void) { unsigned int eax; unsigned int ebx; @@ -148,7 +148,7 @@ static inline int get_arch_reserved_keys(void) return NR_RESERVED_PKEYS; } -void expect_fault_on_read_execonly_key(void *p1, int pkey) +static inline void expect_fault_on_read_execonly_key(void *p1, int pkey) { int ptr_contents; @@ -157,7 +157,7 @@ void expect_fault_on_read_execonly_key(void *p1, int pkey) 
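To make the linkage issue concrete (a generic sketch, not the pkey headers themselves): a header that defines a function without static inline injects one external definition into every translation unit that includes it, so linking two such objects fails, while a static inline definition stays private to each includer:

  /* shared.h -- hypothetical header included from both a.c and b.c */

  /* BAD: a.o and b.o each carry a definition of helper(); linking them
   * fails with "multiple definition of `helper'" */
  int helper(void)
  {
          return 42;
  }

  /* OK: each translation unit keeps its own internal copy */
  static inline int helper_alt(void)
  {
          return 42;
  }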
expected_pkey_fault(pkey); } -void *malloc_pkey_with_mprotect_subpage(long size, int prot, u16 pkey) +static inline void *malloc_pkey_with_mprotect_subpage(long size, int prot, u16 pkey) { return PTR_ERR_ENOTSUP; } diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c index fcbebc4490b4..82ece325b70c 100644 --- a/tools/testing/selftests/mm/protection_keys.c +++ b/tools/testing/selftests/mm/protection_keys.c @@ -54,6 +54,13 @@ int test_nr; u64 shadow_pkey_reg; int dprint_in_signal; +noinline int read_ptr(int *ptr) +{ + /* Keep GCC from optimizing this away somehow */ + barrier(); + return *ptr; +} + void cat_into_file(char *str, char *file) { int fd = open(file, O_RDWR); From 4fda423469caf0ad1db38ffc0a69d197e50467f3 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:14 +0000 Subject: [PATCH 118/504] selftests/mm: remove empty pkey helper definition Some of the functions declared in pkey-helpers.h are actually defined in protections_keys.c, meaning they can only be called from protections_keys.c. This is less than ideal, but it is hard to avoid as these helpers are themselves called from inline functions in pkey-.h. Let's at least add a comment clarifying that. We can also remove the empty definition in pkey_sighandler_tests.c: expected_pkey_fault() is not meant to be called from there. Link: https://lkml.kernel.org/r/20241209095019.1732120-10-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pkey-helpers.h | 6 ++++-- tools/testing/selftests/mm/pkey_sighandler_tests.c | 2 -- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/mm/pkey-helpers.h b/tools/testing/selftests/mm/pkey-helpers.h index bc81275a89d9..7604cc66ef0e 100644 --- a/tools/testing/selftests/mm/pkey-helpers.h +++ b/tools/testing/selftests/mm/pkey-helpers.h @@ -84,10 +84,12 @@ extern void abort_hooks(void); # define noinline __attribute__((noinline)) #endif -noinline int read_ptr(int *ptr); -void expected_pkey_fault(int pkey); int sys_pkey_alloc(unsigned long flags, unsigned long init_val); int sys_pkey_free(unsigned long pkey); + +/* For functions called from protection_keys.c only */ +noinline int read_ptr(int *ptr); +void expected_pkey_fault(int pkey); int mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot, unsigned long pkey); void record_pkey_malloc(void *ptr, long size, int prot); diff --git a/tools/testing/selftests/mm/pkey_sighandler_tests.c b/tools/testing/selftests/mm/pkey_sighandler_tests.c index c6c020a2a6f5..b6184865629a 100644 --- a/tools/testing/selftests/mm/pkey_sighandler_tests.c +++ b/tools/testing/selftests/mm/pkey_sighandler_tests.c @@ -32,8 +32,6 @@ #define STACK_SIZE PTHREAD_STACK_MIN -void expected_pkey_fault(int pkey) {} - pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; pthread_cond_t cond = PTHREAD_COND_INITIALIZER; siginfo_t siginfo = {0}; From 401d0a489e68351167342d3e4c6bbbf0a8a5a20d Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:15 +0000 Subject: [PATCH 119/504] selftests/mm: ensure non-global pkey symbols are marked static The pkey tests define a whole lot of functions and some global variables. A few are truly global (declared in pkey-helpers.h), but the majority are file-scoped. Make sure those are labelled static. 
Some of the pkey_{access,write}_{allow,deny} helpers are not called, or only called when building for some architectures. Mark them __maybe_unused to suppress compiler warnings. Link: https://lkml.kernel.org/r/20241209095019.1732120-11-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pkey-helpers.h | 3 + .../selftests/mm/pkey_sighandler_tests.c | 6 +- tools/testing/selftests/mm/protection_keys.c | 132 +++++++++--------- 3 files changed, 72 insertions(+), 69 deletions(-) diff --git a/tools/testing/selftests/mm/pkey-helpers.h b/tools/testing/selftests/mm/pkey-helpers.h index 7604cc66ef0e..6f0ab7b42738 100644 --- a/tools/testing/selftests/mm/pkey-helpers.h +++ b/tools/testing/selftests/mm/pkey-helpers.h @@ -83,6 +83,9 @@ extern void abort_hooks(void); #ifndef noinline # define noinline __attribute__((noinline)) #endif +#ifndef __maybe_unused +# define __maybe_unused __attribute__((__unused__)) +#endif int sys_pkey_alloc(unsigned long flags, unsigned long init_val); int sys_pkey_free(unsigned long pkey); diff --git a/tools/testing/selftests/mm/pkey_sighandler_tests.c b/tools/testing/selftests/mm/pkey_sighandler_tests.c index b6184865629a..425da9556867 100644 --- a/tools/testing/selftests/mm/pkey_sighandler_tests.c +++ b/tools/testing/selftests/mm/pkey_sighandler_tests.c @@ -32,9 +32,9 @@ #define STACK_SIZE PTHREAD_STACK_MIN -pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; -pthread_cond_t cond = PTHREAD_COND_INITIALIZER; -siginfo_t siginfo = {0}; +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t cond = PTHREAD_COND_INITIALIZER; +static siginfo_t siginfo = {0}; /* * We need to use inline assembly instead of glibc's syscall because glibc's diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c index 82ece325b70c..f43cf3b75d8e 100644 --- a/tools/testing/selftests/mm/protection_keys.c +++ b/tools/testing/selftests/mm/protection_keys.c @@ -61,7 +61,7 @@ noinline int read_ptr(int *ptr) return *ptr; } -void cat_into_file(char *str, char *file) +static void cat_into_file(char *str, char *file) { int fd = open(file, O_RDWR); int ret; @@ -88,7 +88,7 @@ void cat_into_file(char *str, char *file) #if CONTROL_TRACING > 0 static int warned_tracing; -int tracing_root_ok(void) +static int tracing_root_ok(void) { if (geteuid() != 0) { if (!warned_tracing) @@ -101,7 +101,7 @@ int tracing_root_ok(void) } #endif -void tracing_on(void) +static void tracing_on(void) { #if CONTROL_TRACING > 0 #define TRACEDIR "/sys/kernel/tracing" @@ -125,7 +125,7 @@ void tracing_on(void) #endif } -void tracing_off(void) +static void tracing_off(void) { #if CONTROL_TRACING > 0 if (!tracing_root_ok()) @@ -159,7 +159,7 @@ __attribute__((__aligned__(65536))) #else __attribute__((__aligned__(PAGE_SIZE))) #endif -void lots_o_noops_around_write(int *write_to_me) +static void lots_o_noops_around_write(int *write_to_me) { dprintf3("running %s()\n", __func__); __page_o_noops(); @@ -170,7 +170,7 @@ void lots_o_noops_around_write(int *write_to_me) dprintf3("%s() done\n", __func__); } -void dump_mem(void *dumpme, int len_bytes) +static void dump_mem(void *dumpme, int len_bytes) { char *c = (void *)dumpme; int i; @@ -213,7 +213,7 @@ static int hw_pkey_set(int pkey, unsigned long rights, unsigned long flags) return 0; } -void pkey_disable_set(int pkey, int flags) +static void 
pkey_disable_set(int pkey, int flags) { unsigned long syscall_flags = 0; int ret; @@ -251,7 +251,7 @@ void pkey_disable_set(int pkey, int flags) pkey, flags); } -void pkey_disable_clear(int pkey, int flags) +static void pkey_disable_clear(int pkey, int flags) { unsigned long syscall_flags = 0; int ret; @@ -277,19 +277,19 @@ void pkey_disable_clear(int pkey, int flags) pkey, read_pkey_reg()); } -void pkey_write_allow(int pkey) +__maybe_unused static void pkey_write_allow(int pkey) { pkey_disable_clear(pkey, PKEY_DISABLE_WRITE); } -void pkey_write_deny(int pkey) +__maybe_unused static void pkey_write_deny(int pkey) { pkey_disable_set(pkey, PKEY_DISABLE_WRITE); } -void pkey_access_allow(int pkey) +__maybe_unused static void pkey_access_allow(int pkey) { pkey_disable_clear(pkey, PKEY_DISABLE_ACCESS); } -void pkey_access_deny(int pkey) +__maybe_unused static void pkey_access_deny(int pkey) { pkey_disable_set(pkey, PKEY_DISABLE_ACCESS); } @@ -307,9 +307,9 @@ static char *si_code_str(int si_code) return "UNKNOWN"; } -int pkey_faults; -int last_si_pkey = -1; -void signal_handler(int signum, siginfo_t *si, void *vucontext) +static int pkey_faults; +static int last_si_pkey = -1; +static void signal_handler(int signum, siginfo_t *si, void *vucontext) { ucontext_t *uctxt = vucontext; int trapno; @@ -403,14 +403,14 @@ void signal_handler(int signum, siginfo_t *si, void *vucontext) dprint_in_signal = 0; } -void sig_chld(int x) +static void sig_chld(int x) { dprint_in_signal = 1; dprintf2("[%d] SIGCHLD: %d\n", getpid(), x); dprint_in_signal = 0; } -void setup_sigsegv_handler(void) +static void setup_sigsegv_handler(void) { int r, rs; struct sigaction newact; @@ -436,13 +436,13 @@ void setup_sigsegv_handler(void) pkey_assert(r == 0); } -void setup_handlers(void) +static void setup_handlers(void) { signal(SIGCHLD, &sig_chld); setup_sigsegv_handler(); } -pid_t fork_lazy_child(void) +static pid_t fork_lazy_child(void) { pid_t forkret; @@ -488,7 +488,7 @@ int sys_pkey_alloc(unsigned long flags, unsigned long init_val) return ret; } -int alloc_pkey(void) +static int alloc_pkey(void) { int ret; unsigned long init_val = 0x0; @@ -546,7 +546,7 @@ int sys_pkey_free(unsigned long pkey) * not cleared. This ensures we get lots of random bit sets * and clears on the vma and pte pkey bits. 
*/ -int alloc_random_pkey(void) +static int alloc_random_pkey(void) { int max_nr_pkey_allocs; int ret; @@ -629,7 +629,7 @@ struct pkey_malloc_record { }; struct pkey_malloc_record *pkey_malloc_records; struct pkey_malloc_record *pkey_last_malloc_record; -long nr_pkey_malloc_records; +static long nr_pkey_malloc_records; void record_pkey_malloc(void *ptr, long size, int prot) { long i; @@ -667,7 +667,7 @@ void record_pkey_malloc(void *ptr, long size, int prot) nr_pkey_malloc_records++; } -void free_pkey_malloc(void *ptr) +static void free_pkey_malloc(void *ptr) { long i; int ret; @@ -694,8 +694,7 @@ void free_pkey_malloc(void *ptr) pkey_assert(false); } - -void *malloc_pkey_with_mprotect(long size, int prot, u16 pkey) +static void *malloc_pkey_with_mprotect(long size, int prot, u16 pkey) { void *ptr; int ret; @@ -715,7 +714,7 @@ void *malloc_pkey_with_mprotect(long size, int prot, u16 pkey) return ptr; } -void *malloc_pkey_anon_huge(long size, int prot, u16 pkey) +static void *malloc_pkey_anon_huge(long size, int prot, u16 pkey) { int ret; void *ptr; @@ -745,10 +744,10 @@ void *malloc_pkey_anon_huge(long size, int prot, u16 pkey) return ptr; } -int hugetlb_setup_ok; +static int hugetlb_setup_ok; #define SYSFS_FMT_NR_HUGE_PAGES "/sys/kernel/mm/hugepages/hugepages-%ldkB/nr_hugepages" #define GET_NR_HUGE_PAGES 10 -void setup_hugetlbfs(void) +static void setup_hugetlbfs(void) { int err; int fd; @@ -796,7 +795,7 @@ void setup_hugetlbfs(void) hugetlb_setup_ok = 1; } -void *malloc_pkey_hugetlb(long size, int prot, u16 pkey) +static void *malloc_pkey_hugetlb(long size, int prot, u16 pkey) { void *ptr; int flags = MAP_ANONYMOUS|MAP_PRIVATE|MAP_HUGETLB; @@ -817,7 +816,7 @@ void *malloc_pkey_hugetlb(long size, int prot, u16 pkey) return ptr; } -void *(*pkey_malloc[])(long size, int prot, u16 pkey) = { +static void *(*pkey_malloc[])(long size, int prot, u16 pkey) = { malloc_pkey_with_mprotect, malloc_pkey_with_mprotect_subpage, @@ -825,7 +824,7 @@ void *(*pkey_malloc[])(long size, int prot, u16 pkey) = { malloc_pkey_hugetlb }; -void *malloc_pkey(long size, int prot, u16 pkey) +static void *malloc_pkey(long size, int prot, u16 pkey) { void *ret; static int malloc_type; @@ -855,7 +854,7 @@ void *malloc_pkey(long size, int prot, u16 pkey) return ret; } -int last_pkey_faults; +static int last_pkey_faults; #define UNKNOWN_PKEY -2 void expected_pkey_fault(int pkey) { @@ -897,9 +896,9 @@ void expected_pkey_fault(int pkey) pkey_assert(last_pkey_faults == pkey_faults); \ } while (0) -int test_fds[10] = { -1 }; -int nr_test_fds; -void __save_test_fd(int fd) +static int test_fds[10] = { -1 }; +static int nr_test_fds; +static void __save_test_fd(int fd) { pkey_assert(fd >= 0); pkey_assert(nr_test_fds < ARRAY_SIZE(test_fds)); @@ -907,14 +906,14 @@ void __save_test_fd(int fd) nr_test_fds++; } -int get_test_read_fd(void) +static int get_test_read_fd(void) { int test_fd = open("/etc/passwd", O_RDONLY); __save_test_fd(test_fd); return test_fd; } -void close_test_fds(void) +static void close_test_fds(void) { int i; @@ -927,7 +926,7 @@ void close_test_fds(void) nr_test_fds = 0; } -void test_pkey_alloc_free_attach_pkey0(int *ptr, u16 pkey) +static void test_pkey_alloc_free_attach_pkey0(int *ptr, u16 pkey) { int i, err; int max_nr_pkey_allocs; @@ -979,7 +978,7 @@ void test_pkey_alloc_free_attach_pkey0(int *ptr, u16 pkey) pkey_assert(!err); } -void test_read_of_write_disabled_region(int *ptr, u16 pkey) +static void test_read_of_write_disabled_region(int *ptr, u16 pkey) { int ptr_contents; @@ -989,7 +988,7 @@ void 
test_read_of_write_disabled_region(int *ptr, u16 pkey) dprintf1("*ptr: %d\n", ptr_contents); dprintf1("\n"); } -void test_read_of_access_disabled_region(int *ptr, u16 pkey) +static void test_read_of_access_disabled_region(int *ptr, u16 pkey) { int ptr_contents; @@ -1001,7 +1000,7 @@ void test_read_of_access_disabled_region(int *ptr, u16 pkey) expected_pkey_fault(pkey); } -void test_read_of_access_disabled_region_with_page_already_mapped(int *ptr, +static void test_read_of_access_disabled_region_with_page_already_mapped(int *ptr, u16 pkey) { int ptr_contents; @@ -1018,7 +1017,7 @@ void test_read_of_access_disabled_region_with_page_already_mapped(int *ptr, expected_pkey_fault(pkey); } -void test_write_of_write_disabled_region_with_page_already_mapped(int *ptr, +static void test_write_of_write_disabled_region_with_page_already_mapped(int *ptr, u16 pkey) { *ptr = __LINE__; @@ -1029,14 +1028,14 @@ void test_write_of_write_disabled_region_with_page_already_mapped(int *ptr, expected_pkey_fault(pkey); } -void test_write_of_write_disabled_region(int *ptr, u16 pkey) +static void test_write_of_write_disabled_region(int *ptr, u16 pkey) { dprintf1("disabling write access to PKEY[%02d], doing write\n", pkey); pkey_write_deny(pkey); *ptr = __LINE__; expected_pkey_fault(pkey); } -void test_write_of_access_disabled_region(int *ptr, u16 pkey) +static void test_write_of_access_disabled_region(int *ptr, u16 pkey) { dprintf1("disabling access to PKEY[%02d], doing write\n", pkey); pkey_access_deny(pkey); @@ -1044,7 +1043,7 @@ void test_write_of_access_disabled_region(int *ptr, u16 pkey) expected_pkey_fault(pkey); } -void test_write_of_access_disabled_region_with_page_already_mapped(int *ptr, +static void test_write_of_access_disabled_region_with_page_already_mapped(int *ptr, u16 pkey) { *ptr = __LINE__; @@ -1055,7 +1054,7 @@ void test_write_of_access_disabled_region_with_page_already_mapped(int *ptr, expected_pkey_fault(pkey); } -void test_kernel_write_of_access_disabled_region(int *ptr, u16 pkey) +static void test_kernel_write_of_access_disabled_region(int *ptr, u16 pkey) { int ret; int test_fd = get_test_read_fd(); @@ -1067,7 +1066,8 @@ void test_kernel_write_of_access_disabled_region(int *ptr, u16 pkey) dprintf1("read ret: %d\n", ret); pkey_assert(ret); } -void test_kernel_write_of_write_disabled_region(int *ptr, u16 pkey) + +static void test_kernel_write_of_write_disabled_region(int *ptr, u16 pkey) { int ret; int test_fd = get_test_read_fd(); @@ -1080,7 +1080,7 @@ void test_kernel_write_of_write_disabled_region(int *ptr, u16 pkey) pkey_assert(ret); } -void test_kernel_gup_of_access_disabled_region(int *ptr, u16 pkey) +static void test_kernel_gup_of_access_disabled_region(int *ptr, u16 pkey) { int pipe_ret, vmsplice_ret; struct iovec iov; @@ -1102,7 +1102,7 @@ void test_kernel_gup_of_access_disabled_region(int *ptr, u16 pkey) close(pipe_fds[1]); } -void test_kernel_gup_write_to_write_disabled_region(int *ptr, u16 pkey) +static void test_kernel_gup_write_to_write_disabled_region(int *ptr, u16 pkey) { int ignored = 0xdada; int futex_ret; @@ -1120,7 +1120,7 @@ void test_kernel_gup_write_to_write_disabled_region(int *ptr, u16 pkey) } /* Assumes that all pkeys other than 'pkey' are unallocated */ -void test_pkey_syscalls_on_non_allocated_pkey(int *ptr, u16 pkey) +static void test_pkey_syscalls_on_non_allocated_pkey(int *ptr, u16 pkey) { int err; int i; @@ -1143,7 +1143,7 @@ void test_pkey_syscalls_on_non_allocated_pkey(int *ptr, u16 pkey) } /* Assumes that all pkeys other than 'pkey' are unallocated */ -void 
test_pkey_syscalls_bad_args(int *ptr, u16 pkey) +static void test_pkey_syscalls_bad_args(int *ptr, u16 pkey) { int err; int bad_pkey = NR_PKEYS+99; @@ -1153,7 +1153,7 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey) pkey_assert(err); } -void become_child(void) +static void become_child(void) { pid_t forkret; @@ -1169,7 +1169,7 @@ void become_child(void) } /* Assumes that all pkeys other than 'pkey' are unallocated */ -void test_pkey_alloc_exhaust(int *ptr, u16 pkey) +static void test_pkey_alloc_exhaust(int *ptr, u16 pkey) { int err; int allocated_pkeys[NR_PKEYS] = {0}; @@ -1236,7 +1236,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey) } } -void arch_force_pkey_reg_init(void) +static void arch_force_pkey_reg_init(void) { #if defined(__i386__) || defined(__x86_64__) /* arch */ u64 *buf; @@ -1275,7 +1275,7 @@ void arch_force_pkey_reg_init(void) * a long-running test that continually checks the pkey * register. */ -void test_pkey_init_state(int *ptr, u16 pkey) +static void test_pkey_init_state(int *ptr, u16 pkey) { int err; int allocated_pkeys[NR_PKEYS] = {0}; @@ -1313,7 +1313,7 @@ void test_pkey_init_state(int *ptr, u16 pkey) * have to call pkey_alloc() to use it first. Make sure that it * is usable. */ -void test_mprotect_with_pkey_0(int *ptr, u16 pkey) +static void test_mprotect_with_pkey_0(int *ptr, u16 pkey) { long size; int prot; @@ -1337,7 +1337,7 @@ void test_mprotect_with_pkey_0(int *ptr, u16 pkey) mprotect_pkey(ptr, size, prot, pkey); } -void test_ptrace_of_child(int *ptr, u16 pkey) +static void test_ptrace_of_child(int *ptr, u16 pkey) { __attribute__((__unused__)) int peek_result; pid_t child_pid; @@ -1413,7 +1413,7 @@ void test_ptrace_of_child(int *ptr, u16 pkey) free(plain_ptr_unaligned); } -void *get_pointer_to_instructions(void) +static void *get_pointer_to_instructions(void) { void *p1; @@ -1434,7 +1434,7 @@ void *get_pointer_to_instructions(void) return p1; } -void test_executing_on_unreadable_memory(int *ptr, u16 pkey) +static void test_executing_on_unreadable_memory(int *ptr, u16 pkey) { void *p1; int scratch; @@ -1466,7 +1466,7 @@ void test_executing_on_unreadable_memory(int *ptr, u16 pkey) pkey_assert(!ret); } -void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey) +static void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey) { void *p1; int scratch; @@ -1515,7 +1515,7 @@ void test_implicit_mprotect_exec_only_memory(int *ptr, u16 pkey) } #if defined(__i386__) || defined(__x86_64__) -void test_ptrace_modifies_pkru(int *ptr, u16 pkey) +static void test_ptrace_modifies_pkru(int *ptr, u16 pkey) { u32 new_pkru; pid_t child; @@ -1638,7 +1638,7 @@ void test_ptrace_modifies_pkru(int *ptr, u16 pkey) #endif #if defined(__aarch64__) -void test_ptrace_modifies_pkru(int *ptr, u16 pkey) +static void test_ptrace_modifies_pkru(int *ptr, u16 pkey) { pid_t child; int status, ret; @@ -1715,7 +1715,7 @@ void test_ptrace_modifies_pkru(int *ptr, u16 pkey) } #endif -void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey) +static void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey) { int size = PAGE_SIZE; int sret; @@ -1729,7 +1729,7 @@ void test_mprotect_pkey_on_unsupported_cpu(int *ptr, u16 pkey) pkey_assert(sret < 0); } -void (*pkey_tests[])(int *ptr, u16 pkey) = { +static void (*pkey_tests[])(int *ptr, u16 pkey) = { test_read_of_write_disabled_region, test_read_of_access_disabled_region, test_read_of_access_disabled_region_with_page_already_mapped, @@ -1755,7 +1755,7 @@ void (*pkey_tests[])(int *ptr, u16 pkey) = { #endif }; -void 
run_tests_once(void) +static void run_tests_once(void) { int *ptr; int prot = PROT_READ|PROT_WRITE; @@ -1789,7 +1789,7 @@ void run_tests_once(void) iteration_nr++; } -void pkey_setup_shadow(void) +static void pkey_setup_shadow(void) { shadow_pkey_reg = __read_pkey_reg(); } From 842d6b3b4ee871c8b56f2074eae8e455e285b06a Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:16 +0000 Subject: [PATCH 120/504] selftests/mm: use sys_pkey helpers consistently sys_pkey_alloc, sys_pkey_free and sys_mprotect_pkey are currently used in protections_keys.c, while pkey_sighandler_tests.c calls the libc wrappers directly (e.g. pkey_mprotect()). This is probably ok when using glibc (those symbols appeared a while ago), but Musl does not currently provide them. The logging in the helpers from pkey-helpers.h can also come in handy. Make things more consistent by using the sys_pkey helpers in pkey_sighandler_tests.c too. To that end their implementation is moved to a common .c file (pkey_util.c). This also enables calling is_pkeys_supported() outside of protections_keys.c, since it relies on sys_pkey_{alloc,free}. Link: https://lkml.kernel.org/r/20241209095019.1732120-12-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/Makefile | 4 +- tools/testing/selftests/mm/pkey-helpers.h | 2 + .../selftests/mm/pkey_sighandler_tests.c | 8 ++-- tools/testing/selftests/mm/pkey_util.c | 40 +++++++++++++++++++ tools/testing/selftests/mm/protection_keys.c | 35 ---------------- 5 files changed, 48 insertions(+), 41 deletions(-) create mode 100644 tools/testing/selftests/mm/pkey_util.c diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index 0d5d8f2f8652..57db36f5167d 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -153,8 +153,8 @@ TEST_FILES += write_hugetlb_memory.sh include ../lib.mk -$(TEST_GEN_PROGS): vm_util.c thp_settings.c -$(TEST_GEN_FILES): vm_util.c thp_settings.c +$(TEST_GEN_PROGS): vm_util.c thp_settings.c pkey_util.c +$(TEST_GEN_FILES): vm_util.c thp_settings.c pkey_util.c $(OUTPUT)/uffd-stress: uffd-common.c $(OUTPUT)/uffd-unit-tests: uffd-common.c diff --git a/tools/testing/selftests/mm/pkey-helpers.h b/tools/testing/selftests/mm/pkey-helpers.h index 6f0ab7b42738..f080e97b39be 100644 --- a/tools/testing/selftests/mm/pkey-helpers.h +++ b/tools/testing/selftests/mm/pkey-helpers.h @@ -89,6 +89,8 @@ extern void abort_hooks(void); int sys_pkey_alloc(unsigned long flags, unsigned long init_val); int sys_pkey_free(unsigned long pkey); +int sys_mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot, + unsigned long pkey); /* For functions called from protection_keys.c only */ noinline int read_ptr(int *ptr); diff --git a/tools/testing/selftests/mm/pkey_sighandler_tests.c b/tools/testing/selftests/mm/pkey_sighandler_tests.c index 425da9556867..63443b75f49e 100644 --- a/tools/testing/selftests/mm/pkey_sighandler_tests.c +++ b/tools/testing/selftests/mm/pkey_sighandler_tests.c @@ -311,8 +311,8 @@ static void test_sigsegv_handler_with_different_pkey_for_stack(void) __write_pkey_reg(pkey_reg); /* Protect the new stack with MPK 1 */ - pkey = pkey_alloc(0, 0); - pkey_mprotect(stack, STACK_SIZE, PROT_READ | PROT_WRITE, pkey); + pkey = sys_pkey_alloc(0, 0); + sys_mprotect_pkey(stack, STACK_SIZE, PROT_READ | PROT_WRITE, pkey); /* Set up 
alternate signal stack that will use the default MPK */ sigstack.ss_sp = mmap(0, STACK_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, @@ -484,8 +484,8 @@ static void test_pkru_sigreturn(void) __write_pkey_reg(pkey_reg); /* Protect the stack with MPK 2 */ - pkey = pkey_alloc(0, 0); - pkey_mprotect(stack, STACK_SIZE, PROT_READ | PROT_WRITE, pkey); + pkey = sys_pkey_alloc(0, 0); + sys_mprotect_pkey(stack, STACK_SIZE, PROT_READ | PROT_WRITE, pkey); /* Set up alternate signal stack that will use the default MPK */ sigstack.ss_sp = mmap(0, STACK_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, diff --git a/tools/testing/selftests/mm/pkey_util.c b/tools/testing/selftests/mm/pkey_util.c new file mode 100644 index 000000000000..ca4ad0d44ab2 --- /dev/null +++ b/tools/testing/selftests/mm/pkey_util.c @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include + +#include "pkey-helpers.h" + +int sys_pkey_alloc(unsigned long flags, unsigned long init_val) +{ + int ret = syscall(SYS_pkey_alloc, flags, init_val); + dprintf1("%s(flags=%lx, init_val=%lx) syscall ret: %d errno: %d\n", + __func__, flags, init_val, ret, errno); + return ret; +} + +int sys_pkey_free(unsigned long pkey) +{ + int ret = syscall(SYS_pkey_free, pkey); + dprintf1("%s(pkey=%ld) syscall ret: %d\n", __func__, pkey, ret); + return ret; +} + +int sys_mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot, + unsigned long pkey) +{ + int sret; + + dprintf2("%s(0x%p, %zx, prot=%lx, pkey=%lx)\n", __func__, + ptr, size, orig_prot, pkey); + + errno = 0; + sret = syscall(__NR_pkey_mprotect, ptr, size, orig_prot, pkey); + if (errno) { + dprintf2("SYS_mprotect_key sret: %d\n", sret); + dprintf2("SYS_mprotect_key prot: 0x%lx\n", orig_prot); + dprintf2("SYS_mprotect_key failed, errno: %d\n", errno); + if (DEBUG_LEVEL >= 2) + perror("SYS_mprotect_pkey"); + } + return sret; +} diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c index f43cf3b75d8e..3688571e6b39 100644 --- a/tools/testing/selftests/mm/protection_keys.c +++ b/tools/testing/selftests/mm/protection_keys.c @@ -460,34 +460,6 @@ static pid_t fork_lazy_child(void) return forkret; } -int sys_mprotect_pkey(void *ptr, size_t size, unsigned long orig_prot, - unsigned long pkey) -{ - int sret; - - dprintf2("%s(0x%p, %zx, prot=%lx, pkey=%lx)\n", __func__, - ptr, size, orig_prot, pkey); - - errno = 0; - sret = syscall(__NR_pkey_mprotect, ptr, size, orig_prot, pkey); - if (errno) { - dprintf2("SYS_mprotect_key sret: %d\n", sret); - dprintf2("SYS_mprotect_key prot: 0x%lx\n", orig_prot); - dprintf2("SYS_mprotect_key failed, errno: %d\n", errno); - if (DEBUG_LEVEL >= 2) - perror("SYS_mprotect_pkey"); - } - return sret; -} - -int sys_pkey_alloc(unsigned long flags, unsigned long init_val) -{ - int ret = syscall(SYS_pkey_alloc, flags, init_val); - dprintf1("%s(flags=%lx, init_val=%lx) syscall ret: %d errno: %d\n", - __func__, flags, init_val, ret, errno); - return ret; -} - static int alloc_pkey(void) { int ret; @@ -534,13 +506,6 @@ static int alloc_pkey(void) return ret; } -int sys_pkey_free(unsigned long pkey) -{ - int ret = syscall(SYS_pkey_free, pkey); - dprintf1("%s(pkey=%ld) syscall ret: %d\n", __func__, pkey, ret); - return ret; -} - /* * I had a bug where pkey bits could be set by mprotect() but * not cleared. 
This ensures we get lots of random bit sets From a2643d42c3e903a3f3a6caacbf1e7bb78b78a5c4 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 16 Dec 2024 09:28:49 +0000 Subject: [PATCH 121/504] selftests/mm: fix dependency on pkey_util.c The pkey* files can only be built on architectures that support pkeys (pkey-helpers.h #error's otherwise). Adding pkey_util.c as dependency to all $(TEST_GEN_FILES) is therefore a bad idea. Make it a dependency of the pkeys tests only. Those tests are built in 32/64-bit variants on x86_64 so we need to add an explicit dependency there as well. Link: https://lkml.kernel.org/r/20241216092849.2140850-1-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Alexander Gordeev Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/Makefile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index 57db36f5167d..f430c4303c0d 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -153,16 +153,20 @@ TEST_FILES += write_hugetlb_memory.sh include ../lib.mk -$(TEST_GEN_PROGS): vm_util.c thp_settings.c pkey_util.c -$(TEST_GEN_FILES): vm_util.c thp_settings.c pkey_util.c +$(TEST_GEN_PROGS): vm_util.c thp_settings.c +$(TEST_GEN_FILES): vm_util.c thp_settings.c $(OUTPUT)/uffd-stress: uffd-common.c $(OUTPUT)/uffd-unit-tests: uffd-common.c +$(OUTPUT)/protection_keys: pkey_util.c +$(OUTPUT)/pkey_sighandler_tests: pkey_util.c ifeq ($(ARCH),x86_64) BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32)) BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64)) +$(BINARIES_32) $(BINARIES_64): pkey_util.c + define gen-target-rule-32 $(1) $(1)_32: $(OUTPUT)/$(1)_32 .PHONY: $(1) $(1)_32 From 5a14c7ad08569f013af548300373a9788f233a75 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:17 +0000 Subject: [PATCH 122/504] selftests/mm: rename pkey register macro PKEY_ALLOW_ALL is meant to represent the pkey register value that allows all accesses (enables all pkeys). However its current naming suggests that the value applies to *one* key only (like PKEY_DISABLE_ACCESS for instance). Rename PKEY_ALLOW_ALL to PKEY_REG_ALLOW_ALL to avoid such misunderstanding. This is consistent with the PKEY_REG_ALLOW_NONE macro introduced by commit 6e182dc9f268 ("selftests/mm: Use generic pkey register manipulation"). 
Link: https://lkml.kernel.org/r/20241209095019.1732120-13-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pkey-arm64.h | 2 +- tools/testing/selftests/mm/protection_keys.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/mm/pkey-arm64.h b/tools/testing/selftests/mm/pkey-arm64.h index 9897e31f16dd..8e9685e03c44 100644 --- a/tools/testing/selftests/mm/pkey-arm64.h +++ b/tools/testing/selftests/mm/pkey-arm64.h @@ -30,7 +30,7 @@ #define NR_PKEYS 8 #define NR_RESERVED_PKEYS 1 /* pkey-0 */ -#define PKEY_ALLOW_ALL 0x77777777 +#define PKEY_REG_ALLOW_ALL 0x77777777 #define PKEY_REG_ALLOW_NONE 0x0 #define PKEY_BITS_PER_PKEY 4 diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c index 3688571e6b39..a4683f2476f2 100644 --- a/tools/testing/selftests/mm/protection_keys.c +++ b/tools/testing/selftests/mm/protection_keys.c @@ -396,7 +396,7 @@ static void signal_handler(int signum, siginfo_t *si, void *vucontext) /* restore access and let the faulting instruction continue */ pkey_access_allow(siginfo_pkey); #elif defined(__aarch64__) - aarch64_write_signal_pkey(uctxt, PKEY_ALLOW_ALL); + aarch64_write_signal_pkey(uctxt, PKEY_REG_ALLOW_ALL); #endif /* arch */ pkey_faults++; dprintf1("<<<<==================================================\n"); @@ -842,7 +842,7 @@ void expected_pkey_fault(int pkey) */ if (__read_pkey_reg() != 0) #elif defined(__aarch64__) - if (__read_pkey_reg() != PKEY_ALLOW_ALL) + if (__read_pkey_reg() != PKEY_REG_ALLOW_ALL) #else if (__read_pkey_reg() != shadow_pkey_reg) #endif /* arch */ From 2ca42af27c456d31da82f142d7b48556036a9dd3 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:18 +0000 Subject: [PATCH 123/504] selftests/mm: skip pkey_sighandler_tests if support is missing The pkey_sighandler_tests are bound to fail if either the kernel or CPU doesn't support pkeys. Skip the tests if pkeys support is missing. Link: https://lkml.kernel.org/r/20241209095019.1732120-14-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pkey_sighandler_tests.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/testing/selftests/mm/pkey_sighandler_tests.c b/tools/testing/selftests/mm/pkey_sighandler_tests.c index 63443b75f49e..4f3679d11c05 100644 --- a/tools/testing/selftests/mm/pkey_sighandler_tests.c +++ b/tools/testing/selftests/mm/pkey_sighandler_tests.c @@ -535,6 +535,9 @@ int main(int argc, char *argv[]) ksft_print_header(); ksft_set_plan(ARRAY_SIZE(pkey_tests)); + if (!is_pkeys_supported()) + ksft_exit_skip("pkeys not supported\n"); + for (i = 0; i < ARRAY_SIZE(pkey_tests); i++) (*pkey_tests[i])(); From 4e4f220926c8af76051bad92306ca3c3d6704166 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Mon, 9 Dec 2024 09:50:19 +0000 Subject: [PATCH 124/504] selftests/mm: remove X permission from sigaltstack mapping There is no reason why the alternate signal stack should be mapped as RWX. Map it as RW instead. 
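Nothing is ever executed from the alternate signal stack -- it only holds the signal frame -- so read/write protection is sufficient. A condensed sketch of the setup after this change (the sigaltstack() registration is implied by the surrounding test code and is not part of this diff):

	/* sketch: the alternate signal stack is plain RW anonymous memory */
	stack_t sigstack;

	sigstack.ss_sp = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	sigstack.ss_flags = 0;
	sigstack.ss_size = STACK_SIZE;
	sigaltstack(&sigstack, NULL);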
Link: https://lkml.kernel.org/r/20241209095019.1732120-15-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Cc: Aruna Ramakrishna Cc: Catalin Marinas Cc: Dave Hansen Cc: Joey Gouly Cc: Keith Lucas Cc: Ryan Roberts Cc: Shuah Khan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/pkey_sighandler_tests.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/mm/pkey_sighandler_tests.c b/tools/testing/selftests/mm/pkey_sighandler_tests.c index 4f3679d11c05..1ac8c8809880 100644 --- a/tools/testing/selftests/mm/pkey_sighandler_tests.c +++ b/tools/testing/selftests/mm/pkey_sighandler_tests.c @@ -315,7 +315,7 @@ static void test_sigsegv_handler_with_different_pkey_for_stack(void) sys_mprotect_pkey(stack, STACK_SIZE, PROT_READ | PROT_WRITE, pkey); /* Set up alternate signal stack that will use the default MPK */ - sigstack.ss_sp = mmap(0, STACK_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, + sigstack.ss_sp = mmap(0, STACK_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); sigstack.ss_flags = 0; sigstack.ss_size = STACK_SIZE; @@ -488,7 +488,7 @@ static void test_pkru_sigreturn(void) sys_mprotect_pkey(stack, STACK_SIZE, PROT_READ | PROT_WRITE, pkey); /* Set up alternate signal stack that will use the default MPK */ - sigstack.ss_sp = mmap(0, STACK_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, + sigstack.ss_sp = mmap(0, STACK_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); sigstack.ss_flags = 0; sigstack.ss_size = STACK_SIZE; From dac0e1a7401729001bae8ee710dba95628108c25 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 10 Dec 2024 13:50:26 -0800 Subject: [PATCH 125/504] samples: add a skeleton of a sample DAMON module for working set size estimation Patch series "mm/damon: add sample modules". Implement a proactive cold memory regions reclaiming logic of prcl sample module using DAMOS. The logic treats memory regions that not accessed at all for five or more seconds as cold, and reclaim those as soon as found. This patch (of 5): Add a skeleton for a sample DAMON static module that can be used for estimating working set size of a given process. Note that it is a static module since DAMON is not exporting symbols to loadable modules for now. It exposes two module parameters, namely 'pid' and 'enable'. 'pid' will specify the process that the module will estimate the working set size of. 'enable' will receive whether to start or stop the estimation. Because this is just a skeleton, the parameters do nothing, though. The functionalities will be implemented by following commits. 
Link: https://lkml.kernel.org/r/20241210215030.85675-1-sj@kernel.org Link: https://lkml.kernel.org/r/20241210215030.85675-2-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- MAINTAINERS | 1 + samples/Kconfig | 2 ++ samples/Makefile | 1 + samples/damon/Kconfig | 17 +++++++++++ samples/damon/Makefile | 3 ++ samples/damon/wsse.c | 65 ++++++++++++++++++++++++++++++++++++++++++ 6 files changed, 89 insertions(+) create mode 100644 samples/damon/Kconfig create mode 100644 samples/damon/Makefile create mode 100644 samples/damon/wsse.c diff --git a/MAINTAINERS b/MAINTAINERS index 30cbc3d44cd5..46e4ccf18c99 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6330,6 +6330,7 @@ F: Documentation/mm/damon/ F: include/linux/damon.h F: include/trace/events/damon.h F: mm/damon/ +F: samples/damon/ F: tools/testing/selftests/damon/ DAVICOM FAST ETHERNET (DMFE) NETWORK DRIVER diff --git a/samples/Kconfig b/samples/Kconfig index b288d9991d27..8d5a36f0e5d6 100644 --- a/samples/Kconfig +++ b/samples/Kconfig @@ -293,6 +293,8 @@ config SAMPLE_CGROUP source "samples/rust/Kconfig" +source "samples/damon/Kconfig" + endif # SAMPLES config HAVE_SAMPLE_FTRACE_DIRECT diff --git a/samples/Makefile b/samples/Makefile index b85fa64390c5..726bb5293486 100644 --- a/samples/Makefile +++ b/samples/Makefile @@ -39,3 +39,4 @@ obj-$(CONFIG_SAMPLE_KMEMLEAK) += kmemleak/ obj-$(CONFIG_SAMPLE_CORESIGHT_SYSCFG) += coresight/ obj-$(CONFIG_SAMPLE_FPROBE) += fprobe/ obj-$(CONFIG_SAMPLES_RUST) += rust/ +obj-$(CONFIG_SAMPLE_DAMON_WSSE) += damon/ diff --git a/samples/damon/Kconfig b/samples/damon/Kconfig new file mode 100644 index 000000000000..b799e01345c8 --- /dev/null +++ b/samples/damon/Kconfig @@ -0,0 +1,17 @@ +# SPDX-License-Identifier: GPL-2.0 + +menu "DAMON Samples" + +config SAMPLE_DAMON_WSSE + bool "DAMON sameple module for working set size estimation" + depends on DAMON && DAMON_VADDR + help + This builds DAMON sample module for working set size estimation. + + The module receives a pid, monitor access to the virtual address + space of the process, estimate working set size of the process, and + repeatedly prints the size on the kernel log. + + If unsure, say N. + +endmenu diff --git a/samples/damon/Makefile b/samples/damon/Makefile new file mode 100644 index 000000000000..ccbe93d40130 --- /dev/null +++ b/samples/damon/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_SAMPLE_DAMON_WSSE) += wsse.o diff --git a/samples/damon/wsse.c b/samples/damon/wsse.c new file mode 100644 index 000000000000..7f2cb76a1a70 --- /dev/null +++ b/samples/damon/wsse.c @@ -0,0 +1,65 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * working set size estimation: monitor access pattern of given process and + * print estimated working set size (total size of regions that showing some + * access). 
+ */ + +#define pr_fmt(fmt) "damon_sample_wsse: " fmt + +#include +#include +#include +#include + +static int target_pid __read_mostly; +module_param(target_pid, int, 0600); + +static int damon_sample_wsse_enable_store( + const char *val, const struct kernel_param *kp); + +static const struct kernel_param_ops enable_param_ops = { + .set = damon_sample_wsse_enable_store, + .get = param_get_bool, +}; + +static bool enable __read_mostly; +module_param_cb(enable, &enable_param_ops, &enable, 0600); +MODULE_PARM_DESC(enable, "Enable or disable DAMON_SAMPLE_WSSE"); + +static int damon_sample_wsse_start(void) +{ + pr_info("start\n"); + return 0; +} + +static void damon_sample_wsse_stop(void) +{ + pr_info("stop\n"); +} + +static int damon_sample_wsse_enable_store( + const char *val, const struct kernel_param *kp) +{ + bool enabled = enable; + int err; + + err = kstrtobool(val, &enable); + if (err) + return err; + + if (enable == enabled) + return 0; + + if (enable) + return damon_sample_wsse_start(); + damon_sample_wsse_stop(); + return 0; +} + +static int __init damon_sample_wsse_init(void) +{ + return 0; +} + +module_init(damon_sample_wsse_init); From ffb747fcf54b538df74c1e44f03eb4bfe6d6ac4b Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 10 Dec 2024 13:50:27 -0800 Subject: [PATCH 126/504] samples/damon/wsse: start and stop DAMON as the user requests Start running DAMON to monitor accesses of a process that the user specified via 'target_pid' parameter, when 'y' is passed to 'enable' parameter. Stop running DAMON when 'n' is passed to 'enable' parameter. Estimating the working set size from DAMON's monitoring results and reporting it to the user will be implemented by the following commit. Link: https://lkml.kernel.org/r/20241210215030.85675-3-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- samples/damon/wsse.c | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/samples/damon/wsse.c b/samples/damon/wsse.c index 7f2cb76a1a70..2ba0c91baad9 100644 --- a/samples/damon/wsse.c +++ b/samples/damon/wsse.c @@ -27,15 +27,48 @@ static bool enable __read_mostly; module_param_cb(enable, &enable_param_ops, &enable, 0600); MODULE_PARM_DESC(enable, "Enable or disable DAMON_SAMPLE_WSSE"); +static struct damon_ctx *ctx; +static struct pid *target_pidp; + static int damon_sample_wsse_start(void) { + struct damon_target *target; + pr_info("start\n"); - return 0; + + ctx = damon_new_ctx(); + if (!ctx) + return -ENOMEM; + if (damon_select_ops(ctx, DAMON_OPS_VADDR)) { + damon_destroy_ctx(ctx); + return -EINVAL; + } + + target = damon_new_target(); + if (!target) { + damon_destroy_ctx(ctx); + return -ENOMEM; + } + damon_add_target(ctx, target); + target_pidp = find_get_pid(target_pid); + if (!target_pidp) { + damon_destroy_ctx(ctx); + return -EINVAL; + } + target->pid = target_pidp; + + return damon_start(&ctx, 1, true); } static void damon_sample_wsse_stop(void) { pr_info("stop\n"); + if (ctx) { + damon_stop(&ctx, 1); + damon_destroy_ctx(ctx); + } + if (target_pidp) + put_pid(target_pidp); } static int damon_sample_wsse_enable_store( From 14688bd8b2cdacbd503671ca54b1503d7255b654 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 10 Dec 2024 13:50:28 -0800 Subject: [PATCH 127/504] samples/damon/wsse: implement working set size estimation and logging Implement the DAMON-based working set size estimation logic. 
The logic iterates memory regions in DAMON-generated access pattern snapshot for every aggregation interval and get the total sum of the size of any region having one or higher 'nr_accesses' count. That is, it assumes any region having one or higher 'nr_accesses' to be a part of the working set. The estimated value is reported to the user by printing it to the kernel log. Link: https://lkml.kernel.org/r/20241210215030.85675-4-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- samples/damon/wsse.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/samples/damon/wsse.c b/samples/damon/wsse.c index 2ba0c91baad9..11be25803274 100644 --- a/samples/damon/wsse.c +++ b/samples/damon/wsse.c @@ -30,6 +30,23 @@ MODULE_PARM_DESC(enable, "Enable or disable DAMON_SAMPLE_WSSE"); static struct damon_ctx *ctx; static struct pid *target_pidp; +static int damon_sample_wsse_after_aggregate(struct damon_ctx *c) +{ + struct damon_target *t; + + damon_for_each_target(t, c) { + struct damon_region *r; + unsigned long wss = 0; + + damon_for_each_region(r, t) { + if (r->nr_accesses > 0) + wss += r->ar.end - r->ar.start; + } + pr_info("wss: %lu\n", wss); + } + return 0; +} + static int damon_sample_wsse_start(void) { struct damon_target *target; @@ -57,6 +74,7 @@ static int damon_sample_wsse_start(void) } target->pid = target_pidp; + ctx->callback.after_aggregation = damon_sample_wsse_after_aggregate; return damon_start(&ctx, 1, true); } From b5d5b9094de761c618eb5e2f1c68b199fc00c7a2 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 10 Dec 2024 13:50:29 -0800 Subject: [PATCH 128/504] samples/damon: introduce a skeleton of a smaple DAMON module for proactive reclamation DAMON is not only for monitoring of access patterns, but also for access-aware system operations. For the system operations, DAMON provides a feature called DAMOS (Data Access Monitoring-based Operation Schemes). There is no sample API usage of DAMOS, though. Copy the working set size estimation sample modules with changed names of the module and symbols, to use it as a skeleton for a sample module showing the DAMOS API usage. The following commit will make it proactively reclaim cold memory of the given process, using DAMOS. Link: https://lkml.kernel.org/r/20241210215030.85675-5-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- samples/Makefile | 1 + samples/damon/Kconfig | 13 +++++ samples/damon/Makefile | 1 + samples/damon/prcl.c | 116 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 131 insertions(+) create mode 100644 samples/damon/prcl.c diff --git a/samples/Makefile b/samples/Makefile index 726bb5293486..5af6bb8afb07 100644 --- a/samples/Makefile +++ b/samples/Makefile @@ -40,3 +40,4 @@ obj-$(CONFIG_SAMPLE_CORESIGHT_SYSCFG) += coresight/ obj-$(CONFIG_SAMPLE_FPROBE) += fprobe/ obj-$(CONFIG_SAMPLES_RUST) += rust/ obj-$(CONFIG_SAMPLE_DAMON_WSSE) += damon/ +obj-$(CONFIG_SAMPLE_DAMON_PRCL) += damon/ diff --git a/samples/damon/Kconfig b/samples/damon/Kconfig index b799e01345c8..63f6dcd71daa 100644 --- a/samples/damon/Kconfig +++ b/samples/damon/Kconfig @@ -14,4 +14,17 @@ config SAMPLE_DAMON_WSSE If unsure, say N. +config SAMPLE_DAMON_PRCL + bool "DAMON sameple module for access-aware proactive reclamation" + depends on DAMON && DAMON_VADDR + help + This builds DAMON sample module for access-aware proactive + reclamation. 
+ + The module receives a pid, monitor access to the virtual address + space of the process, find memory regions that not accessed, and + proactively reclaim the regions. + + If unsure, say N. + endmenu diff --git a/samples/damon/Makefile b/samples/damon/Makefile index ccbe93d40130..7f155143f237 100644 --- a/samples/damon/Makefile +++ b/samples/damon/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_SAMPLE_DAMON_WSSE) += wsse.o +obj-$(CONFIG_SAMPLE_DAMON_PRCL) += prcl.o diff --git a/samples/damon/prcl.c b/samples/damon/prcl.c new file mode 100644 index 000000000000..b34b9bfed532 --- /dev/null +++ b/samples/damon/prcl.c @@ -0,0 +1,116 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * proactive reclamation: monitor access pattern of a given process, find + * regiosn that seems not accessed, and proactively page out the regions. + */ + +#define pr_fmt(fmt) "damon_sample_prcl: " fmt + +#include +#include +#include +#include + +static int target_pid __read_mostly; +module_param(target_pid, int, 0600); + +static int damon_sample_prcl_enable_store( + const char *val, const struct kernel_param *kp); + +static const struct kernel_param_ops enable_param_ops = { + .set = damon_sample_prcl_enable_store, + .get = param_get_bool, +}; + +static bool enable __read_mostly; +module_param_cb(enable, &enable_param_ops, &enable, 0600); +MODULE_PARM_DESC(enable, "Enable of disable DAMON_SAMPLE_WSSE"); + +static struct damon_ctx *ctx; +static struct pid *target_pidp; + +static int damon_sample_prcl_after_aggregate(struct damon_ctx *c) +{ + struct damon_target *t; + + damon_for_each_target(t, c) { + struct damon_region *r; + unsigned long wss = 0; + + damon_for_each_region(r, t) { + if (r->nr_accesses > 0) + wss += r->ar.end - r->ar.start; + } + pr_info("wss: %lu\n", wss); + } + return 0; +} + +static int damon_sample_prcl_start(void) +{ + struct damon_target *target; + + pr_info("start\n"); + + ctx = damon_new_ctx(); + if (!ctx) + return -ENOMEM; + if (damon_select_ops(ctx, DAMON_OPS_VADDR)) { + damon_destroy_ctx(ctx); + return -EINVAL; + } + + target = damon_new_target(); + if (!target) { + damon_destroy_ctx(ctx); + return -ENOMEM; + } + damon_add_target(ctx, target); + target_pidp = find_get_pid(target_pid); + if (!target_pidp) { + damon_destroy_ctx(ctx); + return -EINVAL; + } + target->pid = target_pidp; + + ctx->callback.after_aggregation = damon_sample_prcl_after_aggregate; + + return damon_start(&ctx, 1, true); +} + +static void damon_sample_prcl_stop(void) +{ + pr_info("stop\n"); + if (ctx) { + damon_stop(&ctx, 1); + damon_destroy_ctx(ctx); + } + if (target_pidp) + put_pid(target_pidp); +} + +static int damon_sample_prcl_enable_store( + const char *val, const struct kernel_param *kp) +{ + bool enabled = enable; + int err; + + err = kstrtobool(val, &enable); + if (err) + return err; + + if (enable == enabled) + return 0; + + if (enable) + return damon_sample_prcl_start(); + damon_sample_prcl_stop(); + return 0; +} + +static int __init damon_sample_prcl_init(void) +{ + return 0; +} + +module_init(damon_sample_prcl_init); From a027ae8fbab4bff5f7f60d214898985ec44908b1 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Tue, 10 Dec 2024 13:50:30 -0800 Subject: [PATCH 129/504] samples/damon/prcl: implement schemes setup Implement a proactive cold memory regions reclaiming logic of prcl sample module using DAMOS. The logic treats memory regions that not accessed at all for five or more seconds as cold, and reclaim those as soon as found. 
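One detail worth spelling out: DAMOS age thresholds are expressed in aggregation intervals, so with DAMON's default 100ms aggregation interval the min_age_region of 50 used below corresponds to roughly five seconds without any observed access. A minimal sketch of the cold-region pattern, using only the fields set in the hunk below:

	/* sketch: "no accesses observed for >= 50 aggregation intervals (~5s)" */
	struct damos_access_pattern cold = {
		.min_sz_region   = PAGE_SIZE,
		.max_sz_region   = ULONG_MAX,
		.min_nr_accesses = 0,	/* not accessed at all ... */
		.max_nr_accesses = 0,
		.min_age_region  = 50,	/* ... for at least ~5 seconds */
		.max_age_region  = UINT_MAX,
	};

The scheme then pairs this pattern with DAMOS_PAGEOUT, so matching regions are reclaimed as soon as they are found.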
Link: https://lkml.kernel.org/r/20241210215030.85675-6-sj@kernel.org Signed-off-by: SeongJae Park Signed-off-by: Andrew Morton --- samples/damon/prcl.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/samples/damon/prcl.c b/samples/damon/prcl.c index b34b9bfed532..c3acbdab7a62 100644 --- a/samples/damon/prcl.c +++ b/samples/damon/prcl.c @@ -49,6 +49,7 @@ static int damon_sample_prcl_after_aggregate(struct damon_ctx *c) static int damon_sample_prcl_start(void) { struct damon_target *target; + struct damos *scheme; pr_info("start\n"); @@ -75,6 +76,25 @@ static int damon_sample_prcl_start(void) ctx->callback.after_aggregation = damon_sample_prcl_after_aggregate; + scheme = damon_new_scheme( + &(struct damos_access_pattern) { + .min_sz_region = PAGE_SIZE, + .max_sz_region = ULONG_MAX, + .min_nr_accesses = 0, + .max_nr_accesses = 0, + .min_age_region = 50, + .max_age_region = UINT_MAX}, + DAMOS_PAGEOUT, + 0, + &(struct damos_quota){}, + &(struct damos_watermarks){}, + NUMA_NO_NODE); + if (!scheme) { + damon_destroy_ctx(ctx); + return -ENOMEM; + } + damon_set_schemes(ctx, &scheme, 1); + return damon_start(&ctx, 1, true); } From 69492f3c547fed769e12248b0223823ca9487877 Mon Sep 17 00:00:00 2001 From: Hyeonggon Yoo <42.hyeyoo@gmail.com> Date: Tue, 10 Dec 2024 21:48:07 +0900 Subject: [PATCH 130/504] mm/migrate: remove slab checks in isolate_movable_page() Commit 8b8817630ae8 ("mm/migrate: make isolate_movable_page() skip slab pages") introduced slab checks to prevent mis-identification of slab pages as movable kernel pages. However, after Matthew's frozen folio series, these slab checks became unnecessary as the migration logic fails to increase the reference count for frozen slab folios. Remove these redundant slab checks and associated memory barriers. Link: https://lkml.kernel.org/r/20241210124807.8584-1-42.hyeyoo@gmail.com Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Acked-by: David Hildenbrand Reviewed-by: Vlastimil Babka Acked-by: David Rientjes Cc: Christoph Lameter Cc: Joonsoo Kim Cc: Matthew Wilcox Cc: Pekka Enberg Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/migrate.c | 8 -------- mm/slub.c | 4 ---- 2 files changed, 12 deletions(-) diff --git a/mm/migrate.c b/mm/migrate.c index e9e00d1d1d19..32cc8e0b1cce 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -68,10 +68,6 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode) if (!folio) goto out; - if (unlikely(folio_test_slab(folio))) - goto out_putfolio; - /* Pairs with smp_wmb() in slab freeing, e.g. SLUB's __free_slab() */ - smp_rmb(); /* * Check movable flag before taking the page lock because * we use non-atomic bitops on newly allocated page flags so @@ -79,10 +75,6 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode) */ if (unlikely(!__folio_test_movable(folio))) goto out_putfolio; - /* Pairs with smp_wmb() in slab allocation, e.g. 
SLUB's alloc_slab_page() */ - smp_rmb(); - if (unlikely(folio_test_slab(folio))) - goto out_putfolio; /* * As movable pages are not isolated from LRU lists, concurrent diff --git a/mm/slub.c b/mm/slub.c index a8e9b5106f4c..996691c137eb 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -2429,8 +2429,6 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node, slab = folio_slab(folio); __folio_set_slab(folio); - /* Make the flag visible before any changes to folio->mapping */ - smp_wmb(); if (folio_is_pfmemalloc(folio)) slab_set_pfmemalloc(slab); @@ -2651,8 +2649,6 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab) __slab_clear_pfmemalloc(slab); folio->mapping = NULL; - /* Make the mapping reset visible before clearing the flag */ - smp_wmb(); __folio_clear_slab(folio); mm_account_reclaimed_pages(pages); unaccount_slab(slab, order, s); From 9629e6b3be22836dc5eb394bad7bd3cd2cfe4f41 Mon Sep 17 00:00:00 2001 From: Joshua Hahn Date: Wed, 11 Dec 2024 12:39:49 -0800 Subject: [PATCH 131/504] memcg/hugetlb: introduce memcg_accounts_hugetlb Patch series "memcg/hugetlb: Rework memcg hugetlb charging", v3. This series cleans up memcg's hugetlb charging logic by deprecating the current memcg hugetlb try-charge + {commit, cancel} logic present in alloc_hugetlb_folio. A single function mem_cgroup_charge_hugetlb takes its place instead. This makes the code more maintainable by simplifying the error path and reduces memcg's footprint in hugetlb logic. This patch introduces a few changes in the hugetlb folio allocation error path: (a) Instead of having multiple return points, we consolidate them to two: one for reaching the memcg limit or running out of memory (-ENOMEM) and one for hugetlb allocation fails / limit being reached (-ENOSPC). (b) Previously, the memcg limit was checked before the folio is acquired, meaning the hugeTLB folio isn't acquired if the limit is reached. This patch performs the charging after the folio is reached, meaning if memcg's limit is reached, the acquired folio is freed right away. This patch builds on two earlier patch series: [2] which adds memcg hugeTLB counters, and [3] which deprecates charge moving and removes the last references to mem_cgroup_cancel_charge. The request for this cleanup can be found in [2]. [1] https://lore.kernel.org/all/20231006184629.155543-1-nphamcs@gmail.com/ [2] https://lore.kernel.org/all/20241101204402.1885383-1-joshua.hahnjy@gmail.com/ [3] https://lore.kernel.org/linux-mm/20241025012304.2473312-1-shakeel.butt@linux.dev/ This patch (of 3): This patch isolates the check for whether memcg accounts hugetlb. This condition can only be true if the memcg mount option memory_hugetlb_accounting is on, which includes hugetlb usage in memory.current. 
Link: https://lkml.kernel.org/r/20241211203951.764733-1-joshua.hahnjy@gmail.com Link: https://lkml.kernel.org/r/20241211203951.764733-2-joshua.hahnjy@gmail.com Signed-off-by: Joshua Hahn Acked-by: Shakeel Butt Reviewed-by: Nhat Pham Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/memcontrol.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7b3503d12aaf..b25eab9c933e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1448,6 +1448,18 @@ unsigned long memcg_page_state_local_output(struct mem_cgroup *memcg, int item) memcg_page_state_output_unit(item); } +#ifdef CONFIG_HUGETLB_PAGE +static bool memcg_accounts_hugetlb(void) +{ + return cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING; +} +#else /* CONFIG_HUGETLB_PAGE */ +static bool memcg_accounts_hugetlb(void) +{ + return false; +} +#endif /* CONFIG_HUGETLB_PAGE */ + static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) { int i; @@ -1469,7 +1481,7 @@ static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) #ifdef CONFIG_HUGETLB_PAGE if (unlikely(memory_stats[i].idx == NR_HUGETLB) && - !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)) + !memcg_accounts_hugetlb()) continue; #endif size = memcg_page_state_output(memcg, memory_stats[i].idx); @@ -4540,8 +4552,7 @@ int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, * but do not attempt to commit charge later (or cancel on error) either. */ if (mem_cgroup_disabled() || !memcg || - !cgroup_subsys_on_dfl(memory_cgrp_subsys) || - !(cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING)) + !cgroup_subsys_on_dfl(memory_cgrp_subsys) || !memcg_accounts_hugetlb()) return -EOPNOTSUPP; if (try_charge(memcg, gfp, nr_pages)) From 90e5785fcd48d5e60371570e2161e2ef25df42b5 Mon Sep 17 00:00:00 2001 From: Joshua Hahn Date: Wed, 11 Dec 2024 12:39:50 -0800 Subject: [PATCH 132/504] memcg/hugetlb: introduce mem_cgroup_charge_hugetlb This patch introduces mem_cgroup_charge_hugetlb which combines the logic of mem_cgroup_hugetlb_try_charge / mem_cgroup_hugetlb_commit_charge and removes the need for mem_cgroup_hugetlb_cancel_charge. It also reduces the footprint of memcg in hugetlb code and consolidates all memcg related error paths into one. 
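A condensed, caller's-eye view of the change in alloc_hugetlb_folio(), paraphrased from the hunks below rather than quoted verbatim (get_hugetlb_folio() is a hypothetical stand-in for the pool dequeue/allocation steps, not a real function):

	/* before: charge attempted before the folio exists, then commit or cancel */
	memcg = get_mem_cgroup_from_current();
	if (mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages) == -ENOMEM)
		return ERR_PTR(-ENOMEM);
	folio = get_hugetlb_folio();		/* hypothetical stand-in */
	if (!folio)
		mem_cgroup_cancel_charge(memcg, nr_pages);
	else
		mem_cgroup_commit_charge(folio, memcg);

	/* after: a single call once the folio is in hand; free it if memcg is full */
	folio = get_hugetlb_folio();		/* hypothetical stand-in */
	if (mem_cgroup_charge_hugetlb(folio, gfp) == -ENOMEM) {
		free_huge_folio(folio);
		return ERR_PTR(-ENOMEM);
	}

The only memcg-specific failure left is -ENOMEM after the folio has been obtained, which is what allows the error paths to collapse into one.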
Link: https://lkml.kernel.org/r/20241211203951.764733-3-joshua.hahnjy@gmail.com Signed-off-by: Joshua Hahn Acked-by: Shakeel Butt Reviewed-by: Nhat Pham Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 7 +++++++ mm/hugetlb.c | 35 ++++++++++++++--------------------- mm/memcontrol.c | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 55 insertions(+), 21 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index b28180269e75..387470bed399 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -649,6 +649,8 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, long nr_pages); +int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp); + int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry); @@ -1169,6 +1171,11 @@ static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, return 0; } +static inline int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp) +{ + return 0; +} + static inline int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry) { diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 21de25546a25..1672bfd85b4d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2981,21 +2981,13 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, struct hugepage_subpool *spool = subpool_vma(vma); struct hstate *h = hstate_vma(vma); struct folio *folio; - long map_chg, map_commit, nr_pages = pages_per_huge_page(h); + long map_chg, map_commit; long gbl_chg; - int memcg_charge_ret, ret, idx; + int ret, idx; struct hugetlb_cgroup *h_cg = NULL; - struct mem_cgroup *memcg; bool deferred_reserve; gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL; - memcg = get_mem_cgroup_from_current(); - memcg_charge_ret = mem_cgroup_hugetlb_try_charge(memcg, gfp, nr_pages); - if (memcg_charge_ret == -ENOMEM) { - mem_cgroup_put(memcg); - return ERR_PTR(-ENOMEM); - } - idx = hstate_index(h); /* * Examine the region/reserve map to determine if the process @@ -3003,12 +2995,8 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, * code of zero indicates a reservation exists (no change). */ map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); - if (map_chg < 0) { - if (!memcg_charge_ret) - mem_cgroup_cancel_charge(memcg, nr_pages); - mem_cgroup_put(memcg); + if (map_chg < 0) return ERR_PTR(-ENOMEM); - } /* * Processes that did not create the mapping will have no @@ -3106,10 +3094,18 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, } } - if (!memcg_charge_ret) - mem_cgroup_commit_charge(folio, memcg); + ret = mem_cgroup_charge_hugetlb(folio, gfp); + /* + * Unconditionally increment NR_HUGETLB here. If it turns out that + * mem_cgroup_charge_hugetlb failed, then immediately free the page and + * decrement NR_HUGETLB. 
+ */ lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h)); - mem_cgroup_put(memcg); + + if (ret == -ENOMEM) { + free_huge_folio(folio); + return ERR_PTR(-ENOMEM); + } return folio; @@ -3124,9 +3120,6 @@ out_subpool_put: hugepage_subpool_put_pages(spool, 1); out_end_reservation: vma_end_reservation(h, vma, addr); - if (!memcg_charge_ret) - mem_cgroup_cancel_charge(memcg, nr_pages); - mem_cgroup_put(memcg); return ERR_PTR(-ENOSPC); } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b25eab9c933e..c903e260a830 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4561,6 +4561,40 @@ int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, return 0; } +/** + * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio + * @folio: folio being charged + * @gfp: reclaim mode + * + * This function is called when allocating a huge page folio, after the page has + * already been obtained and charged to the appropriate hugetlb cgroup + * controller (if it is enabled). + * + * Returns ENOMEM if the memcg is already full. + * Returns 0 if either the charge was successful, or if we skip the charging. + */ +int mem_cgroup_charge_hugetlb(struct folio *folio, gfp_t gfp) +{ + struct mem_cgroup *memcg = get_mem_cgroup_from_current(); + int ret = 0; + + /* + * Even memcg does not account for hugetlb, we still want to update + * system-level stats via lruvec_stat_mod_folio. Return 0, and skip + * charging the memcg. + */ + if (mem_cgroup_disabled() || !memcg_accounts_hugetlb() || + !memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) + goto out; + + if (charge_memcg(folio, memcg, gfp)) + ret = -ENOMEM; + +out: + mem_cgroup_put(memcg); + return ret; +} + /** * mem_cgroup_swapin_charge_folio - Charge a newly allocated folio for swapin. * @folio: folio to charge. From 65925f59d4701d5d16e554f6a4ae08d6f1f6aea0 Mon Sep 17 00:00:00 2001 From: Joshua Hahn Date: Wed, 11 Dec 2024 12:39:51 -0800 Subject: [PATCH 133/504] memcg/hugetlb: remove memcg hugetlb try-commit-cancel protocol This patch fully removes the mem_cgroup_{try, commit, cancel}_charge functions, as well as their hugetlb variants. 
Link: https://lkml.kernel.org/r/20241211203951.764733-4-joshua.hahnjy@gmail.com Signed-off-by: Joshua Hahn Acked-by: Shakeel Butt Reviewed-by: Nhat Pham Cc: Roman Gushchin Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 22 ------------- mm/memcontrol.c | 65 ++------------------------------------ 2 files changed, 3 insertions(+), 84 deletions(-) diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 387470bed399..6e74b8254d9b 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -620,8 +620,6 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target, page_counter_read(&memcg->memory); } -void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg); - int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp); /** @@ -646,9 +644,6 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, return __mem_cgroup_charge(folio, mm, gfp); } -int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, - long nr_pages); - int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp); int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, @@ -679,7 +674,6 @@ static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios) __mem_cgroup_uncharge_folios(folios); } -void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages); void mem_cgroup_replace_folio(struct folio *old, struct folio *new); void mem_cgroup_migrate(struct folio *old, struct folio *new); @@ -1154,23 +1148,12 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target, return false; } -static inline void mem_cgroup_commit_charge(struct folio *folio, - struct mem_cgroup *memcg) -{ -} - static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) { return 0; } -static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, - gfp_t gfp, long nr_pages) -{ - return 0; -} - static inline int mem_cgroup_charge_hugetlb(struct folio* folio, gfp_t gfp) { return 0; @@ -1194,11 +1177,6 @@ static inline void mem_cgroup_uncharge_folios(struct folio_batch *folios) { } -static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, - unsigned int nr_pages) -{ -} - static inline void mem_cgroup_replace_folio(struct folio *old, struct folio *new) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c903e260a830..7ddbb2d12eb9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2383,21 +2383,6 @@ done_restock: return 0; } -/** - * mem_cgroup_cancel_charge() - cancel an uncommitted try_charge() call. - * @memcg: memcg previously charged. - * @nr_pages: number of pages previously charged. - */ -void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) -{ - if (mem_cgroup_is_root(memcg)) - return; - - page_counter_uncharge(&memcg->memory, nr_pages); - if (do_memsw_account()) - page_counter_uncharge(&memcg->memsw, nr_pages); -} - static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) { VM_BUG_ON_FOLIO(folio_memcg_charged(folio), folio); @@ -2411,18 +2396,6 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) folio->memcg_data = (unsigned long)memcg; } -/** - * mem_cgroup_commit_charge - commit a previously successful try_charge(). - * @folio: folio to commit the charge to. - * @memcg: memcg previously charged. 
- */ -void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg) -{ - css_get(&memcg->css); - commit_charge(folio, memcg); - memcg1_commit_charge(folio, memcg); -} - static inline void __mod_objcg_mlstate(struct obj_cgroup *objcg, struct pglist_data *pgdat, enum node_stat_item idx, int nr) @@ -4510,7 +4483,9 @@ static int charge_memcg(struct folio *folio, struct mem_cgroup *memcg, if (ret) goto out; - mem_cgroup_commit_charge(folio, memcg); + css_get(&memcg->css); + commit_charge(folio, memcg); + memcg1_commit_charge(folio, memcg); out: return ret; } @@ -4527,40 +4502,6 @@ int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp) return ret; } -/** - * mem_cgroup_hugetlb_try_charge - try to charge the memcg for a hugetlb folio - * @memcg: memcg to charge. - * @gfp: reclaim mode. - * @nr_pages: number of pages to charge. - * - * This function is called when allocating a huge page folio to determine if - * the memcg has the capacity for it. It does not commit the charge yet, - * as the hugetlb folio itself has not been obtained from the hugetlb pool. - * - * Once we have obtained the hugetlb folio, we can call - * mem_cgroup_commit_charge() to commit the charge. If we fail to obtain the - * folio, we should instead call mem_cgroup_cancel_charge() to undo the effect - * of try_charge(). - * - * Returns 0 on success. Otherwise, an error code is returned. - */ -int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, - long nr_pages) -{ - /* - * If hugetlb memcg charging is not enabled, do not fail hugetlb allocation, - * but do not attempt to commit charge later (or cancel on error) either. - */ - if (mem_cgroup_disabled() || !memcg || - !cgroup_subsys_on_dfl(memory_cgrp_subsys) || !memcg_accounts_hugetlb()) - return -EOPNOTSUPP; - - if (try_charge(memcg, gfp, nr_pages)) - return -ENOMEM; - - return 0; -} - /** * mem_cgroup_charge_hugetlb - charge the memcg for a hugetlb folio * @folio: folio being charged From 5dd40400bdb358fa08808fa3dc16f5b612c91e36 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 11 Dec 2024 10:53:15 +0000 Subject: [PATCH 134/504] MAINTAINERS: update MEMORY MAPPING section Update the MEMORY MAPPING section to contain VMA logic as it makes no sense to have these two sections separate. Additionally, add files which permit changes to the attributes and/or ranges spanned by memory mappings, in essence anything which might alter the output of /proc/$pid/[s]maps. This is necessarily fuzzy, as there is not quite as good separation of concerns as we would ideally like in the kernel. However each of these files interacts with the VMA and memory mapping logic in such a way as to be inseparatable from it, and it is important that they are maintained in conjunction with it. Link: https://lkml.kernel.org/r/20241211105315.21756-1-lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Acked-by: Vlastimil Babka Cc: David Hildenbrand Cc: Jann Horn Cc: Liam R. 
Howlett Signed-off-by: Andrew Morton --- MAINTAINERS | 23 ++++++++--------------- 1 file changed, 8 insertions(+), 15 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 46e4ccf18c99..6b66e4f4a629 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -15073,7 +15073,15 @@ L: linux-mm@kvack.org S: Maintained W: http://www.linux-mm.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm +F: mm/mlock.c F: mm/mmap.c +F: mm/mprotect.c +F: mm/mremap.c +F: mm/mseal.c +F: mm/vma.c +F: mm/vma.h +F: mm/vma_internal.h +F: tools/testing/vma/ MEMORY TECHNOLOGY DEVICES (MTD) M: Miquel Raynal @@ -25026,21 +25034,6 @@ F: include/uapi/linux/vsockmon.h F: net/vmw_vsock/ F: tools/testing/vsock/ -VMA -M: Andrew Morton -M: Liam R. Howlett -M: Lorenzo Stoakes -R: Vlastimil Babka -R: Jann Horn -L: linux-mm@kvack.org -S: Maintained -W: https://www.linux-mm.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm -F: mm/vma.c -F: mm/vma.h -F: mm/vma_internal.h -F: tools/testing/vma/ - VMALLOC M: Andrew Morton R: Uladzislau Rezki From 9c0f25c277d4a59ba63ae790d77ad71793e756f5 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 12 Dec 2024 11:48:41 +0000 Subject: [PATCH 135/504] mm: assert mmap write lock held on do_mmap(), mmap_region() Both of these functions can be invoked outside of mm, so it is probably a good idea to assert that the required lock is held. Will only have an impact if CONFIG_DEBUG_VM is set, otherwise this amounts to no change at all. Link: https://lkml.kernel.org/r/20241212114841.55185-1-lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Acked-by: Vlastimil Babka Cc: Jann Horn Cc: Liam R. Howlett Signed-off-by: Andrew Morton --- mm/mmap.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mm/mmap.c b/mm/mmap.c index df9154b15ef9..43ef85028921 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -291,6 +291,8 @@ unsigned long do_mmap(struct file *file, unsigned long addr, *populate = 0; + mmap_assert_write_locked(mm); + if (!len) return -EINVAL; @@ -1023,6 +1025,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long ret; bool writable_file_mapping = false; + mmap_assert_write_locked(current->mm); + /* Check to see if MDWE is applicable. */ if (map_deny_write_exec(vm_flags, vm_flags)) return -EACCES; From 86e1fe1bf579612c339a5497895fb2cea4dd36c7 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 12 Dec 2024 11:31:52 +0000 Subject: [PATCH 136/504] mm: add comments to do_mmap(), mmap_region() and vm_mmap() It isn't always entirely clear to users the difference between do_mmap(), mmap_region() and vm_mmap(), so add comments to clarify what's going on in each. This is compounded by the fact that we actually allow callers external to mm to invoke both do_mmap() and mmap_region() (!), the latter of which is really strictly speaking an internal memory mapping implementation detail. Link: https://lkml.kernel.org/r/20241212113152.28849-1-lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Cc: Jann Horn Cc: Liam R. 
Howlett Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/mmap.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++++++- mm/util.c | 17 ++++++++++++ 2 files changed, 95 insertions(+), 1 deletion(-) diff --git a/mm/mmap.c b/mm/mmap.c index 43ef85028921..aef835984b1c 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -277,8 +277,62 @@ static inline bool file_mmap_ok(struct file *file, struct inode *inode, return true; } -/* +/** + * do_mmap() - Perform a userland memory mapping into the current process + * address space of length @len with protection bits @prot, mmap flags @flags + * (from which VMA flags will be inferred), and any additional VMA flags to + * apply @vm_flags. If this is a file-backed mapping then the file is specified + * in @file and page offset into the file via @pgoff. + * + * This function does not perform security checks on the file and assumes, if + * @uf is non-NULL, the caller has provided a list head to track unmap events + * for userfaultfd @uf. + * + * It also simply indicates whether memory population is required by setting + * @populate, which must be non-NULL, expecting the caller to actually perform + * this task itself if appropriate. + * + * This function will invoke architecture-specific (and if provided and + * relevant, file system-specific) logic to determine the most appropriate + * unmapped area in which to place the mapping if not MAP_FIXED. + * + * Callers which require userland mmap() behaviour should invoke vm_mmap(), + * which is also exported for module use. + * + * Those which require this behaviour less security checks, userfaultfd and + * populate behaviour, and who handle the mmap write lock themselves, should + * call this function. + * + * Note that the returned address may reside within a merged VMA if an + * appropriate merge were to take place, so it doesn't necessarily specify the + * start of a VMA, rather only the start of a valid mapped range of length + * @len bytes, rounded down to the nearest page size. + * * The caller must write-lock current->mm->mmap_lock. + * + * @file: An optional struct file pointer describing the file which is to be + * mapped, if a file-backed mapping. + * @addr: If non-zero, hints at (or if @flags has MAP_FIXED set, specifies) the + * address at which to perform this mapping. See mmap (2) for details. Must be + * page-aligned. + * @len: The length of the mapping. Will be page-aligned and must be at least 1 + * page in size. + * @prot: Protection bits describing access required to the mapping. See mmap + * (2) for details. + * @flags: Flags specifying how the mapping should be performed, see mmap (2) + * for details. + * @vm_flags: VMA flags which should be set by default, or 0 otherwise. + * @pgoff: Page offset into the @file if file-backed, should be 0 otherwise. + * @populate: A pointer to a value which will be set to 0 if no population of + * the range is required, or the number of bytes to populate if it is. Must be + * non-NULL. See mmap (2) for details as to under what circumstances population + * of the range occurs. + * @uf: An optional pointer to a list head to track userfaultfd unmap events + * should unmapping events arise. If provided, it is up to the caller to manage + * this. + * + * Returns: Either an error, or the address at which the requested mapping has + * been performed. 
*/ unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, @@ -1018,6 +1072,29 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, return do_vmi_munmap(&vmi, mm, start, len, uf, false); } +/** + * mmap_region() - Actually perform the userland mapping of a VMA into + * current->mm with known, aligned and overflow-checked @addr and @len, and + * correctly determined VMA flags @vm_flags and page offset @pgoff. + * + * This is an internal memory management function, and should not be used + * directly. + * + * The caller must write-lock current->mm->mmap_lock. + * + * @file: If a file-backed mapping, a pointer to the struct file describing the + * file to be mapped, otherwise NULL. + * @addr: The page-aligned address at which to perform the mapping. + * @len: The page-aligned, non-zero, length of the mapping. + * @vm_flags: The VMA flags which should be applied to the mapping. + * @pgoff: If @file is specified, the page offset into the file, if not then + * the virtual page offset in memory of the anonymous mapping. + * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap + * events. + * + * Returns: Either an error, or the address at which the requested mapping has + * been performed. + */ unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, struct list_head *uf) diff --git a/mm/util.c b/mm/util.c index 60aa40f612b8..b6b9684a1438 100644 --- a/mm/util.c +++ b/mm/util.c @@ -582,6 +582,23 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr, return ret; } +/* + * Perform a userland memory mapping into the current process address space. See + * the comment for do_mmap() for more details on this operation in general. + * + * This differs from do_mmap() in that: + * + * a. An offset parameter is provided rather than pgoff, which is both checked + * for overflow and page alignment. + * b. mmap locking is performed on the caller's behalf. + * c. Userfaultfd unmap events and memory population are handled. + * + * This means that this function performs essentially the same work as if + * userland were invoking mmap (2). + * + * Returns either an error, or the address at which the requested mapping has + * been performed. + */ unsigned long vm_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flag, unsigned long offset) From ea40abb32f3cc860ffae018c7db35cf32edea65e Mon Sep 17 00:00:00 2001 From: Guo Weikang Date: Thu, 12 Dec 2024 18:10:00 +0800 Subject: [PATCH 137/504] mm/early_ioremap: add null pointer checks to prevent NULL-pointer dereference The early_ioremap interface can fail and return NULL in certain cases. To prevent NULL-pointer dereference crashes, fixed issues in the acpi_extlog and copy_early_mem interfaces, improving robustness when handling early memory. Link: https://lkml.kernel.org/r/20241212101004.1544070-1-guoweikang.kernel@gmail.com Signed-off-by: Guo Weikang Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Baoquan He Cc: Borislav Petkov (AMD) Cc: Dave Hansen Cc: H. Peter Anvin Cc: Ingo Molnar Cc: Ingo Molnar Cc: Jason A. Donenfeld Cc: Julian Stecklina Cc: Kevin Loughlin Cc: Len Brown Cc: Rafael J. Wysocki Cc: "Rafael J. 
Wysocki" Cc: Thomas Gleixner Cc: Xin Li (Intel) Signed-off-by: Andrew Morton --- arch/x86/kernel/setup.c | 5 ++++- drivers/acpi/acpi_extlog.c | 14 ++++++++++++++ include/asm-generic/early_ioremap.h | 2 +- mm/early_ioremap.c | 8 +++++++- 4 files changed, 26 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index f1fea506e20f..cebee310e200 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -259,6 +259,7 @@ static void __init relocate_initrd(void) u64 ramdisk_image = get_ramdisk_image(); u64 ramdisk_size = get_ramdisk_size(); u64 area_size = PAGE_ALIGN(ramdisk_size); + int ret = 0; /* We need to move the initrd down into directly mapped mem */ u64 relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0, @@ -272,7 +273,9 @@ static void __init relocate_initrd(void) printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n", relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); - copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size); + ret = copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size); + if (ret) + panic("Copy RAMDISK failed\n"); printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to" " [mem %#010llx-%#010llx]\n", diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c index ca87a0939135..f7fb7205028d 100644 --- a/drivers/acpi/acpi_extlog.c +++ b/drivers/acpi/acpi_extlog.c @@ -251,6 +251,10 @@ static int __init extlog_init(void) } extlog_l1_hdr = acpi_os_map_iomem(l1_dirbase, l1_hdr_size); + if (!extlog_l1_hdr) { + rc = -ENOMEM; + goto err_release_l1_hdr; + } l1_head = (struct extlog_l1_head *)extlog_l1_hdr; l1_size = l1_head->total_len; l1_percpu_entry = l1_head->entries; @@ -268,6 +272,10 @@ static int __init extlog_init(void) goto err; } extlog_l1_addr = acpi_os_map_iomem(l1_dirbase, l1_size); + if (!extlog_l1_addr) { + rc = -ENOMEM; + goto err_release_l1_dir; + } l1_entry_base = (u64 *)((u8 *)extlog_l1_addr + l1_hdr_size); /* remap elog table */ @@ -279,6 +287,10 @@ static int __init extlog_init(void) goto err_release_l1_dir; } elog_addr = acpi_os_map_iomem(elog_base, elog_size); + if (!elog_addr) { + rc = -ENOMEM; + goto err_release_elog; + } rc = -ENOMEM; /* allocate buffer to save elog record */ @@ -300,6 +312,8 @@ err_release_l1_dir: if (extlog_l1_addr) acpi_os_unmap_iomem(extlog_l1_addr, l1_size); release_mem_region(l1_dirbase, l1_size); +err_release_l1_hdr: + release_mem_region(l1_dirbase, l1_hdr_size); err: pr_warn(FW_BUG "Extended error log disabled because of problems parsing f/w tables\n"); return rc; diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h index 9d0479f50f97..5db59a1efb65 100644 --- a/include/asm-generic/early_ioremap.h +++ b/include/asm-generic/early_ioremap.h @@ -35,7 +35,7 @@ extern void early_ioremap_reset(void); /* * Early copy from unmapped memory to kernel mapped memory. */ -extern void copy_from_early_mem(void *dest, phys_addr_t src, +extern int copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size); #else diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index ce06b2884789..ff35b84a7b50 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c @@ -245,7 +245,10 @@ early_memremap_prot(resource_size_t phys_addr, unsigned long size, #define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) -void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size) +/* + * If no empty slot, handle that and return -ENOMEM. 
+ */ +int __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size) { unsigned long slop, clen; char *p; @@ -256,12 +259,15 @@ void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size) if (clen > MAX_MAP_CHUNK - slop) clen = MAX_MAP_CHUNK - slop; p = early_memremap(src & PAGE_MASK, clen + slop); + if (!p) + return -ENOMEM; memcpy(dest, p + slop, clen); early_memunmap(p, clen + slop); dest += clen; src += clen; size -= clen; } + return 0; } #else /* CONFIG_MMU */ From b08f065fbcacfd729b323e4e1a990a1e13d27416 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Sun, 8 Dec 2024 21:45:16 +0100 Subject: [PATCH 138/504] x86/kgdb: use IS_ERR_PCPU() macro Enable strict percpu address space checks via x86 named address space qualifiers. Percpu variables are declared in __seg_gs/__seg_fs named AS and kept named AS qualified until they are dereferenced via percpu accessor. This approach enables various compiler checks for cross-namespace variable assignments. Please note that current version of sparse doesn't know anything about __typeof_unqual__() operator. Avoid the usage of __typeof_unqual__() when sparse checking is active to prevent sparse errors with unknowing keyword. The proposed patch by Dan Carpenter to implement __typeof_unqual__() handling in sparse is located at: https://lore.kernel.org/lkml/5b8d0dee-8fb6-45af-ba6c-7f74aff9a4b8@stanley.mountain/ This patch (of 6): Use IS_ERR_PCPU() when checking the error pointer in the percpu address space. This macro adds intermediate cast to unsigned long when switching named address spaces. The patch will avoid future build errors due to pointer address space mismatch with enabled strict percpu address space checks. Link: https://lkml.kernel.org/r/20241208204708.3742696-1-ubizjak@gmail.com Link: https://lkml.kernel.org/r/20241208204708.3742696-2-ubizjak@gmail.com Signed-off-by: Uros Bizjak Acked-by: Nadav Amit Acked-by: Dennis Zhou Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Tejun Heo Cc: Christoph Lameter Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Brian Gerst Cc: Peter Zijlstra Cc: Arnd Bergmann Cc: Boqun Feng Cc: "David S. Miller" Cc: Denys Vlasenko Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Kent Overstreet Cc: Paolo Abeni Cc: Waiman Long Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/x86/kernel/kgdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 9c9faa1634fb..102641fd2172 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c @@ -655,7 +655,7 @@ void kgdb_arch_late(void) if (breakinfo[i].pev) continue; breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL); - if (IS_ERR((void * __force)breakinfo[i].pev)) { + if (IS_ERR_PCPU(breakinfo[i].pev)) { printk(KERN_ERR "kgdb: Could not allocate hw" "breakpoints\nDisabling the kernel debugger\n"); breakinfo[i].pev = NULL; From 108c71a15c153b4e868eaf56739d2fb1178542e4 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Sun, 8 Dec 2024 21:45:17 +0100 Subject: [PATCH 139/504] compiler.h: introduce TYPEOF_UNQUAL() macro Define TYPEOF_UNQUAL() to use __typeof_unqual__() as typeof operator when available, to return unqualified type of the expression. Current version of sparse doesn't know anything about __typeof_unqual__() operator. Avoid the usage of __typeof_unqual__() when sparse checking is active to prevent sparse errors with unknowing keyword. 
Link: https://lkml.kernel.org/r/20241208204708.3742696-3-ubizjak@gmail.com Signed-off-by: Uros Bizjak Acked-by: Nadav Amit Acked-by: Dennis Zhou Cc: Thomas Gleixner Cc: Tejun Heo Cc: Christoph Lameter Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Ingo Molnar Cc: Brian Gerst Cc: Denys Vlasenko Cc: "H. Peter Anvin" Cc: Peter Zijlstra Cc: Boqun Feng Cc: Borislav Petkov Cc: Dave Hansen Cc: "David S. Miller" Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Kent Overstreet Cc: Paolo Abeni Cc: Waiman Long Cc: Will Deacon Signed-off-by: Andrew Morton --- include/linux/compiler.h | 13 +++++++++++++ init/Kconfig | 3 +++ 2 files changed, 16 insertions(+) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 240c632c5b95..567a7af257d1 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -336,6 +336,19 @@ static inline void *offset_to_ptr(const int *off) */ #define prevent_tail_call_optimization() mb() +/* + * Define TYPEOF_UNQUAL() to use __typeof_unqual__() as typeof + * operator when available, to return unqualified type of the exp. + * + * XXX: Remove test for __CHECKER__ once + * sparse learns about __typeof_unqual__. + */ +#if defined(CONFIG_CC_HAS_TYPEOF_UNQUAL) && !defined(__CHECKER__) +# define TYPEOF_UNQUAL(exp) __typeof_unqual__(exp) +#else +# define TYPEOF_UNQUAL(exp) __typeof__(exp) +#endif + #include #endif /* __LINUX_COMPILER_H */ diff --git a/init/Kconfig b/init/Kconfig index a20e6efd3f0f..c1f9eb3d5f2e 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -894,6 +894,9 @@ config ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH config CC_HAS_INT128 def_bool !$(cc-option,$(m64-flag) -D__SIZEOF_INT128__=0) && 64BIT +config CC_HAS_TYPEOF_UNQUAL + def_bool $(success,echo 'int foo (int a) { __typeof_unqual__(a) b = a; return b; }' | $(CC) -x c - -S -o /dev/null) + config CC_IMPLICIT_FALLTHROUGH string default "-Wimplicit-fallthrough=5" if CC_IS_GCC && $(cc-option,-Wimplicit-fallthrough=5) From da5d95511a2d1ef7a3781b3314a22e528a8eb2df Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Sun, 8 Dec 2024 21:45:18 +0100 Subject: [PATCH 140/504] percpu: use TYPEOF_UNQUAL() in variable declarations Use TYPEOF_UNQUAL() to declare variables as a corresponding type without named address space qualifier to avoid "`__seg_gs' specified for auto variable `var'" errors. Link: https://lkml.kernel.org/r/20241208204708.3742696-4-ubizjak@gmail.com Signed-off-by: Uros Bizjak Acked-by: Nadav Amit Acked-by: Christoph Lameter Acked-by: Dennis Zhou Cc: Tejun Heo Cc: Andy Lutomirski Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Kent Overstreet Cc: Arnd Bergmann Cc: "David S. 
Miller" Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Paolo Abeni Cc: Peter Zijlstra Cc: Will Deacon Cc: Waiman Long Cc: Boqun Feng Cc: Linus Torvalds Cc: Brian Gerst Cc: Denys Vlasenko Signed-off-by: Andrew Morton --- arch/x86/include/asm/percpu.h | 10 +++++----- fs/bcachefs/util.h | 2 +- include/asm-generic/percpu.h | 26 +++++++++++++------------- include/linux/part_stat.h | 2 +- include/linux/percpu-defs.h | 4 ++-- include/net/snmp.h | 5 ++--- kernel/locking/percpu-rwsem.c | 2 +- net/mpls/internal.h | 4 ++-- 8 files changed, 27 insertions(+), 28 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index e525cd85f999..666e4137b09f 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -180,7 +180,7 @@ do { \ __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \ \ if (0) { \ - typeof(_var) pto_tmp__; \ + TYPEOF_UNQUAL(_var) pto_tmp__; \ pto_tmp__ = (_val); \ (void)pto_tmp__; \ } \ @@ -219,7 +219,7 @@ do { \ __pcpu_type_##size pto_val__ = __pcpu_cast_##size(_val); \ \ if (0) { \ - typeof(_var) pto_tmp__; \ + TYPEOF_UNQUAL(_var) pto_tmp__; \ pto_tmp__ = (_val); \ (void)pto_tmp__; \ } \ @@ -240,7 +240,7 @@ do { \ (val) == (typeof(val))-1)) ? (int)(val) : 0; \ \ if (0) { \ - typeof(var) pao_tmp__; \ + TYPEOF_UNQUAL(var) pao_tmp__; \ pao_tmp__ = (val); \ (void)pao_tmp__; \ } \ @@ -273,7 +273,7 @@ do { \ */ #define raw_percpu_xchg_op(_var, _nval) \ ({ \ - typeof(_var) pxo_old__ = raw_cpu_read(_var); \ + TYPEOF_UNQUAL(_var) pxo_old__ = raw_cpu_read(_var); \ \ raw_cpu_write(_var, _nval); \ \ @@ -287,7 +287,7 @@ do { \ */ #define this_percpu_xchg_op(_var, _nval) \ ({ \ - typeof(_var) pxo_old__ = this_cpu_read(_var); \ + TYPEOF_UNQUAL(_var) pxo_old__ = this_cpu_read(_var); \ \ do { } while (!this_cpu_try_cmpxchg(_var, &pxo_old__, _nval)); \ \ diff --git a/fs/bcachefs/util.h b/fs/bcachefs/util.h index fb02c1c36004..415a5803b8f4 100644 --- a/fs/bcachefs/util.h +++ b/fs/bcachefs/util.h @@ -586,7 +586,7 @@ do { \ #define per_cpu_sum(_p) \ ({ \ - typeof(*_p) _ret = 0; \ + TYPEOF_UNQUAL(*_p) _ret = 0; \ \ int cpu; \ for_each_possible_cpu(cpu) \ diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 94cbd50cc870..50597b975a49 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -74,7 +74,7 @@ do { \ #define raw_cpu_generic_add_return(pcp, val) \ ({ \ - typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \ + TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \ \ *__p += val; \ *__p; \ @@ -82,8 +82,8 @@ do { \ #define raw_cpu_generic_xchg(pcp, nval) \ ({ \ - typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \ + TYPEOF_UNQUAL(pcp) __ret; \ __ret = *__p; \ *__p = nval; \ __ret; \ @@ -91,7 +91,7 @@ do { \ #define __cpu_fallback_try_cmpxchg(pcp, ovalp, nval, _cmpxchg) \ ({ \ - typeof(pcp) __val, __old = *(ovalp); \ + TYPEOF_UNQUAL(pcp) __val, __old = *(ovalp); \ __val = _cmpxchg(pcp, __old, nval); \ if (__val != __old) \ *(ovalp) = __val; \ @@ -100,8 +100,8 @@ do { \ #define raw_cpu_generic_try_cmpxchg(pcp, ovalp, nval) \ ({ \ - typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \ - typeof(pcp) __val = *__p, ___old = *(ovalp); \ + TYPEOF_UNQUAL(pcp) *__p = raw_cpu_ptr(&(pcp)); \ + TYPEOF_UNQUAL(pcp) __val = *__p, ___old = *(ovalp); \ bool __ret; \ if (__val == ___old) { \ *__p = nval; \ @@ -115,14 +115,14 @@ do { \ #define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ ({ \ - typeof(pcp) __old = (oval); \ + TYPEOF_UNQUAL(pcp) __old = (oval); \ 
raw_cpu_generic_try_cmpxchg(pcp, &__old, nval); \ __old; \ }) #define __this_cpu_generic_read_nopreempt(pcp) \ ({ \ - typeof(pcp) ___ret; \ + TYPEOF_UNQUAL(pcp) ___ret; \ preempt_disable_notrace(); \ ___ret = READ_ONCE(*raw_cpu_ptr(&(pcp))); \ preempt_enable_notrace(); \ @@ -131,7 +131,7 @@ do { \ #define __this_cpu_generic_read_noirq(pcp) \ ({ \ - typeof(pcp) ___ret; \ + TYPEOF_UNQUAL(pcp) ___ret; \ unsigned long ___flags; \ raw_local_irq_save(___flags); \ ___ret = raw_cpu_generic_read(pcp); \ @@ -141,7 +141,7 @@ do { \ #define this_cpu_generic_read(pcp) \ ({ \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) __ret; \ if (__native_word(pcp)) \ __ret = __this_cpu_generic_read_nopreempt(pcp); \ else \ @@ -160,7 +160,7 @@ do { \ #define this_cpu_generic_add_return(pcp, val) \ ({ \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_add_return(pcp, val); \ @@ -170,7 +170,7 @@ do { \ #define this_cpu_generic_xchg(pcp, nval) \ ({ \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_xchg(pcp, nval); \ @@ -190,7 +190,7 @@ do { \ #define this_cpu_generic_cmpxchg(pcp, oval, nval) \ ({ \ - typeof(pcp) __ret; \ + TYPEOF_UNQUAL(pcp) __ret; \ unsigned long __flags; \ raw_local_irq_save(__flags); \ __ret = raw_cpu_generic_cmpxchg(pcp, oval, nval); \ diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index ac8c44dd8237..c5e9cac0575e 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -33,7 +33,7 @@ struct disk_stats { #define part_stat_read(part, field) \ ({ \ - typeof((part)->bd_stats->field) res = 0; \ + TYPEOF_UNQUAL((part)->bd_stats->field) res = 0; \ unsigned int _cpu; \ for_each_possible_cpu(_cpu) \ res += per_cpu_ptr((part)->bd_stats, _cpu)->field; \ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 5b520fe86b60..79b9402404f1 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -317,7 +317,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { } #define __pcpu_size_call_return(stem, variable) \ ({ \ - typeof(variable) pscr_ret__; \ + TYPEOF_UNQUAL(variable) pscr_ret__; \ __verify_pcpu_ptr(&(variable)); \ switch(sizeof(variable)) { \ case 1: pscr_ret__ = stem##1(variable); break; \ @@ -332,7 +332,7 @@ static __always_inline void __this_cpu_preempt_check(const char *op) { } #define __pcpu_size_call_return2(stem, variable, ...) 
\ ({ \ - typeof(variable) pscr2_ret__; \ + TYPEOF_UNQUAL(variable) pscr2_ret__; \ __verify_pcpu_ptr(&(variable)); \ switch(sizeof(variable)) { \ case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ diff --git a/include/net/snmp.h b/include/net/snmp.h index 468a67836e2f..4cb4326dfebe 100644 --- a/include/net/snmp.h +++ b/include/net/snmp.h @@ -159,7 +159,7 @@ struct linux_tls_mib { #define __SNMP_ADD_STATS64(mib, field, addend) \ do { \ - __typeof__(*mib) *ptr = raw_cpu_ptr(mib); \ + TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \ u64_stats_update_begin(&ptr->syncp); \ ptr->mibs[field] += addend; \ u64_stats_update_end(&ptr->syncp); \ @@ -176,8 +176,7 @@ struct linux_tls_mib { #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1) #define __SNMP_UPD_PO_STATS64(mib, basefield, addend) \ do { \ - __typeof__(*mib) *ptr; \ - ptr = raw_cpu_ptr((mib)); \ + TYPEOF_UNQUAL(*mib) *ptr = raw_cpu_ptr(mib); \ u64_stats_update_begin(&ptr->syncp); \ ptr->mibs[basefield##PKTS]++; \ ptr->mibs[basefield##OCTETS] += addend; \ diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c index 6083883c4fe0..d6964fc29f51 100644 --- a/kernel/locking/percpu-rwsem.c +++ b/kernel/locking/percpu-rwsem.c @@ -184,7 +184,7 @@ EXPORT_SYMBOL_GPL(__percpu_down_read); #define per_cpu_sum(var) \ ({ \ - typeof(var) __sum = 0; \ + TYPEOF_UNQUAL(var) __sum = 0; \ int cpu; \ compiletime_assert_atomic_type(__sum); \ for_each_possible_cpu(cpu) \ diff --git a/net/mpls/internal.h b/net/mpls/internal.h index b9f492ddf93b..83c629529b57 100644 --- a/net/mpls/internal.h +++ b/net/mpls/internal.h @@ -33,7 +33,7 @@ struct mpls_dev { #define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field) \ do { \ - __typeof__(*(mdev)->stats) *ptr = \ + TYPEOF_UNQUAL(*(mdev)->stats) *ptr = \ raw_cpu_ptr((mdev)->stats); \ local_bh_disable(); \ u64_stats_update_begin(&ptr->syncp); \ @@ -45,7 +45,7 @@ struct mpls_dev { #define MPLS_INC_STATS(mdev, field) \ do { \ - __typeof__(*(mdev)->stats) *ptr = \ + TYPEOF_UNQUAL(*(mdev)->stats) *ptr = \ raw_cpu_ptr((mdev)->stats); \ local_bh_disable(); \ u64_stats_update_begin(&ptr->syncp); \ From f1f70eb5a2478c2966ccc2df83dceda7f6fa7f52 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Sun, 8 Dec 2024 21:45:19 +0100 Subject: [PATCH 141/504] percpu: use TYPEOF_UNQUAL() in *_cpu_ptr() accessors Use TYPEOF_UNQUAL() macro to declare the return type of *_cpu_ptr() accessors in the generic named address space to avoid access to data from pointer to non-enclosed address space type of errors. Link: https://lkml.kernel.org/r/20241208204708.3742696-5-ubizjak@gmail.com Signed-off-by: Uros Bizjak Acked-by: Nadav Amit Acked-by: Christoph Lameter Acked-by: Dennis Zhou Cc: Tejun Heo Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Brian Gerst Cc: Peter Zijlstra Cc: Arnd Bergmann Cc: Boqun Feng Cc: "David S. 
Miller" Cc: Denys Vlasenko Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Kent Overstreet Cc: Paolo Abeni Cc: Waiman Long Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/x86/include/asm/percpu.h | 8 ++++++-- include/linux/percpu-defs.h | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 666e4137b09f..27f668660abe 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -73,10 +73,14 @@ unsigned long tcp_ptr__ = raw_cpu_read_long(this_cpu_off); \ \ tcp_ptr__ += (__force unsigned long)(_ptr); \ - (typeof(*(_ptr)) __kernel __force *)tcp_ptr__; \ + (TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)tcp_ptr__; \ }) #else -#define arch_raw_cpu_ptr(_ptr) ({ BUILD_BUG(); (typeof(_ptr))0; }) +#define arch_raw_cpu_ptr(_ptr) \ +({ \ + BUILD_BUG(); \ + (TYPEOF_UNQUAL(*(_ptr)) __force __kernel *)0; \ +}) #endif #define PER_CPU_VAR(var) %__percpu_seg:(var)__percpu_rel diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index 79b9402404f1..b4859e87846c 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -221,7 +221,7 @@ do { \ } while (0) #define PERCPU_PTR(__p) \ - (typeof(*(__p)) __force __kernel *)((__force unsigned long)(__p)) + ((TYPEOF_UNQUAL(*(__p)) __force __kernel *)(__force unsigned long)(__p)) #ifdef CONFIG_SMP From a9cb47d0aae3020881f1783344e8b5a41ed7f71d Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Sun, 8 Dec 2024 21:45:20 +0100 Subject: [PATCH 142/504] percpu: repurpose __percpu tag as a named address space qualifier The patch introduces __percpu_qual define and repurposes __percpu tag as a named address space qualifier using the new define. Arches can now conditionally define __percpu_qual as their named address space qualifier for percpu variables. Link: https://lkml.kernel.org/r/20241208204708.3742696-6-ubizjak@gmail.com Signed-off-by: Uros Bizjak Acked-by: Nadav Amit Acked-by: Dennis Zhou Cc: Arnd Bergmann Cc: Thomas Gleixner Cc: Tejun Heo Cc: Christoph Lameter Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Ingo Molnar Cc: Brian Gerst Cc: "H. Peter Anvin" Cc: Peter Zijlstra Cc: Boqun Feng Cc: Borislav Petkov Cc: Dave Hansen Cc: "David S. Miller" Cc: Denys Vlasenko Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Kent Overstreet Cc: Paolo Abeni Cc: Waiman Long Cc: Will Deacon Signed-off-by: Andrew Morton --- include/asm-generic/percpu.h | 13 +++++++++++++ include/linux/compiler_types.h | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h index 50597b975a49..02aeca21479a 100644 --- a/include/asm-generic/percpu.h +++ b/include/asm-generic/percpu.h @@ -6,6 +6,19 @@ #include #include +/* + * __percpu_qual is the qualifier for the percpu named address space. + * + * Most arches use generic named address space for percpu variables but + * some arches define percpu variables in different named address space + * (on the x86 arch, percpu variable may be declared as being relative + * to the %fs or %gs segments using __seg_fs or __seg_gs named address + * space qualifier). 
+ */ +#ifndef __percpu_qual +# define __percpu_qual +#endif + #ifdef CONFIG_SMP /* diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index 981cc3d7e3aa..5d6544545658 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -57,7 +57,7 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { } # define __user BTF_TYPE_TAG(user) # endif # define __iomem -# define __percpu BTF_TYPE_TAG(percpu) +# define __percpu __percpu_qual BTF_TYPE_TAG(percpu) # define __rcu BTF_TYPE_TAG(rcu) # define __chk_user_ptr(x) (void)0 From 13c16faf982dcd06aef465cdbd9e7b3dcbd6ab83 Mon Sep 17 00:00:00 2001 From: Uros Bizjak Date: Sun, 8 Dec 2024 21:45:21 +0100 Subject: [PATCH 143/504] percpu/x86: enable strict percpu checks via named AS qualifiers This patch declares percpu variables in __seg_gs/__seg_fs named AS and keeps them named AS qualified until they are dereferenced with percpu accessor. This approach enables various compiler check for cross-namespace variable assignments. Link: https://lkml.kernel.org/r/20241208204708.3742696-7-ubizjak@gmail.com Signed-off-by: Uros Bizjak Acked-by: Nadav Amit Acked-by: Dennis Zhou Cc: Tejun Heo Cc: Christoph Lameter Cc: Thomas Gleixner Cc: Ingo Molnar Cc: Borislav Petkov Cc: Dave Hansen Cc: "H. Peter Anvin" Cc: Linus Torvalds Cc: Andy Lutomirski Cc: Brian Gerst Cc: Peter Zijlstra Cc: Arnd Bergmann Cc: Boqun Feng Cc: "David S. Miller" Cc: Denys Vlasenko Cc: Eric Dumazet Cc: Jakub Kicinski Cc: Kent Overstreet Cc: Paolo Abeni Cc: Waiman Long Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/x86/include/asm/percpu.h | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 27f668660abe..1ef08289e667 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -95,9 +95,23 @@ #endif /* CONFIG_SMP */ -#define __my_cpu_type(var) typeof(var) __percpu_seg_override -#define __my_cpu_ptr(ptr) (__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr) -#define __my_cpu_var(var) (*__my_cpu_ptr(&(var))) +/* + * XXX: Remove test for __CHECKER__ once + * sparse learns about __typeof_unqual__. + */ +#if defined(CONFIG_USE_X86_SEG_SUPPORT) && \ + defined(CONFIG_CC_HAS_TYPEOF_UNQUAL) && !defined(__CHECKER__) +# define __my_cpu_type(var) typeof(var) +# define __my_cpu_ptr(ptr) (ptr) +# define __my_cpu_var(var) (var) + +# define __percpu_qual __percpu_seg_override +#else +# define __my_cpu_type(var) typeof(var) __percpu_seg_override +# define __my_cpu_ptr(ptr) (__my_cpu_type(*(ptr))*)(__force uintptr_t)(ptr) +# define __my_cpu_var(var) (*__my_cpu_ptr(&(var))) +#endif + #define __percpu_arg(x) __percpu_prefix "%" #x #define __force_percpu_arg(x) __force_percpu_prefix "%" #x From 875285f409d69b24c74da279bbeda969518a8a4c Mon Sep 17 00:00:00 2001 From: Jinliang Zheng Date: Fri, 13 Dec 2024 11:18:20 +0800 Subject: [PATCH 144/504] mm: fix outdated incorrect code comments for handle_mm_fault() Link: https://lkml.kernel.org/r/20241213031820.778342-1-alexjlzheng@tencent.com Signed-off-by: Jinliang Zheng Cc: "Liam R. 
Howlett" Signed-off-by: Andrew Morton --- mm/memory.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index 69ab5bb6db75..5ebad355b245 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6140,7 +6140,8 @@ static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, } /* - * By the time we get here, we already hold the mm semaphore + * By the time we get here, we already hold either the VMA lock or the + * mmap_Lock (FAULT_FLAG_VMA_LOCK tells you which). * * The mmap_lock may have been released depending on flags and our * return value. See filemap_fault() and __folio_lock_or_retry(). From 5584f1be93efc777b8475cc875eba49044917830 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 13 Dec 2024 16:35:29 -0800 Subject: [PATCH 145/504] mm-fix-outdated-incorrect-code-comments-for-handle_mm_fault-fix s/mmap_Lock/mmap_lock/, per Liam Cc: Jinliang Zheng Cc: Jinliang Zheng Cc: "Liam R. Howlett" Signed-off-by: Andrew Morton --- mm/memory.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index 5ebad355b245..4fe0404fa198 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6141,7 +6141,7 @@ static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma, /* * By the time we get here, we already hold either the VMA lock or the - * mmap_Lock (FAULT_FLAG_VMA_LOCK tells you which). + * mmap_lock (FAULT_FLAG_VMA_LOCK tells you which). * * The mmap_lock may have been released depending on flags and our * return value. See filemap_fault() and __folio_lock_or_retry(). From 5e5028fe3ae87c9700c89ef98a1dc8f493e3333b Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 12 Dec 2024 08:34:23 +0100 Subject: [PATCH 146/504] mm: unexport apply_to_existing_page_range apply_to_existing_page_range() is only used by non-modular code. Link: https://lkml.kernel.org/r/20241212073423.1439954-1-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- mm/memory.c | 1 - 1 file changed, 1 deletion(-) diff --git a/mm/memory.c b/mm/memory.c index 4fe0404fa198..c870ca8f84a9 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3082,7 +3082,6 @@ int apply_to_existing_page_range(struct mm_struct *mm, unsigned long addr, { return __apply_to_page_range(mm, addr, size, fn, data, false); } -EXPORT_SYMBOL_GPL(apply_to_existing_page_range); /* * handle_pte_fault chooses page fault handler according to an entry which was From d452a33588aa3e63b779d5622ea676a3c7ebdc52 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Fri, 22 Nov 2024 15:23:55 -0800 Subject: [PATCH 147/504] mm: add AS_WRITEBACK_INDETERMINATE mapping flag Patch series "fuse: remove temp page copies in writeback", v6. The purpose of this patchset is to help make writeback-cache write performance in FUSE filesystems as fast as possible. In the current FUSE writeback design (see commit 3be5a52b30aa ("fuse: support writable mmap"))), a temp page is allocated for every dirty page to be written back, the contents of the dirty page are copied over to the temp page, and the temp page gets handed to the server to write back. 
This is done so that writeback may be immediately cleared on the dirty page, and this in turn is done for two reasons: a) in order to mitigate the following deadlock scenario that may arise if reclaim waits on writeback on the dirty page to complete (more details can be found in this thread [1]): * single-threaded FUSE server is in the middle of handling a request that needs a memory allocation * memory allocation triggers direct reclaim * direct reclaim waits on a folio under writeback * the FUSE server can't write back the folio since it's stuck in direct reclaim b) in order to unblock internal (eg sync, page compaction) waits on writeback without needing the server to complete writing back to disk, which may take an indeterminate amount of time. Allocating and copying dirty pages to temp pages is the biggest performance bottleneck for FUSE writeback. This patchset aims to get rid of the temp page altogether (which will also allow us to get rid of the internal FUSE rb tree that is needed to keep track of writeback status on the temp pages). Benchmarks show approximately a 20% improvement in throughput for 4k block-size writes and a 45% improvement for 1M block-size writes. With removing the temp page, writeback state is now only cleared on the dirty page after the server has written it back to disk. This may take an indeterminate amount of time. As well, there is also the possibility of malicious or well-intentioned but buggy servers where writeback may in the worst case scenario, never complete. This means that any folio_wait_writeback() on a dirty page belonging to a FUSE filesystem needs to be carefully audited. In particular, these are the cases that need to be accounted for: * potentially deadlocking in reclaim, as mentioned above * potentially stalling sync(2) * potentially stalling page migration / compaction This patchset adds a new mapping flag, AS_WRITEBACK_INDETERMINATE, which filesystems may set on its inode mappings to indicate that writeback operations may take an indeterminate amount of time to complete. FUSE will set this flag on its mappings. This patchset adds checks to the critical parts of reclaim, sync, and page migration logic where writeback may be waited on. Please note the following: * For sync(2), waiting on writeback will be skipped for FUSE, but this has no effect on existing behavior. Dirty FUSE pages are already not guaranteed to be written to disk by the time sync(2) returns (eg writeback is cleared on the dirty page but the server may not have written out the temp page to disk yet). If the caller wishes to ensure the data has actually been synced to disk, they should use fsync(2)/fdatasync(2) instead. * AS_WRITEBACK_INDETERMINATE does not indicate that the folios should never be waited on when in writeback. There are some cases where the wait is desirable. For example, for the sync_file_range() syscall, it is fine to wait on the writeback since the caller passes in a fd for the operation. [1] https://lore.kernel.org/linux-kernel/495d2400-1d96-4924-99d3-8b2952e05fc3@linux.alibaba.com/ This patch (of 5): Add a new mapping flag AS_WRITEBACK_INDETERMINATE which filesystems may set to indicate that writing back to disk may take an indeterminate amount of time to complete. Extra caution should be taken when waiting on writeback for folios belonging to mappings where this flag is set. 
Link: https://lkml.kernel.org/r/20241122232359.429647-1-joannelkoong@gmail.com Link: https://lkml.kernel.org/r/20241122232359.429647-2-joannelkoong@gmail.com Signed-off-by: Joanne Koong Reviewed-by: Shakeel Butt Acked-by: Miklos Szeredi Cc: Bernd Schubert Cc: Jingbo Xu Cc: Josef Bacik Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index fc2e1319c7bb..12136ed844ac 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -210,6 +210,7 @@ enum mapping_flags { AS_STABLE_WRITES = 7, /* must wait for writeback before modifying folio contents */ AS_INACCESSIBLE = 8, /* Do not attempt direct R/W access to the mapping */ + AS_WRITEBACK_INDETERMINATE = 9, /* Use caution when waiting on writeback */ /* Bits 16-25 are used for FOLIO_ORDER */ AS_FOLIO_ORDER_BITS = 5, AS_FOLIO_ORDER_MIN = 16, @@ -335,6 +336,16 @@ static inline bool mapping_inaccessible(struct address_space *mapping) return test_bit(AS_INACCESSIBLE, &mapping->flags); } +static inline void mapping_set_writeback_indeterminate(struct address_space *mapping) +{ + set_bit(AS_WRITEBACK_INDETERMINATE, &mapping->flags); +} + +static inline bool mapping_writeback_indeterminate(struct address_space *mapping) +{ + return test_bit(AS_WRITEBACK_INDETERMINATE, &mapping->flags); +} + static inline gfp_t mapping_gfp_mask(struct address_space * mapping) { return mapping->gfp_mask; From fbfab936ca96e6657b86040b62853f5f3d89d3c7 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Fri, 22 Nov 2024 15:23:56 -0800 Subject: [PATCH 148/504] mm: skip reclaiming folios in legacy memcg writeback indeterminate contexts Currently in shrink_folio_list(), reclaim for folios under writeback falls into 3 different cases: 1) Reclaim is encountering an excessive number of folios under writeback and this folio has both the writeback and reclaim flags set 2) Dirty throttling is enabled (this happens if reclaim through cgroup is not enabled, if reclaim through cgroupv2 memcg is enabled, or if reclaim is on the root cgroup), or if the folio is not marked for immediate reclaim, or if the caller does not have __GFP_FS (or __GFP_IO if it's going to swap) set 3) Legacy cgroupv1 encounters a folio that already has the reclaim flag set and the caller did not have __GFP_FS (or __GFP_IO if swap) set In cases 1) and 2), we activate the folio and skip reclaiming it while in case 3), we wait for writeback to finish on the folio and then try to reclaim the folio again. In case 3, we wait on writeback because cgroupv1 does not have dirty folio throttling, as such this is a mitigation against the case where there are too many folios in writeback with nothing else to reclaim. For filesystems where writeback may take an indeterminate amount of time to write to disk, this has the possibility of stalling reclaim. In this commit, if legacy memcg encounters a folio with the reclaim flag set (eg case 3) and the folio belongs to a mapping that has the AS_WRITEBACK_INDETERMINATE flag set, the folio will be activated and skip reclaim (eg default to behavior in case 2) instead. 
Link: https://lkml.kernel.org/r/20241122232359.429647-3-joannelkoong@gmail.com Signed-off-by: Joanne Koong Reviewed-by: Shakeel Butt Acked-by: Miklos Szeredi Cc: Bernd Schubert Cc: Jingbo Xu Cc: Josef Bacik Signed-off-by: Andrew Morton --- mm/vmscan.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 49f801b00d5d..4f669fc2bd7c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1137,8 +1137,9 @@ retry: * 2) Global or new memcg reclaim encounters a folio that is * not marked for immediate reclaim, or the caller does not * have __GFP_FS (or __GFP_IO if it's simply going to swap, - * not to fs). In this case mark the folio for immediate - * reclaim and continue scanning. + * not to fs), or the writeback may take an indeterminate + * amount of time to complete. In this case mark the folio + * for immediate reclaim and continue scanning. * * Require may_enter_fs() because we would wait on fs, which * may not have submitted I/O yet. And the loop driver might @@ -1163,6 +1164,8 @@ retry: * takes to write them to disk. */ if (folio_test_writeback(folio)) { + mapping = folio_mapping(folio); + /* Case 1 above */ if (current_is_kswapd() && folio_test_reclaim(folio) && @@ -1173,7 +1176,8 @@ retry: /* Case 2 above */ } else if (writeback_throttling_sane(sc) || !folio_test_reclaim(folio) || - !may_enter_fs(folio, sc->gfp_mask)) { + !may_enter_fs(folio, sc->gfp_mask) || + (mapping && mapping_writeback_indeterminate(mapping))) { /* * This is slightly racy - * folio_end_writeback() might have From 230283b5c8d2945856af97783f20e619e7bd2920 Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Fri, 22 Nov 2024 15:23:57 -0800 Subject: [PATCH 149/504] fs/writeback: in wait_sb_inodes(), skip wait for AS_WRITEBACK_INDETERMINATE mappings For filesystems with the AS_WRITEBACK_INDETERMINATE flag set, writeback operations may take an indeterminate time to complete. For example, writing data back to disk in FUSE filesystems depends on the userspace server successfully completing writeback. In this commit, wait_sb_inodes() skips waiting on writeback if the inode's mapping has AS_WRITEBACK_INDETERMINATE set, else sync(2) may take an indeterminate amount of time to complete. If the caller wishes to ensure the data for a mapping with the AS_WRITEBACK_INDETERMINATE flag set has actually been written back to disk, they should use fsync(2)/fdatasync(2) instead. Link: https://lkml.kernel.org/r/20241122232359.429647-4-joannelkoong@gmail.com Signed-off-by: Joanne Koong Reviewed-by: Jingbo Xu Acked-by: Miklos Szeredi Cc: Bernd Schubert Cc: Josef Bacik Cc: Shakeel Butt Signed-off-by: Andrew Morton --- fs/fs-writeback.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 3cd99e2dc6ac..5980ac24c7a4 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -2681,6 +2681,9 @@ static void wait_sb_inodes(struct super_block *sb) if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) continue; + if (mapping_writeback_indeterminate(mapping)) + continue; + spin_unlock_irq(&sb->s_inode_wblist_lock); spin_lock(&inode->i_lock); From 4d782bff5fa3f4ee27e3ecdd4982936f550e7b5f Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Fri, 22 Nov 2024 15:23:58 -0800 Subject: [PATCH 150/504] mm/migrate: skip migrating folios under writeback with AS_WRITEBACK_INDETERMINATE mappings For migrations called in MIGRATE_SYNC mode, skip migrating the folio if it is under writeback and has the AS_WRITEBACK_INDETERMINATE flag set on its mapping. 
If the AS_WRITEBACK_INDETERMINATE flag is set on the mapping, the writeback may take an indeterminate amount of time to complete, and waits may get stuck. Link: https://lkml.kernel.org/r/20241122232359.429647-5-joannelkoong@gmail.com Signed-off-by: Joanne Koong Reviewed-by: Shakeel Butt Acked-by: Miklos Szeredi Cc: Bernd Schubert Cc: Jingbo Xu Cc: Josef Bacik Signed-off-by: Andrew Morton --- mm/migrate.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mm/migrate.c b/mm/migrate.c index 32cc8e0b1cce..caadbe393aa2 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1254,7 +1254,10 @@ static int migrate_folio_unmap(new_folio_t get_new_folio, */ switch (mode) { case MIGRATE_SYNC: - break; + if (!src->mapping || + !mapping_writeback_indeterminate(src->mapping)) + break; + fallthrough; default: rc = -EBUSY; goto out; From b35976b7d2135863beef029f1e46b25f3d8d268e Mon Sep 17 00:00:00 2001 From: Joanne Koong Date: Fri, 22 Nov 2024 15:23:59 -0800 Subject: [PATCH 151/504] fuse: remove tmp folio for writebacks and internal rb tree In the current FUSE writeback design (see commit 3be5a52b30aa ("fuse: support writable mmap")), a temp page is allocated for every dirty page to be written back, the contents of the dirty page are copied over to the temp page, and the temp page gets handed to the server to write back. This is done so that writeback may be immediately cleared on the dirty page, and this in turn is done for two reasons: a) in order to mitigate the following deadlock scenario that may arise if reclaim waits on writeback on the dirty page to complete: * single-threaded FUSE server is in the middle of handling a request that needs a memory allocation * memory allocation triggers direct reclaim * direct reclaim waits on a folio under writeback * the FUSE server can't write back the folio since it's stuck in direct reclaim b) in order to unblock internal (eg sync, page compaction) waits on writeback without needing the server to complete writing back to disk, which may take an indeterminate amount of time. With a recent change that added AS_WRITEBACK_INDETERMINATE and mitigates the situations described above, FUSE writeback does not need to use temp pages if it sets AS_WRITEBACK_INDETERMINATE on its inode mappings. This commit sets AS_WRITEBACK_INDETERMINATE on the inode mappings and removes the temporary pages + extra copying and the internal rb tree. 
fio benchmarks -- (using averages observed from 10 runs, throwing away outliers) Setup: sudo mount -t tmpfs -o size=30G tmpfs ~/tmp_mount ./libfuse/build/example/passthrough_ll -o writeback -o max_threads=4 -o source=~/tmp_mount ~/fuse_mount fio --name=writeback --ioengine=sync --rw=write --bs={1k,4k,1M} --size=2G --numjobs=2 --ramp_time=30 --group_reporting=1 --directory=/root/fuse_mount bs = 1k 4k 1M Before 351 MiB/s 1818 MiB/s 1851 MiB/s After 341 MiB/s 2246 MiB/s 2685 MiB/s % diff -3% 23% 45% Link: https://lkml.kernel.org/r/20241122232359.429647-6-joannelkoong@gmail.com Signed-off-by: Joanne Koong Reviewed-by: Jingbo Xu Acked-by: Miklos Szeredi Cc: Bernd Schubert Cc: Josef Bacik Cc: Shakeel Butt Signed-off-by: Andrew Morton --- fs/fuse/file.c | 360 ++++------------------------------------------- fs/fuse/fuse_i.h | 3 - 2 files changed, 28 insertions(+), 335 deletions(-) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 88d0946b5bc9..1970d1a699a6 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -415,89 +415,11 @@ u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id) struct fuse_writepage_args { struct fuse_io_args ia; - struct rb_node writepages_entry; struct list_head queue_entry; - struct fuse_writepage_args *next; struct inode *inode; struct fuse_sync_bucket *bucket; }; -static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi, - pgoff_t idx_from, pgoff_t idx_to) -{ - struct rb_node *n; - - n = fi->writepages.rb_node; - - while (n) { - struct fuse_writepage_args *wpa; - pgoff_t curr_index; - - wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry); - WARN_ON(get_fuse_inode(wpa->inode) != fi); - curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT; - if (idx_from >= curr_index + wpa->ia.ap.num_folios) - n = n->rb_right; - else if (idx_to < curr_index) - n = n->rb_left; - else - return wpa; - } - return NULL; -} - -/* - * Check if any page in a range is under writeback - */ -static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from, - pgoff_t idx_to) -{ - struct fuse_inode *fi = get_fuse_inode(inode); - bool found; - - if (RB_EMPTY_ROOT(&fi->writepages)) - return false; - - spin_lock(&fi->lock); - found = fuse_find_writeback(fi, idx_from, idx_to); - spin_unlock(&fi->lock); - - return found; -} - -static inline bool fuse_page_is_writeback(struct inode *inode, pgoff_t index) -{ - return fuse_range_is_writeback(inode, index, index); -} - -/* - * Wait for page writeback to be completed. - * - * Since fuse doesn't rely on the VM writeback tracking, this has to - * use some other means. - */ -static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index) -{ - struct fuse_inode *fi = get_fuse_inode(inode); - - wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index)); -} - -static inline bool fuse_folio_is_writeback(struct inode *inode, - struct folio *folio) -{ - pgoff_t last = folio_next_index(folio) - 1; - return fuse_range_is_writeback(inode, folio_index(folio), last); -} - -static void fuse_wait_on_folio_writeback(struct inode *inode, - struct folio *folio) -{ - struct fuse_inode *fi = get_fuse_inode(inode); - - wait_event(fi->page_waitq, !fuse_folio_is_writeback(inode, folio)); -} - /* * Wait for all pending writepages on the inode to finish. * @@ -886,13 +808,6 @@ static int fuse_do_readfolio(struct file *file, struct folio *folio) ssize_t res; u64 attr_ver; - /* - * With the temporary pages that are used to complete writeback, we can - * have writeback that extends beyond the lifetime of the folio. 
So - * make sure we read a properly synced folio. - */ - fuse_wait_on_folio_writeback(inode, folio); - attr_ver = fuse_get_attr_version(fm->fc); /* Don't overflow end offset */ @@ -1003,17 +918,12 @@ static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) static void fuse_readahead(struct readahead_control *rac) { struct inode *inode = rac->mapping->host; - struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); unsigned int max_pages, nr_pages; - pgoff_t first = readahead_index(rac); - pgoff_t last = first + readahead_count(rac) - 1; if (fuse_is_bad(inode)) return; - wait_event(fi->page_waitq, !fuse_range_is_writeback(inode, first, last)); - max_pages = min_t(unsigned int, fc->max_pages, fc->max_read / PAGE_SIZE); @@ -1172,7 +1082,7 @@ static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, int err; for (i = 0; i < ap->num_folios; i++) - fuse_wait_on_folio_writeback(inode, ap->folios[i]); + folio_wait_writeback(ap->folios[i]); fuse_write_args_fill(ia, ff, pos, count); ia->write.in.flags = fuse_write_flags(iocb); @@ -1622,7 +1532,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, return res; } } - if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) { + if (!cuse && filemap_range_has_writeback(mapping, pos, (pos + count - 1))) { if (!write) inode_lock(inode); fuse_sync_writes(inode); @@ -1819,38 +1729,34 @@ static ssize_t fuse_splice_write(struct pipe_inode_info *pipe, struct file *out, static void fuse_writepage_free(struct fuse_writepage_args *wpa) { struct fuse_args_pages *ap = &wpa->ia.ap; - int i; if (wpa->bucket) fuse_sync_bucket_dec(wpa->bucket); - for (i = 0; i < ap->num_folios; i++) - folio_put(ap->folios[i]); - fuse_file_put(wpa->ia.ff, false); kfree(ap->folios); kfree(wpa); } -static void fuse_writepage_finish_stat(struct inode *inode, struct folio *folio) -{ - struct backing_dev_info *bdi = inode_to_bdi(inode); - - dec_wb_stat(&bdi->wb, WB_WRITEBACK); - node_stat_sub_folio(folio, NR_WRITEBACK_TEMP); - wb_writeout_inc(&bdi->wb); -} - static void fuse_writepage_finish(struct fuse_writepage_args *wpa) { struct fuse_args_pages *ap = &wpa->ia.ap; struct inode *inode = wpa->inode; struct fuse_inode *fi = get_fuse_inode(inode); + struct backing_dev_info *bdi = inode_to_bdi(inode); int i; - for (i = 0; i < ap->num_folios; i++) - fuse_writepage_finish_stat(inode, ap->folios[i]); + for (i = 0; i < ap->num_folios; i++) { + /* + * Benchmarks showed that ending writeback within the + * scope of the fi->lock alleviates xarray lock + * contention and noticeably improves performance. 
+ */ + folio_end_writeback(ap->folios[i]); + dec_wb_stat(&bdi->wb, WB_WRITEBACK); + wb_writeout_inc(&bdi->wb); + } wake_up(&fi->page_waitq); } @@ -1861,7 +1767,6 @@ static void fuse_send_writepage(struct fuse_mount *fm, __releases(fi->lock) __acquires(fi->lock) { - struct fuse_writepage_args *aux, *next; struct fuse_inode *fi = get_fuse_inode(wpa->inode); struct fuse_write_in *inarg = &wpa->ia.write.in; struct fuse_args *args = &wpa->ia.ap.args; @@ -1898,19 +1803,8 @@ __acquires(fi->lock) out_free: fi->writectr--; - rb_erase(&wpa->writepages_entry, &fi->writepages); fuse_writepage_finish(wpa); spin_unlock(&fi->lock); - - /* After rb_erase() aux request list is private */ - for (aux = wpa->next; aux; aux = next) { - next = aux->next; - aux->next = NULL; - fuse_writepage_finish_stat(aux->inode, - aux->ia.ap.folios[0]); - fuse_writepage_free(aux); - } - fuse_writepage_free(wpa); spin_lock(&fi->lock); } @@ -1938,43 +1832,6 @@ __acquires(fi->lock) } } -static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root, - struct fuse_writepage_args *wpa) -{ - pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT; - pgoff_t idx_to = idx_from + wpa->ia.ap.num_folios - 1; - struct rb_node **p = &root->rb_node; - struct rb_node *parent = NULL; - - WARN_ON(!wpa->ia.ap.num_folios); - while (*p) { - struct fuse_writepage_args *curr; - pgoff_t curr_index; - - parent = *p; - curr = rb_entry(parent, struct fuse_writepage_args, - writepages_entry); - WARN_ON(curr->inode != wpa->inode); - curr_index = curr->ia.write.in.offset >> PAGE_SHIFT; - - if (idx_from >= curr_index + curr->ia.ap.num_folios) - p = &(*p)->rb_right; - else if (idx_to < curr_index) - p = &(*p)->rb_left; - else - return curr; - } - - rb_link_node(&wpa->writepages_entry, parent, p); - rb_insert_color(&wpa->writepages_entry, root); - return NULL; -} - -static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa) -{ - WARN_ON(fuse_insert_writeback(root, wpa)); -} - static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args, int error) { @@ -1994,41 +1851,6 @@ static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args, if (!fc->writeback_cache) fuse_invalidate_attr_mask(inode, FUSE_STATX_MODIFY); spin_lock(&fi->lock); - rb_erase(&wpa->writepages_entry, &fi->writepages); - while (wpa->next) { - struct fuse_mount *fm = get_fuse_mount(inode); - struct fuse_write_in *inarg = &wpa->ia.write.in; - struct fuse_writepage_args *next = wpa->next; - - wpa->next = next->next; - next->next = NULL; - tree_insert(&fi->writepages, next); - - /* - * Skip fuse_flush_writepages() to make it easy to crop requests - * based on primary request size. - * - * 1st case (trivial): there are no concurrent activities using - * fuse_set/release_nowrite. Then we're on safe side because - * fuse_flush_writepages() would call fuse_send_writepage() - * anyway. - * - * 2nd case: someone called fuse_set_nowrite and it is waiting - * now for completion of all in-flight requests. This happens - * rarely and no more than once per page, so this should be - * okay. - * - * 3rd case: someone (e.g. fuse_do_setattr()) is in the middle - * of fuse_set_nowrite..fuse_release_nowrite section. The fact - * that fuse_set_nowrite returned implies that all in-flight - * requests were completed along with all of their secondary - * requests. Further primary requests are blocked by negative - * writectr. 
Hence there cannot be any in-flight requests and - * no invocations of fuse_writepage_end() while we're in - * fuse_set_nowrite..fuse_release_nowrite section. - */ - fuse_send_writepage(fm, next, inarg->offset + inarg->size); - } fi->writectr--; fuse_writepage_finish(wpa); spin_unlock(&fi->lock); @@ -2115,19 +1937,16 @@ static void fuse_writepage_add_to_bucket(struct fuse_conn *fc, } static void fuse_writepage_args_page_fill(struct fuse_writepage_args *wpa, struct folio *folio, - struct folio *tmp_folio, uint32_t folio_index) + uint32_t folio_index) { struct inode *inode = folio->mapping->host; struct fuse_args_pages *ap = &wpa->ia.ap; - folio_copy(tmp_folio, folio); - - ap->folios[folio_index] = tmp_folio; + ap->folios[folio_index] = folio; ap->descs[folio_index].offset = 0; ap->descs[folio_index].length = PAGE_SIZE; inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); - node_stat_add_folio(tmp_folio, NR_WRITEBACK_TEMP); } static struct fuse_writepage_args *fuse_writepage_args_setup(struct folio *folio, @@ -2162,18 +1981,12 @@ static int fuse_writepage_locked(struct folio *folio) struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_writepage_args *wpa; struct fuse_args_pages *ap; - struct folio *tmp_folio; struct fuse_file *ff; - int error = -ENOMEM; + int error = -EIO; - tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0); - if (!tmp_folio) - goto err; - - error = -EIO; ff = fuse_write_file_get(fi); if (!ff) - goto err_nofile; + goto err; wpa = fuse_writepage_args_setup(folio, ff); error = -ENOMEM; @@ -2184,22 +1997,17 @@ static int fuse_writepage_locked(struct folio *folio) ap->num_folios = 1; folio_start_writeback(folio); - fuse_writepage_args_page_fill(wpa, folio, tmp_folio, 0); + fuse_writepage_args_page_fill(wpa, folio, 0); spin_lock(&fi->lock); - tree_insert(&fi->writepages, wpa); list_add_tail(&wpa->queue_entry, &fi->queued_writes); fuse_flush_writepages(inode); spin_unlock(&fi->lock); - folio_end_writeback(folio); - return 0; err_writepage_args: fuse_file_put(ff, false); -err_nofile: - folio_put(tmp_folio); err: mapping_set_error(folio->mapping, error); return error; @@ -2209,7 +2017,6 @@ struct fuse_fill_wb_data { struct fuse_writepage_args *wpa; struct fuse_file *ff; struct inode *inode; - struct folio **orig_folios; unsigned int max_folios; }; @@ -2244,69 +2051,11 @@ static void fuse_writepages_send(struct fuse_fill_wb_data *data) struct fuse_writepage_args *wpa = data->wpa; struct inode *inode = data->inode; struct fuse_inode *fi = get_fuse_inode(inode); - int num_folios = wpa->ia.ap.num_folios; - int i; spin_lock(&fi->lock); list_add_tail(&wpa->queue_entry, &fi->queued_writes); fuse_flush_writepages(inode); spin_unlock(&fi->lock); - - for (i = 0; i < num_folios; i++) - folio_end_writeback(data->orig_folios[i]); -} - -/* - * Check under fi->lock if the page is under writeback, and insert it onto the - * rb_tree if not. Otherwise iterate auxiliary write requests, to see if there's - * one already added for a page at this offset. If there's none, then insert - * this new request onto the auxiliary list, otherwise reuse the existing one by - * swapping the new temp page with the old one. 
- */ -static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, - struct folio *folio) -{ - struct fuse_inode *fi = get_fuse_inode(new_wpa->inode); - struct fuse_writepage_args *tmp; - struct fuse_writepage_args *old_wpa; - struct fuse_args_pages *new_ap = &new_wpa->ia.ap; - - WARN_ON(new_ap->num_folios != 0); - new_ap->num_folios = 1; - - spin_lock(&fi->lock); - old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa); - if (!old_wpa) { - spin_unlock(&fi->lock); - return true; - } - - for (tmp = old_wpa->next; tmp; tmp = tmp->next) { - pgoff_t curr_index; - - WARN_ON(tmp->inode != new_wpa->inode); - curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT; - if (curr_index == folio->index) { - WARN_ON(tmp->ia.ap.num_folios != 1); - swap(tmp->ia.ap.folios[0], new_ap->folios[0]); - break; - } - } - - if (!tmp) { - new_wpa->next = old_wpa->next; - old_wpa->next = new_wpa; - } - - spin_unlock(&fi->lock); - - if (tmp) { - fuse_writepage_finish_stat(new_wpa->inode, - folio); - fuse_writepage_free(new_wpa); - } - - return false; } static bool fuse_writepage_need_send(struct fuse_conn *fc, struct folio *folio, @@ -2315,15 +2064,6 @@ static bool fuse_writepage_need_send(struct fuse_conn *fc, struct folio *folio, { WARN_ON(!ap->num_folios); - /* - * Being under writeback is unlikely but possible. For example direct - * read to an mmaped fuse file will set the page dirty twice; once when - * the pages are faulted with get_user_pages(), and then after the read - * completed. - */ - if (fuse_folio_is_writeback(data->inode, folio)) - return true; - /* Reached max pages */ if (ap->num_folios == fc->max_pages) return true; @@ -2333,7 +2073,7 @@ static bool fuse_writepage_need_send(struct fuse_conn *fc, struct folio *folio, return true; /* Discontinuity */ - if (data->orig_folios[ap->num_folios - 1]->index + 1 != folio_index(folio)) + if (ap->folios[ap->num_folios - 1]->index + 1 != folio_index(folio)) return true; /* Need to grow the pages array? If so, did the expansion fail? */ @@ -2352,7 +2092,6 @@ static int fuse_writepages_fill(struct folio *folio, struct inode *inode = data->inode; struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = get_fuse_conn(inode); - struct folio *tmp_folio; int err; if (!data->ff) { @@ -2367,54 +2106,23 @@ static int fuse_writepages_fill(struct folio *folio, data->wpa = NULL; } - err = -ENOMEM; - tmp_folio = folio_alloc(GFP_NOFS | __GFP_HIGHMEM, 0); - if (!tmp_folio) - goto out_unlock; - - /* - * The page must not be redirtied until the writeout is completed - * (i.e. userspace has sent a reply to the write request). Otherwise - * there could be more than one temporary page instance for each real - * page. - * - * This is ensured by holding the page lock in page_mkwrite() while - * checking fuse_page_is_writeback(). We already hold the page lock - * since clear_page_dirty_for_io() and keep it held until we add the - * request to the fi->writepages list and increment ap->num_folios. - * After this fuse_page_is_writeback() will indicate that the page is - * under writeback, so we can release the page lock. 
- */ if (data->wpa == NULL) { err = -ENOMEM; wpa = fuse_writepage_args_setup(folio, data->ff); - if (!wpa) { - folio_put(tmp_folio); + if (!wpa) goto out_unlock; - } fuse_file_get(wpa->ia.ff); data->max_folios = 1; ap = &wpa->ia.ap; } folio_start_writeback(folio); - fuse_writepage_args_page_fill(wpa, folio, tmp_folio, ap->num_folios); - data->orig_folios[ap->num_folios] = folio; + fuse_writepage_args_page_fill(wpa, folio, ap->num_folios); err = 0; - if (data->wpa) { - /* - * Protected by fi->lock against concurrent access by - * fuse_page_is_writeback(). - */ - spin_lock(&fi->lock); - ap->num_folios++; - spin_unlock(&fi->lock); - } else if (fuse_writepage_add(wpa, folio)) { + ap->num_folios++; + if (!data->wpa) data->wpa = wpa; - } else { - folio_end_writeback(folio); - } out_unlock: folio_unlock(folio); @@ -2441,13 +2149,6 @@ static int fuse_writepages(struct address_space *mapping, data.wpa = NULL; data.ff = NULL; - err = -ENOMEM; - data.orig_folios = kcalloc(fc->max_pages, - sizeof(struct folio *), - GFP_NOFS); - if (!data.orig_folios) - goto out; - err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data); if (data.wpa) { WARN_ON(!data.wpa->ia.ap.num_folios); @@ -2456,7 +2157,6 @@ static int fuse_writepages(struct address_space *mapping, if (data.ff) fuse_file_put(data.ff, false); - kfree(data.orig_folios); out: return err; } @@ -2481,8 +2181,6 @@ static int fuse_write_begin(struct file *file, struct address_space *mapping, if (IS_ERR(folio)) goto error; - fuse_wait_on_page_writeback(mapping->host, folio->index); - if (folio_test_uptodate(folio) || len >= folio_size(folio)) goto success; /* @@ -2545,13 +2243,9 @@ static int fuse_launder_folio(struct folio *folio) { int err = 0; if (folio_clear_dirty_for_io(folio)) { - struct inode *inode = folio->mapping->host; - - /* Serialize with pending writeback for the same page */ - fuse_wait_on_page_writeback(inode, folio->index); err = fuse_writepage_locked(folio); if (!err) - fuse_wait_on_page_writeback(inode, folio->index); + folio_wait_writeback(folio); } return err; } @@ -2595,7 +2289,7 @@ static vm_fault_t fuse_page_mkwrite(struct vm_fault *vmf) return VM_FAULT_NOPAGE; } - fuse_wait_on_folio_writeback(inode, folio); + folio_wait_writeback(folio); return VM_FAULT_LOCKED; } @@ -3413,9 +3107,12 @@ static const struct address_space_operations fuse_file_aops = { void fuse_init_file_inode(struct inode *inode, unsigned int flags) { struct fuse_inode *fi = get_fuse_inode(inode); + struct fuse_conn *fc = get_fuse_conn(inode); inode->i_fop = &fuse_file_operations; inode->i_data.a_ops = &fuse_file_aops; + if (fc->writeback_cache) + mapping_set_writeback_indeterminate(&inode->i_data); INIT_LIST_HEAD(&fi->write_files); INIT_LIST_HEAD(&fi->queued_writes); @@ -3423,7 +3120,6 @@ void fuse_init_file_inode(struct inode *inode, unsigned int flags) fi->iocachectr = 0; init_waitqueue_head(&fi->page_waitq); init_waitqueue_head(&fi->direct_io_waitq); - fi->writepages = RB_ROOT; if (IS_ENABLED(CONFIG_FUSE_DAX)) fuse_dax_inode_init(inode, flags); diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 74744c6f2860..23736c5c64c1 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -141,9 +141,6 @@ struct fuse_inode { /* waitq for direct-io completion */ wait_queue_head_t direct_io_waitq; - - /* List of writepage requestst (pending or sent) */ - struct rb_root writepages; }; /* readdir cache (directory only) */ From 7fe592ee635863a82e29a49cc09d7d5a90be33a8 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 13 Dec 2024 16:24:09 +0000 Subject: 
[PATCH 152/504] tools: testing: add simple __mmap_region() userland test Introduce demonstrative, basic, __mmap_region() test upon which we can base further work upon moving forwards. This simply asserts that mappings can be made and merges occur as expected. As part of this change, fix the security_vm_enough_memory_mm() stub which was previously incorrectly implemented. Link: https://lkml.kernel.org/r/20241213162409.41498-1-lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Cc: Jann Horn Cc: Liam R. Howlett Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- tools/testing/vma/vma.c | 53 ++++++++++++++++++++++++++++++++ tools/testing/vma/vma_internal.h | 2 +- 2 files changed, 54 insertions(+), 1 deletion(-) diff --git a/tools/testing/vma/vma.c b/tools/testing/vma/vma.c index 920fba58884e..04ab45e27fb8 100644 --- a/tools/testing/vma/vma.c +++ b/tools/testing/vma/vma.c @@ -1574,6 +1574,57 @@ static bool test_expand_only_mode(void) return true; } +static bool test_mmap_region_basic(void) +{ + struct mm_struct mm = {}; + unsigned long addr; + struct vm_area_struct *vma; + VMA_ITERATOR(vmi, &mm, 0); + + current->mm = &mm; + + /* Map at 0x300000, length 0x3000. */ + addr = __mmap_region(NULL, 0x300000, 0x3000, + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, + 0x300, NULL); + ASSERT_EQ(addr, 0x300000); + + /* Map at 0x250000, length 0x3000. */ + addr = __mmap_region(NULL, 0x250000, 0x3000, + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, + 0x250, NULL); + ASSERT_EQ(addr, 0x250000); + + /* Map at 0x303000, merging to 0x300000 of length 0x6000. */ + addr = __mmap_region(NULL, 0x303000, 0x3000, + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, + 0x303, NULL); + ASSERT_EQ(addr, 0x303000); + + /* Map at 0x24d000, merging to 0x250000 of length 0x6000. */ + addr = __mmap_region(NULL, 0x24d000, 0x3000, + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE, + 0x24d, NULL); + ASSERT_EQ(addr, 0x24d000); + + ASSERT_EQ(mm.map_count, 2); + + for_each_vma(vmi, vma) { + if (vma->vm_start == 0x300000) { + ASSERT_EQ(vma->vm_end, 0x306000); + ASSERT_EQ(vma->vm_pgoff, 0x300); + } else if (vma->vm_start == 0x24d000) { + ASSERT_EQ(vma->vm_end, 0x253000); + ASSERT_EQ(vma->vm_pgoff, 0x24d); + } else { + ASSERT_FALSE(true); + } + } + + cleanup_mm(&mm, &vmi); + return true; +} + int main(void) { int num_tests = 0, num_fail = 0; @@ -1607,6 +1658,8 @@ int main(void) TEST(copy_vma); TEST(expand_only_mode); + TEST(mmap_region_basic); + #undef TEST printf("%d tests run, %d passed, %d failed.\n", diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index b973b3e41c83..ae635eecbfa8 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -996,7 +996,7 @@ static inline bool is_file_hugepages(struct file *) static inline int security_vm_enough_memory_mm(struct mm_struct *, long) { - return true; + return 0; } static inline bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long) From 2ab3219381e6708cea59355b072adf8cc020785b Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Sun, 15 Dec 2024 21:44:47 -0800 Subject: [PATCH 153/504] mm/huge_memory.c: rename shadowed local split_huge_pages_write() has a lccal `buf' which shadows incoming arg `buf'. Reviewer confusion resulted. Rename the inner local to `tok_buf'. 
Cc: Leo Stone Signed-off-by: Andrew Morton --- mm/huge_memory.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 6d87db53db33..2654a9548749 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -4181,20 +4181,21 @@ static ssize_t split_huge_pages_write(struct file *file, const char __user *buf, if (input_buf[0] == '/') { char *tok; - char *buf = input_buf; + char *tok_buf = input_buf; char file_path[MAX_INPUT_BUF_SZ]; pgoff_t off_start = 0, off_end = 0; size_t input_len = strlen(input_buf); - tok = strsep(&buf, ","); - if (tok && buf) { + tok = strsep(&tok_buf, ","); + if (tok && tok_buf) { strscpy(file_path, tok); } else { ret = -EINVAL; goto out; } - ret = sscanf(buf, "0x%lx,0x%lx,%d", &off_start, &off_end, &new_order); + ret = sscanf(tok_buf, "0x%lx,0x%lx,%d", &off_start, + &off_end, &new_order); if (ret != 2 && ret != 3) { ret = -EINVAL; goto out; From 5758b518c1f2be928c79289d47cb668b0d9caff4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Wei=C3=9Fschuh?= Date: Mon, 16 Dec 2024 13:20:25 +0100 Subject: [PATCH 154/504] mm/page_idle: constify 'struct bin_attribute' MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The sysfs core now allows instances of 'struct bin_attribute' to be moved into read-only memory. Make use of that to protect them against accidental or malicious modifications. Link: https://lkml.kernel.org/r/20241216-sysfs-const-bin_attr-page_idle-v1-1-cc01ecc55196@weissschuh.net Signed-off-by: Thomas Weißschuh Signed-off-by: Andrew Morton --- mm/page_idle.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mm/page_idle.c b/mm/page_idle.c index 41ea77f22011..947c7c7a3728 100644 --- a/mm/page_idle.c +++ b/mm/page_idle.c @@ -112,7 +112,7 @@ static void page_idle_clear_pte_refs(struct folio *folio) } static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, + const struct bin_attribute *attr, char *buf, loff_t pos, size_t count) { u64 *out = (u64 *)buf; @@ -157,7 +157,7 @@ static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj, } static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj, - struct bin_attribute *attr, char *buf, + const struct bin_attribute *attr, char *buf, loff_t pos, size_t count) { const u64 *in = (u64 *)buf; @@ -193,17 +193,17 @@ static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj, return (char *)in - buf; } -static struct bin_attribute page_idle_bitmap_attr = +static const struct bin_attribute page_idle_bitmap_attr = __BIN_ATTR(bitmap, 0600, page_idle_bitmap_read, page_idle_bitmap_write, 0); -static struct bin_attribute *page_idle_bin_attrs[] = { +static const struct bin_attribute *const page_idle_bin_attrs[] = { &page_idle_bitmap_attr, NULL, }; static const struct attribute_group page_idle_attr_group = { - .bin_attrs = page_idle_bin_attrs, + .bin_attrs_new = page_idle_bin_attrs, .name = "page_idle", }; From 20e827315c523fb13860b717a7e2d8bc2bf33320 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Mon, 16 Dec 2024 14:01:13 -0500 Subject: [PATCH 155/504] test_maple_tree: test exhausted upper limit of mtree_alloc_cyclic() When the upper bound of the search is exhausted, the maple state may be returned in an error state of -EBUSY. This means maple state needs to be reset before the second search in mas_alloc_cylic() to ensure the search happens. This test ensures the issue is not recreated. 
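The shape of the bug is generic to any cyclic allocator: the first pass searches [hint, max], and if that pass ends in an "exhausted" error state, the wrapped pass over [min, hint) must start from a clean state or it never runs. Below is a standalone sketch of that two-pass logic in plain C (an illustration only, not the maple tree code; all names are made up for the example):

#include <stdbool.h>
#include <stdio.h>

#define SLOTS 8

static bool used[SLOTS];

/* Search [lo, hi) for a free slot; returns index or -1 ("exhausted"). */
static int search(int lo, int hi)
{
	for (int i = lo; i < hi; i++) {
		if (!used[i])
			return i;
	}
	return -1;	/* stand-in for the -EBUSY "exhausted" state */
}

/*
 * Cyclic allocation: try [*next, SLOTS) first, then wrap to [0, *next).
 * The point of the fix: the wrapped pass must still be attempted even
 * though the first pass ended in the exhausted state -- that state has
 * to be reset before searching again.
 */
static int alloc_cyclic(int *next)
{
	int idx = search(*next, SLOTS);

	if (idx < 0)
		idx = search(0, *next);	/* second pass after resetting */
	if (idx < 0)
		return -1;		/* genuinely full */

	used[idx] = true;
	*next = (idx + 1) % SLOTS;
	return idx;
}

int main(void)
{
	int next = 0;

	for (int i = 0; i < SLOTS; i++)	/* exhaust the whole range */
		alloc_cyclic(&next);

	used[2] = false;		/* free one low slot, like mtree_erase() */
	next = SLOTS - 1;		/* hint near the top forces a wrap */
	printf("wrapped allocation got slot %d\n", alloc_cyclic(&next));
	return 0;
}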
Link: https://lkml.kernel.org/r/20241216190113.1226145-3-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Reviewed-by: Yang Erkun Cc: Christian Brauner Cc: Chuck Lever says: Signed-off-by: Andrew Morton --- lib/test_maple_tree.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index 72bda304b595..13e2a10d7554 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -3738,6 +3738,34 @@ static noinline void __init alloc_cyclic_testing(struct maple_tree *mt) } mtree_destroy(mt); + + /* + * Issue with reverse search was discovered + * https://lore.kernel.org/all/20241216060600.287B4C4CED0@smtp.kernel.org/ + * Exhausting the allocation area and forcing the search to wrap needs a + * mas_reset() in mas_alloc_cyclic(). + */ + next = 0; + mt_init_flags(mt, MT_FLAGS_ALLOC_RANGE); + for (int i = 0; i < 1023; i++) { + mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL); + MT_BUG_ON(mt, i != location - 2); + MT_BUG_ON(mt, i != next - 3); + MT_BUG_ON(mt, mtree_load(mt, location) != mt); + } + mtree_erase(mt, 123); + MT_BUG_ON(mt, mtree_load(mt, 123) != NULL); + mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL); + MT_BUG_ON(mt, 123 != location); + MT_BUG_ON(mt, 124 != next); + MT_BUG_ON(mt, mtree_load(mt, location) != mt); + mtree_erase(mt, 100); + mtree_alloc_cyclic(mt, &location, mt, 2, 1024, &next, GFP_KERNEL); + MT_BUG_ON(mt, 100 != location); + MT_BUG_ON(mt, 101 != next); + MT_BUG_ON(mt, mtree_load(mt, location) != mt); + mtree_destroy(mt); + /* Overflow test */ next = ULONG_MAX - 1; ret = mtree_alloc_cyclic(mt, &location, mt, 2, ULONG_MAX, &next, GFP_KERNEL); From 1bfc2db3c739c3e884cb5d8e37321fa4b3eed471 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Wed, 18 Dec 2024 19:46:30 +0800 Subject: [PATCH 156/504] mm, memcontrol: avoid duplicated memcg enable check Patch series "mm/swap_cgroup: remove global swap cgroup lock", v3. This series removes the global swap cgroup lock. The critical section of this lock is very short but it's still a bottle neck for mass parallel swap workloads. Up to 10% performance gain for tmpfs build kernel test on a 48c96t system under memory pressure, and no regression for other cases: This patch (of 3): mem_cgroup_uncharge_swap() includes a mem_cgroup_disabled() check, so the caller doesn't need to check that. Link: https://lkml.kernel.org/r/20241218114633.85196-1-ryncsn@gmail.com Link: https://lkml.kernel.org/r/20241218114633.85196-2-ryncsn@gmail.com Signed-off-by: Kairui Song Reviewed-by: Yosry Ahmed Reviewed-by: Roman Gushchin Acked-by: Shakeel Butt Acked-by: Chris Li Cc: Barry Song Cc: Hugh Dickins Cc: Johannes Weiner Cc: Michal Hocko Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/memcontrol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7ddbb2d12eb9..5c373d275e7a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4595,7 +4595,7 @@ void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) * correspond 1:1 to page and swap slot lifetimes: we charge the * page to memory here, and uncharge swap when the slot is freed. */ - if (!mem_cgroup_disabled() && do_memsw_account()) { + if (do_memsw_account()) { /* * The swap entry might not get freed for a long time, * let's not wait for it. 
The page already received a From 0cf52d2e00ef54cb7a002dcdedadf7af93b6beeb Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Wed, 18 Dec 2024 19:46:31 +0800 Subject: [PATCH 157/504] mm/swap_cgroup: remove swap_cgroup_cmpxchg This function is never used after commit 6b611388b626 ("memcg-v1: remove charge move code"). Link: https://lkml.kernel.org/r/20241218114633.85196-3-ryncsn@gmail.com Signed-off-by: Kairui Song Reviewed-by: Yosry Ahmed Reviewed-by: Roman Gushchin Acked-by: Shakeel Butt Acked-by: Chris Li Cc: Barry Song Cc: Hugh Dickins Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Andrew Morton --- include/linux/swap_cgroup.h | 2 -- mm/swap_cgroup.c | 29 ----------------------------- 2 files changed, 31 deletions(-) diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h index ae73a87775b3..d521ad1c4164 100644 --- a/include/linux/swap_cgroup.h +++ b/include/linux/swap_cgroup.h @@ -6,8 +6,6 @@ #if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP) -extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, - unsigned short old, unsigned short new); extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, unsigned int nr_ents); extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index f63d1aa072a1..1770b076f6b7 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -45,35 +45,6 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, return &ctrl->map[offset]; } -/** - * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry. - * @ent: swap entry to be cmpxchged - * @old: old id - * @new: new id - * - * Returns old id at success, 0 at failure. - * (There is no mem_cgroup using 0 as its id) - */ -unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, - unsigned short old, unsigned short new) -{ - struct swap_cgroup_ctrl *ctrl; - struct swap_cgroup *sc; - unsigned long flags; - unsigned short retval; - - sc = lookup_swap_cgroup(ent, &ctrl); - - spin_lock_irqsave(&ctrl->lock, flags); - retval = sc->id; - if (retval == old) - sc->id = new; - else - retval = 0; - spin_unlock_irqrestore(&ctrl->lock, flags); - return retval; -} - /** * swap_cgroup_record - record mem_cgroup for a set of swap entries * @ent: the first swap entry to be recorded into From a9507592fa5cd16815eedb97937f161b7f626a5e Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Wed, 18 Dec 2024 19:46:32 +0800 Subject: [PATCH 158/504] mm/swap_cgroup: remove global swap cgroup lock commit e9e58a4ec3b1 ("memcg: avoid use cmpxchg in swap cgroup maintainance") replaced the cmpxchg/xchg with a global irq spinlock because some archs doesn't support 2 bytes cmpxchg/xchg. Clearly this won't scale well. And as commented in swap_cgroup.c, this lock is not needed for map synchronization. Emulation of 2 bytes xchg with atomic cmpxchg isn't hard, so implement it to get rid of this lock. Introduced two helpers for doing so and they can be easily dropped if a generic 2 byte xchg is support. 
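For readers unfamiliar with the trick, the sketch below (a standalone C11-atomics illustration, not the kernel helpers added by this patch, which operate on atomic_t and the swap_cgroup map) shows how a 16-bit exchange can be emulated with a 32-bit compare-and-exchange: read the containing 32-bit word, splice the new short into the right half, and retry the CAS until it succeeds.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Exchange one 16-bit half of a 32-bit atomic word; returns the old half. */
static uint16_t xchg16(_Atomic uint32_t *word, unsigned int which, uint16_t new_id)
{
	unsigned int shift = (which & 1) * 16;	/* which half of the word */
	uint32_t old = atomic_load(word);
	uint32_t desired;

	do {
		desired = (old & ~(0xffffu << shift)) |
			  ((uint32_t)new_id << shift);
	} while (!atomic_compare_exchange_weak(word, &old, desired));

	return (old >> shift) & 0xffff;		/* previous 16-bit value */
}

int main(void)
{
	_Atomic uint32_t word = 0;

	printf("old lo = %u\n", (unsigned)xchg16(&word, 0, 42));	/* 0 */
	printf("old hi = %u\n", (unsigned)xchg16(&word, 1, 7));		/* 0 */
	printf("old lo = %u\n", (unsigned)xchg16(&word, 0, 5));		/* 42 */
	return 0;
}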
Testing using 64G brd and build with build kernel with make -j96 in 1.5G memory cgroup using 4k folios showed below improvement (6 test run): Before this series: Sys time: 10782.29 (stdev 42.353886) Real time: 171.49 (stdev 0.595541) After this commit: Sys time: 9617.23 (stdev 37.764062), -10.81% Real time: 159.65 (stdev 0.587388), -6.90% With 64k folios and 2G memcg: Before this series: Sys time: 8176.94 (stdev 26.414712) Real time: 141.98 (stdev 0.797382) After this commit: Sys time: 7358.98 (stdev 54.927593), -10.00% Real time: 134.07 (stdev 0.757463), -5.57% Sequential swapout of 8G 64k zero folios with madvise (24 test run): Before this series: 5461409.12 us (stdev 183957.827084) After this commit: 5420447.26 us (stdev 196419.240317) Sequential swapin of 8G 4k zero folios (24 test run): Before this series: 19736958.916667 us (stdev 189027.246676) After this commit: 19662182.629630 us (stdev 172717.640614) Performance is better or at least not worse for all tests above. Link: https://lkml.kernel.org/r/20241218114633.85196-4-ryncsn@gmail.com Signed-off-by: Kairui Song Reviewed-by: Roman Gushchin Cc: Barry Song Cc: Chris Li Cc: Hugh Dickins Cc: Johannes Weiner Cc: Michal Hocko Cc: Shakeel Butt Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/swap_cgroup.c | 77 ++++++++++++++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 28 deletions(-) diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index 1770b076f6b7..cf0445cb35ed 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -7,19 +7,20 @@ static DEFINE_MUTEX(swap_cgroup_mutex); +/* Pack two cgroup id (short) of two entries in one swap_cgroup (atomic_t) */ +#define ID_PER_SC (sizeof(struct swap_cgroup) / sizeof(unsigned short)) +#define ID_SHIFT (BITS_PER_TYPE(unsigned short)) +#define ID_MASK (BIT(ID_SHIFT) - 1) struct swap_cgroup { - unsigned short id; + atomic_t ids; }; struct swap_cgroup_ctrl { struct swap_cgroup *map; - spinlock_t lock; }; static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; -#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup)) - /* * SwapCgroup implements "lookup" and "exchange" operations. * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge @@ -30,19 +31,35 @@ static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; * SwapCache(and its swp_entry) is under lock. * - When called via swap_free(), there is no user of this entry and no race. * Then, we don't need lock around "exchange". - * - * TODO: we can push these buffers out to HIGHMEM. 
*/ -static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, - struct swap_cgroup_ctrl **ctrlp) +static unsigned short __swap_cgroup_id_lookup(struct swap_cgroup *map, + pgoff_t offset) { - pgoff_t offset = swp_offset(ent); - struct swap_cgroup_ctrl *ctrl; + unsigned int shift = (offset % ID_PER_SC) * ID_SHIFT; + unsigned int old_ids = atomic_read(&map[offset / ID_PER_SC].ids); - ctrl = &swap_cgroup_ctrl[swp_type(ent)]; - if (ctrlp) - *ctrlp = ctrl; - return &ctrl->map[offset]; + BUILD_BUG_ON(!is_power_of_2(ID_PER_SC)); + BUILD_BUG_ON(sizeof(struct swap_cgroup) != sizeof(atomic_t)); + + return (old_ids >> shift) & ID_MASK; +} + +static unsigned short __swap_cgroup_id_xchg(struct swap_cgroup *map, + pgoff_t offset, + unsigned short new_id) +{ + unsigned short old_id; + struct swap_cgroup *sc = &map[offset / ID_PER_SC]; + unsigned int shift = (offset % ID_PER_SC) * ID_SHIFT; + unsigned int new_ids, old_ids = atomic_read(&sc->ids); + + do { + old_id = (old_ids >> shift) & ID_MASK; + new_ids = (old_ids & ~(ID_MASK << shift)); + new_ids |= ((unsigned int)new_id) << shift; + } while (!atomic_try_cmpxchg(&sc->ids, &old_ids, new_ids)); + + return old_id; } /** @@ -58,21 +75,19 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, unsigned int nr_ents) { struct swap_cgroup_ctrl *ctrl; - struct swap_cgroup *sc; - unsigned short old; - unsigned long flags; pgoff_t offset = swp_offset(ent); pgoff_t end = offset + nr_ents; + unsigned short old, iter; + struct swap_cgroup *map; - sc = lookup_swap_cgroup(ent, &ctrl); + ctrl = &swap_cgroup_ctrl[swp_type(ent)]; + map = ctrl->map; - spin_lock_irqsave(&ctrl->lock, flags); - old = sc->id; - for (; offset < end; offset++, sc++) { - VM_BUG_ON(sc->id != old); - sc->id = id; - } - spin_unlock_irqrestore(&ctrl->lock, flags); + old = __swap_cgroup_id_lookup(map, offset); + do { + iter = __swap_cgroup_id_xchg(map, offset, id); + VM_BUG_ON(iter != old); + } while (++offset != end); return old; } @@ -85,9 +100,13 @@ unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, */ unsigned short lookup_swap_cgroup_id(swp_entry_t ent) { + struct swap_cgroup_ctrl *ctrl; + if (mem_cgroup_disabled()) return 0; - return lookup_swap_cgroup(ent, NULL)->id; + + ctrl = &swap_cgroup_ctrl[swp_type(ent)]; + return __swap_cgroup_id_lookup(ctrl->map, swp_offset(ent)); } int swap_cgroup_swapon(int type, unsigned long max_pages) @@ -98,14 +117,16 @@ int swap_cgroup_swapon(int type, unsigned long max_pages) if (mem_cgroup_disabled()) return 0; - map = vcalloc(max_pages, sizeof(struct swap_cgroup)); + BUILD_BUG_ON(sizeof(unsigned short) * ID_PER_SC != + sizeof(struct swap_cgroup)); + map = vcalloc(DIV_ROUND_UP(max_pages, ID_PER_SC), + sizeof(struct swap_cgroup)); if (!map) goto nomem; ctrl = &swap_cgroup_ctrl[type]; mutex_lock(&swap_cgroup_mutex); ctrl->map = map; - spin_lock_init(&ctrl->lock); mutex_unlock(&swap_cgroup_mutex); return 0; From 5988d689dcee2889a2c0632e6447dcd8312e6cc6 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Wed, 18 Dec 2024 19:46:33 +0800 Subject: [PATCH 159/504] mm/swap_cgroup: decouple swap cgroup recording and clearing The current implementation of swap cgroup tracking is a bit complex and fragile: On charging path, swap_cgroup_record always records an actual memcg id, and it depends on the caller to make sure all entries passed in must belong to one single folio. As folios are always charged or uncharged as a whole, and always charged and uncharged in order, swap_cgroup doesn't need an extra lock. 
On uncharging path, swap_cgroup_record always sets the record to zero. These entries won't be charged again until uncharging is done. So there is no extra lock needed either. Worth noting that swap cgroup clearing may happen without folio involved, eg. exiting processes will zap its page table without swapin. The xchg/cmpxchg provides atomic operations and barriers to ensure no tearing or synchronization issue of these swap cgroup records. It works but quite error-prone. Things can be much clear and robust by decoupling recording and clearing into two helpers. Recording takes the actual folio being charged as argument, and clearing always set the record to zero, and refine the debug sanity checks to better reflect their usage Benchmark even showed a very slight improvement as it saved some extra arguments and lookups: make -j96 with defconfig on tmpfs in 1.5G memory cgroup using 4k folios: Before: sys 9617.23 (stdev 37.764062) After : sys 9541.54 (stdev 42.973976) make -j96 with defconfig on tmpfs in 2G memory cgroup using 64k folios: Before: sys 7358.98 (stdev 54.927593) After : sys 7337.82 (stdev 39.398956) Link: https://lkml.kernel.org/r/20241218114633.85196-5-ryncsn@gmail.com Signed-off-by: Kairui Song Suggested-by: Chris Li Cc: Barry Song Cc: Hugh Dickins Cc: Johannes Weiner Cc: Michal Hocko Cc: Roman Gushchin Cc: Shakeel Butt Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap_cgroup.h | 12 ++++--- mm/memcontrol.c | 13 +++----- mm/swap_cgroup.c | 66 +++++++++++++++++++++++-------------- 3 files changed, 55 insertions(+), 36 deletions(-) diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h index d521ad1c4164..b5ec038069da 100644 --- a/include/linux/swap_cgroup.h +++ b/include/linux/swap_cgroup.h @@ -6,8 +6,8 @@ #if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP) -extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, - unsigned int nr_ents); +extern void swap_cgroup_record(struct folio *folio, swp_entry_t ent); +extern unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents); extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); extern int swap_cgroup_swapon(int type, unsigned long max_pages); extern void swap_cgroup_swapoff(int type); @@ -15,8 +15,12 @@ extern void swap_cgroup_swapoff(int type); #else static inline -unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, - unsigned int nr_ents) +void swap_cgroup_record(struct folio *folio, swp_entry_t ent) +{ +} + +static inline +unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents) { return 0; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 5c373d275e7a..65fb5eee1466 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4959,7 +4959,6 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) { struct mem_cgroup *memcg, *swap_memcg; unsigned int nr_entries; - unsigned short oldid; VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); VM_BUG_ON_FOLIO(folio_ref_count(folio), folio); @@ -4986,11 +4985,10 @@ void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry) /* Get references for the tail pages, too */ if (nr_entries > 1) mem_cgroup_id_get_many(swap_memcg, nr_entries - 1); - oldid = swap_cgroup_record(entry, mem_cgroup_id(swap_memcg), - nr_entries); - VM_BUG_ON_FOLIO(oldid, folio); mod_memcg_state(swap_memcg, MEMCG_SWAP, nr_entries); + swap_cgroup_record(folio, entry); + folio_unqueue_deferred_split(folio); folio->memcg_data = 0; @@ -5021,7 +5019,6 @@ int __mem_cgroup_try_charge_swap(struct folio *folio, 
swp_entry_t entry) unsigned int nr_pages = folio_nr_pages(folio); struct page_counter *counter; struct mem_cgroup *memcg; - unsigned short oldid; if (do_memsw_account()) return 0; @@ -5050,10 +5047,10 @@ int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry) /* Get references for the tail pages, too */ if (nr_pages > 1) mem_cgroup_id_get_many(memcg, nr_pages - 1); - oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg), nr_pages); - VM_BUG_ON_FOLIO(oldid, folio); mod_memcg_state(memcg, MEMCG_SWAP, nr_pages); + swap_cgroup_record(folio, entry); + return 0; } @@ -5067,7 +5064,7 @@ void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages) struct mem_cgroup *memcg; unsigned short id; - id = swap_cgroup_record(entry, 0, nr_pages); + id = swap_cgroup_clear(entry, nr_pages); rcu_read_lock(); memcg = mem_cgroup_from_id(id); if (memcg) { diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c index cf0445cb35ed..be39078f255b 100644 --- a/mm/swap_cgroup.c +++ b/mm/swap_cgroup.c @@ -21,17 +21,6 @@ struct swap_cgroup_ctrl { static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; -/* - * SwapCgroup implements "lookup" and "exchange" operations. - * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge - * against SwapCache. At swap_free(), this is accessed directly from swap. - * - * This means, - * - we have no race in "exchange" when we're accessed via SwapCache because - * SwapCache(and its swp_entry) is under lock. - * - When called via swap_free(), there is no user of this entry and no race. - * Then, we don't need lock around "exchange". - */ static unsigned short __swap_cgroup_id_lookup(struct swap_cgroup *map, pgoff_t offset) { @@ -63,29 +52,58 @@ static unsigned short __swap_cgroup_id_xchg(struct swap_cgroup *map, } /** - * swap_cgroup_record - record mem_cgroup for a set of swap entries + * swap_cgroup_record - record mem_cgroup for a set of swap entries. + * These entries must belong to one single folio, and that folio + * must be being charged for swap space (swap out), and these + * entries must not have been charged + * + * @folio: the folio that the swap entry belongs to + * @ent: the first swap entry to be recorded + */ +void swap_cgroup_record(struct folio *folio, swp_entry_t ent) +{ + unsigned int nr_ents = folio_nr_pages(folio); + struct swap_cgroup *map; + pgoff_t offset, end; + unsigned short old; + + offset = swp_offset(ent); + end = offset + nr_ents; + map = swap_cgroup_ctrl[swp_type(ent)].map; + + do { + old = __swap_cgroup_id_xchg(map, offset, + mem_cgroup_id(folio_memcg(folio))); + VM_BUG_ON(old); + } while (++offset != end); +} + +/** + * swap_cgroup_clear - clear mem_cgroup for a set of swap entries. + * These entries must be being uncharged from swap. They either + * belongs to one single folio in the swap cache (swap in for + * cgroup v1), or no longer have any users (slot freeing). + * * @ent: the first swap entry to be recorded into - * @id: mem_cgroup to be recorded * @nr_ents: number of swap entries to be recorded * - * Returns old value at success, 0 at failure. - * (Of course, old value can be 0.) + * Returns the existing old value. 
*/ -unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id, - unsigned int nr_ents) +unsigned short swap_cgroup_clear(swp_entry_t ent, unsigned int nr_ents) { - struct swap_cgroup_ctrl *ctrl; pgoff_t offset = swp_offset(ent); pgoff_t end = offset + nr_ents; - unsigned short old, iter; struct swap_cgroup *map; + unsigned short old, iter = 0; - ctrl = &swap_cgroup_ctrl[swp_type(ent)]; - map = ctrl->map; + offset = swp_offset(ent); + end = offset + nr_ents; + map = swap_cgroup_ctrl[swp_type(ent)].map; - old = __swap_cgroup_id_lookup(map, offset); do { - iter = __swap_cgroup_id_xchg(map, offset, id); + old = __swap_cgroup_id_xchg(map, offset, 0); + if (!iter) + iter = old; VM_BUG_ON(iter != old); } while (++offset != end); @@ -119,7 +137,7 @@ int swap_cgroup_swapon(int type, unsigned long max_pages) BUILD_BUG_ON(sizeof(unsigned short) * ID_PER_SC != sizeof(struct swap_cgroup)); - map = vcalloc(DIV_ROUND_UP(max_pages, ID_PER_SC), + map = vzalloc(DIV_ROUND_UP(max_pages, ID_PER_SC) * sizeof(struct swap_cgroup)); if (!map) goto nomem; From 6a6af2161b18c69d6a288dbe6ee62ebb690f3748 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Wed, 18 Dec 2024 15:34:18 +0900 Subject: [PATCH 160/504] zram: free slot memory early during write Patch series "zram: split page type read/write handling", v2. This is a subset of [1] series which contains only fixes and improvements (no new features, as ZRAM_HUGE split is still under consideration). The motivation for factoring out is that zram_write_page() gets more and more complex all the time, because it tries to handle too many scenarios: ZRAM_SAME store, ZRAM_HUGE store, compress page store with zs_malloc allocation slowpath and conditional recompression, etc. Factor those out and make things easier to handle. Addition of cond_resched() is simply a fix, I can trigger watchdog from zram writeback(). And early slot free is just a reasonable thing to do. [1] https://lore.kernel.org/linux-kernel/20241119072057.3440039-1-senozhatsky@chromium.org This patch (of 7): In the current implementation entry's previously allocated memory is released in the very last moment, when we already have allocated a new memory for new data. This, basically, temporarily increases memory usage for no good reason. For example, consider the case when both old (stale) and new entry data are incompressible so such entry will temporarily use two physical pages - one for stale (old) data and one for new data. We can release old memory as soon as we get a write request for entry. 
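To make the "temporarily increases memory usage" point concrete, here is a toy accounting of the two orderings (illustrative numbers only, not zram code): freeing the stale object as soon as the write arrives keeps the peak at one object, while the old ordering briefly holds two.

#include <stdio.h>

#define OBJ_SZ 4096	/* pretend old and new data are both incompressible */

static long in_use, peak;

static void account_alloc(long sz)
{
	in_use += sz;
	if (in_use > peak)
		peak = in_use;
}

static void account_free(long sz)
{
	in_use -= sz;
}

int main(void)
{
	/* Old ordering: allocate new data first, free the stale entry last. */
	account_alloc(OBJ_SZ);			/* stale entry already resident */
	account_alloc(OBJ_SZ);			/* new data for the rewrite */
	account_free(OBJ_SZ);			/* stale entry finally freed */
	printf("free-late  peak: %ld bytes\n", peak);	/* 8192 */

	in_use = peak = 0;

	/* New ordering: free the stale entry as soon as the write arrives. */
	account_alloc(OBJ_SZ);			/* stale entry already resident */
	account_free(OBJ_SZ);			/* freed up front */
	account_alloc(OBJ_SZ);			/* new data for the rewrite */
	printf("free-early peak: %ld bytes\n", peak);	/* 4096 */
	return 0;
}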
Link: https://lkml.kernel.org/r/20241218063513.297475-1-senozhatsky@chromium.org Link: https://lkml.kernel.org/r/20241218063513.297475-2-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Minchan Kim Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 7903a4da40ac..bf35575f5284 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1649,6 +1649,11 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) unsigned long element = 0; enum zram_pageflags flags = 0; + /* First, free memory allocated to this slot (if any) */ + zram_slot_lock(zram, index); + zram_free_page(zram, index); + zram_slot_unlock(zram, index); + mem = kmap_local_page(page); if (page_same_filled(mem, &element)) { kunmap_local(mem); @@ -1737,13 +1742,7 @@ compress_again: zs_unmap_object(zram->mem_pool, handle); atomic64_add(comp_len, &zram->stats.compr_data_size); out: - /* - * Free memory associated with this sector - * before overwriting unused sectors. - */ zram_slot_lock(zram, index); - zram_free_page(zram, index); - if (comp_len == PAGE_SIZE) { zram_set_flag(zram, index, ZRAM_HUGE); atomic64_inc(&zram->stats.huge_pages); From b0d9cdde5da7628cc4b6b0e402e3aab8d4861c6e Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Wed, 18 Dec 2024 15:34:19 +0900 Subject: [PATCH 161/504] zram: remove entry element member Element is in the same anon union as handle and hence holds the same value, which makes code below sort of confusing handle = zram_get_handle() if (!handle) element = zram_get_element() Element doesn't really simplify the code, let's just remove it. We already re-purpose handle to store the block id a written back page. 
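As a reminder of why the extra name adds nothing, the toy program below (not zram code, just an illustration of anonymous-union aliasing; the field names merely mirror the patch) shows that two members of the same anonymous union are simply two names for the same storage:

#include <stdio.h>

/* Toy stand-in for the zram table entry. */
struct entry {
	union {
		unsigned long handle;
		unsigned long element;
	};
	unsigned int flags;
};

int main(void)
{
	struct entry e = { .handle = 0xdeadbeef };

	/* Both names read the same storage, so keeping both only obscures it. */
	printf("handle  = %lx\n", e.handle);
	printf("element = %lx\n", e.element);	/* prints deadbeef as well */
	return 0;
}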
Link: https://lkml.kernel.org/r/20241218063513.297475-3-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Minchan Kim Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 23 +++++------------------ drivers/block/zram/zram_drv.h | 5 +---- 2 files changed, 6 insertions(+), 22 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index bf35575f5284..974af83b9483 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -112,17 +112,6 @@ static void zram_clear_flag(struct zram *zram, u32 index, zram->table[index].flags &= ~BIT(flag); } -static inline void zram_set_element(struct zram *zram, u32 index, - unsigned long element) -{ - zram->table[index].element = element; -} - -static unsigned long zram_get_element(struct zram *zram, u32 index) -{ - return zram->table[index].element; -} - static size_t zram_get_obj_size(struct zram *zram, u32 index) { return zram->table[index].flags & (BIT(ZRAM_FLAG_SHIFT) - 1); @@ -879,7 +868,7 @@ static ssize_t writeback_store(struct device *dev, zram_free_page(zram, index); zram_set_flag(zram, index, ZRAM_WB); - zram_set_element(zram, index, blk_idx); + zram_set_handle(zram, index, blk_idx); blk_idx = 0; atomic64_inc(&zram->stats.pages_stored); spin_lock(&zram->wb_limit_lock); @@ -1505,7 +1494,7 @@ static void zram_free_page(struct zram *zram, size_t index) if (zram_test_flag(zram, index, ZRAM_WB)) { zram_clear_flag(zram, index, ZRAM_WB); - free_block_bdev(zram, zram_get_element(zram, index)); + free_block_bdev(zram, zram_get_handle(zram, index)); goto out; } @@ -1549,12 +1538,10 @@ static int zram_read_from_zspool(struct zram *zram, struct page *page, handle = zram_get_handle(zram, index); if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) { - unsigned long value; void *mem; - value = handle ? zram_get_element(zram, index) : 0; mem = kmap_local_page(page); - zram_fill_page(mem, PAGE_SIZE, value); + zram_fill_page(mem, PAGE_SIZE, handle); kunmap_local(mem); return 0; } @@ -1600,7 +1587,7 @@ static int zram_read_page(struct zram *zram, struct page *page, u32 index, */ zram_slot_unlock(zram, index); - ret = read_from_bdev(zram, page, zram_get_element(zram, index), + ret = read_from_bdev(zram, page, zram_get_handle(zram, index), parent); } @@ -1751,7 +1738,7 @@ out: if (flags) { zram_set_flag(zram, index, flags); - zram_set_element(zram, index, element); + zram_set_handle(zram, index, element); } else { zram_set_handle(zram, index, handle); zram_set_obj_size(zram, index, comp_len); diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 134be414e210..db78d7c01b9a 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -62,10 +62,7 @@ enum zram_pageflags { /* Allocated for each disk page */ struct zram_table_entry { - union { - unsigned long handle; - unsigned long element; - }; + unsigned long handle; unsigned int flags; spinlock_t lock; #ifdef CONFIG_ZRAM_TRACK_ENTRY_ACTIME From daf03ca573843ac81b4903b5fa76e7f79fcda653 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Wed, 18 Dec 2024 15:34:20 +0900 Subject: [PATCH 162/504] zram: factor out ZRAM_SAME write Handling of ZRAM_SAME now uses a goto to the final stages of zram_write_page() plus it introduces a branch and flags variable, which is not making the code any simpler. In reality, we can handle ZRAM_SAME immediately when we detect such pages and remove a goto and a branch. Factor out ZRAM_SAME handling into a separate routine to simplify zram_write_page(). 
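For context, "same filled" means the page consists of one machine word repeated across its whole length, so only the pattern needs to be stored. A userspace sketch of that check (an illustration of the idea only, not the kernel's page_same_filled()):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096

/*
 * Returns true if the buffer is one unsigned long repeated end to end,
 * and reports that value so the caller can record just the pattern
 * instead of the whole page.
 */
static bool buf_same_filled(const void *buf, unsigned long *fill)
{
	const unsigned long *words = buf;
	size_t n = PAGE_SZ / sizeof(unsigned long);

	for (size_t i = 1; i < n; i++) {
		if (words[i] != words[0])
			return false;
	}
	*fill = words[0];
	return true;
}

int main(void)
{
	unsigned long page[PAGE_SZ / sizeof(unsigned long)];
	unsigned long fill;

	memset(page, 0, sizeof(page));
	printf("zero page same-filled: %d\n", buf_same_filled(page, &fill));

	page[17] = 42;
	printf("dirtied page same-filled: %d\n", buf_same_filled(page, &fill));
	return 0;
}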
Link: https://lkml.kernel.org/r/20241218063513.297475-4-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Minchan Kim Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 37 ++++++++++++++++++++--------------- 1 file changed, 21 insertions(+), 16 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 974af83b9483..7f7cb5b080f9 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1625,6 +1625,20 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, return zram_read_page(zram, bvec->bv_page, index, bio); } +static int write_same_filled_page(struct zram *zram, unsigned long fill, + u32 index) +{ + zram_slot_lock(zram, index); + zram_set_flag(zram, index, ZRAM_SAME); + zram_set_handle(zram, index, fill); + zram_slot_unlock(zram, index); + + atomic64_inc(&zram->stats.same_pages); + atomic64_inc(&zram->stats.pages_stored); + + return 0; +} + static int zram_write_page(struct zram *zram, struct page *page, u32 index) { int ret = 0; @@ -1634,7 +1648,7 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) void *src, *dst, *mem; struct zcomp_strm *zstrm; unsigned long element = 0; - enum zram_pageflags flags = 0; + bool same_filled; /* First, free memory allocated to this slot (if any) */ zram_slot_lock(zram, index); @@ -1642,14 +1656,10 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) zram_slot_unlock(zram, index); mem = kmap_local_page(page); - if (page_same_filled(mem, &element)) { - kunmap_local(mem); - /* Free memory associated with this sector now. */ - flags = ZRAM_SAME; - atomic64_inc(&zram->stats.same_pages); - goto out; - } + same_filled = page_same_filled(mem, &element); kunmap_local(mem); + if (same_filled) + return write_same_filled_page(zram, element, index); compress_again: zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]); @@ -1728,7 +1738,7 @@ compress_again: zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); zs_unmap_object(zram->mem_pool, handle); atomic64_add(comp_len, &zram->stats.compr_data_size); -out: + zram_slot_lock(zram, index); if (comp_len == PAGE_SIZE) { zram_set_flag(zram, index, ZRAM_HUGE); @@ -1736,13 +1746,8 @@ out: atomic64_inc(&zram->stats.huge_pages_since); } - if (flags) { - zram_set_flag(zram, index, flags); - zram_set_handle(zram, index, element); - } else { - zram_set_handle(zram, index, handle); - zram_set_obj_size(zram, index, comp_len); - } + zram_set_handle(zram, index, handle); + zram_set_obj_size(zram, index, comp_len); zram_slot_unlock(zram, index); /* Update stats */ From 4e5bab5076600f70430bc6362f9702fd30ed318d Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Wed, 18 Dec 2024 15:34:21 +0900 Subject: [PATCH 163/504] zram: factor out ZRAM_HUGE write zram_write_page() handles: ZRAM_SAME pages (which was already factored out) stores, regular page stores and ZRAM_HUGE pages stores. ZRAM_HUGE handling adds a significant amount of complexity. Instead, we can handle ZRAM_HUGE in a separate function. This allows us to simplify zs_handle allocations slow-path, as it now does not handle ZRAM_HUGE case. ZRAM_HUGE zs_handle allocation, on the other hand, can now drop __GFP_KSWAPD_RECLAIM because we handle ZRAM_HUGE in preemptible context (outside of local-lock scope). 
Link: https://lkml.kernel.org/r/20241218063513.297475-5-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Minchan Kim Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 136 +++++++++++++++++++++------------- 1 file changed, 83 insertions(+), 53 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 7f7cb5b080f9..2a5ecaf292e0 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -132,6 +132,27 @@ static inline bool zram_allocated(struct zram *zram, u32 index) zram_test_flag(zram, index, ZRAM_WB); } +static inline void update_used_max(struct zram *zram, const unsigned long pages) +{ + unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages); + + do { + if (cur_max >= pages) + return; + } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages, + &cur_max, pages)); +} + +static bool zram_can_store_page(struct zram *zram) +{ + unsigned long alloced_pages; + + alloced_pages = zs_get_total_pages(zram->mem_pool); + update_used_max(zram, alloced_pages); + + return !zram->limit_pages || alloced_pages <= zram->limit_pages; +} + #if PAGE_SIZE != 4096 static inline bool is_partial_io(struct bio_vec *bvec) { @@ -266,18 +287,6 @@ static struct zram_pp_slot *select_pp_slot(struct zram_pp_ctl *ctl) } #endif -static inline void update_used_max(struct zram *zram, - const unsigned long pages) -{ - unsigned long cur_max = atomic_long_read(&zram->stats.max_used_pages); - - do { - if (cur_max >= pages) - return; - } while (!atomic_long_try_cmpxchg(&zram->stats.max_used_pages, - &cur_max, pages)); -} - static inline void zram_fill_page(void *ptr, unsigned long len, unsigned long value) { @@ -1639,13 +1648,54 @@ static int write_same_filled_page(struct zram *zram, unsigned long fill, return 0; } +static int write_incompressible_page(struct zram *zram, struct page *page, + u32 index) +{ + unsigned long handle; + void *src, *dst; + + /* + * This function is called from preemptible context so we don't need + * to do optimistic and fallback to pessimistic handle allocation, + * like we do for compressible pages. 
+ */ + handle = zs_malloc(zram->mem_pool, PAGE_SIZE, + GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE); + if (IS_ERR_VALUE(handle)) + return PTR_ERR((void *)handle); + + if (!zram_can_store_page(zram)) { + zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); + zs_free(zram->mem_pool, handle); + return -ENOMEM; + } + + dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); + src = kmap_local_page(page); + memcpy(dst, src, PAGE_SIZE); + kunmap_local(src); + zs_unmap_object(zram->mem_pool, handle); + + zram_slot_lock(zram, index); + zram_set_flag(zram, index, ZRAM_HUGE); + zram_set_handle(zram, index, handle); + zram_set_obj_size(zram, index, PAGE_SIZE); + zram_slot_unlock(zram, index); + + atomic64_add(PAGE_SIZE, &zram->stats.compr_data_size); + atomic64_inc(&zram->stats.huge_pages); + atomic64_inc(&zram->stats.huge_pages_since); + atomic64_inc(&zram->stats.pages_stored); + + return 0; +} + static int zram_write_page(struct zram *zram, struct page *page, u32 index) { int ret = 0; - unsigned long alloced_pages; unsigned long handle = -ENOMEM; unsigned int comp_len = 0; - void *src, *dst, *mem; + void *dst, *mem; struct zcomp_strm *zstrm; unsigned long element = 0; bool same_filled; @@ -1663,10 +1713,10 @@ static int zram_write_page(struct zram *zram, struct page *page, u32 index) compress_again: zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]); - src = kmap_local_page(page); + mem = kmap_local_page(page); ret = zcomp_compress(zram->comps[ZRAM_PRIMARY_COMP], zstrm, - src, &comp_len); - kunmap_local(src); + mem, &comp_len); + kunmap_local(mem); if (unlikely(ret)) { zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); @@ -1675,8 +1725,11 @@ compress_again: return ret; } - if (comp_len >= huge_class_size) - comp_len = PAGE_SIZE; + if (comp_len >= huge_class_size) { + zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); + return write_incompressible_page(zram, page, index); + } + /* * handle allocation has 2 paths: * a) fast path is executed with preemption disabled (for @@ -1692,35 +1745,23 @@ compress_again: */ if (IS_ERR_VALUE(handle)) handle = zs_malloc(zram->mem_pool, comp_len, - __GFP_KSWAPD_RECLAIM | - __GFP_NOWARN | - __GFP_HIGHMEM | - __GFP_MOVABLE); + __GFP_KSWAPD_RECLAIM | + __GFP_NOWARN | + __GFP_HIGHMEM | + __GFP_MOVABLE); if (IS_ERR_VALUE(handle)) { zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); atomic64_inc(&zram->stats.writestall); handle = zs_malloc(zram->mem_pool, comp_len, - GFP_NOIO | __GFP_HIGHMEM | - __GFP_MOVABLE); + GFP_NOIO | __GFP_HIGHMEM | + __GFP_MOVABLE); if (IS_ERR_VALUE(handle)) return PTR_ERR((void *)handle); - if (comp_len != PAGE_SIZE) - goto compress_again; - /* - * If the page is not compressible, you need to acquire the - * lock and execute the code below. The zcomp_stream_get() - * call is needed to disable the cpu hotplug and grab the - * zstrm buffer back. It is necessary that the dereferencing - * of the zstrm variable below occurs correctly. 
- */ - zstrm = zcomp_stream_get(zram->comps[ZRAM_PRIMARY_COMP]); + goto compress_again; } - alloced_pages = zs_get_total_pages(zram->mem_pool); - update_used_max(zram, alloced_pages); - - if (zram->limit_pages && alloced_pages > zram->limit_pages) { + if (!zram_can_store_page(zram)) { zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); zs_free(zram->mem_pool, handle); return -ENOMEM; @@ -1728,30 +1769,19 @@ compress_again: dst = zs_map_object(zram->mem_pool, handle, ZS_MM_WO); - src = zstrm->buffer; - if (comp_len == PAGE_SIZE) - src = kmap_local_page(page); - memcpy(dst, src, comp_len); - if (comp_len == PAGE_SIZE) - kunmap_local(src); - + memcpy(dst, zstrm->buffer, comp_len); zcomp_stream_put(zram->comps[ZRAM_PRIMARY_COMP]); zs_unmap_object(zram->mem_pool, handle); - atomic64_add(comp_len, &zram->stats.compr_data_size); zram_slot_lock(zram, index); - if (comp_len == PAGE_SIZE) { - zram_set_flag(zram, index, ZRAM_HUGE); - atomic64_inc(&zram->stats.huge_pages); - atomic64_inc(&zram->stats.huge_pages_since); - } - zram_set_handle(zram, index, handle); zram_set_obj_size(zram, index, comp_len); zram_slot_unlock(zram, index); /* Update stats */ atomic64_inc(&zram->stats.pages_stored); + atomic64_add(comp_len, &zram->stats.compr_data_size); + return ret; } From c214754c51b1541bc3b2b056ebadedc164fa1af5 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Wed, 18 Dec 2024 15:34:22 +0900 Subject: [PATCH 164/504] zram: factor out different page types read Similarly to write, split the page read code into ZRAM_HUGE read, ZRAM_SAME read and compressed page read to simplify the code. Link: https://lkml.kernel.org/r/20241218063513.297475-6-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Minchan Kim Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 95 +++++++++++++++++++++-------------- 1 file changed, 57 insertions(+), 38 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 2a5ecaf292e0..c4454ceee4dd 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1531,6 +1531,56 @@ out: zram_set_obj_size(zram, index, 0); } +static int read_same_filled_page(struct zram *zram, struct page *page, + u32 index) +{ + void *mem; + + mem = kmap_local_page(page); + zram_fill_page(mem, PAGE_SIZE, zram_get_handle(zram, index)); + kunmap_local(mem); + return 0; +} + +static int read_incompressible_page(struct zram *zram, struct page *page, + u32 index) +{ + unsigned long handle; + void *src, *dst; + + handle = zram_get_handle(zram, index); + src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); + dst = kmap_local_page(page); + copy_page(dst, src); + kunmap_local(dst); + zs_unmap_object(zram->mem_pool, handle); + + return 0; +} + +static int read_compressed_page(struct zram *zram, struct page *page, u32 index) +{ + struct zcomp_strm *zstrm; + unsigned long handle; + unsigned int size; + void *src, *dst; + int ret, prio; + + handle = zram_get_handle(zram, index); + size = zram_get_obj_size(zram, index); + prio = zram_get_priority(zram, index); + + zstrm = zcomp_stream_get(zram->comps[prio]); + src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); + dst = kmap_local_page(page); + ret = zcomp_decompress(zram->comps[prio], zstrm, src, size, dst); + kunmap_local(dst); + zs_unmap_object(zram->mem_pool, handle); + zcomp_stream_put(zram->comps[prio]); + + return ret; +} + /* * Reads (decompresses if needed) a page from zspool (zsmalloc). * Corresponding ZRAM slot should be locked. 
@@ -1538,45 +1588,14 @@ out: static int zram_read_from_zspool(struct zram *zram, struct page *page, u32 index) { - struct zcomp_strm *zstrm; - unsigned long handle; - unsigned int size; - void *src, *dst; - u32 prio; - int ret; + if (zram_test_flag(zram, index, ZRAM_SAME) || + !zram_get_handle(zram, index)) + return read_same_filled_page(zram, page, index); - handle = zram_get_handle(zram, index); - if (!handle || zram_test_flag(zram, index, ZRAM_SAME)) { - void *mem; - - mem = kmap_local_page(page); - zram_fill_page(mem, PAGE_SIZE, handle); - kunmap_local(mem); - return 0; - } - - size = zram_get_obj_size(zram, index); - - if (size != PAGE_SIZE) { - prio = zram_get_priority(zram, index); - zstrm = zcomp_stream_get(zram->comps[prio]); - } - - src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO); - if (size == PAGE_SIZE) { - dst = kmap_local_page(page); - copy_page(dst, src); - kunmap_local(dst); - ret = 0; - } else { - dst = kmap_local_page(page); - ret = zcomp_decompress(zram->comps[prio], zstrm, - src, size, dst); - kunmap_local(dst); - zcomp_stream_put(zram->comps[prio]); - } - zs_unmap_object(zram->mem_pool, handle); - return ret; + if (!zram_test_flag(zram, index, ZRAM_HUGE)) + return read_compressed_page(zram, page, index); + else + return read_incompressible_page(zram, page, index); } static int zram_read_page(struct zram *zram, struct page *page, u32 index, From 0f661d07d0b36ad5a6be5b46250e33abad1a39f8 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Wed, 18 Dec 2024 15:34:23 +0900 Subject: [PATCH 165/504] zram: use zram_read_from_zspool() in writeback We only can read pages from zspool in writeback, zram_read_page() is not really right in that context not only because it's a more generic function that handles ZRAM_WB pages, but also because it requires us to unlock slot between slot flag check and actual page read. Use zram_read_from_zspool() instead and do slot flags check and page read under the same slot lock. 
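The point generalizes: if a flag check and the access it guards live in separate critical sections, the state can change in the window between them. A minimal pthread illustration of the two shapes (assumed names, nothing zram-specific):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool slot_ready = true;
static int slot_data = 42;

/*
 * Racy shape: the flag is checked in one critical section and the data
 * is read in another, so another thread may clear the slot in between.
 */
static int read_racy(void)
{
	pthread_mutex_lock(&lock);
	bool ok = slot_ready;
	pthread_mutex_unlock(&lock);

	if (!ok)
		return -1;

	pthread_mutex_lock(&lock);	/* slot may have changed by now */
	int val = slot_data;
	pthread_mutex_unlock(&lock);
	return val;
}

/*
 * Safe shape: check and read under the same critical section, which is
 * what doing the zspool read under the slot lock amounts to.
 */
static int read_locked(void)
{
	pthread_mutex_lock(&lock);
	int val = slot_ready ? slot_data : -1;
	pthread_mutex_unlock(&lock);
	return val;
}

int main(void)
{
	printf("racy: %d, locked: %d\n", read_racy(), read_locked());
	return 0;
}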
Link: https://lkml.kernel.org/r/20241218063513.297475-7-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Minchan Kim Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index c4454ceee4dd..faa58d85cef8 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -55,8 +55,8 @@ static size_t huge_class_size; static const struct block_device_operations zram_devops; static void zram_free_page(struct zram *zram, size_t index); -static int zram_read_page(struct zram *zram, struct page *page, u32 index, - struct bio *parent); +static int zram_read_from_zspool(struct zram *zram, struct page *page, + u32 index); static int zram_slot_trylock(struct zram *zram, u32 index) { @@ -831,13 +831,10 @@ static ssize_t writeback_store(struct device *dev, */ if (!zram_test_flag(zram, index, ZRAM_PP_SLOT)) goto next; + if (zram_read_from_zspool(zram, page, index)) + goto next; zram_slot_unlock(zram, index); - if (zram_read_page(zram, page, index, NULL)) { - release_pp_slot(zram, pps); - continue; - } - bio_init(&bio, zram->bdev, &bio_vec, 1, REQ_OP_WRITE | REQ_SYNC); bio.bi_iter.bi_sector = blk_idx * (PAGE_SIZE >> 9); From c56857fab87a4c206409b2fbdb085e141774f284 Mon Sep 17 00:00:00 2001 From: Sergey Senozhatsky Date: Wed, 18 Dec 2024 15:34:24 +0900 Subject: [PATCH 166/504] zram: cond_resched() in writeback loop zram writeback is a costly operation, because every target slot (unless ZRAM_HUGE) is decompressed before it gets written to a backing device. The writeback to a backing device uses submit_bio_wait() which may look like a rescheduling point. However, if the backing device has BD_HAS_SUBMIT_BIO bit set __submit_bio() calls directly disk->fops->submit_bio(bio) on the backing device and so when submit_bio_wait() calls blk_wait_io() the I/O is already done. On such systems we effective end up in a loop for_each (target slot) { decompress(slot) __submit_bio() disk->fops->submit_bio(bio) } Which on PREEMPT_NONE systems triggers watchdogs (since there are no explicit rescheduling points). Add cond_resched() to the zram writeback loop. Link: https://lkml.kernel.org/r/20241218063513.297475-8-senozhatsky@chromium.org Signed-off-by: Sergey Senozhatsky Cc: Minchan Kim Signed-off-by: Andrew Morton --- drivers/block/zram/zram_drv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index faa58d85cef8..70ecaee25c20 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -884,6 +884,8 @@ static ssize_t writeback_store(struct device *dev, next: zram_slot_unlock(zram, index); release_pp_slot(zram, pps); + + cond_resched(); } if (blk_idx) From 43fee8e1f89eae1e4e631365c57936625ef8e28c Mon Sep 17 00:00:00 2001 From: yangge Date: Sat, 11 Jan 2025 15:58:20 +0800 Subject: [PATCH 167/504] mm: replace free hugepage folios after migration My machine has 4 NUMA nodes, each equipped with 32GB of memory. I have configured each NUMA node with 16GB of CMA and 16GB of in-use hugetlb pages. The allocation of contiguous memory via cma_alloc() can fail probabilistically. When there are free hugetlb folios in the hugetlb pool, during the migration of in-use hugetlb folios, new folios are allocated from the free hugetlb pool. After the migration is completed, the old folios are released back to the free hugetlb pool instead of being returned to the buddy system. 
This can cause test_pages_isolated() check to fail, ultimately leading to the failure of cma_alloc(). Call trace: cma_alloc() __alloc_contig_migrate_range() // migrate in-use hugepage test_pages_isolated() __test_page_isolated_in_pageblock() PageBuddy(page) // check if the page is in buddy To address this issue, we introduce a function named replace_free_hugepage_folios(). This function will replace the hugepage in the free hugepage pool with a new one and release the old one to the buddy system. After the migration of in-use hugetlb pages is completed, we will invoke replace_free_hugepage_folios() to ensure that these hugepages are properly released to the buddy system. Following this step, when test_pages_isolated() is executed for inspection, it will successfully pass. Additionally, when alloc_contig_range() is used to migrate multiple in-use hugetlb pages, it can result in some in-use hugetlb pages being released back to the free hugetlb pool and subsequently being reallocated and used again. For example: [huge 0] [huge 1] To migrate huge 0, we obtain huge x from the pool. After the migration is completed, we return the now-freed huge 0 back to the pool. When it's time to migrate huge 1, we can simply reuse the now-freed huge 0 from the pool. As a result, when replace_free_hugepage_folios() is executed, it cannot release huge 0 back to the buddy system. To address this issue, we should prevent the reuse of isolated free hugepages during the migration process. Link: https://lkml.kernel.org/r/1734503588-16254-1-git-send-email-yangge1116@126.com Link: https://lkml.kernel.org/r/1736582300-11364-1-git-send-email-yangge1116@126.com Signed-off-by: yangge Cc: Baolin Wang Cc: Barry Song <21cnbao@gmail.com> Cc: David Hildenbrand Cc: SeongJae Park Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 7 +++++++ mm/hugetlb.c | 42 +++++++++++++++++++++++++++++++++++++++++ mm/page_alloc.c | 12 +++++++++++- 3 files changed, 60 insertions(+), 1 deletion(-) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index ae4fe8615bb6..10faf42ca96a 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -681,6 +681,7 @@ struct huge_bootmem_page { }; int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); +int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn); struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve); struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, @@ -1059,6 +1060,12 @@ static inline int isolate_or_dissolve_huge_page(struct page *page, return -ENOMEM; } +static inline int replace_free_hugepage_folios(unsigned long start_pfn, + unsigned long end_pfn) +{ + return 0; +} + static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 1672bfd85b4d..312ed27b9721 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -48,6 +48,7 @@ #include #include "internal.h" #include "hugetlb_vmemmap.h" +#include int hugetlb_max_hstate __read_mostly; unsigned int default_hstate_idx; @@ -1336,6 +1337,9 @@ static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h, if (folio_test_hwpoison(folio)) continue; + if (is_migrate_isolate_page(&folio->page)) + continue; + list_move(&folio->lru, &h->hugepage_activelist); folio_ref_unfreeze(folio, 1); folio_clear_hugetlb_freed(folio); @@ -2975,6 +2979,44 @@ int isolate_or_dissolve_huge_page(struct page *page, struct 
list_head *list) return ret; } +/* + * replace_free_hugepage_folios - Replace free hugepage folios in a given pfn + * range with new folios. + * @start_pfn: start pfn of the given pfn range + * @end_pfn: end pfn of the given pfn range + * Returns 0 on success, otherwise negated error. + */ +int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn) +{ + struct hstate *h; + struct folio *folio; + int ret = 0; + + LIST_HEAD(isolate_list); + + while (start_pfn < end_pfn) { + folio = pfn_folio(start_pfn); + if (folio_test_hugetlb(folio)) { + h = folio_hstate(folio); + } else { + start_pfn++; + continue; + } + + if (!folio_ref_count(folio)) { + ret = alloc_and_dissolve_hugetlb_folio(h, folio, + &isolate_list); + if (ret) + break; + + putback_movable_pages(&isolate_list); + } + start_pfn++; + } + + return ret; +} + struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, int avoid_reserve) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 681a6fa7eaa8..aa70d0e73d6d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6507,7 +6507,17 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end, ret = __alloc_contig_migrate_range(&cc, start, end, migratetype); if (ret && ret != -EBUSY) goto done; - ret = 0; + + /* + * When in-use hugetlb pages are migrated, they may simply be released + * back into the free hugepage pool instead of being returned to the + * buddy system. After the migration of in-use huge pages is completed, + * we will invoke replace_free_hugepage_folios() to ensure that these + * hugepages are properly released to the buddy system. + */ + ret = replace_free_hugepage_folios(start, end); + if (ret) + goto done; /* * Pages from [start, end) are within a pageblock_nr_pages From 3a62efe7a5bdc2eb0331a84bc05d469ef23571bc Mon Sep 17 00:00:00 2001 From: Donet Tom Date: Thu, 19 Dec 2024 04:27:20 -0600 Subject: [PATCH 168/504] selftests/mm: add new test cases to the migration test Added three new test cases to the migration tests: 1. Shared anon THP migration test This test will mmap shared anon memory, madvise it to MADV_HUGEPAGE, then do migration entry testing. One thread will move pages back and forth between nodes whilst other threads try and access them. 2. Private anon hugetlb migration test This test will mmap private anon hugetlb memory and then do the migration entry testing. 3. Shared anon hugetlb migration test This test will mmap shared anon hugetlb memory and then do the migration entry testing. Test results ============ # ./tools/testing/selftests/mm/migration TAP version 13 1..6 # Starting 6 tests from 1 test cases. # RUN migration.private_anon ... # OK migration.private_anon ok 1 migration.private_anon # RUN migration.shared_anon ... # OK migration.shared_anon ok 2 migration.shared_anon # RUN migration.private_anon_thp ... # OK migration.private_anon_thp ok 3 migration.private_anon_thp # RUN migration.shared_anon_thp ... # OK migration.shared_anon_thp ok 4 migration.shared_anon_thp # RUN migration.private_anon_htlb ... # OK migration.private_anon_htlb ok 5 migration.private_anon_htlb # RUN migration.shared_anon_htlb ... # OK migration.shared_anon_htlb ok 6 migration.shared_anon_htlb # PASSED: 6 / 6 tests passed. 
# Totals: pass:6 fail:0 xfail:0 xpass:0 skip:0 error:0 # Link: https://lkml.kernel.org/r/20241219102720.4487-1-donettom@linux.ibm.com Signed-off-by: Donet Tom Reviewed-by: Dev Jain Cc: Baolin Wang Cc: David Hildenbrand Cc: Ritesh Harjani (IBM) Cc: Shuah Khan Cc: Zi Yan Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/migration.c | 99 ++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) diff --git a/tools/testing/selftests/mm/migration.c b/tools/testing/selftests/mm/migration.c index 64bcbb7151cf..1e3a595fbf01 100644 --- a/tools/testing/selftests/mm/migration.c +++ b/tools/testing/selftests/mm/migration.c @@ -204,4 +204,103 @@ TEST_F_TIMEOUT(migration, private_anon_thp, 2*RUNTIME) ASSERT_EQ(pthread_cancel(self->threads[i]), 0); } +/* + * migration test with shared anon THP page + */ + +TEST_F_TIMEOUT(migration, shared_anon_thp, 2*RUNTIME) +{ + pid_t pid; + uint64_t *ptr; + int i; + + if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0) + SKIP(return, "Not enough threads or NUMA nodes available"); + + ptr = mmap(NULL, 2 * TWOMEG, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS, -1, 0); + ASSERT_NE(ptr, MAP_FAILED); + + ptr = (uint64_t *) ALIGN((uintptr_t) ptr, TWOMEG); + ASSERT_EQ(madvise(ptr, TWOMEG, MADV_HUGEPAGE), 0); + + memset(ptr, 0xde, TWOMEG); + for (i = 0; i < self->nthreads - 1; i++) { + pid = fork(); + if (!pid) { + prctl(PR_SET_PDEATHSIG, SIGHUP); + /* Parent may have died before prctl so check now. */ + if (getppid() == 1) + kill(getpid(), SIGHUP); + access_mem(ptr); + } else { + self->pids[i] = pid; + } + } + + ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0); + for (i = 0; i < self->nthreads - 1; i++) + ASSERT_EQ(kill(self->pids[i], SIGTERM), 0); +} + +/* + * migration test with private anon hugetlb page + */ +TEST_F_TIMEOUT(migration, private_anon_htlb, 2*RUNTIME) +{ + uint64_t *ptr; + int i; + + if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0) + SKIP(return, "Not enough threads or NUMA nodes available"); + + ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0); + ASSERT_NE(ptr, MAP_FAILED); + + memset(ptr, 0xde, TWOMEG); + for (i = 0; i < self->nthreads - 1; i++) + if (pthread_create(&self->threads[i], NULL, access_mem, ptr)) + perror("Couldn't create thread"); + + ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0); + for (i = 0; i < self->nthreads - 1; i++) + ASSERT_EQ(pthread_cancel(self->threads[i]), 0); +} + +/* + * migration test with shared anon hugetlb page + */ +TEST_F_TIMEOUT(migration, shared_anon_htlb, 2*RUNTIME) +{ + pid_t pid; + uint64_t *ptr; + int i; + + if (self->nthreads < 2 || self->n1 < 0 || self->n2 < 0) + SKIP(return, "Not enough threads or NUMA nodes available"); + + ptr = mmap(NULL, TWOMEG, PROT_READ | PROT_WRITE, + MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0); + ASSERT_NE(ptr, MAP_FAILED); + + memset(ptr, 0xde, TWOMEG); + for (i = 0; i < self->nthreads - 1; i++) { + pid = fork(); + if (!pid) { + prctl(PR_SET_PDEATHSIG, SIGHUP); + /* Parent may have died before prctl so check now. 
*/ + if (getppid() == 1) + kill(getpid(), SIGHUP); + access_mem(ptr); + } else { + self->pids[i] = pid; + } + } + + ASSERT_EQ(migrate(ptr, self->n1, self->n2), 0); + for (i = 0; i < self->nthreads - 1; i++) + ASSERT_EQ(kill(self->pids[i], SIGTERM), 0); +} + TEST_HARNESS_MAIN From e5e3619114ff301cf6f6af8bef26ffe12f454533 Mon Sep 17 00:00:00 2001 From: Gregory Price Date: Fri, 20 Dec 2024 16:07:09 -0500 Subject: [PATCH 169/504] mm: add build-time option for hotplug memory default online type Memory hotplug presently auto-onlines memory into a zone the kernel deems appropriate if CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y. The memhp_default_state boot param enables runtime config, but it's not possible to do this at build-time. Remove CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE, and replace it with CONFIG_MHP_DEFAULT_ONLINE_TYPE_* choices that sync with the boot param. Selections: CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE => mhp_default_online_type = "offline" Memory will not be onlined automatically. CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO => mhp_default_online_type = "online" Memory will be onlined automatically in a zone deemed appropriate by the kernel. CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL => mhp_default_online_type = "online_kernel" Memory will be onlined automatically. The zone may allow kernel data (e.g. ZONE_NORMAL). CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE => mhp_default_online_type = "online_movable" Memory will be onlined automatically. The zone will be ZONE_MOVABLE. Default to CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE to match the existing default CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=n behavior. Existing users of CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y should use CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO. Link: https://lkml.kernel.org/r/20241220210709.300066-1-gourry@gourry.net Signed-off-by: Gregory Price Acked-by: David Hildenbrand Cc: Greg Kroah-Hartman Cc: Huacai Chen Cc: Jonathan Corbet Cc: Oscar Salvador Cc: "Rafael J. Wysocki" Cc: WANG Xuerui Signed-off-by: Andrew Morton --- .../admin-guide/kernel-parameters.txt | 4 +- .../admin-guide/mm/memory-hotplug.rst | 4 +- arch/loongarch/configs/loongson3_defconfig | 5 +- drivers/base/memory.c | 4 +- include/linux/memory_hotplug.h | 5 +- mm/Kconfig | 57 ++++++++++++++++--- mm/memory_hotplug.c | 33 ++++++++--- 7 files changed, 89 insertions(+), 23 deletions(-) diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index c79691eee54f..9138fcd18260 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -3351,8 +3351,8 @@ [KNL] Set the initial state for the memory hotplug onlining policy. If not specified, the default value is set according to the - CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE kernel config - option. + CONFIG_MHP_DEFAULT_ONLINE_TYPE kernel config + options. See Documentation/admin-guide/mm/memory-hotplug.rst. memmap=exactmap [KNL,X86,EARLY] Enable setting of an exact diff --git a/Documentation/admin-guide/mm/memory-hotplug.rst b/Documentation/admin-guide/mm/memory-hotplug.rst index cb2c080f400c..33c886f3d198 100644 --- a/Documentation/admin-guide/mm/memory-hotplug.rst +++ b/Documentation/admin-guide/mm/memory-hotplug.rst @@ -280,8 +280,8 @@ The following files are currently defined: blocks; configure auto-onlining. The default value depends on the - CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE kernel configuration - option. + CONFIG_MHP_DEFAULT_ONLINE_TYPE kernel configuration + options.
See the ``state`` property of memory blocks for details. ``block_size_bytes`` read-only: the size in bytes of a memory block. diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index 4dffc90192f7..1cc6e8843680 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -113,7 +113,10 @@ CONFIG_ZBUD=y CONFIG_ZSMALLOC=m # CONFIG_COMPAT_BRK is not set CONFIG_MEMORY_HOTPLUG=y -CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +# CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE is not set +CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO=y +# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL is not set +# CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE is not set CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 67858eeb92ed..348c5dbbfa68 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -512,7 +512,7 @@ static ssize_t auto_online_blocks_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%s\n", - online_type_to_str[mhp_default_online_type]); + online_type_to_str[mhp_get_default_online_type()]); } static ssize_t auto_online_blocks_store(struct device *dev, @@ -524,7 +524,7 @@ static ssize_t auto_online_blocks_store(struct device *dev, if (online_type < 0) return -EINVAL; - mhp_default_online_type = online_type; + mhp_set_default_online_type(online_type); return count; } diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index b27ddce5d324..eaac5ae8c05c 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -144,8 +144,6 @@ extern u64 max_mem_size; extern int mhp_online_type_from_str(const char *str); -/* Default online_type (MMOP_*) when new memory blocks are added. */ -extern int mhp_default_online_type; /* If movable_node boot option specified */ extern bool movable_node_enabled; static inline bool movable_node_is_enabled(void) @@ -303,6 +301,9 @@ static inline void __remove_memory(u64 start, u64 size) {} #endif /* CONFIG_MEMORY_HOTREMOVE */ #ifdef CONFIG_MEMORY_HOTPLUG +/* Default online_type (MMOP_*) when new memory blocks are added. */ +extern int mhp_get_default_online_type(void); +extern void mhp_set_default_online_type(int online_type); extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat); extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); extern int add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); diff --git a/mm/Kconfig b/mm/Kconfig index 7949ab121070..af163dbbaab1 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -550,20 +550,63 @@ menuconfig MEMORY_HOTPLUG if MEMORY_HOTPLUG -config MEMORY_HOTPLUG_DEFAULT_ONLINE - bool "Online the newly added memory blocks by default" - depends on MEMORY_HOTPLUG +choice + prompt "Memory Hotplug Default Online Type" + default MHP_DEFAULT_ONLINE_TYPE_OFFLINE help + Default memory type for driver managed hotplug memory. + This option sets the default policy setting for memory hotplug onlining policy (/sys/devices/system/memory/auto_online_blocks) which determines what happens to newly added memory regions. Policy setting can always be changed at runtime. + + The default is 'offline'. + + Select offline to defer onlining to drivers and user policy. + Select auto to let the kernel choose what zones to utilize. + Select online_kernel to generally allow kernel usage of this memory. + Select online_movable to generally disallow kernel usage of this memory. 
+ + Example kernel usage would be page structs and page tables. + See Documentation/admin-guide/mm/memory-hotplug.rst for more information. - Say Y here if you want all hot-plugged memory blocks to appear in - 'online' state by default. - Say N here if you want the default policy to keep all hot-plugged - memory blocks in 'offline' state. +config MHP_DEFAULT_ONLINE_TYPE_OFFLINE + bool "offline" + help + Driver managed memory will not be onlined by default. + Choose this for systems with drivers and user policy that + handle onlining of hotplug memory policy. + +config MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO + bool "auto" + help + Select this if you want the kernel to automatically online + memory into the zone it thinks is reasonable. This memory + may be utilized for kernel data (e.g. page tables). + +config MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL + bool "kernel" + help + Select this if you want the kernel to automatically online + hotplug memory into a zone capable of being used for kernel + data (e.g. page tables). This typically means ZONE_NORMAL. + +config MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE + bool "movable" + help + Select this if you want the kernel to automatically online + hotplug memory into ZONE_MOVABLE. This memory will generally + not be utilized for kernel data (e.g. page tables). + + This should only be used when the admin knows sufficient + ZONE_NORMAL memory is available to describe hotplug memory, + otherwise hotplug memory may fail to online. For example, + sufficient kernel-capable memory (ZONE_NORMAL) must be + available to allocate page structs to describe ZONE_MOVABLE. + +endchoice config MEMORY_HOTREMOVE bool "Allow for memory hot remove" diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 3b6f93962481..e3655f07dd6e 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -219,11 +219,30 @@ void put_online_mems(void) bool movable_node_enabled = false; -#ifndef CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE -int mhp_default_online_type = MMOP_OFFLINE; -#else -int mhp_default_online_type = MMOP_ONLINE; -#endif +static int mhp_default_online_type = -1; +int mhp_get_default_online_type(void) +{ + if (mhp_default_online_type >= 0) + return mhp_default_online_type; + + if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_OFFLINE)) + mhp_default_online_type = MMOP_OFFLINE; + else if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO)) + mhp_default_online_type = MMOP_ONLINE; + else if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL)) + mhp_default_online_type = MMOP_ONLINE_KERNEL; + else if (IS_ENABLED(CONFIG_MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE)) + mhp_default_online_type = MMOP_ONLINE_MOVABLE; + else + mhp_default_online_type = MMOP_OFFLINE; + + return mhp_default_online_type; +} + +void mhp_set_default_online_type(int online_type) +{ + mhp_default_online_type = online_type; +} static int __init setup_memhp_default_state(char *str) { @@ -1328,7 +1347,7 @@ static int check_hotplug_memory_range(u64 start, u64 size) static int online_memory_block(struct memory_block *mem, void *arg) { - mem->online_type = mhp_default_online_type; + mem->online_type = mhp_get_default_online_type(); return device_online(&mem->dev); } @@ -1575,7 +1594,7 @@ int add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags) merge_system_ram_resource(res); /* online pages if requested */ - if (mhp_default_online_type != MMOP_OFFLINE) + if (mhp_get_default_online_type() != MMOP_OFFLINE) walk_memory_blocks(start, size, NULL, online_memory_block); return ret; From 
bb3e5ae390fecd795efbd2ef2b37a544f99d6b3d Mon Sep 17 00:00:00 2001 From: Gregory Price Date: Thu, 26 Dec 2024 11:29:18 -0700 Subject: [PATCH 170/504] mm-add-build-time-option-for-hotplug-memory-default-online-type-v4 update KConfig comments Link: https://lkml.kernel.org/r/20241226182918.648799-1-gourry@gourry.net Signed-off-by: Gregory Price Acked-by: David Hildenbrand Cc: Greg Kroah-Hartman Cc: Huacai Chen Cc: Jonathan Corbet Cc: Oscar Salvador Cc: "Rafael J. Wysocki" Cc: WANG Xuerui Signed-off-by: Andrew Morton --- mm/Kconfig | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/mm/Kconfig b/mm/Kconfig index af163dbbaab1..1b501db06417 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -554,7 +554,7 @@ choice prompt "Memory Hotplug Default Online Type" default MHP_DEFAULT_ONLINE_TYPE_OFFLINE help - Default memory type for driver managed hotplug memory. + Default memory type for hotplugged memory. This option sets the default policy setting for memory hotplug onlining policy (/sys/devices/system/memory/auto_online_blocks) which @@ -575,7 +575,7 @@ choice config MHP_DEFAULT_ONLINE_TYPE_OFFLINE bool "offline" help - Driver managed memory will not be onlined by default. + Hotplugged memory will not be onlined by default. Choose this for systems with drivers and user policy that handle onlining of hotplug memory policy. @@ -583,22 +583,22 @@ config MHP_DEFAULT_ONLINE_TYPE_ONLINE_AUTO bool "auto" help Select this if you want the kernel to automatically online - memory into the zone it thinks is reasonable. This memory - may be utilized for kernel data (e.g. page tables). + hotplugged memory into the zone it thinks is reasonable. + This memory may be utilized for kernel data. config MHP_DEFAULT_ONLINE_TYPE_ONLINE_KERNEL bool "kernel" help Select this if you want the kernel to automatically online - hotplug memory into a zone capable of being used for kernel - data (e.g. page tables). This typically means ZONE_NORMAL. + hotplugged memory into a zone capable of being used for kernel + data. This typically means ZONE_NORMAL. config MHP_DEFAULT_ONLINE_TYPE_ONLINE_MOVABLE bool "movable" help Select this if you want the kernel to automatically online hotplug memory into ZONE_MOVABLE. This memory will generally - not be utilized for kernel data (e.g. page tables). + not be utilized for kernel data. This should only be used when the admin knows sufficient ZONE_NORMAL memory is available to describe hotplug memory, From 2ee668bf3ee9ae03d469dc6f75094652e132bb8c Mon Sep 17 00:00:00 2001 From: Rik van Riel Date: Thu, 19 Dec 2024 15:32:53 -0500 Subject: [PATCH 171/504] mm: remove unnecessary calls to lru_add_drain There seem to be several categories of calls to lru_add_drain and lru_add_drain_all. The first are code paths that recently allocated, swapped in, or otherwise processed a batch of pages, and want them all on the LRU. These drain pages that were recently allocated, probably on the local CPU. A second category are code paths that are actively trying to reclaim, migrate, or offline memory. These often use lru_add_drain_all, to drain the caches on all CPUs. However, there also seem to be some other callers where we aren't really doing either. They are calling lru_add_drain(), despite operating on pages that may have been allocated long ago, and quite possibly on different CPUs. Those calls are not likely to be effective at anything but creating lock contention on the LRU locks. Remove the lru_add_drain calls in the latter category. For detailed reasoning, see [1] and [2]. 
Link: https://lkml.kernel.org/r/dca2824e8e88e826c6b260a831d79089b5b9c79d.camel@surriel.com [1] Link: https://lkml.kernel.org/r/xxfhcjaq2xxcl5adastz5omkytenq7izo2e5f4q7e3ns4z6lko@odigjjc7hqrg [2] Link: https://lkml.kernel.org/r/20241219153253.3da9e8aa@fangorn Signed-off-by: Rik van Riel Suggested-by: David Hildenbrand Acked-by: Shakeel Butt Acked-by: David Hildenbrand Acked-by: Lorenzo Stoakes Cc: Chris Li Cc: Matthew Wilcox (Oracle) Cc: Ryan Roberts Signed-off-by: Andrew Morton --- mm/memory.c | 1 - mm/mmap.c | 2 -- mm/swap_state.c | 1 - mm/vma.c | 2 -- 4 files changed, 6 deletions(-) diff --git a/mm/memory.c b/mm/memory.c index c870ca8f84a9..2e1d11581942 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2004,7 +2004,6 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, struct mmu_notifier_range range; struct mmu_gather tlb; - lru_add_drain(); mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm, address, end); hugetlb_zap_begin(vma, &range.start, &range.end); diff --git a/mm/mmap.c b/mm/mmap.c index aef835984b1c..3cc8de07411d 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1336,7 +1336,6 @@ void exit_mmap(struct mm_struct *mm) goto destroy; } - lru_add_drain(); flush_cache_mm(mm); tlb_gather_mmu_fullmm(&tlb, mm); /* update_hiwater_rss(mm) here? but nobody should be looking */ @@ -1779,7 +1778,6 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift) vma, new_start, length, false, true)) return -ENOMEM; - lru_add_drain(); tlb_gather_mmu(&tlb, mm); next = vma_next(&vmi); if (new_end > old_start) { diff --git a/mm/swap_state.c b/mm/swap_state.c index e0c0321b8ff7..ca42b2be64d9 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -317,7 +317,6 @@ void free_pages_and_swap_cache(struct encoded_page **pages, int nr) struct folio_batch folios; unsigned int refs[PAGEVEC_SIZE]; - lru_add_drain(); folio_batch_init(&folios); for (int i = 0; i < nr; i++) { struct folio *folio = page_folio(encoded_page_ptr(pages[i])); diff --git a/mm/vma.c b/mm/vma.c index 6fa240e5b0c5..0caaeea899a9 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -430,7 +430,6 @@ void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, struct mm_struct *mm = vma->vm_mm; struct mmu_gather tlb; - lru_add_drain(); tlb_gather_mmu(&tlb, mm); update_hiwater_rss(mm); unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end, @@ -1132,7 +1131,6 @@ static inline void vms_clear_ptes(struct vma_munmap_struct *vms, * were isolated before we downgraded mmap_lock. */ mas_set(mas_detach, 1); - lru_add_drain(); tlb_gather_mmu(&tlb, vms->vma->vm_mm); update_hiwater_rss(vms->vma->vm_mm); unmap_vmas(&tlb, mas_detach, vms->vma, vms->start, vms->end, From aed6c44f2a5a615775d656bf84183603aca0442b Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:17 +0800 Subject: [PATCH 172/504] Revert "mm: pgtable: make ptlock be freed by RCU" This reverts commit 2f3443770437e49abc39af26962d293851cbab6d. 
Link: https://lkml.kernel.org/r/366002e0af83f0d5cad3f356db036cb6447492f7.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Reviewed-by: Kevin Brodsky Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Peter Zijlstra Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Cc: Alexandre Ghiti Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 +- include/linux/mm_types.h | 9 +-------- mm/memory.c | 22 ++++++---------------- 3 files changed, 8 insertions(+), 25 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 33c8bd1ffeb1..e7c54b9aac6d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2925,7 +2925,7 @@ void ptlock_free(struct ptdesc *ptdesc); static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc) { - return &(ptdesc->ptl->ptl); + return ptdesc->ptl; } #else /* ALLOC_SPLIT_PTLOCKS */ static inline void ptlock_cache_init(void) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index c668a60a1dc3..5f1b2dc788e2 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -434,13 +434,6 @@ FOLIO_MATCH(flags, _flags_2a); FOLIO_MATCH(compound_head, _head_2a); #undef FOLIO_MATCH -#if ALLOC_SPLIT_PTLOCKS -struct pt_lock { - spinlock_t ptl; - struct rcu_head rcu; -}; -#endif - /** * struct ptdesc - Memory descriptor for page tables. * @__page_flags: Same as page flags. Powerpc only. @@ -489,7 +482,7 @@ struct ptdesc { union { unsigned long _pt_pad_2; #if ALLOC_SPLIT_PTLOCKS - struct pt_lock *ptl; + spinlock_t *ptl; #else spinlock_t ptl; #endif diff --git a/mm/memory.c b/mm/memory.c index 2e1d11581942..9defa853dbd2 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -7014,34 +7014,24 @@ static struct kmem_cache *page_ptl_cachep; void __init ptlock_cache_init(void) { - page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(struct pt_lock), 0, + page_ptl_cachep = kmem_cache_create("page->ptl", sizeof(spinlock_t), 0, SLAB_PANIC, NULL); } bool ptlock_alloc(struct ptdesc *ptdesc) { - struct pt_lock *pt_lock; + spinlock_t *ptl; - pt_lock = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); - if (!pt_lock) + ptl = kmem_cache_alloc(page_ptl_cachep, GFP_KERNEL); + if (!ptl) return false; - ptdesc->ptl = pt_lock; + ptdesc->ptl = ptl; return true; } -static void ptlock_free_rcu(struct rcu_head *head) -{ - struct pt_lock *pt_lock; - - pt_lock = container_of(head, struct pt_lock, rcu); - kmem_cache_free(page_ptl_cachep, pt_lock); -} - void ptlock_free(struct ptdesc *ptdesc) { - struct pt_lock *pt_lock = ptdesc->ptl; - - call_rcu(&pt_lock->rcu, ptlock_free_rcu); + kmem_cache_free(page_ptl_cachep, ptdesc->ptl); } #endif From 6bf19118f5a0f2f60858121190f240600e7d7b36 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Wed, 8 Jan 2025 14:57:18 +0800 Subject: [PATCH 173/504] riscv: mm: skip pgtable level check in {pud,p4d}_alloc_one Patch series "move pagetable_*_dtor() to __tlb_remove_table()", v5. As proposed [1] by Peter Zijlstra below, this patch series aims to move pagetable_*_dtor() into __tlb_remove_table(). This will cleanup pagetable_*_dtor() a bit and more gracefully fix the UAF issue [2] reported by syzbot. 
: Notably: : : - s390 pud isn't calling the existing pagetable_pud_[cd]tor() : - none of the p4d things have pagetable_p4d_[cd]tor() (x86,arm64,s390,riscv) : and they have inconsistent accounting : - while much of the _ctor calls are in generic code, many of the _dtor : calls are in arch code for hysterial raisins, this could easily be : fixed : - if we fix ptlock_free() to handle NULL, then all the _dtor() : functions can use it, and we can observe they're all identical : and can be folded : : after all that cleanup, you can move the _dtor from *_free_tlb() into : tlb_remove_table() -- which for the above case, would then have it called : from __tlb_remove_table_free(). This patch (of 16): {pmd,pud,p4d}_alloc_one() is never called if the corresponding page table level is folded, as {pmd,pud,p4d}_alloc() already does the required check. We can therefore remove the runtime page table level checks in {pud,p4d}_alloc_one. The PUD helper becomes equivalent to the generic version, so we remove it altogether. This is consistent with the way arm64 and x86 handle this situation (runtime check in p4d_free() only). Link: https://lkml.kernel.org/r/cover.1736317725.git.zhengqi.arch@bytedance.com Link: https://lkml.kernel.org/r/93a1c6bddc0ded9f1a9f15658c1e4af5c93d1194.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Kevin Brodsky Signed-off-by: Qi Zheng Acked-by: Dave Hansen Reviewed-by: Alexandre Ghiti Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Peter Zijlstra (Intel) Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/riscv/include/asm/pgalloc.h | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h index f52264304f77..8ad0bbe838a2 100644 --- a/arch/riscv/include/asm/pgalloc.h +++ b/arch/riscv/include/asm/pgalloc.h @@ -12,7 +12,6 @@ #include #ifdef CONFIG_MMU -#define __HAVE_ARCH_PUD_ALLOC_ONE #define __HAVE_ARCH_PUD_FREE #include @@ -88,15 +87,6 @@ static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, } } -#define pud_alloc_one pud_alloc_one -static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - if (pgtable_l4_enabled) - return __pud_alloc_one(mm, addr); - - return NULL; -} - #define pud_free pud_free static inline void pud_free(struct mm_struct *mm, pud_t *pud) { @@ -118,15 +108,11 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, #define p4d_alloc_one p4d_alloc_one static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr) { - if (pgtable_l5_enabled) { - gfp_t gfp = GFP_PGTABLE_USER; + gfp_t gfp = GFP_PGTABLE_USER; - if (mm == &init_mm) - gfp = GFP_PGTABLE_KERNEL; - return (p4d_t *)get_zeroed_page(gfp); - } - - return NULL; + if (mm == &init_mm) + gfp = GFP_PGTABLE_KERNEL; + return (p4d_t *)get_zeroed_page(gfp); } static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d) From 2dd0d1d3f4aed1ad23ec7be8e256807b57dbc6a2 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Wed, 8 Jan 2025 14:57:19 +0800 Subject: [PATCH 174/504] asm-generic: pgalloc: provide generic p4d_{alloc_one,free} Four architectures currently implement 5-level pgtables: arm64, riscv, x86 and 
s390. The first three have essentially the same implementation for p4d_alloc_one() and p4d_free(), so we've got an opportunity to reduce duplication like at the lower levels. Provide a generic version of p4d_alloc_one() and p4d_free(), and make use of it on those architectures. Their implementation is the same as at PUD level, except that p4d_free() performs a runtime check by calling mm_p4d_folded(). 5-level pgtables depend on a runtime-detected hardware feature on all supported architectures, so we might as well include this check in the generic implementation. No runtime check is required in p4d_alloc_one() as the top-level p4d_alloc() already does the required check. Link: https://lkml.kernel.org/r/26d69c74a29183ecc335b9b407040d8e4cd70c6a.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Kevin Brodsky Signed-off-by: Qi Zheng Acked-by: Dave Hansen Acked-by: Arnd Bergmann [asm-generic] Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Peter Zijlstra (Intel) Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/arm64/include/asm/pgalloc.h | 17 ------------ arch/riscv/include/asm/pgalloc.h | 23 ---------------- arch/x86/include/asm/pgalloc.h | 18 ------------- include/asm-generic/pgalloc.h | 45 ++++++++++++++++++++++++++++++++ 4 files changed, 45 insertions(+), 58 deletions(-) diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index e75422864d1b..2965f5a7e39e 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -85,23 +85,6 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, p4d_t *p4dp) __pgd_populate(pgdp, __pa(p4dp), pgdval); } -static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - gfp_t gfp = GFP_PGTABLE_USER; - - if (mm == &init_mm) - gfp = GFP_PGTABLE_KERNEL; - return (p4d_t *)get_zeroed_page(gfp); -} - -static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) -{ - if (!pgtable_l5_enabled()) - return; - BUG_ON((unsigned long)p4d & (PAGE_SIZE-1)); - free_page((unsigned long)p4d); -} - #define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d) #else static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t p4dp, pgdval_t prot) diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h index 8ad0bbe838a2..551d614d3369 100644 --- a/arch/riscv/include/asm/pgalloc.h +++ b/arch/riscv/include/asm/pgalloc.h @@ -105,29 +105,6 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, } } -#define p4d_alloc_one p4d_alloc_one -static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - gfp_t gfp = GFP_PGTABLE_USER; - - if (mm == &init_mm) - gfp = GFP_PGTABLE_KERNEL; - return (p4d_t *)get_zeroed_page(gfp); -} - -static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d) -{ - BUG_ON((unsigned long)p4d & (PAGE_SIZE-1)); - free_page((unsigned long)p4d); -} - -#define p4d_free p4d_free -static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) -{ - if (pgtable_l5_enabled) - __p4d_free(mm, p4d); -} - static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, unsigned long addr) { diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index 
dcd836b59beb..dd4841231bb9 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -147,24 +147,6 @@ static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4 set_pgd_safe(pgd, __pgd(_PAGE_TABLE | __pa(p4d))); } -static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr) -{ - gfp_t gfp = GFP_KERNEL_ACCOUNT; - - if (mm == &init_mm) - gfp &= ~__GFP_ACCOUNT; - return (p4d_t *)get_zeroed_page(gfp); -} - -static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) -{ - if (!pgtable_l5_enabled()) - return; - - BUG_ON((unsigned long)p4d & (PAGE_SIZE-1)); - free_page((unsigned long)p4d); -} - extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d); static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 7c48f5fbf8aa..59131629ac9c 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -215,6 +215,51 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) #endif /* CONFIG_PGTABLE_LEVELS > 3 */ +#if CONFIG_PGTABLE_LEVELS > 4 + +static inline p4d_t *__p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr) +{ + gfp_t gfp = GFP_PGTABLE_USER; + struct ptdesc *ptdesc; + + if (mm == &init_mm) + gfp = GFP_PGTABLE_KERNEL; + gfp &= ~__GFP_HIGHMEM; + + ptdesc = pagetable_alloc_noprof(gfp, 0); + if (!ptdesc) + return NULL; + + return ptdesc_address(ptdesc); +} +#define __p4d_alloc_one(...) alloc_hooks(__p4d_alloc_one_noprof(__VA_ARGS__)) + +#ifndef __HAVE_ARCH_P4D_ALLOC_ONE +static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long addr) +{ + return __p4d_alloc_one_noprof(mm, addr); +} +#define p4d_alloc_one(...) alloc_hooks(p4d_alloc_one_noprof(__VA_ARGS__)) +#endif + +static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d) +{ + struct ptdesc *ptdesc = virt_to_ptdesc(p4d); + + BUG_ON((unsigned long)p4d & (PAGE_SIZE-1)); + pagetable_free(ptdesc); +} + +#ifndef __HAVE_ARCH_P4D_FREE +static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) +{ + if (!mm_p4d_folded(mm)) + __p4d_free(mm, p4d); +} +#endif + +#endif /* CONFIG_PGTABLE_LEVELS > 4 */ + #ifndef __HAVE_ARCH_PGD_FREE static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { From e3ca831f2102ab206cb8bd876709f6840b4d0884 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:20 +0800 Subject: [PATCH 175/504] mm: pgtable: add statistics for P4D level page table Like other levels of page tables, add statistics for P4D level page table. 
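For reference only (an illustrative note, not part of this patch): the pagetable_p4d_ctor()/pagetable_p4d_dtor() helpers added below account P4D tables in the existing NR_PAGETABLE counter, so the new statistics surface through the interfaces that already report that counter, e.g. the "PageTables:" line in /proc/meminfo. A minimal userspace sketch (hypothetical example) that prints that line:

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/meminfo", "r");
	char line[256];

	if (!f) {
		perror("fopen /proc/meminfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* "PageTables:" reports NR_PAGETABLE, which with this
		 * series also covers P4D-level tables.
		 */
		if (!strncmp(line, "PageTables:", strlen("PageTables:")))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}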
Link: https://lkml.kernel.org/r/d55fe3c286305aae84457da9e1066df99b3de125.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Originally-by: Peter Zijlstra (Intel) Reviewed-by: Kevin Brodsky Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/riscv/include/asm/pgalloc.h | 6 +++++- arch/x86/mm/pgtable.c | 3 +++ include/asm-generic/pgalloc.h | 2 ++ include/linux/mm.h | 16 ++++++++++++++++ 4 files changed, 26 insertions(+), 1 deletion(-) diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h index 551d614d3369..3466fbe2e508 100644 --- a/arch/riscv/include/asm/pgalloc.h +++ b/arch/riscv/include/asm/pgalloc.h @@ -108,8 +108,12 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, unsigned long addr) { - if (pgtable_l5_enabled) + if (pgtable_l5_enabled) { + struct ptdesc *ptdesc = virt_to_ptdesc(p4d); + + pagetable_p4d_dtor(ptdesc); riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d)); + } } #endif /* __PAGETABLE_PMD_FOLDED */ diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 69a357b15974..3d6e84da45b2 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -94,6 +94,9 @@ void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) #if CONFIG_PGTABLE_LEVELS > 4 void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d) { + struct ptdesc *ptdesc = virt_to_ptdesc(p4d); + + pagetable_p4d_dtor(ptdesc); paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT); paravirt_tlb_remove_table(tlb, virt_to_page(p4d)); } diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 59131629ac9c..bb482eeca0c3 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -230,6 +230,7 @@ static inline p4d_t *__p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long if (!ptdesc) return NULL; + pagetable_p4d_ctor(ptdesc); return ptdesc_address(ptdesc); } #define __p4d_alloc_one(...) 
alloc_hooks(__p4d_alloc_one_noprof(__VA_ARGS__)) @@ -247,6 +248,7 @@ static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d) struct ptdesc *ptdesc = virt_to_ptdesc(p4d); BUG_ON((unsigned long)p4d & (PAGE_SIZE-1)); + pagetable_p4d_dtor(ptdesc); pagetable_free(ptdesc); } diff --git a/include/linux/mm.h b/include/linux/mm.h index e7c54b9aac6d..2e56a9634a97 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3175,6 +3175,22 @@ static inline void pagetable_pud_dtor(struct ptdesc *ptdesc) lruvec_stat_sub_folio(folio, NR_PAGETABLE); } +static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc) +{ + struct folio *folio = ptdesc_folio(ptdesc); + + __folio_set_pgtable(folio); + lruvec_stat_add_folio(folio, NR_PAGETABLE); +} + +static inline void pagetable_p4d_dtor(struct ptdesc *ptdesc) +{ + struct folio *folio = ptdesc_folio(ptdesc); + + __folio_clear_pgtable(folio); + lruvec_stat_sub_folio(folio, NR_PAGETABLE); +} + extern void __init pagecache_init(void); extern void free_initmem(void); From 27a85152da20b72ace9dfc717db722401462827e Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:21 +0800 Subject: [PATCH 176/504] arm64: pgtable: use mmu gather to free p4d level page table Like other levels of page tables, also use mmu gather mechanism to free p4d level page table. Link: https://lkml.kernel.org/r/3fd48525397b34a64f7c0eb76746da30814dc941.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Originally-by: Peter Zijlstra (Intel) Reviewed-by: Kevin Brodsky Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/arm64/include/asm/pgalloc.h | 1 - arch/arm64/include/asm/tlb.h | 14 ++++++++++++++ 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index 2965f5a7e39e..1b4509d3382c 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h @@ -85,7 +85,6 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, p4d_t *p4dp) __pgd_populate(pgdp, __pa(p4dp), pgdval); } -#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d) #else static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t p4dp, pgdval_t prot) { diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index a947c6e784ed..445282cde9af 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -111,4 +111,18 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, } #endif +#if CONFIG_PGTABLE_LEVELS > 4 +static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4dp, + unsigned long addr) +{ + struct ptdesc *ptdesc = virt_to_ptdesc(p4dp); + + if (!pgtable_l5_enabled()) + return; + + pagetable_p4d_dtor(ptdesc); + tlb_remove_ptdesc(tlb, ptdesc); +} +#endif + #endif From af77534b547f1e63b77ae14b63b33a7ed1a60317 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:22 +0800 Subject: [PATCH 177/504] s390: pgtable: add statistics for PUD and P4D level page table Like PMD and PTE level page table, also add statistics for PUD and P4D page table. 
Link: https://lkml.kernel.org/r/4707dffce228ccec5c6662810566dd12b5741c4b.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Suggested-by: Peter Zijlstra (Intel) Reviewed-by: Kevin Brodsky Acked-by: Alexander Gordeev Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/s390/include/asm/pgalloc.h | 29 +++++++++++++++++++++-------- arch/s390/include/asm/tlb.h | 2 ++ 2 files changed, 23 insertions(+), 8 deletions(-) diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 7b84ef6dc4b6..a0c1ca5d8423 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -53,29 +53,42 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address) { unsigned long *table = crst_table_alloc(mm); - if (table) - crst_table_init(table, _REGION2_ENTRY_EMPTY); + if (!table) + return NULL; + crst_table_init(table, _REGION2_ENTRY_EMPTY); + pagetable_p4d_ctor(virt_to_ptdesc(table)); + return (p4d_t *) table; } static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) { - if (!mm_p4d_folded(mm)) - crst_table_free(mm, (unsigned long *) p4d); + if (mm_p4d_folded(mm)) + return; + + pagetable_p4d_dtor(virt_to_ptdesc(p4d)); + crst_table_free(mm, (unsigned long *) p4d); } static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address) { unsigned long *table = crst_table_alloc(mm); - if (table) - crst_table_init(table, _REGION3_ENTRY_EMPTY); + + if (!table) + return NULL; + crst_table_init(table, _REGION3_ENTRY_EMPTY); + pagetable_pud_ctor(virt_to_ptdesc(table)); + return (pud_t *) table; } static inline void pud_free(struct mm_struct *mm, pud_t *pud) { - if (!mm_pud_folded(mm)) - crst_table_free(mm, (unsigned long *) pud); + if (mm_pud_folded(mm)) + return; + + pagetable_pud_dtor(virt_to_ptdesc(pud)); + crst_table_free(mm, (unsigned long *) pud); } static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr) diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index e95b2c8081eb..907d57a68145 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -122,6 +122,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, { if (mm_p4d_folded(tlb->mm)) return; + pagetable_p4d_dtor(virt_to_ptdesc(p4d)); __tlb_adjust_range(tlb, address, PAGE_SIZE); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; @@ -140,6 +141,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, { if (mm_pud_folded(tlb->mm)) return; + pagetable_pud_dtor(virt_to_ptdesc(pud)); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; tlb->cleared_p4ds = 1; From 9f2d68e8596f58b719f98a8fc818a3a5411bb785 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:23 +0800 Subject: [PATCH 178/504] mm: pgtable: introduce pagetable_dtor() The pagetable_p*_dtor() are exactly the same except for the handling of ptlock. If we make ptlock_free() handle the case where ptdesc->ptl is NULL and remove VM_BUG_ON_PAGE() from pmd_ptlock_free(), we can unify pagetable_p*_dtor() into one function. Let's introduce pagetable_dtor() to do this. 
Later, pagetable_dtor() will be moved to tlb_remove_ptdesc(), so that ptlock and page table pages can be freed together (regardless of whether RCU is used). This prevents the use-after-free problem where the ptlock is freed immediately but the page table pages is freed later via RCU. Link: https://lkml.kernel.org/r/47f44fff9dc68d9d9e9a0d6c036df275f820598a.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Originally-by: Peter Zijlstra (Intel) Reviewed-by: Kevin Brodsky Acked-by: Alexander Gordeev [s390] Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- Documentation/mm/split_page_table_lock.rst | 4 +- arch/arm/include/asm/tlb.h | 4 +- arch/arm64/include/asm/tlb.h | 8 ++-- arch/csky/include/asm/pgalloc.h | 2 +- arch/hexagon/include/asm/pgalloc.h | 2 +- arch/loongarch/include/asm/pgalloc.h | 2 +- arch/m68k/include/asm/mcf_pgalloc.h | 4 +- arch/m68k/include/asm/sun3_pgalloc.h | 2 +- arch/m68k/mm/motorola.c | 2 +- arch/mips/include/asm/pgalloc.h | 2 +- arch/nios2/include/asm/pgalloc.h | 2 +- arch/openrisc/include/asm/pgalloc.h | 2 +- arch/powerpc/mm/book3s64/mmu_context.c | 2 +- arch/powerpc/mm/book3s64/pgtable.c | 2 +- arch/powerpc/mm/pgtable-frag.c | 4 +- arch/riscv/include/asm/pgalloc.h | 8 ++-- arch/riscv/mm/init.c | 4 +- arch/s390/include/asm/pgalloc.h | 6 +-- arch/s390/include/asm/tlb.h | 6 +-- arch/s390/mm/pgalloc.c | 2 +- arch/sh/include/asm/pgalloc.h | 2 +- arch/sparc/mm/init_64.c | 2 +- arch/sparc/mm/srmmu.c | 2 +- arch/um/include/asm/pgalloc.h | 6 +-- arch/x86/mm/pgtable.c | 12 ++--- include/asm-generic/pgalloc.h | 8 ++-- include/linux/mm.h | 52 ++++------------------ mm/memory.c | 3 +- 28 files changed, 62 insertions(+), 95 deletions(-) diff --git a/Documentation/mm/split_page_table_lock.rst b/Documentation/mm/split_page_table_lock.rst index 581446d4a4eb..8e1ceb0a6619 100644 --- a/Documentation/mm/split_page_table_lock.rst +++ b/Documentation/mm/split_page_table_lock.rst @@ -62,7 +62,7 @@ Support of split page table lock by an architecture =================================================== There's no need in special enabling of PTE split page table lock: everything -required is done by pagetable_pte_ctor() and pagetable_pte_dtor(), which +required is done by pagetable_pte_ctor() and pagetable_dtor(), which must be called on PTE table allocation / freeing. Make sure the architecture doesn't use slab allocator for page table @@ -73,7 +73,7 @@ PMD split lock only makes sense if you have more than two page table levels. PMD split lock enabling requires pagetable_pmd_ctor() call on PMD table -allocation and pagetable_pmd_dtor() on freeing. +allocation and pagetable_dtor() on freeing. 
Allocation usually happens in pmd_alloc_one(), freeing in pmd_free() and pmd_free_tlb(), but make sure you cover all PMD table allocation / freeing diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index f40d06ad5d2a..ef79bf1e8563 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -41,7 +41,7 @@ __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr) { struct ptdesc *ptdesc = page_ptdesc(pte); - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); #ifndef CONFIG_ARM_LPAE /* @@ -61,7 +61,7 @@ __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) #ifdef CONFIG_ARM_LPAE struct ptdesc *ptdesc = virt_to_ptdesc(pmdp); - pagetable_pmd_dtor(ptdesc); + pagetable_dtor(ptdesc); tlb_remove_ptdesc(tlb, ptdesc); #endif } diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 445282cde9af..408d0f36a8a8 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -82,7 +82,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, { struct ptdesc *ptdesc = page_ptdesc(pte); - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); tlb_remove_ptdesc(tlb, ptdesc); } @@ -92,7 +92,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, { struct ptdesc *ptdesc = virt_to_ptdesc(pmdp); - pagetable_pmd_dtor(ptdesc); + pagetable_dtor(ptdesc); tlb_remove_ptdesc(tlb, ptdesc); } #endif @@ -106,7 +106,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, if (!pgtable_l4_enabled()) return; - pagetable_pud_dtor(ptdesc); + pagetable_dtor(ptdesc); tlb_remove_ptdesc(tlb, ptdesc); } #endif @@ -120,7 +120,7 @@ static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4dp, if (!pgtable_l5_enabled()) return; - pagetable_p4d_dtor(ptdesc); + pagetable_dtor(ptdesc); tlb_remove_ptdesc(tlb, ptdesc); } #endif diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h index 9c84c9012e53..f1ce5b7b28f2 100644 --- a/arch/csky/include/asm/pgalloc.h +++ b/arch/csky/include/asm/pgalloc.h @@ -63,7 +63,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) #define __pte_free_tlb(tlb, pte, address) \ do { \ - pagetable_pte_dtor(page_ptdesc(pte)); \ + pagetable_dtor(page_ptdesc(pte)); \ tlb_remove_page_ptdesc(tlb, page_ptdesc(pte)); \ } while (0) diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h index 55988625e6fb..40e42a0e7167 100644 --- a/arch/hexagon/include/asm/pgalloc.h +++ b/arch/hexagon/include/asm/pgalloc.h @@ -89,7 +89,7 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, #define __pte_free_tlb(tlb, pte, addr) \ do { \ - pagetable_pte_dtor((page_ptdesc(pte))); \ + pagetable_dtor((page_ptdesc(pte))); \ tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \ } while (0) diff --git a/arch/loongarch/include/asm/pgalloc.h b/arch/loongarch/include/asm/pgalloc.h index a7b9c9e73593..7211dff8c969 100644 --- a/arch/loongarch/include/asm/pgalloc.h +++ b/arch/loongarch/include/asm/pgalloc.h @@ -57,7 +57,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm) #define __pte_free_tlb(tlb, pte, address) \ do { \ - pagetable_pte_dtor(page_ptdesc(pte)); \ + pagetable_dtor(page_ptdesc(pte)); \ tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \ } while (0) diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h index 302c5bf67179..22d6c1fcabfb 100644 --- a/arch/m68k/include/asm/mcf_pgalloc.h +++ b/arch/m68k/include/asm/mcf_pgalloc.h @@ -37,7 
+37,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pgtable, { struct ptdesc *ptdesc = virt_to_ptdesc(pgtable); - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } @@ -61,7 +61,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable) { struct ptdesc *ptdesc = virt_to_ptdesc(pgtable); - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h index 4a137eecb6fe..2b626cb3ad0a 100644 --- a/arch/m68k/include/asm/sun3_pgalloc.h +++ b/arch/m68k/include/asm/sun3_pgalloc.h @@ -19,7 +19,7 @@ extern const char bad_pmd_string[]; #define __pte_free_tlb(tlb, pte, addr) \ do { \ - pagetable_pte_dtor(page_ptdesc(pte)); \ + pagetable_dtor(page_ptdesc(pte)); \ tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \ } while (0) diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index c1761d309fc6..81715cece70c 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -201,7 +201,7 @@ int free_pointer_table(void *table, int type) list_del(dp); mmu_page_dtor((void *)page); if (type == TABLE_PTE) - pagetable_pte_dtor(virt_to_ptdesc((void *)page)); + pagetable_dtor(virt_to_ptdesc((void *)page)); free_page (page); return 1; } else if (ptable_list[type].next != dp) { diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index f4440edcd8fe..36d9805033c4 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -56,7 +56,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) #define __pte_free_tlb(tlb, pte, address) \ do { \ - pagetable_pte_dtor(page_ptdesc(pte)); \ + pagetable_dtor(page_ptdesc(pte)); \ tlb_remove_page_ptdesc((tlb), page_ptdesc(pte)); \ } while (0) diff --git a/arch/nios2/include/asm/pgalloc.h b/arch/nios2/include/asm/pgalloc.h index ce6bb8e74271..12a536b7bfbd 100644 --- a/arch/nios2/include/asm/pgalloc.h +++ b/arch/nios2/include/asm/pgalloc.h @@ -30,7 +30,7 @@ extern pgd_t *pgd_alloc(struct mm_struct *mm); #define __pte_free_tlb(tlb, pte, addr) \ do { \ - pagetable_pte_dtor(page_ptdesc(pte)); \ + pagetable_dtor(page_ptdesc(pte)); \ tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \ } while (0) diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h index c6a73772a546..596e2355824e 100644 --- a/arch/openrisc/include/asm/pgalloc.h +++ b/arch/openrisc/include/asm/pgalloc.h @@ -68,7 +68,7 @@ extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm); #define __pte_free_tlb(tlb, pte, addr) \ do { \ - pagetable_pte_dtor(page_ptdesc(pte)); \ + pagetable_dtor(page_ptdesc(pte)); \ tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \ } while (0) diff --git a/arch/powerpc/mm/book3s64/mmu_context.c b/arch/powerpc/mm/book3s64/mmu_context.c index 1715b07c630c..4e1e45420bd4 100644 --- a/arch/powerpc/mm/book3s64/mmu_context.c +++ b/arch/powerpc/mm/book3s64/mmu_context.c @@ -253,7 +253,7 @@ static void pmd_frag_destroy(void *pmd_frag) count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT; /* We allow PTE_FRAG_NR fragments from a PTE page */ if (atomic_sub_and_test(PMD_FRAG_NR - count, &ptdesc->pt_frag_refcount)) { - pagetable_pmd_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } } diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 374542528080..3f28e4acd920 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ 
-477,7 +477,7 @@ void pmd_fragment_free(unsigned long *pmd) BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0); if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) { - pagetable_pmd_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } } diff --git a/arch/powerpc/mm/pgtable-frag.c b/arch/powerpc/mm/pgtable-frag.c index e89f64a0f24a..713268ccb1a0 100644 --- a/arch/powerpc/mm/pgtable-frag.c +++ b/arch/powerpc/mm/pgtable-frag.c @@ -25,7 +25,7 @@ void pte_frag_destroy(void *pte_frag) count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT; /* We allow PTE_FRAG_NR fragments from a PTE page */ if (atomic_sub_and_test(PTE_FRAG_NR - count, &ptdesc->pt_frag_refcount)) { - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } } @@ -111,7 +111,7 @@ static void pte_free_now(struct rcu_head *head) struct ptdesc *ptdesc; ptdesc = container_of(head, struct ptdesc, pt_rcu_head); - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h index 3466fbe2e508..b6793c5c9929 100644 --- a/arch/riscv/include/asm/pgalloc.h +++ b/arch/riscv/include/asm/pgalloc.h @@ -100,7 +100,7 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, if (pgtable_l4_enabled) { struct ptdesc *ptdesc = virt_to_ptdesc(pud); - pagetable_pud_dtor(ptdesc); + pagetable_dtor(ptdesc); riscv_tlb_remove_ptdesc(tlb, ptdesc); } } @@ -111,7 +111,7 @@ static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, if (pgtable_l5_enabled) { struct ptdesc *ptdesc = virt_to_ptdesc(p4d); - pagetable_p4d_dtor(ptdesc); + pagetable_dtor(ptdesc); riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d)); } } @@ -144,7 +144,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, { struct ptdesc *ptdesc = virt_to_ptdesc(pmd); - pagetable_pmd_dtor(ptdesc); + pagetable_dtor(ptdesc); riscv_tlb_remove_ptdesc(tlb, ptdesc); } @@ -155,7 +155,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, { struct ptdesc *ptdesc = page_ptdesc(pte); - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); riscv_tlb_remove_ptdesc(tlb, ptdesc); } #endif /* CONFIG_MMU */ diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index fc53ce748c80..8d703fb51b1d 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -1558,7 +1558,7 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) return; } - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); if (PageReserved(page)) free_reserved_page(page); else @@ -1580,7 +1580,7 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud, bool is_vmemm } if (!is_vmemmap) - pagetable_pmd_dtor(ptdesc); + pagetable_dtor(ptdesc); if (PageReserved(page)) free_reserved_page(page); else diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index a0c1ca5d8423..5fced6d3c36b 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -66,7 +66,7 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) if (mm_p4d_folded(mm)) return; - pagetable_p4d_dtor(virt_to_ptdesc(p4d)); + pagetable_dtor(virt_to_ptdesc(p4d)); crst_table_free(mm, (unsigned long *) p4d); } @@ -87,7 +87,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) if (mm_pud_folded(mm)) return; - pagetable_pud_dtor(virt_to_ptdesc(pud)); + pagetable_dtor(virt_to_ptdesc(pud)); crst_table_free(mm, (unsigned long *) pud); } @@ -109,7 +109,7 @@ static inline void pmd_free(struct 
mm_struct *mm, pmd_t *pmd) { if (mm_pmd_folded(mm)) return; - pagetable_pmd_dtor(virt_to_ptdesc(pmd)); + pagetable_dtor(virt_to_ptdesc(pmd)); crst_table_free(mm, (unsigned long *) pmd); } diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 907d57a68145..dde847a5be54 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -102,7 +102,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, { if (mm_pmd_folded(tlb->mm)) return; - pagetable_pmd_dtor(virt_to_ptdesc(pmd)); + pagetable_dtor(virt_to_ptdesc(pmd)); __tlb_adjust_range(tlb, address, PAGE_SIZE); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; @@ -122,7 +122,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, { if (mm_p4d_folded(tlb->mm)) return; - pagetable_p4d_dtor(virt_to_ptdesc(p4d)); + pagetable_dtor(virt_to_ptdesc(p4d)); __tlb_adjust_range(tlb, address, PAGE_SIZE); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; @@ -141,7 +141,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, { if (mm_pud_folded(tlb->mm)) return; - pagetable_pud_dtor(virt_to_ptdesc(pud)); + pagetable_dtor(virt_to_ptdesc(pud)); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; tlb->cleared_p4ds = 1; diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 58696a0c4e4a..569de24d3376 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -182,7 +182,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) static void pagetable_pte_dtor_free(struct ptdesc *ptdesc) { - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } diff --git a/arch/sh/include/asm/pgalloc.h b/arch/sh/include/asm/pgalloc.h index 5d8577ab1591..96d938fdf224 100644 --- a/arch/sh/include/asm/pgalloc.h +++ b/arch/sh/include/asm/pgalloc.h @@ -34,7 +34,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, #define __pte_free_tlb(tlb, pte, addr) \ do { \ - pagetable_pte_dtor(page_ptdesc(pte)); \ + pagetable_dtor(page_ptdesc(pte)); \ tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \ } while (0) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 21f8cbbd0581..05882bca5b73 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2915,7 +2915,7 @@ static void __pte_free(pgtable_t pte) { struct ptdesc *ptdesc = virt_to_ptdesc(pte); - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index 9df51a62333d..e3a72c884b86 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -372,7 +372,7 @@ void pte_free(struct mm_struct *mm, pgtable_t ptep) page = pfn_to_page(__nocache_pa((unsigned long)ptep) >> PAGE_SHIFT); spin_lock(&mm->page_table_lock); if (page_ref_dec_return(page) == 1) - pagetable_pte_dtor(page_ptdesc(page)); + pagetable_dtor(page_ptdesc(page)); spin_unlock(&mm->page_table_lock); srmmu_free_nocache(ptep, SRMMU_PTE_TABLE_SIZE); diff --git a/arch/um/include/asm/pgalloc.h b/arch/um/include/asm/pgalloc.h index 04fb4e6969a4..f0af23c3aeb2 100644 --- a/arch/um/include/asm/pgalloc.h +++ b/arch/um/include/asm/pgalloc.h @@ -27,7 +27,7 @@ extern pgd_t *pgd_alloc(struct mm_struct *); #define __pte_free_tlb(tlb, pte, address) \ do { \ - pagetable_pte_dtor(page_ptdesc(pte)); \ + pagetable_dtor(page_ptdesc(pte)); \ tlb_remove_page_ptdesc((tlb), (page_ptdesc(pte))); \ } while (0) @@ -35,7 +35,7 @@ do { \ #define __pmd_free_tlb(tlb, pmd, address) \ do { \ - pagetable_pmd_dtor(virt_to_ptdesc(pmd)); \ + 
pagetable_dtor(virt_to_ptdesc(pmd)); \ tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pmd)); \ } while (0) @@ -43,7 +43,7 @@ do { \ #define __pud_free_tlb(tlb, pud, address) \ do { \ - pagetable_pud_dtor(virt_to_ptdesc(pud)); \ + pagetable_dtor(virt_to_ptdesc(pud)); \ tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pud)); \ } while (0) diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 3d6e84da45b2..a6cd9660e29e 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -60,7 +60,7 @@ early_param("userpte", setup_userpte); void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) { - pagetable_pte_dtor(page_ptdesc(pte)); + pagetable_dtor(page_ptdesc(pte)); paravirt_release_pte(page_to_pfn(pte)); paravirt_tlb_remove_table(tlb, pte); } @@ -77,7 +77,7 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) #ifdef CONFIG_X86_PAE tlb->need_flush_all = 1; #endif - pagetable_pmd_dtor(ptdesc); + pagetable_dtor(ptdesc); paravirt_tlb_remove_table(tlb, ptdesc_page(ptdesc)); } @@ -86,7 +86,7 @@ void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) { struct ptdesc *ptdesc = virt_to_ptdesc(pud); - pagetable_pud_dtor(ptdesc); + pagetable_dtor(ptdesc); paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); paravirt_tlb_remove_table(tlb, virt_to_page(pud)); } @@ -96,7 +96,7 @@ void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d) { struct ptdesc *ptdesc = virt_to_ptdesc(p4d); - pagetable_p4d_dtor(ptdesc); + pagetable_dtor(ptdesc); paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT); paravirt_tlb_remove_table(tlb, virt_to_page(p4d)); } @@ -233,7 +233,7 @@ static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count) if (pmds[i]) { ptdesc = virt_to_ptdesc(pmds[i]); - pagetable_pmd_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); mm_dec_nr_pmds(mm); } @@ -867,7 +867,7 @@ int pud_free_pmd_page(pud_t *pud, unsigned long addr) free_page((unsigned long)pmd_sv); - pagetable_pmd_dtor(virt_to_ptdesc(pmd)); + pagetable_dtor(virt_to_ptdesc(pmd)); free_page((unsigned long)pmd); return 1; diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index bb482eeca0c3..4afb346eae25 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -109,7 +109,7 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page) { struct ptdesc *ptdesc = page_ptdesc(pte_page); - pagetable_pte_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } @@ -153,7 +153,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) struct ptdesc *ptdesc = virt_to_ptdesc(pmd); BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); - pagetable_pmd_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } #endif @@ -202,7 +202,7 @@ static inline void __pud_free(struct mm_struct *mm, pud_t *pud) struct ptdesc *ptdesc = virt_to_ptdesc(pud); BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); - pagetable_pud_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } @@ -248,7 +248,7 @@ static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d) struct ptdesc *ptdesc = virt_to_ptdesc(p4d); BUG_ON((unsigned long)p4d & (PAGE_SIZE-1)); - pagetable_p4d_dtor(ptdesc); + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } diff --git a/include/linux/mm.h b/include/linux/mm.h index 2e56a9634a97..a3b2263f1c1a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2992,6 +2992,15 @@ static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; } static inline void ptlock_free(struct ptdesc *ptdesc) {} #endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */ 
+static inline void pagetable_dtor(struct ptdesc *ptdesc) +{ + struct folio *folio = ptdesc_folio(ptdesc); + + ptlock_free(ptdesc); + __folio_clear_pgtable(folio); + lruvec_stat_sub_folio(folio, NR_PAGETABLE); +} + static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc) { struct folio *folio = ptdesc_folio(ptdesc); @@ -3003,15 +3012,6 @@ static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc) return true; } -static inline void pagetable_pte_dtor(struct ptdesc *ptdesc) -{ - struct folio *folio = ptdesc_folio(ptdesc); - - ptlock_free(ptdesc); - __folio_clear_pgtable(folio); - lruvec_stat_sub_folio(folio, NR_PAGETABLE); -} - pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp); static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp) @@ -3088,14 +3088,6 @@ static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) return ptlock_init(ptdesc); } -static inline void pmd_ptlock_free(struct ptdesc *ptdesc) -{ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc)); -#endif - ptlock_free(ptdesc); -} - #define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte) #else @@ -3106,7 +3098,6 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd) } static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; } -static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {} #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte) @@ -3131,15 +3122,6 @@ static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc) return true; } -static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc) -{ - struct folio *folio = ptdesc_folio(ptdesc); - - pmd_ptlock_free(ptdesc); - __folio_clear_pgtable(folio); - lruvec_stat_sub_folio(folio, NR_PAGETABLE); -} - /* * No scalability reason to split PUD locks yet, but follow the same pattern * as the PMD locks to make it easier if we decide to. The VM should not be @@ -3167,14 +3149,6 @@ static inline void pagetable_pud_ctor(struct ptdesc *ptdesc) lruvec_stat_add_folio(folio, NR_PAGETABLE); } -static inline void pagetable_pud_dtor(struct ptdesc *ptdesc) -{ - struct folio *folio = ptdesc_folio(ptdesc); - - __folio_clear_pgtable(folio); - lruvec_stat_sub_folio(folio, NR_PAGETABLE); -} - static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc) { struct folio *folio = ptdesc_folio(ptdesc); @@ -3183,14 +3157,6 @@ static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc) lruvec_stat_add_folio(folio, NR_PAGETABLE); } -static inline void pagetable_p4d_dtor(struct ptdesc *ptdesc) -{ - struct folio *folio = ptdesc_folio(ptdesc); - - __folio_clear_pgtable(folio); - lruvec_stat_sub_folio(folio, NR_PAGETABLE); -} - extern void __init pagecache_init(void); extern void free_initmem(void); diff --git a/mm/memory.c b/mm/memory.c index 9defa853dbd2..2a20e3810534 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -7031,7 +7031,8 @@ bool ptlock_alloc(struct ptdesc *ptdesc) void ptlock_free(struct ptdesc *ptdesc) { - kmem_cache_free(page_ptl_cachep, ptdesc->ptl); + if (ptdesc->ptl) + kmem_cache_free(page_ptl_cachep, ptdesc->ptl); } #endif From 772587878b5c80af14aef778bf83486e29cdccb1 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:24 +0800 Subject: [PATCH 179/504] arm: pgtable: move pagetable_dtor() to __tlb_remove_table() Move pagetable_dtor() to __tlb_remove_table(), so that ptlock and page table pages can be freed together (regardless of whether RCU is used). 
This prevents the use-after-free problem where the ptlock is freed immediately but the page table pages is freed later via RCU. Page tables shouldn't have swap cache, so use pagetable_free() instead of free_page_and_swap_cache() to free page table pages. Link: https://lkml.kernel.org/r/327b4b8990729edd4ce97d9d5acbdaff2d9fa1d1.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Suggested-by: Peter Zijlstra (Intel) Reviewed-by: Kevin Brodsky Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/arm/include/asm/tlb.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index ef79bf1e8563..59854c6b97bc 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -26,12 +26,14 @@ #else /* !CONFIG_MMU */ -#include #include static inline void __tlb_remove_table(void *_table) { - free_page_and_swap_cache((struct page *)_table); + struct ptdesc *ptdesc = (struct ptdesc *)_table; + + pagetable_dtor(ptdesc); + pagetable_free(ptdesc); } #include @@ -41,7 +43,9 @@ __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr) { struct ptdesc *ptdesc = page_ptdesc(pte); +#ifndef CONFIG_MMU_GATHER_TABLE_FREE pagetable_dtor(ptdesc); +#endif #ifndef CONFIG_ARM_LPAE /* @@ -61,7 +65,6 @@ __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr) #ifdef CONFIG_ARM_LPAE struct ptdesc *ptdesc = virt_to_ptdesc(pmdp); - pagetable_dtor(ptdesc); tlb_remove_ptdesc(tlb, ptdesc); #endif } From a090eecb823ced0b5416ee50fb222fec088fcf2c Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:25 +0800 Subject: [PATCH 180/504] arm64: pgtable: move pagetable_dtor() to __tlb_remove_table() Move pagetable_dtor() to __tlb_remove_table(), so that ptlock and page table pages can be freed together (regardless of whether RCU is used). This prevents the use-after-free problem where the ptlock is freed immediately but the page table pages is freed later via RCU. Page tables shouldn't have swap cache, so use pagetable_free() instead of free_page_and_swap_cache() to free page table pages. 
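For reference, the resulting arm64 callback (shown in the hunk below) reduces to a single combined teardown; the comment here is explanatory and not part of the patch:

	static inline void __tlb_remove_table(void *_table)
	{
		struct ptdesc *ptdesc = (struct ptdesc *)_table;

		/*
		 * Tear down the ptlock and free the page table page in one
		 * place, after the RCU grace period when RCU table freeing
		 * is in use, so there is no window where the ptlock is
		 * already freed while the page is still visible to lockless
		 * page table walkers.
		 */
		pagetable_dtor(ptdesc);
		pagetable_free(ptdesc);
	}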
Link: https://lkml.kernel.org/r/cf4b847caf390f96a3e3d534dacb2c174e16c154.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Suggested-by: Peter Zijlstra (Intel) Reviewed-by: Kevin Brodsky Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/arm64/include/asm/tlb.h | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 408d0f36a8a8..93591a80b5bf 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -9,11 +9,13 @@ #define __ASM_TLB_H #include -#include static inline void __tlb_remove_table(void *_table) { - free_page_and_swap_cache((struct page *)_table); + struct ptdesc *ptdesc = (struct ptdesc *)_table; + + pagetable_dtor(ptdesc); + pagetable_free(ptdesc); } #define tlb_flush tlb_flush @@ -82,7 +84,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, { struct ptdesc *ptdesc = page_ptdesc(pte); - pagetable_dtor(ptdesc); tlb_remove_ptdesc(tlb, ptdesc); } @@ -92,7 +93,6 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, { struct ptdesc *ptdesc = virt_to_ptdesc(pmdp); - pagetable_dtor(ptdesc); tlb_remove_ptdesc(tlb, ptdesc); } #endif @@ -106,7 +106,6 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, if (!pgtable_l4_enabled()) return; - pagetable_dtor(ptdesc); tlb_remove_ptdesc(tlb, ptdesc); } #endif @@ -120,7 +119,6 @@ static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4dp, if (!pgtable_l5_enabled()) return; - pagetable_dtor(ptdesc); tlb_remove_ptdesc(tlb, ptdesc); } #endif From 6a7aa070004eb0e44f114c06fb61e24c7b7bae54 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:26 +0800 Subject: [PATCH 181/504] riscv: pgtable: move pagetable_dtor() to __tlb_remove_table() Move pagetable_dtor() to __tlb_remove_table(), so that ptlock and page table pages can be freed together (regardless of whether RCU is used). This prevents the use-after-free problem where the ptlock is freed immediately but the page table pages is freed later via RCU. Page tables shouldn't have swap cache, so use pagetable_free() instead of free_page_and_swap_cache() to free page table pages. By the way, move the comment above __tlb_remove_table() to riscv_tlb_remove_ptdesc(), it will be more appropriate. 
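For reference, the reworked riscv wrapper (see the hunk that follows) picks between the two free paths; the comments are explanatory additions:

	static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
	{
		if (riscv_use_sbi_for_rfence()) {
			/* SBI shootdown: RCU table free, the destructor runs
			 * later in __tlb_remove_table(). */
			tlb_remove_ptdesc(tlb, pt);
		} else {
			/* IPI shootdown: page-batch free, so run the
			 * destructor before handing the page over. */
			pagetable_dtor(pt);
			tlb_remove_page_ptdesc(tlb, pt);
		}
	}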
Link: https://lkml.kernel.org/r/b89d77c965507b1b102cbabe988e69365cb288b6.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Suggested-by: Peter Zijlstra (Intel) Reviewed-by: Kevin Brodsky Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/riscv/include/asm/pgalloc.h | 38 ++++++++++++++------------------ arch/riscv/include/asm/tlb.h | 14 ++++-------- 2 files changed, 21 insertions(+), 31 deletions(-) diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h index b6793c5c9929..c8907b831711 100644 --- a/arch/riscv/include/asm/pgalloc.h +++ b/arch/riscv/include/asm/pgalloc.h @@ -15,12 +15,22 @@ #define __HAVE_ARCH_PUD_FREE #include +/* + * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to + * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use + * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this + * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the + * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h + * for more details. + */ static inline void riscv_tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt) { - if (riscv_use_sbi_for_rfence()) + if (riscv_use_sbi_for_rfence()) { tlb_remove_ptdesc(tlb, pt); - else + } else { + pagetable_dtor(pt); tlb_remove_page_ptdesc(tlb, pt); + } } static inline void pmd_populate_kernel(struct mm_struct *mm, @@ -97,23 +107,15 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud) static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, unsigned long addr) { - if (pgtable_l4_enabled) { - struct ptdesc *ptdesc = virt_to_ptdesc(pud); - - pagetable_dtor(ptdesc); - riscv_tlb_remove_ptdesc(tlb, ptdesc); - } + if (pgtable_l4_enabled) + riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud)); } static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, unsigned long addr) { - if (pgtable_l5_enabled) { - struct ptdesc *ptdesc = virt_to_ptdesc(p4d); - - pagetable_dtor(ptdesc); + if (pgtable_l5_enabled) riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d)); - } } #endif /* __PAGETABLE_PMD_FOLDED */ @@ -142,10 +144,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, unsigned long addr) { - struct ptdesc *ptdesc = virt_to_ptdesc(pmd); - - pagetable_dtor(ptdesc); - riscv_tlb_remove_ptdesc(tlb, ptdesc); + riscv_tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd)); } #endif /* __PAGETABLE_PMD_FOLDED */ @@ -153,10 +152,7 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr) { - struct ptdesc *ptdesc = page_ptdesc(pte); - - pagetable_dtor(ptdesc); - riscv_tlb_remove_ptdesc(tlb, ptdesc); + riscv_tlb_remove_ptdesc(tlb, page_ptdesc(pte)); } #endif /* CONFIG_MMU */ diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h index 1f6c38420d8e..ded8724b3c4f 100644 --- a/arch/riscv/include/asm/tlb.h +++ b/arch/riscv/include/asm/tlb.h @@ -11,19 +11,13 @@ struct mmu_gather; static void 
tlb_flush(struct mmu_gather *tlb); #ifdef CONFIG_MMU -#include -/* - * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to - * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use - * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this - * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the - * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h - * for more details. - */ static inline void __tlb_remove_table(void *table) { - free_page_and_swap_cache(table); + struct ptdesc *ptdesc = (struct ptdesc *)table; + + pagetable_dtor(ptdesc); + pagetable_free(ptdesc); } #endif /* CONFIG_MMU */ From 4484116dc26c54e3ccb6602c18aa7f95ff21aab8 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:27 +0800 Subject: [PATCH 182/504] x86: pgtable: convert __tlb_remove_table() to use struct ptdesc Convert __tlb_remove_table() to use struct ptdesc, which will help to move pagetable_dtor() to __tlb_remove_table(). And page tables shouldn't have swap cache, so use pagetable_free() instead of free_page_and_swap_cache() to free page table pages. Link: https://lkml.kernel.org/r/39f60f93143ff77cf5d6b3c3e75af0ffc1480adb.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Reviewed-by: Kevin Brodsky Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Peter Zijlstra (Intel) Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/x86/include/asm/tlb.h | 16 +++++++++------- arch/x86/kernel/paravirt.c | 4 +++- arch/x86/mm/pgtable.c | 12 +++++++----- 3 files changed, 19 insertions(+), 13 deletions(-) diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index 73f0786181cc..680ec3d47915 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h @@ -31,24 +31,26 @@ static inline void tlb_flush(struct mmu_gather *tlb) */ static inline void __tlb_remove_table(void *table) { - free_page_and_swap_cache(table); + struct ptdesc *ptdesc = (struct ptdesc *)table; + + pagetable_free(ptdesc); } #ifdef CONFIG_PT_RECLAIM static inline void __tlb_remove_table_one_rcu(struct rcu_head *head) { - struct page *page; + struct ptdesc *ptdesc; - page = container_of(head, struct page, rcu_head); - put_page(page); + ptdesc = container_of(head, struct ptdesc, pt_rcu_head); + __tlb_remove_table(ptdesc); } static inline void __tlb_remove_table_one(void *table) { - struct page *page; + struct ptdesc *ptdesc; - page = table; - call_rcu(&page->rcu_head, __tlb_remove_table_one_rcu); + ptdesc = table; + call_rcu(&ptdesc->pt_rcu_head, __tlb_remove_table_one_rcu); } #define __tlb_remove_table_one __tlb_remove_table_one #endif /* CONFIG_PT_RECLAIM */ diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 89688921ea62..e9bd0ba2d453 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -62,7 +62,9 @@ void __init native_pv_lock_init(void) #ifndef CONFIG_PT_RECLAIM static void native_tlb_remove_table(struct mmu_gather *tlb, void *table) { - tlb_remove_page(tlb, table); + struct ptdesc *ptdesc = (struct ptdesc *)table; + + tlb_remove_page(tlb, ptdesc_page(ptdesc)); } #else 
static void native_tlb_remove_table(struct mmu_gather *tlb, void *table) diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index a6cd9660e29e..f9516024cbe5 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -23,7 +23,9 @@ EXPORT_SYMBOL(physical_mask); static inline void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table) { - tlb_remove_page(tlb, table); + struct ptdesc *ptdesc = (struct ptdesc *)table; + + tlb_remove_page(tlb, ptdesc_page(ptdesc)); } #else static inline @@ -62,7 +64,7 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) { pagetable_dtor(page_ptdesc(pte)); paravirt_release_pte(page_to_pfn(pte)); - paravirt_tlb_remove_table(tlb, pte); + paravirt_tlb_remove_table(tlb, page_ptdesc(pte)); } #if CONFIG_PGTABLE_LEVELS > 2 @@ -78,7 +80,7 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) tlb->need_flush_all = 1; #endif pagetable_dtor(ptdesc); - paravirt_tlb_remove_table(tlb, ptdesc_page(ptdesc)); + paravirt_tlb_remove_table(tlb, ptdesc); } #if CONFIG_PGTABLE_LEVELS > 3 @@ -88,7 +90,7 @@ void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) pagetable_dtor(ptdesc); paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); - paravirt_tlb_remove_table(tlb, virt_to_page(pud)); + paravirt_tlb_remove_table(tlb, ptdesc); } #if CONFIG_PGTABLE_LEVELS > 4 @@ -98,7 +100,7 @@ void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d) pagetable_dtor(ptdesc); paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT); - paravirt_tlb_remove_table(tlb, virt_to_page(p4d)); + paravirt_tlb_remove_table(tlb, ptdesc); } #endif /* CONFIG_PGTABLE_LEVELS > 4 */ #endif /* CONFIG_PGTABLE_LEVELS > 3 */ From 3532b2e726656e5020550d66e57b462dd4c85379 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:28 +0800 Subject: [PATCH 183/504] x86: pgtable: move pagetable_dtor() to __tlb_remove_table() Move pagetable_dtor() to __tlb_remove_table(), so that ptlock and page table pages can be freed together (regardless of whether RCU is used). This prevents the use-after-free problem where the ptlock is freed immediately but the page table pages is freed later via RCU. 
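For reference, after this change both x86 free paths run the destructor at the point where the table page is actually handed back (see the hunks below); the comments are explanatory additions:

	/* RCU table free path */
	static inline void __tlb_remove_table(void *table)
	{
		struct ptdesc *ptdesc = (struct ptdesc *)table;

		pagetable_dtor(ptdesc);
		pagetable_free(ptdesc);
	}

	/* page-batch path, used when CONFIG_PT_RECLAIM is not enabled */
	static void native_tlb_remove_table(struct mmu_gather *tlb, void *table)
	{
		struct ptdesc *ptdesc = (struct ptdesc *)table;

		pagetable_dtor(ptdesc);
		tlb_remove_page(tlb, ptdesc_page(ptdesc));
	}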
Link: https://lkml.kernel.org/r/27b3cdc8786bebd4f748380bf82f796482718504.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Suggested-by: Peter Zijlstra (Intel) Reviewed-by: Kevin Brodsky Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/x86/include/asm/tlb.h | 1 + arch/x86/kernel/paravirt.c | 1 + arch/x86/mm/pgtable.c | 16 ++++------------ 3 files changed, 6 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index 680ec3d47915..f64730be5ad6 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h @@ -33,6 +33,7 @@ static inline void __tlb_remove_table(void *table) { struct ptdesc *ptdesc = (struct ptdesc *)table; + pagetable_dtor(ptdesc); pagetable_free(ptdesc); } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index e9bd0ba2d453..70161999c973 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -64,6 +64,7 @@ static void native_tlb_remove_table(struct mmu_gather *tlb, void *table) { struct ptdesc *ptdesc = (struct ptdesc *)table; + pagetable_dtor(ptdesc); tlb_remove_page(tlb, ptdesc_page(ptdesc)); } #else diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index f9516024cbe5..c02aa0427a6a 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -25,6 +25,7 @@ void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table) { struct ptdesc *ptdesc = (struct ptdesc *)table; + pagetable_dtor(ptdesc); tlb_remove_page(tlb, ptdesc_page(ptdesc)); } #else @@ -62,7 +63,6 @@ early_param("userpte", setup_userpte); void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) { - pagetable_dtor(page_ptdesc(pte)); paravirt_release_pte(page_to_pfn(pte)); paravirt_tlb_remove_table(tlb, page_ptdesc(pte)); } @@ -70,7 +70,6 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte) #if CONFIG_PGTABLE_LEVELS > 2 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) { - struct ptdesc *ptdesc = virt_to_ptdesc(pmd); paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); /* * NOTE! 
For PAE, any changes to the top page-directory-pointer-table @@ -79,28 +78,21 @@ void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) #ifdef CONFIG_X86_PAE tlb->need_flush_all = 1; #endif - pagetable_dtor(ptdesc); - paravirt_tlb_remove_table(tlb, ptdesc); + paravirt_tlb_remove_table(tlb, virt_to_ptdesc(pmd)); } #if CONFIG_PGTABLE_LEVELS > 3 void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) { - struct ptdesc *ptdesc = virt_to_ptdesc(pud); - - pagetable_dtor(ptdesc); paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); - paravirt_tlb_remove_table(tlb, ptdesc); + paravirt_tlb_remove_table(tlb, virt_to_ptdesc(pud)); } #if CONFIG_PGTABLE_LEVELS > 4 void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d) { - struct ptdesc *ptdesc = virt_to_ptdesc(p4d); - - pagetable_dtor(ptdesc); paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT); - paravirt_tlb_remove_table(tlb, ptdesc); + paravirt_tlb_remove_table(tlb, virt_to_ptdesc(p4d)); } #endif /* CONFIG_PGTABLE_LEVELS > 4 */ #endif /* CONFIG_PGTABLE_LEVELS > 3 */ From 5d44d6851190b8fc31661f5d66bcc0c9e1cd2a4e Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:29 +0800 Subject: [PATCH 184/504] s390: pgtable: consolidate PxD and PTE TLB free paths Call pagetable_dtor() for PMD|PUD|P4D tables just before ptdesc is freed - same as it is done for PTE tables. That allows consolidating TLB free paths for all table types. Link: https://lkml.kernel.org/r/ac69360a5f3350ebb2f63cd14b7b45316a130ee4.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Suggested-by: Peter Zijlstra (Intel) Reviewed-by: Kevin Brodsky Acked-by: Alexander Gordeev Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/s390/include/asm/tlb.h | 3 --- arch/s390/mm/pgalloc.c | 14 ++++---------- 2 files changed, 4 insertions(+), 13 deletions(-) diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index dde847a5be54..d5b27a2445c9 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -102,7 +102,6 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, { if (mm_pmd_folded(tlb->mm)) return; - pagetable_dtor(virt_to_ptdesc(pmd)); __tlb_adjust_range(tlb, address, PAGE_SIZE); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; @@ -122,7 +121,6 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, { if (mm_p4d_folded(tlb->mm)) return; - pagetable_dtor(virt_to_ptdesc(p4d)); __tlb_adjust_range(tlb, address, PAGE_SIZE); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; @@ -141,7 +139,6 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, { if (mm_pud_folded(tlb->mm)) return; - pagetable_dtor(virt_to_ptdesc(pud)); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; tlb->cleared_p4ds = 1; diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 569de24d3376..c73b89811a26 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -180,7 +180,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm) return table; } -static void pagetable_pte_dtor_free(struct ptdesc *ptdesc) +static void pagetable_dtor_free(struct ptdesc *ptdesc) { pagetable_dtor(ptdesc); 
pagetable_free(ptdesc); @@ -190,20 +190,14 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) { struct ptdesc *ptdesc = virt_to_ptdesc(table); - pagetable_pte_dtor_free(ptdesc); + pagetable_dtor_free(ptdesc); } void __tlb_remove_table(void *table) { struct ptdesc *ptdesc = virt_to_ptdesc(table); - struct page *page = ptdesc_page(ptdesc); - if (compound_order(page) == CRST_ALLOC_ORDER) { - /* pmd, pud, or p4d */ - pagetable_free(ptdesc); - return; - } - pagetable_pte_dtor_free(ptdesc); + pagetable_dtor_free(ptdesc); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -211,7 +205,7 @@ static void pte_free_now(struct rcu_head *head) { struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head); - pagetable_pte_dtor_free(ptdesc); + pagetable_dtor_free(ptdesc); } void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable) From d5a52cd0e18e0d09f0115c26e52019b5da6acabe Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:30 +0800 Subject: [PATCH 185/504] mm: pgtable: introduce generic __tlb_remove_table() Several architectures (arm, arm64, riscv and x86) define exactly the same __tlb_remove_table(), just introduce generic __tlb_remove_table() to eliminate these duplications. The s390 __tlb_remove_table() is nearly the same, so also make s390 __tlb_remove_table() version generic. Link: https://lkml.kernel.org/r/ea372633d94f4d3f9f56a7ec5994bf050bf77e39.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Reviewed-by: Kevin Brodsky Acked-by: Andreas Larsson [sparc] Acked-by: Alexander Gordeev [s390] Acked-by: Arnd Bergmann [asm-generic] Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Aneesh Kumar K.V (Arm) Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Peter Zijlstra (Intel) Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/arm/include/asm/tlb.h | 9 --------- arch/arm64/include/asm/tlb.h | 7 ------- arch/powerpc/include/asm/tlb.h | 1 + arch/riscv/include/asm/tlb.h | 12 ------------ arch/s390/include/asm/tlb.h | 9 ++++----- arch/s390/mm/pgalloc.c | 7 ------- arch/sparc/include/asm/tlb_64.h | 1 + arch/x86/include/asm/tlb.h | 17 ----------------- include/asm-generic/tlb.h | 15 +++++++++++++-- 9 files changed, 19 insertions(+), 59 deletions(-) diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index 59854c6b97bc..b8eebdb59863 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -27,15 +27,6 @@ #else /* !CONFIG_MMU */ #include - -static inline void __tlb_remove_table(void *_table) -{ - struct ptdesc *ptdesc = (struct ptdesc *)_table; - - pagetable_dtor(ptdesc); - pagetable_free(ptdesc); -} - #include static inline void diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 93591a80b5bf..8d762607285c 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h @@ -10,13 +10,6 @@ #include -static inline void __tlb_remove_table(void *_table) -{ - struct ptdesc *ptdesc = (struct ptdesc *)_table; - - pagetable_dtor(ptdesc); - pagetable_free(ptdesc); -} #define tlb_flush tlb_flush static void tlb_flush(struct mmu_gather *tlb); diff --git a/arch/powerpc/include/asm/tlb.h b/arch/powerpc/include/asm/tlb.h index 1ca7d4c4b90d..2058e8d3e013 100644 --- a/arch/powerpc/include/asm/tlb.h +++ b/arch/powerpc/include/asm/tlb.h @@ -37,6 +37,7 @@ extern 
void tlb_flush(struct mmu_gather *tlb); */ #define tlb_needs_table_invalidate() radix_enabled() +#define __HAVE_ARCH_TLB_REMOVE_TABLE /* Get the generic bits... */ #include diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h index ded8724b3c4f..50b63b5c15bd 100644 --- a/arch/riscv/include/asm/tlb.h +++ b/arch/riscv/include/asm/tlb.h @@ -10,18 +10,6 @@ struct mmu_gather; static void tlb_flush(struct mmu_gather *tlb); -#ifdef CONFIG_MMU - -static inline void __tlb_remove_table(void *table) -{ - struct ptdesc *ptdesc = (struct ptdesc *)table; - - pagetable_dtor(ptdesc); - pagetable_free(ptdesc); -} - -#endif /* CONFIG_MMU */ - #define tlb_flush tlb_flush #include diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index d5b27a2445c9..f39f8c4723f1 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h @@ -22,7 +22,6 @@ * Pages used for the page tables is a different story. FIXME: more */ -void __tlb_remove_table(void *_table); static inline void tlb_flush(struct mmu_gather *tlb); static inline bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, bool delay_rmap, int page_size); @@ -87,7 +86,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, tlb->cleared_pmds = 1; if (mm_alloc_pgste(tlb->mm)) gmap_unlink(tlb->mm, (unsigned long *)pte, address); - tlb_remove_ptdesc(tlb, pte); + tlb_remove_ptdesc(tlb, virt_to_ptdesc(pte)); } /* @@ -106,7 +105,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; tlb->cleared_puds = 1; - tlb_remove_ptdesc(tlb, pmd); + tlb_remove_ptdesc(tlb, virt_to_ptdesc(pmd)); } /* @@ -124,7 +123,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, __tlb_adjust_range(tlb, address, PAGE_SIZE); tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; - tlb_remove_ptdesc(tlb, p4d); + tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d)); } /* @@ -142,7 +141,7 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, tlb->mm->context.flush_mm = 1; tlb->freed_tables = 1; tlb->cleared_p4ds = 1; - tlb_remove_ptdesc(tlb, pud); + tlb_remove_ptdesc(tlb, virt_to_ptdesc(pud)); } diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index c73b89811a26..3e002dea6278 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -193,13 +193,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table) pagetable_dtor_free(ptdesc); } -void __tlb_remove_table(void *table) -{ - struct ptdesc *ptdesc = virt_to_ptdesc(table); - - pagetable_dtor_free(ptdesc); -} - #ifdef CONFIG_TRANSPARENT_HUGEPAGE static void pte_free_now(struct rcu_head *head) { diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h index 3037187482db..1a6e694418e3 100644 --- a/arch/sparc/include/asm/tlb_64.h +++ b/arch/sparc/include/asm/tlb_64.h @@ -33,6 +33,7 @@ void flush_tlb_pending(void); #define tlb_needs_table_invalidate() (false) #endif +#define __HAVE_ARCH_TLB_REMOVE_TABLE #include #endif /* _SPARC64_TLB_H */ diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index f64730be5ad6..3858dbf75880 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h @@ -20,23 +20,6 @@ static inline void tlb_flush(struct mmu_gather *tlb) flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables); } -/* - * While x86 architecture in general requires an IPI to perform TLB - * shootdown, enablement code for several hypervisors overrides - * .flush_tlb_others 
hook in pv_mmu_ops and implements it by issuing - * a hypercall. To keep software pagetable walkers safe in this case we - * switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the comment - * below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h - * for more details. - */ -static inline void __tlb_remove_table(void *table) -{ - struct ptdesc *ptdesc = (struct ptdesc *)table; - - pagetable_dtor(ptdesc); - pagetable_free(ptdesc); -} - #ifdef CONFIG_PT_RECLAIM static inline void __tlb_remove_table_one_rcu(struct rcu_head *head) { diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 709830274b75..69de47c7ef3c 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -153,8 +153,9 @@ * * Useful if your architecture has non-page page directories. * - * When used, an architecture is expected to provide __tlb_remove_table() - * which does the actual freeing of these pages. + * When used, an architecture is expected to provide __tlb_remove_table() or + * use the generic __tlb_remove_table(), which does the actual freeing of these + * pages. * * MMU_GATHER_RCU_TABLE_FREE * @@ -207,6 +208,16 @@ struct mmu_table_batch { #define MAX_TABLE_BATCH \ ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *)) +#ifndef __HAVE_ARCH_TLB_REMOVE_TABLE +static inline void __tlb_remove_table(void *table) +{ + struct ptdesc *ptdesc = (struct ptdesc *)table; + + pagetable_dtor(ptdesc); + pagetable_free(ptdesc); +} +#endif + extern void tlb_remove_table(struct mmu_gather *tlb, void *table); #else /* !CONFIG_MMU_GATHER_HAVE_TABLE_FREE */ From 1f9c31c2a986ece59580094ea35c71e73225144b Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:31 +0800 Subject: [PATCH 186/504] mm: pgtable: completely move pagetable_dtor() to generic tlb_remove_table() For the generic tlb_remove_table(), it is implemented in the following two forms: 1) CONFIG_MMU_GATHER_TABLE_FREE is enabled tlb_remove_table --> generic __tlb_remove_table() 2) CONFIG_MMU_GATHER_TABLE_FREE is disabled tlb_remove_table --> tlb_remove_page For case 1), the pagetable_dtor() has already been moved to generic __tlb_remove_table(). For case 2), now only arm will call tlb_remove_table()/tlb_remove_ptdesc() when CONFIG_MMU_GATHER_TABLE_FREE is disabled. Let's move pagetable_dtor() completely to generic tlb_remove_table(), so that the architectures can follow more easily. 
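For reference, the case 2) fallback then becomes a small inline instead of a macro (see the hunk below), so the destructor also runs there:

	static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
	{
		struct page *page = (struct page *)table;

		pagetable_dtor(page_ptdesc(page));
		tlb_remove_page(tlb, page);
	}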
Link: https://lkml.kernel.org/r/0c733ac867b287ec08190676496d1decebf49da2.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Suggested-by: Kevin Brodsky Reviewed-by: Kevin Brodsky Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Peter Zijlstra (Intel) Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/arm/include/asm/tlb.h | 4 ---- include/asm-generic/tlb.h | 10 ++++++++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index b8eebdb59863..ea4fbe7b17f6 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h @@ -34,10 +34,6 @@ __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr) { struct ptdesc *ptdesc = page_ptdesc(pte); -#ifndef CONFIG_MMU_GATHER_TABLE_FREE - pagetable_dtor(ptdesc); -#endif - #ifndef CONFIG_ARM_LPAE /* * With the classic ARM MMU, a pte page has two corresponding pmd diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 69de47c7ef3c..53ae7748f555 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -220,14 +220,20 @@ static inline void __tlb_remove_table(void *table) extern void tlb_remove_table(struct mmu_gather *tlb, void *table); -#else /* !CONFIG_MMU_GATHER_HAVE_TABLE_FREE */ +#else /* !CONFIG_MMU_GATHER_TABLE_FREE */ +static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page); /* * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based * page directories and we can use the normal page batching to free them. */ -#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page)) +static inline void tlb_remove_table(struct mmu_gather *tlb, void *table) +{ + struct page *page = (struct page *)table; + pagetable_dtor(page_ptdesc(page)); + tlb_remove_page(tlb, page); +} #endif /* CONFIG_MMU_GATHER_TABLE_FREE */ #ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE From b127ae70bad47bd3dc56a1f0260de52f8ad979fe Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:32 +0800 Subject: [PATCH 187/504] mm: pgtable: move __tlb_remove_table_one() in x86 to generic file The __tlb_remove_table_one() in x86 does not contain architecture-specific content, so move it to the generic file. 
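For reference, the helpers being moved (see the hunks below) implement the single-table fallback, used when a table batch page cannot be allocated; with CONFIG_PT_RECLAIM the free is deferred through RCU. The comments are explanatory additions:

	static inline void __tlb_remove_table_one_rcu(struct rcu_head *head)
	{
		struct ptdesc *ptdesc;

		ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
		__tlb_remove_table(ptdesc);
	}

	static inline void __tlb_remove_table_one(void *table)
	{
		struct ptdesc *ptdesc;

		ptdesc = table;
		/* Defer the destructor and the free past an RCU grace period. */
		call_rcu(&ptdesc->pt_rcu_head, __tlb_remove_table_one_rcu);
	}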
Link: https://lkml.kernel.org/r/aab8a449bc67167943fd2cb5aab0a3a23b7b1cd7.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Reviewed-by: Kevin Brodsky Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Peter Zijlstra (Intel) Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/x86/include/asm/tlb.h | 19 ------------------- mm/mmu_gather.c | 20 ++++++++++++++++++-- 2 files changed, 18 insertions(+), 21 deletions(-) diff --git a/arch/x86/include/asm/tlb.h b/arch/x86/include/asm/tlb.h index 3858dbf75880..77f52bc1578a 100644 --- a/arch/x86/include/asm/tlb.h +++ b/arch/x86/include/asm/tlb.h @@ -20,25 +20,6 @@ static inline void tlb_flush(struct mmu_gather *tlb) flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables); } -#ifdef CONFIG_PT_RECLAIM -static inline void __tlb_remove_table_one_rcu(struct rcu_head *head) -{ - struct ptdesc *ptdesc; - - ptdesc = container_of(head, struct ptdesc, pt_rcu_head); - __tlb_remove_table(ptdesc); -} - -static inline void __tlb_remove_table_one(void *table) -{ - struct ptdesc *ptdesc; - - ptdesc = table; - call_rcu(&ptdesc->pt_rcu_head, __tlb_remove_table_one_rcu); -} -#define __tlb_remove_table_one __tlb_remove_table_one -#endif /* CONFIG_PT_RECLAIM */ - static inline void invlpg(unsigned long addr) { asm volatile("invlpg (%0)" ::"r" (addr) : "memory"); diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c index 1e21022bcf33..7aa6f18c500b 100644 --- a/mm/mmu_gather.c +++ b/mm/mmu_gather.c @@ -311,13 +311,29 @@ static inline void tlb_table_invalidate(struct mmu_gather *tlb) } } -#ifndef __tlb_remove_table_one +#ifdef CONFIG_PT_RECLAIM +static inline void __tlb_remove_table_one_rcu(struct rcu_head *head) +{ + struct ptdesc *ptdesc; + + ptdesc = container_of(head, struct ptdesc, pt_rcu_head); + __tlb_remove_table(ptdesc); +} + +static inline void __tlb_remove_table_one(void *table) +{ + struct ptdesc *ptdesc; + + ptdesc = table; + call_rcu(&ptdesc->pt_rcu_head, __tlb_remove_table_one_rcu); +} +#else static inline void __tlb_remove_table_one(void *table) { tlb_remove_table_sync_one(); __tlb_remove_table(table); } -#endif +#endif /* CONFIG_PT_RECLAIM */ static void tlb_remove_table_one(void *table) { From 6c3ee0229fd1ee78c9a4f7a4f112c5f328f9e1de Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Wed, 8 Jan 2025 14:57:33 +0800 Subject: [PATCH 188/504] mm: pgtable: introduce generic pagetable_dtor_free() The pte_free(), pmd_free(), __pud_free() and __p4d_free() in asm-generic/pgalloc.h and the generic __tlb_remove_table() are basically the same, so let's introduce pagetable_dtor_free() to deduplicate them. In addition, the pagetable_dtor_free() in s390 does the same thing, so let's s390 also calls generic pagetable_dtor_free(). 
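For reference, the new helper (see the hunk below) is simply the destructor-plus-free pair, so callers such as pte_free(), pmd_free() and the generic __tlb_remove_table() collapse to a single call:

	static inline void pagetable_dtor_free(struct ptdesc *ptdesc)
	{
		pagetable_dtor(ptdesc);
		pagetable_free(ptdesc);
	}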
Link: https://lkml.kernel.org/r/1663a0565aca881d1338ceb7d1db4aa9c333abd6.1736317725.git.zhengqi.arch@bytedance.com Signed-off-by: Qi Zheng Suggested-by: Peter Zijlstra (Intel) Reviewed-by: Kevin Brodsky Acked-by: Alexander Gordeev [s390] Cc: Alexandre Ghiti Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Aneesh Kumar K.V (Arm) Cc: Arnd Bergmann Cc: Dave Hansen Cc: David Hildenbrand Cc: David Rientjes Cc: Hugh Dickins Cc: Jann Horn Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Muchun Song Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Vishal Moola (Oracle) Cc: Will Deacon Cc: Yu Zhao Signed-off-by: Andrew Morton --- arch/s390/mm/pgalloc.c | 6 ------ include/asm-generic/pgalloc.h | 12 ++++-------- include/asm-generic/tlb.h | 3 +-- include/linux/mm.h | 6 ++++++ 4 files changed, 11 insertions(+), 16 deletions(-) diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 3e002dea6278..a4e761902093 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -180,12 +180,6 @@ unsigned long *page_table_alloc(struct mm_struct *mm) return table; } -static void pagetable_dtor_free(struct ptdesc *ptdesc) -{ - pagetable_dtor(ptdesc); - pagetable_free(ptdesc); -} - void page_table_free(struct mm_struct *mm, unsigned long *table) { struct ptdesc *ptdesc = virt_to_ptdesc(table); diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index 4afb346eae25..e3977ddca15e 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -109,8 +109,7 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page) { struct ptdesc *ptdesc = page_ptdesc(pte_page); - pagetable_dtor(ptdesc); - pagetable_free(ptdesc); + pagetable_dtor_free(ptdesc); } @@ -153,8 +152,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) struct ptdesc *ptdesc = virt_to_ptdesc(pmd); BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); - pagetable_dtor(ptdesc); - pagetable_free(ptdesc); + pagetable_dtor_free(ptdesc); } #endif @@ -202,8 +200,7 @@ static inline void __pud_free(struct mm_struct *mm, pud_t *pud) struct ptdesc *ptdesc = virt_to_ptdesc(pud); BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); - pagetable_dtor(ptdesc); - pagetable_free(ptdesc); + pagetable_dtor_free(ptdesc); } #ifndef __HAVE_ARCH_PUD_FREE @@ -248,8 +245,7 @@ static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d) struct ptdesc *ptdesc = virt_to_ptdesc(p4d); BUG_ON((unsigned long)p4d & (PAGE_SIZE-1)); - pagetable_dtor(ptdesc); - pagetable_free(ptdesc); + pagetable_dtor_free(ptdesc); } #ifndef __HAVE_ARCH_P4D_FREE diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 53ae7748f555..e402aef79c93 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -213,8 +213,7 @@ static inline void __tlb_remove_table(void *table) { struct ptdesc *ptdesc = (struct ptdesc *)table; - pagetable_dtor(ptdesc); - pagetable_free(ptdesc); + pagetable_dtor_free(ptdesc); } #endif diff --git a/include/linux/mm.h b/include/linux/mm.h index a3b2263f1c1a..15a903d59d09 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3001,6 +3001,12 @@ static inline void pagetable_dtor(struct ptdesc *ptdesc) lruvec_stat_sub_folio(folio, NR_PAGETABLE); } +static inline void pagetable_dtor_free(struct ptdesc *ptdesc) +{ + pagetable_dtor(ptdesc); + pagetable_free(ptdesc); +} + static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc) { struct folio *folio = ptdesc_folio(ptdesc); From 5ee2acfc49a7c21a065af9ef577e6a994514c0f8 Mon Sep 
17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:25:48 -0800 Subject: [PATCH 189/504] mm: introduce vma_start_read_locked{_nested} helpers

Patch series "reimplement per-vma lock as a refcount", v9.

Back when per-vma locks were introduced, vm_lock was moved out of
vm_area_struct in [1] because of the performance regression caused by
false cacheline sharing. Recent investigation [2] revealed that the
regression is limited to a rather old Broadwell microarchitecture and
even there it can be mitigated by disabling adjacent cacheline
prefetching, see [3].

Splitting a single logical structure into multiple ones leads to more
complicated management, extra pointer dereferences and overall less
maintainable code. When that split-away part is a lock, it complicates
things even further. With no performance benefits, there are no reasons
for this split. Merging the vm_lock back into vm_area_struct also allows
vm_area_struct to use SLAB_TYPESAFE_BY_RCU later in this patchset.

This patchset:
1. moves vm_lock back into vm_area_struct, aligning it at the cacheline
   boundary and changing the cache to be cacheline-aligned to minimize
   cacheline sharing;
2. changes vm_area_struct initialization to mark new vma as detached
   until it is inserted into vma tree;
3. replaces vm_lock and vma->detached flag with a reference counter;
4. regroups vm_area_struct members to fit them into 3 cachelines;
5. changes vm_area_struct cache to SLAB_TYPESAFE_BY_RCU to allow for
   their reuse and to minimize call_rcu() calls.

Pagefault microbenchmarks show performance improvement:
Hmean faults/cpu-1 507926.5547 ( 0.00%) 506519.3692 * -0.28%*
Hmean faults/cpu-4 479119.7051 ( 0.00%) 481333.6802 * 0.46%*
Hmean faults/cpu-7 452880.2961 ( 0.00%) 455845.6211 * 0.65%*
Hmean faults/cpu-12 347639.1021 ( 0.00%) 352004.2254 * 1.26%*
Hmean faults/cpu-21 200061.2238 ( 0.00%) 229597.0317 * 14.76%*
Hmean faults/cpu-30 145251.2001 ( 0.00%) 164202.5067 * 13.05%*
Hmean faults/cpu-48 106848.4434 ( 0.00%) 120641.5504 * 12.91%*
Hmean faults/cpu-56 92472.3835 ( 0.00%) 103464.7916 * 11.89%*
Hmean faults/sec-1 507566.1468 ( 0.00%) 506139.0811 * -0.28%*
Hmean faults/sec-4 1880478.2402 ( 0.00%) 1886795.6329 * 0.34%*
Hmean faults/sec-7 3106394.3438 ( 0.00%) 3140550.7485 * 1.10%*
Hmean faults/sec-12 4061358.4795 ( 0.00%) 4112477.0206 * 1.26%*
Hmean faults/sec-21 3988619.1169 ( 0.00%) 4577747.1436 * 14.77%*
Hmean faults/sec-30 3909839.5449 ( 0.00%) 4311052.2787 * 10.26%*
Hmean faults/sec-48 4761108.4691 ( 0.00%) 5283790.5026 * 10.98%*
Hmean faults/sec-56 4885561.4590 ( 0.00%) 5415839.4045 * 10.85%*

This patch (of 9):

Introduce helper functions which can be used to read-lock a VMA when
holding mmap_lock for read. Replace direct accesses to vma->vm_lock with
these new helpers.

Link: https://lkml.kernel.org/r/20250111042604.3230628-1-surenb@google.com Link: https://lkml.kernel.org/r/20250111042604.3230628-2-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Lorenzo Stoakes Reviewed-by: Davidlohr Bueso Reviewed-by: Shakeel Butt Reviewed-by: Vlastimil Babka Reviewed-by: Liam R. Howlett Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Lokesh Gidra Cc: Mateusz Guzik Cc: Mattew Wilcox [English fixes] Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . 
McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Sourav Panda Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mm.h | 24 ++++++++++++++++++++++++ mm/userfaultfd.c | 22 +++++----------------- 2 files changed, 29 insertions(+), 17 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 15a903d59d09..3747f2e51139 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -735,6 +735,30 @@ static inline bool vma_start_read(struct vm_area_struct *vma) return true; } +/* + * Use only while holding mmap read lock which guarantees that locking will not + * fail (nobody can concurrently write-lock the vma). vma_start_read() should + * not be used in such cases because it might fail due to mm_lock_seq overflow. + * This functionality is used to obtain vma read lock and drop the mmap read lock. + */ +static inline void vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass) +{ + mmap_assert_locked(vma->vm_mm); + down_read_nested(&vma->vm_lock->lock, subclass); +} + +/* + * Use only while holding mmap read lock which guarantees that locking will not + * fail (nobody can concurrently write-lock the vma). vma_start_read() should + * not be used in such cases because it might fail due to mm_lock_seq overflow. + * This functionality is used to obtain vma read lock and drop the mmap read lock. + */ +static inline void vma_start_read_locked(struct vm_area_struct *vma) +{ + mmap_assert_locked(vma->vm_mm); + down_read(&vma->vm_lock->lock); +} + static inline void vma_end_read(struct vm_area_struct *vma) { rcu_read_lock(); /* keeps vma alive till the end of up_read */ diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index af3dfc3633db..4527c385935b 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -84,16 +84,8 @@ static struct vm_area_struct *uffd_lock_vma(struct mm_struct *mm, mmap_read_lock(mm); vma = find_vma_and_prepare_anon(mm, address); - if (!IS_ERR(vma)) { - /* - * We cannot use vma_start_read() as it may fail due to - * false locked (see comment in vma_start_read()). We - * can avoid that by directly locking vm_lock under - * mmap_lock, which guarantees that nobody can lock the - * vma for write (vma_start_write()) under us. - */ - down_read(&vma->vm_lock->lock); - } + if (!IS_ERR(vma)) + vma_start_read_locked(vma); mmap_read_unlock(mm); return vma; @@ -1491,14 +1483,10 @@ static int uffd_move_lock(struct mm_struct *mm, mmap_read_lock(mm); err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap); if (!err) { - /* - * See comment in uffd_lock_vma() as to why not using - * vma_start_read() here. - */ - down_read(&(*dst_vmap)->vm_lock->lock); + vma_start_read_locked(*dst_vmap); if (*dst_vmap != *src_vmap) - down_read_nested(&(*src_vmap)->vm_lock->lock, - SINGLE_DEPTH_NESTING); + vma_start_read_locked_nested(*src_vmap, + SINGLE_DEPTH_NESTING); } mmap_read_unlock(mm); return err; From ae24af7dfce4007948bd984d7badfb704a2cda9a Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:25:49 -0800 Subject: [PATCH 190/504] mm: move per-vma lock into vm_area_struct Back when per-vma locks were introduces, vm_lock was moved out of vm_area_struct in [1] because of the performance regression caused by false cacheline sharing. Recent investigation [2] revealed that the regressions is limited to a rather old Broadwell microarchitecture and even there it can be mitigated by disabling adjacent cacheline prefetching, see [3]. 
Splitting single logical structure into multiple ones leads to more complicated management, extra pointer dereferences and overall less maintainable code. When that split-away part is a lock, it complicates things even further. With no performance benefits, there are no reasons for this split. Merging the vm_lock back into vm_area_struct also allows vm_area_struct to use SLAB_TYPESAFE_BY_RCU later in this patchset. Move vm_lock back into vm_area_struct, aligning it at the cacheline boundary and changing the cache to be cacheline-aligned as well. With kernel compiled using defconfig, this causes VMA memory consumption to grow from 160 (vm_area_struct) + 40 (vm_lock) bytes to 256 bytes: slabinfo before: ... : ... vma_lock ... 40 102 1 : ... vm_area_struct ... 160 51 2 : ... slabinfo after moving vm_lock: ... : ... vm_area_struct ... 256 32 2 : ... Aggregate VMA memory consumption per 1000 VMAs grows from 50 to 64 pages, which is 5.5MB per 100000 VMAs. Note that the size of this structure is dependent on the kernel configuration and typically the original size is higher than 160 bytes. Therefore these calculations are close to the worst case scenario. A more realistic vm_area_struct usage before this change is: ... : ... vma_lock ... 40 102 1 : ... vm_area_struct ... 176 46 2 : ... Aggregate VMA memory consumption per 1000 VMAs grows from 54 to 64 pages, which is 3.9MB per 100000 VMAs. This memory consumption growth can be addressed later by optimizing the vm_lock. [1] https://lore.kernel.org/all/20230227173632.3292573-34-surenb@google.com/ [2] https://lore.kernel.org/all/ZsQyI%2F087V34JoIt@xsang-OptiPlex-9020/ [3] https://lore.kernel.org/all/CAJuCfpEisU8Lfe96AYJDZ+OM4NoPmnw9bP53cT_kbfP_pR+-2g@mail.gmail.com/ Link: https://lkml.kernel.org/r/20250111042604.3230628-3-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Lorenzo Stoakes Reviewed-by: Shakeel Butt Reviewed-by: Vlastimil Babka Reviewed-by: Liam R. Howlett Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Lokesh Gidra Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Sourav Panda Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mm.h | 28 ++++++++++-------- include/linux/mm_types.h | 6 ++-- kernel/fork.c | 49 ++++---------------------------- tools/testing/vma/vma_internal.h | 33 +++++---------------- 4 files changed, 32 insertions(+), 84 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 3747f2e51139..f3a49b9b7d53 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -697,6 +697,12 @@ static inline void vma_numab_state_free(struct vm_area_struct *vma) {} #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_PER_VMA_LOCK +static inline void vma_lock_init(struct vm_area_struct *vma) +{ + init_rwsem(&vma->vm_lock.lock); + vma->vm_lock_seq = UINT_MAX; +} + /* * Try to read-lock a vma. 
The function is allowed to occasionally yield false * locked result to avoid performance overhead, in which case we fall back to @@ -714,7 +720,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma) if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence)) return false; - if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0)) + if (unlikely(down_read_trylock(&vma->vm_lock.lock) == 0)) return false; /* @@ -729,7 +735,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * This pairs with RELEASE semantics in vma_end_write_all(). */ if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) { - up_read(&vma->vm_lock->lock); + up_read(&vma->vm_lock.lock); return false; } return true; @@ -744,7 +750,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma) static inline void vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass) { mmap_assert_locked(vma->vm_mm); - down_read_nested(&vma->vm_lock->lock, subclass); + down_read_nested(&vma->vm_lock.lock, subclass); } /* @@ -756,13 +762,13 @@ static inline void vma_start_read_locked_nested(struct vm_area_struct *vma, int static inline void vma_start_read_locked(struct vm_area_struct *vma) { mmap_assert_locked(vma->vm_mm); - down_read(&vma->vm_lock->lock); + down_read(&vma->vm_lock.lock); } static inline void vma_end_read(struct vm_area_struct *vma) { rcu_read_lock(); /* keeps vma alive till the end of up_read */ - up_read(&vma->vm_lock->lock); + up_read(&vma->vm_lock.lock); rcu_read_unlock(); } @@ -791,7 +797,7 @@ static inline void vma_start_write(struct vm_area_struct *vma) if (__is_vma_write_locked(vma, &mm_lock_seq)) return; - down_write(&vma->vm_lock->lock); + down_write(&vma->vm_lock.lock); /* * We should use WRITE_ONCE() here because we can have concurrent reads * from the early lockless pessimistic check in vma_start_read(). @@ -799,7 +805,7 @@ static inline void vma_start_write(struct vm_area_struct *vma) * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy. */ WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); - up_write(&vma->vm_lock->lock); + up_write(&vma->vm_lock.lock); } static inline void vma_assert_write_locked(struct vm_area_struct *vma) @@ -811,7 +817,7 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma) static inline void vma_assert_locked(struct vm_area_struct *vma) { - if (!rwsem_is_locked(&vma->vm_lock->lock)) + if (!rwsem_is_locked(&vma->vm_lock.lock)) vma_assert_write_locked(vma); } @@ -844,6 +850,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, #else /* CONFIG_PER_VMA_LOCK */ +static inline void vma_lock_init(struct vm_area_struct *vma) {} static inline bool vma_start_read(struct vm_area_struct *vma) { return false; } static inline void vma_end_read(struct vm_area_struct *vma) {} @@ -878,10 +885,6 @@ static inline void assert_fault_locked(struct vm_fault *vmf) extern const struct vm_operations_struct vma_dummy_vm_ops; -/* - * WARNING: vma_init does not initialize vma->vm_lock. - * Use vm_area_alloc()/vm_area_free() if vma needs locking. 
- */ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) { memset(vma, 0, sizeof(*vma)); @@ -890,6 +893,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) INIT_LIST_HEAD(&vma->anon_vma_chain); vma_mark_detached(vma, false); vma_numab_state_init(vma); + vma_lock_init(vma); } /* Use when VMA is not part of the VMA tree and needs no locking */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 5f1b2dc788e2..6573d95f1d1e 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -730,8 +730,6 @@ struct vm_area_struct { * slowpath. */ unsigned int vm_lock_seq; - /* Unstable RCU readers are allowed to read this. */ - struct vma_lock *vm_lock; #endif /* @@ -784,6 +782,10 @@ struct vm_area_struct { struct vma_numab_state *numab_state; /* NUMA Balancing state */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +#ifdef CONFIG_PER_VMA_LOCK + /* Unstable RCU readers are allowed to read this. */ + struct vma_lock vm_lock ____cacheline_aligned_in_smp; +#endif } __randomize_layout; #ifdef CONFIG_NUMA diff --git a/kernel/fork.c b/kernel/fork.c index ded49f18cd95..40a8e615499f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -436,35 +436,6 @@ static struct kmem_cache *vm_area_cachep; /* SLAB cache for mm_struct structures (tsk->mm) */ static struct kmem_cache *mm_cachep; -#ifdef CONFIG_PER_VMA_LOCK - -/* SLAB cache for vm_area_struct.lock */ -static struct kmem_cache *vma_lock_cachep; - -static bool vma_lock_alloc(struct vm_area_struct *vma) -{ - vma->vm_lock = kmem_cache_alloc(vma_lock_cachep, GFP_KERNEL); - if (!vma->vm_lock) - return false; - - init_rwsem(&vma->vm_lock->lock); - vma->vm_lock_seq = UINT_MAX; - - return true; -} - -static inline void vma_lock_free(struct vm_area_struct *vma) -{ - kmem_cache_free(vma_lock_cachep, vma->vm_lock); -} - -#else /* CONFIG_PER_VMA_LOCK */ - -static inline bool vma_lock_alloc(struct vm_area_struct *vma) { return true; } -static inline void vma_lock_free(struct vm_area_struct *vma) {} - -#endif /* CONFIG_PER_VMA_LOCK */ - struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) { struct vm_area_struct *vma; @@ -474,10 +445,6 @@ struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) return NULL; vma_init(vma, mm); - if (!vma_lock_alloc(vma)) { - kmem_cache_free(vm_area_cachep, vma); - return NULL; - } return vma; } @@ -496,10 +463,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) * will be reinitialized. */ data_race(memcpy(new, orig, sizeof(*new))); - if (!vma_lock_alloc(new)) { - kmem_cache_free(vm_area_cachep, new); - return NULL; - } + vma_lock_init(new); INIT_LIST_HEAD(&new->anon_vma_chain); vma_numab_state_init(new); dup_anon_vma_name(orig, new); @@ -511,7 +475,6 @@ void __vm_area_free(struct vm_area_struct *vma) { vma_numab_state_free(vma); free_anon_vma_name(vma); - vma_lock_free(vma); kmem_cache_free(vm_area_cachep, vma); } @@ -522,7 +485,7 @@ static void vm_area_free_rcu_cb(struct rcu_head *head) vm_rcu); /* The vma should not be locked while being destroyed. 
*/ - VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock->lock), vma); + VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock.lock), vma); __vm_area_free(vma); } #endif @@ -3188,11 +3151,9 @@ void __init proc_caches_init(void) sizeof(struct fs_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); - - vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); -#ifdef CONFIG_PER_VMA_LOCK - vma_lock_cachep = KMEM_CACHE(vma_lock, SLAB_PANIC|SLAB_ACCOUNT); -#endif + vm_area_cachep = KMEM_CACHE(vm_area_struct, + SLAB_HWCACHE_ALIGN|SLAB_NO_MERGE|SLAB_PANIC| + SLAB_ACCOUNT); mmap_init(); nsproxy_cache_init(); } diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index ae635eecbfa8..d19ce6fcab83 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -270,10 +270,10 @@ struct vm_area_struct { /* * Can only be written (using WRITE_ONCE()) while holding both: * - mmap_lock (in write mode) - * - vm_lock->lock (in write mode) + * - vm_lock.lock (in write mode) * Can be read reliably while holding one of: * - mmap_lock (in read or write mode) - * - vm_lock->lock (in read or write mode) + * - vm_lock.lock (in read or write mode) * Can be read unreliably (using READ_ONCE()) for pessimistic bailout * while holding nothing (except RCU to keep the VMA struct allocated). * @@ -282,7 +282,7 @@ struct vm_area_struct { * slowpath. */ unsigned int vm_lock_seq; - struct vma_lock *vm_lock; + struct vma_lock vm_lock; #endif /* @@ -459,17 +459,10 @@ static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi) return mas_find(&vmi->mas, ULONG_MAX); } -static inline bool vma_lock_alloc(struct vm_area_struct *vma) +static inline void vma_lock_init(struct vm_area_struct *vma) { - vma->vm_lock = calloc(1, sizeof(struct vma_lock)); - - if (!vma->vm_lock) - return false; - - init_rwsem(&vma->vm_lock->lock); + init_rwsem(&vma->vm_lock.lock); vma->vm_lock_seq = UINT_MAX; - - return true; } static inline void vma_assert_write_locked(struct vm_area_struct *); @@ -492,6 +485,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); vma_mark_detached(vma, false); + vma_lock_init(vma); } static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) @@ -502,10 +496,6 @@ static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) return NULL; vma_init(vma, mm); - if (!vma_lock_alloc(vma)) { - free(vma); - return NULL; - } return vma; } @@ -518,10 +508,7 @@ static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) return NULL; memcpy(new, orig, sizeof(*new)); - if (!vma_lock_alloc(new)) { - free(new); - return NULL; - } + vma_lock_init(new); INIT_LIST_HEAD(&new->anon_vma_chain); return new; @@ -691,14 +678,8 @@ static inline void mpol_put(struct mempolicy *) { } -static inline void vma_lock_free(struct vm_area_struct *vma) -{ - free(vma->vm_lock); -} - static inline void __vm_area_free(struct vm_area_struct *vma) { - vma_lock_free(vma); free(vma); } From 2e3fecb2725e5c5e34233895528ca15e62aaf798 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:25:50 -0800 Subject: [PATCH 191/504] mm: mark vma as detached until it's added into vma tree Current implementation does not set detached flag when a VMA is first allocated. This does not represent the real state of the VMA, which is detached until it is added into mm's VMA tree. 
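For illustration only, a minimal user-space model of that invariant (a sketch with made-up names, not kernel code): a VMA starts out detached, becomes attached only when it is inserted into the tree, and must be detached again before it is freed.

        #include <assert.h>
        #include <stdbool.h>
        #include <stdlib.h>

        struct vma_model {
                bool detached;
        };

        static struct vma_model *vma_model_alloc(void)
        {
                struct vma_model *vma = calloc(1, sizeof(*vma));

                if (vma)
                        vma->detached = true;   /* newly allocated: not in any tree yet */
                return vma;
        }

        static void vma_model_attach(struct vma_model *vma)
        {
                assert(vma->detached);          /* attach only a detached vma */
                vma->detached = false;          /* models insertion into the vma tree */
        }

        static void vma_model_detach(struct vma_model *vma)
        {
                assert(!vma->detached);         /* detach only an attached vma */
                vma->detached = true;           /* models removal from the vma tree */
        }

        int main(void)
        {
                struct vma_model *vma = vma_model_alloc();

                if (!vma)
                        return 1;
                vma_model_attach(vma);          /* corresponds to storing the vma in the tree */
                vma_model_detach(vma);          /* corresponds to the unmap/removal path */
                free(vma);                      /* freed only while detached */
                return 0;
        }

The patch expresses this in the kernel with vma_mark_attached() (introduced here) and vma_mark_detached() on struct vm_area_struct.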
Fix this by marking new VMAs as detached and resetting detached flag only after VMA is added into a tree. Introduce vma_mark_attached() to make the API more readable and to simplify possible future cleanup when vma->vm_mm might be used to indicate detached vma and vma_mark_attached() will need an additional mm parameter. Link: https://lkml.kernel.org/r/20250111042604.3230628-4-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Shakeel Butt Reviewed-by: Lorenzo Stoakes Reviewed-by: Vlastimil Babka Reviewed-by: Liam R. Howlett Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Lokesh Gidra Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Sourav Panda Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mm.h | 27 ++++++++++++++++++++------- kernel/fork.c | 4 ++++ mm/memory.c | 2 +- mm/vma.c | 6 +++--- mm/vma.h | 2 ++ tools/testing/vma/vma_internal.h | 17 ++++++++++++----- 6 files changed, 42 insertions(+), 16 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index f3a49b9b7d53..3632214d0a1e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -821,12 +821,21 @@ static inline void vma_assert_locked(struct vm_area_struct *vma) vma_assert_write_locked(vma); } -static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) +static inline void vma_mark_attached(struct vm_area_struct *vma) +{ + vma->detached = false; +} + +static inline void vma_mark_detached(struct vm_area_struct *vma) { /* When detaching vma should be write-locked */ - if (detached) - vma_assert_write_locked(vma); - vma->detached = detached; + vma_assert_write_locked(vma); + vma->detached = true; +} + +static inline bool is_vma_detached(struct vm_area_struct *vma) +{ + return vma->detached; } static inline void release_fault_lock(struct vm_fault *vmf) @@ -857,8 +866,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) {} static inline void vma_start_write(struct vm_area_struct *vma) {} static inline void vma_assert_write_locked(struct vm_area_struct *vma) { mmap_assert_write_locked(vma->vm_mm); } -static inline void vma_mark_detached(struct vm_area_struct *vma, - bool detached) {} +static inline void vma_mark_attached(struct vm_area_struct *vma) {} +static inline void vma_mark_detached(struct vm_area_struct *vma) {} static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, unsigned long address) @@ -891,7 +900,10 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) vma->vm_mm = mm; vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); - vma_mark_detached(vma, false); +#ifdef CONFIG_PER_VMA_LOCK + /* vma is not locked, can't use vma_mark_detached() */ + vma->detached = true; +#endif vma_numab_state_init(vma); vma_lock_init(vma); } @@ -1086,6 +1098,7 @@ static inline int vma_iter_bulk_store(struct vma_iterator *vmi, if (unlikely(mas_is_err(&vmi->mas))) return -ENOMEM; + vma_mark_attached(vma); return 0; } diff --git a/kernel/fork.c b/kernel/fork.c index 40a8e615499f..f2f9e7b427ad 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -465,6 +465,10 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) data_race(memcpy(new, orig, sizeof(*new))); vma_lock_init(new); 
INIT_LIST_HEAD(&new->anon_vma_chain); +#ifdef CONFIG_PER_VMA_LOCK + /* vma is not locked, can't use vma_mark_detached() */ + new->detached = true; +#endif vma_numab_state_init(new); dup_anon_vma_name(orig, new); diff --git a/mm/memory.c b/mm/memory.c index 2a20e3810534..d0dee2282325 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6349,7 +6349,7 @@ retry: goto inval; /* Check if the VMA got isolated after we found it */ - if (vma->detached) { + if (is_vma_detached(vma)) { vma_end_read(vma); count_vm_vma_lock_event(VMA_LOCK_MISS); /* The area was replaced with another one */ diff --git a/mm/vma.c b/mm/vma.c index 0caaeea899a9..476146c25283 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -327,7 +327,7 @@ static void vma_complete(struct vma_prepare *vp, struct vma_iterator *vmi, if (vp->remove) { again: - vma_mark_detached(vp->remove, true); + vma_mark_detached(vp->remove); if (vp->file) { uprobe_munmap(vp->remove, vp->remove->vm_start, vp->remove->vm_end); @@ -1220,7 +1220,7 @@ static void reattach_vmas(struct ma_state *mas_detach) mas_set(mas_detach, 0); mas_for_each(mas_detach, vma, ULONG_MAX) - vma_mark_detached(vma, false); + vma_mark_attached(vma); __mt_destroy(mas_detach->tree); } @@ -1295,7 +1295,7 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms, if (error) goto munmap_gather_failed; - vma_mark_detached(next, true); + vma_mark_detached(next); nrpages = vma_pages(next); vms->nr_pages += nrpages; diff --git a/mm/vma.h b/mm/vma.h index 61ed044b6145..24636a2b0acf 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -157,6 +157,7 @@ static inline int vma_iter_store_gfp(struct vma_iterator *vmi, if (unlikely(mas_is_err(&vmi->mas))) return -ENOMEM; + vma_mark_attached(vma); return 0; } @@ -389,6 +390,7 @@ static inline void vma_iter_store(struct vma_iterator *vmi, __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); mas_store_prealloc(&vmi->mas, vma); + vma_mark_attached(vma); } static inline unsigned long vma_iter_addr(struct vma_iterator *vmi) diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index d19ce6fcab83..2a624f9304da 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -465,13 +465,17 @@ static inline void vma_lock_init(struct vm_area_struct *vma) vma->vm_lock_seq = UINT_MAX; } +static inline void vma_mark_attached(struct vm_area_struct *vma) +{ + vma->detached = false; +} + static inline void vma_assert_write_locked(struct vm_area_struct *); -static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached) +static inline void vma_mark_detached(struct vm_area_struct *vma) { /* When detaching vma should be write-locked */ - if (detached) - vma_assert_write_locked(vma); - vma->detached = detached; + vma_assert_write_locked(vma); + vma->detached = true; } extern const struct vm_operations_struct vma_dummy_vm_ops; @@ -484,7 +488,8 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) vma->vm_mm = mm; vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); - vma_mark_detached(vma, false); + /* vma is not locked, can't use vma_mark_detached() */ + vma->detached = true; vma_lock_init(vma); } @@ -510,6 +515,8 @@ static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) memcpy(new, orig, sizeof(*new)); vma_lock_init(new); INIT_LIST_HEAD(&new->anon_vma_chain); + /* vma is not locked, can't use vma_mark_detached() */ + new->detached = true; return new; } From 485d5cce88f9cd3e1d97381c6db6436604910e6c Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan 
Date: Fri, 10 Jan 2025 20:25:51 -0800 Subject: [PATCH 192/504] mm: introduce vma_iter_store_attached() to use with attached vmas vma_iter_store() functions can be used both when adding a new vma and when updating an existing one. However for existing ones we do not need to mark them attached as they are already marked that way. Introduce vma_iter_store_attached() to be used with already attached vmas. Link: https://lkml.kernel.org/r/20250111042604.3230628-5-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mm.h | 12 ++++++++++++ mm/vma.c | 8 ++++---- mm/vma.h | 11 +++++++++-- 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 3632214d0a1e..554c379592b9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -821,6 +821,16 @@ static inline void vma_assert_locked(struct vm_area_struct *vma) vma_assert_write_locked(vma); } +static inline void vma_assert_attached(struct vm_area_struct *vma) +{ + VM_BUG_ON_VMA(vma->detached, vma); +} + +static inline void vma_assert_detached(struct vm_area_struct *vma) +{ + VM_BUG_ON_VMA(!vma->detached, vma); +} + static inline void vma_mark_attached(struct vm_area_struct *vma) { vma->detached = false; @@ -866,6 +876,8 @@ static inline void vma_end_read(struct vm_area_struct *vma) {} static inline void vma_start_write(struct vm_area_struct *vma) {} static inline void vma_assert_write_locked(struct vm_area_struct *vma) { mmap_assert_write_locked(vma->vm_mm); } +static inline void vma_assert_attached(struct vm_area_struct *vma) {} +static inline void vma_assert_detached(struct vm_area_struct *vma) {} static inline void vma_mark_attached(struct vm_area_struct *vma) {} static inline void vma_mark_detached(struct vm_area_struct *vma) {} diff --git a/mm/vma.c b/mm/vma.c index 476146c25283..eb74e1ebed8d 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -660,14 +660,14 @@ static int commit_merge(struct vma_merge_struct *vmg, vma_set_range(vmg->vma, vmg->start, vmg->end, vmg->pgoff); if (expanded) - vma_iter_store(vmg->vmi, vmg->vma); + vma_iter_store_attached(vmg->vmi, vmg->vma); if (adj_start) { adjust->vm_start += adj_start; adjust->vm_pgoff += PHYS_PFN(adj_start); if (adj_start < 0) { WARN_ON(expanded); - vma_iter_store(vmg->vmi, adjust); + vma_iter_store_attached(vmg->vmi, adjust); } } @@ -2785,7 +2785,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address) anon_vma_interval_tree_pre_update_vma(vma); vma->vm_end = address; /* Overwrite old entry in mtree. */ - vma_iter_store(&vmi, vma); + vma_iter_store_attached(&vmi, vma); anon_vma_interval_tree_post_update_vma(vma); perf_event_mmap(vma); @@ -2865,7 +2865,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address) vma->vm_start = address; vma->vm_pgoff -= grow; /* Overwrite old entry in mtree. 
*/ - vma_iter_store(&vmi, vma); + vma_iter_store_attached(&vmi, vma); anon_vma_interval_tree_post_update_vma(vma); perf_event_mmap(vma); diff --git a/mm/vma.h b/mm/vma.h index 24636a2b0acf..bf2be39ab046 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -365,9 +365,10 @@ static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi) } /* Store a VMA with preallocated memory */ -static inline void vma_iter_store(struct vma_iterator *vmi, - struct vm_area_struct *vma) +static inline void vma_iter_store_attached(struct vma_iterator *vmi, + struct vm_area_struct *vma) { + vma_assert_attached(vma); #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start && @@ -390,7 +391,13 @@ static inline void vma_iter_store(struct vma_iterator *vmi, __mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1); mas_store_prealloc(&vmi->mas, vma); +} + +static inline void vma_iter_store(struct vma_iterator *vmi, + struct vm_area_struct *vma) +{ vma_mark_attached(vma); + vma_iter_store_attached(vmi, vma); } static inline unsigned long vma_iter_addr(struct vma_iterator *vmi) From 9605dd573587d69d663335f72b8f70524a07fe09 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:25:52 -0800 Subject: [PATCH 193/504] mm: mark vmas detached upon exit When exit_mmap() removes vmas belonging to an exiting task, it does not mark them as detached since they can't be reached by other tasks and they will be freed shortly. Once we introduce vma reuse, all vmas will have to be in a detached state before they are freed to ensure a vma, when reused, is in a consistent state. Add missing vma_mark_detached() before freeing the vma. Link: https://lkml.kernel.org/r/20250111042604.3230628-6-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Signed-off-by: Andrew Morton --- mm/vma.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mm/vma.c b/mm/vma.c index eb74e1ebed8d..9299fdf7ef39 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -413,10 +413,12 @@ void remove_vma(struct vm_area_struct *vma, bool unreachable) if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); - if (unreachable) + if (unreachable) { + vma_mark_detached(vma); __vm_area_free(vma); - else + } else { vm_area_free(vma); + } } /* From cc533c591fe42602517a8e057837c96c23037ac7 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:25:53 -0800 Subject: [PATCH 194/504] types: move struct rcuwait into types.h Move rcuwait struct definition into types.h so that rcuwait can be used without including rcuwait.h which includes other headers. Without this change mm_types.h can't use rcuwait due to the following circular dependency: mm_types.h -> rcuwait.h -> signal.h -> mm_types.h Link: https://lkml.kernel.org/r/20250111042604.3230628-7-surenb@google.com Suggested-by: Matthew Wilcox Signed-off-by: Suren Baghdasaryan Acked-by: Davidlohr Bueso Acked-by: Liam R.
Howlett Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Vlastimil Babka Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/rcuwait.h | 13 +------------ include/linux/types.h | 12 ++++++++++++ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h index 27343424225c..9ad134a04b41 100644 --- a/include/linux/rcuwait.h +++ b/include/linux/rcuwait.h @@ -4,18 +4,7 @@ #include #include - -/* - * rcuwait provides a way of blocking and waking up a single - * task in an rcu-safe manner. - * - * The only time @task is non-nil is when a user is blocked (or - * checking if it needs to) on a condition, and reset as soon as we - * know that the condition has succeeded and are awoken. - */ -struct rcuwait { - struct task_struct __rcu *task; -}; +#include #define __RCUWAIT_INITIALIZER(name) \ { .task = NULL, } diff --git a/include/linux/types.h b/include/linux/types.h index 2d7b9ae8714c..f1356a9a5730 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -248,5 +248,17 @@ typedef void (*swap_func_t)(void *a, void *b, int size); typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv); typedef int (*cmp_func_t)(const void *a, const void *b); +/* + * rcuwait provides a way of blocking and waking up a single + * task in an rcu-safe manner. + * + * The only time @task is non-nil is when a user is blocked (or + * checking if it needs to) on a condition, and reset as soon as we + * know that the condition has succeeded and are awoken. + */ +struct rcuwait { + struct task_struct __rcu *task; +}; + #endif /* __ASSEMBLY__ */ #endif /* _LINUX_TYPES_H */ From 933227a439223f2dda9a54a551f20a99ddd95b12 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:25:54 -0800 Subject: [PATCH 195/504] mm: allow vma_start_read_locked/vma_start_read_locked_nested to fail With upcoming replacement of vm_lock with vm_refcnt, we need to handle a possibility of vma_start_read_locked/vma_start_read_locked_nested failing due to refcount overflow. Prepare for such possibility by changing these APIs and adjusting their users. Link: https://lkml.kernel.org/r/20250111042604.3230628-8-surenb@google.com Signed-off-by: Suren Baghdasaryan Acked-by: Vlastimil Babka Cc: Lokesh Gidra Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Liam R. Howlett Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . 
McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mm.h | 6 ++++-- mm/userfaultfd.c | 18 +++++++++++++----- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 554c379592b9..d03ae8e9848e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -747,10 +747,11 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * not be used in such cases because it might fail due to mm_lock_seq overflow. * This functionality is used to obtain vma read lock and drop the mmap read lock. */ -static inline void vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass) +static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass) { mmap_assert_locked(vma->vm_mm); down_read_nested(&vma->vm_lock.lock, subclass); + return true; } /* @@ -759,10 +760,11 @@ static inline void vma_start_read_locked_nested(struct vm_area_struct *vma, int * not be used in such cases because it might fail due to mm_lock_seq overflow. * This functionality is used to obtain vma read lock and drop the mmap read lock. */ -static inline void vma_start_read_locked(struct vm_area_struct *vma) +static inline bool vma_start_read_locked(struct vm_area_struct *vma) { mmap_assert_locked(vma->vm_mm); down_read(&vma->vm_lock.lock); + return true; } static inline void vma_end_read(struct vm_area_struct *vma) diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 4527c385935b..411a663932c4 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -85,7 +85,8 @@ static struct vm_area_struct *uffd_lock_vma(struct mm_struct *mm, mmap_read_lock(mm); vma = find_vma_and_prepare_anon(mm, address); if (!IS_ERR(vma)) - vma_start_read_locked(vma); + if (!vma_start_read_locked(vma)) + vma = ERR_PTR(-EAGAIN); mmap_read_unlock(mm); return vma; @@ -1483,10 +1484,17 @@ static int uffd_move_lock(struct mm_struct *mm, mmap_read_lock(mm); err = find_vmas_mm_locked(mm, dst_start, src_start, dst_vmap, src_vmap); if (!err) { - vma_start_read_locked(*dst_vmap); - if (*dst_vmap != *src_vmap) - vma_start_read_locked_nested(*src_vmap, - SINGLE_DEPTH_NESTING); + if (vma_start_read_locked(*dst_vmap)) { + if (*dst_vmap != *src_vmap) { + if (!vma_start_read_locked_nested(*src_vmap, + SINGLE_DEPTH_NESTING)) { + vma_end_read(*dst_vmap); + err = -EAGAIN; + } + } + } else { + err = -EAGAIN; + } } mmap_read_unlock(mm); return err; From 7056ad9fb81ca64e87b354cdaf6c3c9a4c767a77 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:25:55 -0800 Subject: [PATCH 196/504] mm: move mmap_init_lock() out of the header file mmap_init_lock() is used only from mm_init() in fork.c, therefore it does not have to reside in the header file. This move lets us avoid including additional headers in mmap_lock.h later, when mmap_init_lock() needs to initialize rcuwait object. Link: https://lkml.kernel.org/r/20250111042604.3230628-9-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . 
McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mmap_lock.h | 6 ------ kernel/fork.c | 6 ++++++ 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 45a21faa3ff6..4706c6769902 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -122,12 +122,6 @@ static inline bool mmap_lock_speculate_retry(struct mm_struct *mm, unsigned int #endif /* CONFIG_PER_VMA_LOCK */ -static inline void mmap_init_lock(struct mm_struct *mm) -{ - init_rwsem(&mm->mmap_lock); - mm_lock_seqcount_init(mm); -} - static inline void mmap_write_lock(struct mm_struct *mm) { __mmap_lock_trace_start_locking(mm, true); diff --git a/kernel/fork.c b/kernel/fork.c index f2f9e7b427ad..d4c75428ccaf 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1219,6 +1219,12 @@ static void mm_init_uprobes_state(struct mm_struct *mm) #endif } +static inline void mmap_init_lock(struct mm_struct *mm) +{ + init_rwsem(&mm->mmap_lock); + mm_lock_seqcount_init(mm); +} + static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, struct user_namespace *user_ns) { From a9e943490c1a7d17669ed98a8c103d774324f2f9 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:25:56 -0800 Subject: [PATCH 197/504] mm: uninline the main body of vma_start_write() vma_start_write() is used in many places and will grow in size very soon. It is not used in performance critical paths and uninlining it should limit the future code size growth. No functional changes. Link: https://lkml.kernel.org/r/20250111042604.3230628-10-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mm.h | 12 +++--------- mm/memory.c | 14 ++++++++++++++ 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index d03ae8e9848e..dd15c36cd850 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -787,6 +787,8 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, unsigned int *mm_l return (vma->vm_lock_seq == *mm_lock_seq); } +void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq); + /* * Begin writing to a VMA. * Exclude concurrent readers under the per-VMA lock until the currently @@ -799,15 +801,7 @@ static inline void vma_start_write(struct vm_area_struct *vma) if (__is_vma_write_locked(vma, &mm_lock_seq)) return; - down_write(&vma->vm_lock.lock); - /* - * We should use WRITE_ONCE() here because we can have concurrent reads - * from the early lockless pessimistic check in vma_start_read(). - * We don't really care about the correctness of that early check, but - * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy. 
- */ - WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); - up_write(&vma->vm_lock.lock); + __vma_start_write(vma, mm_lock_seq); } static inline void vma_assert_write_locked(struct vm_area_struct *vma) diff --git a/mm/memory.c b/mm/memory.c index d0dee2282325..236fdecd44d6 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6328,6 +6328,20 @@ fail: #endif #ifdef CONFIG_PER_VMA_LOCK +void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq) +{ + down_write(&vma->vm_lock.lock); + /* + * We should use WRITE_ONCE() here because we can have concurrent reads + * from the early lockless pessimistic check in vma_start_read(). + * We don't really care about the correctness of that early check, but + * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy. + */ + WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); + up_write(&vma->vm_lock.lock); +} +EXPORT_SYMBOL_GPL(__vma_start_write); + /* * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be * stable and not isolated. If the VMA is not found or is being modified the From 432713f6ac075c034fea82dec4811061d30010e5 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:25:57 -0800 Subject: [PATCH 198/504] refcount: introduce __refcount_{add|inc}_not_zero_limited Introduce functions to increase refcount but with a top limit above which they will fail to increase (the limit is inclusive). Setting the limit to INT_MAX indicates no limit. Link: https://lkml.kernel.org/r/20250111042604.3230628-11-surenb@google.com Signed-off-by: Suren Baghdasaryan Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . 
McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Vlastimil Babka Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/refcount.h | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/include/linux/refcount.h b/include/linux/refcount.h index 35f039ecb272..5072ba99f05e 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h @@ -137,13 +137,23 @@ static inline unsigned int refcount_read(const refcount_t *r) } static inline __must_check __signed_wrap -bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp) +bool __refcount_add_not_zero_limited(int i, refcount_t *r, int *oldp, + int limit) { int old = refcount_read(r); do { if (!old) break; + + if (statically_true(limit == INT_MAX)) + continue; + + if (i > limit - old) { + if (oldp) + *oldp = old; + return false; + } } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i)); if (oldp) @@ -155,6 +165,12 @@ bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp) return old; } +static inline __must_check __signed_wrap +bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp) +{ + return __refcount_add_not_zero_limited(i, r, oldp, INT_MAX); +} + /** * refcount_add_not_zero - add a value to a refcount unless it is 0 * @i: the value to add to the refcount @@ -213,6 +229,12 @@ static inline void refcount_add(int i, refcount_t *r) __refcount_add(i, r, NULL); } +static inline __must_check bool __refcount_inc_not_zero_limited(refcount_t *r, + int *oldp, int limit) +{ + return __refcount_add_not_zero_limited(1, r, oldp, limit); +} + static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp) { return __refcount_add_not_zero(1, r, oldp); From e3067b240da299234c7d555f103d63c9b0b9157a Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:25:58 -0800 Subject: [PATCH 199/504] mm: replace vm_lock and detached flag with a reference count rw_semaphore is a sizable structure of 40 bytes and consumes considerable space for each vm_area_struct. However vma_lock has two important specifics which can be used to replace rw_semaphore with a simpler structure: 1. Readers never wait. They try to take the vma_lock and fall back to mmap_lock if that fails. 2. Only one writer at a time will ever try to write-lock a vma_lock because writers first take mmap_lock in write mode. Because of these requirements, full rw_semaphore functionality is not needed and we can replace rw_semaphore and the vma->detached flag with a refcount (vm_refcnt). When vma is in detached state, vm_refcnt is 0 and only a call to vma_mark_attached() can take it out of this state. Note that unlike before, now we enforce both vma_mark_attached() and vma_mark_detached() to be done only after vma has been write-locked. vma_mark_attached() changes vm_refcnt to 1 to indicate that it has been attached to the vma tree. When a reader takes read lock, it increments vm_refcnt, unless the top usable bit of vm_refcnt (0x40000000) is set, indicating presence of a writer. When writer takes write lock, it sets the top usable bit to indicate its presence. If there are readers, writer will wait using newly introduced mm->vma_writer_wait. Since all writers take mmap_lock in write mode first, there can be only one writer at a time. The last reader to release the lock will signal the writer to wake up. refcount might overflow if there are many competing readers, in which case read-locking will fail. Readers are expected to handle such failures. In summary: 1. 
all readers increment the vm_refcnt; 2. writer sets top usable (writer) bit of vm_refcnt; 3. readers cannot increment the vm_refcnt if the writer bit is set; 4. in the presence of readers, writer must wait for the vm_refcnt to drop to 1 (ignoring the writer bit), indicating an attached vma with no readers; 5. vm_refcnt overflow is handled by the readers. While this vm_lock replacement does not yet result in a smaller vm_area_struct (it stays at 256 bytes due to cacheline alignment), it allows for further size optimization by structure member regrouping to bring the size of vm_area_struct below 192 bytes. Link: https://lkml.kernel.org/r/20250111042604.3230628-12-surenb@google.com Signed-off-by: Suren Baghdasaryan Suggested-by: Peter Zijlstra Suggested-by: Matthew Wilcox Reviewed-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mm.h | 102 +++++++++++++++++++++---------- include/linux/mm_types.h | 22 +++---- kernel/fork.c | 13 ++-- mm/init-mm.c | 1 + mm/memory.c | 80 +++++++++++++++++++++--- tools/testing/vma/linux/atomic.h | 5 ++ tools/testing/vma/vma_internal.h | 66 +++++++++++--------- 7 files changed, 198 insertions(+), 91 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index dd15c36cd850..cac0877a04e1 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -32,6 +32,7 @@ #include #include #include +#include struct mempolicy; struct anon_vma; @@ -697,12 +698,43 @@ static inline void vma_numab_state_free(struct vm_area_struct *vma) {} #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_PER_VMA_LOCK -static inline void vma_lock_init(struct vm_area_struct *vma) +static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) { - init_rwsem(&vma->vm_lock.lock); +#ifdef CONFIG_DEBUG_LOCK_ALLOC + static struct lock_class_key lockdep_key; + + lockdep_init_map(&vma->vmlock_dep_map, "vm_lock", &lockdep_key, 0); +#endif + if (reset_refcnt) + refcount_set(&vma->vm_refcnt, 0); vma->vm_lock_seq = UINT_MAX; } +static inline bool is_vma_writer_only(int refcnt) +{ + /* + * With a writer and no readers, refcnt is VMA_LOCK_OFFSET if the vma + * is detached and (VMA_LOCK_OFFSET + 1) if it is attached. Waiting on + * a detached vma happens only in vma_mark_detached() and is a rare + * case, therefore most of the time there will be no unnecessary wakeup. + */ + return refcnt & VMA_LOCK_OFFSET && refcnt <= VMA_LOCK_OFFSET + 1; +} + +static inline void vma_refcount_put(struct vm_area_struct *vma) +{ + /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */ + struct mm_struct *mm = vma->vm_mm; + int oldcnt; + + rwsem_release(&vma->vmlock_dep_map, _RET_IP_); + if (!__refcount_dec_and_test(&vma->vm_refcnt, &oldcnt)) { + + if (is_vma_writer_only(oldcnt - 1)) + rcuwait_wake_up(&mm->vma_writer_wait); + } +} + /* * Try to read-lock a vma. 
The function is allowed to occasionally yield false * locked result to avoid performance overhead, in which case we fall back to @@ -710,6 +742,8 @@ static inline void vma_lock_init(struct vm_area_struct *vma) */ static inline bool vma_start_read(struct vm_area_struct *vma) { + int oldcnt; + /* * Check before locking. A race might cause false locked result. * We can use READ_ONCE() for the mm_lock_seq here, and don't need @@ -720,13 +754,19 @@ static inline bool vma_start_read(struct vm_area_struct *vma) if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence)) return false; - if (unlikely(down_read_trylock(&vma->vm_lock.lock) == 0)) + /* + * If VMA_LOCK_OFFSET is set, __refcount_inc_not_zero_limited() will fail + * because VMA_REF_LIMIT is less than VMA_LOCK_OFFSET. + */ + if (unlikely(!__refcount_inc_not_zero_limited(&vma->vm_refcnt, &oldcnt, + VMA_REF_LIMIT))) return false; + rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_); /* - * Overflow might produce false locked result. + * Overflow of vm_lock_seq/mm_lock_seq might produce false locked result. * False unlocked result is impossible because we modify and check - * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq + * vma->vm_lock_seq under vma->vm_refcnt protection and mm->mm_lock_seq * modification invalidates all existing locks. * * We must use ACQUIRE semantics for the mm_lock_seq so that if we are @@ -735,9 +775,10 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * This pairs with RELEASE semantics in vma_end_write_all(). */ if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) { - up_read(&vma->vm_lock.lock); + vma_refcount_put(vma); return false; } + return true; } @@ -749,8 +790,14 @@ static inline bool vma_start_read(struct vm_area_struct *vma) */ static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int subclass) { + int oldcnt; + mmap_assert_locked(vma->vm_mm); - down_read_nested(&vma->vm_lock.lock, subclass); + if (unlikely(!__refcount_inc_not_zero_limited(&vma->vm_refcnt, &oldcnt, + VMA_REF_LIMIT))) + return false; + + rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_); return true; } @@ -762,16 +809,12 @@ static inline bool vma_start_read_locked_nested(struct vm_area_struct *vma, int */ static inline bool vma_start_read_locked(struct vm_area_struct *vma) { - mmap_assert_locked(vma->vm_mm); - down_read(&vma->vm_lock.lock); - return true; + return vma_start_read_locked_nested(vma, 0); } static inline void vma_end_read(struct vm_area_struct *vma) { - rcu_read_lock(); /* keeps vma alive till the end of up_read */ - up_read(&vma->vm_lock.lock); - rcu_read_unlock(); + vma_refcount_put(vma); } /* WARNING! Can only be used if mmap_lock is expected to be write-locked */ @@ -813,36 +856,33 @@ static inline void vma_assert_write_locked(struct vm_area_struct *vma) static inline void vma_assert_locked(struct vm_area_struct *vma) { - if (!rwsem_is_locked(&vma->vm_lock.lock)) + if (refcount_read(&vma->vm_refcnt) <= 1) vma_assert_write_locked(vma); } +/* + * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these + * assertions should be made either under mmap_write_lock or when the object + * has been isolated under mmap_write_lock, ensuring no competing writers. 
+ */ static inline void vma_assert_attached(struct vm_area_struct *vma) { - VM_BUG_ON_VMA(vma->detached, vma); + VM_BUG_ON_VMA(!refcount_read(&vma->vm_refcnt), vma); } static inline void vma_assert_detached(struct vm_area_struct *vma) { - VM_BUG_ON_VMA(!vma->detached, vma); + VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt), vma); } static inline void vma_mark_attached(struct vm_area_struct *vma) { - vma->detached = false; -} - -static inline void vma_mark_detached(struct vm_area_struct *vma) -{ - /* When detaching vma should be write-locked */ vma_assert_write_locked(vma); - vma->detached = true; + vma_assert_detached(vma); + refcount_set(&vma->vm_refcnt, 1); } -static inline bool is_vma_detached(struct vm_area_struct *vma) -{ - return vma->detached; -} +void vma_mark_detached(struct vm_area_struct *vma); static inline void release_fault_lock(struct vm_fault *vmf) { @@ -865,7 +905,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, #else /* CONFIG_PER_VMA_LOCK */ -static inline void vma_lock_init(struct vm_area_struct *vma) {} +static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {} static inline bool vma_start_read(struct vm_area_struct *vma) { return false; } static inline void vma_end_read(struct vm_area_struct *vma) {} @@ -908,12 +948,8 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) vma->vm_mm = mm; vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); -#ifdef CONFIG_PER_VMA_LOCK - /* vma is not locked, can't use vma_mark_detached() */ - vma->detached = true; -#endif vma_numab_state_init(vma); - vma_lock_init(vma); + vma_lock_init(vma, false); } /* Use when VMA is not part of the VMA tree and needs no locking */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 6573d95f1d1e..9228d19662c6 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -19,6 +19,7 @@ #include #include #include +#include #include @@ -629,9 +630,8 @@ static inline struct anon_vma_name *anon_vma_name_alloc(const char *name) } #endif -struct vma_lock { - struct rw_semaphore lock; -}; +#define VMA_LOCK_OFFSET 0x40000000 +#define VMA_REF_LIMIT (VMA_LOCK_OFFSET - 1) struct vma_numab_state { /* @@ -709,19 +709,13 @@ struct vm_area_struct { }; #ifdef CONFIG_PER_VMA_LOCK - /* - * Flag to indicate areas detached from the mm->mm_mt tree. - * Unstable RCU readers are allowed to read this. - */ - bool detached; - /* * Can only be written (using WRITE_ONCE()) while holding both: * - mmap_lock (in write mode) - * - vm_lock->lock (in write mode) + * - vm_refcnt bit at VMA_LOCK_OFFSET is set * Can be read reliably while holding one of: * - mmap_lock (in read or write mode) - * - vm_lock->lock (in read or write mode) + * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout * while holding nothing (except RCU to keep the VMA struct allocated). * @@ -784,7 +778,10 @@ struct vm_area_struct { struct vm_userfaultfd_ctx vm_userfaultfd_ctx; #ifdef CONFIG_PER_VMA_LOCK /* Unstable RCU readers are allowed to read this. 
*/ - struct vma_lock vm_lock ____cacheline_aligned_in_smp; + refcount_t vm_refcnt ____cacheline_aligned_in_smp; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map vmlock_dep_map; +#endif #endif } __randomize_layout; @@ -919,6 +916,7 @@ struct mm_struct { * by mmlist_lock */ #ifdef CONFIG_PER_VMA_LOCK + struct rcuwait vma_writer_wait; /* * This field has lock-like semantics, meaning it is sometimes * accessed with ACQUIRE/RELEASE semantics. diff --git a/kernel/fork.c b/kernel/fork.c index d4c75428ccaf..9d9275783cf8 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -463,12 +463,8 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) * will be reinitialized. */ data_race(memcpy(new, orig, sizeof(*new))); - vma_lock_init(new); + vma_lock_init(new, true); INIT_LIST_HEAD(&new->anon_vma_chain); -#ifdef CONFIG_PER_VMA_LOCK - /* vma is not locked, can't use vma_mark_detached() */ - new->detached = true; -#endif vma_numab_state_init(new); dup_anon_vma_name(orig, new); @@ -477,6 +473,8 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) void __vm_area_free(struct vm_area_struct *vma) { + /* The vma should be detached while being destroyed. */ + vma_assert_detached(vma); vma_numab_state_free(vma); free_anon_vma_name(vma); kmem_cache_free(vm_area_cachep, vma); @@ -488,8 +486,6 @@ static void vm_area_free_rcu_cb(struct rcu_head *head) struct vm_area_struct *vma = container_of(head, struct vm_area_struct, vm_rcu); - /* The vma should not be locked while being destroyed. */ - VM_BUG_ON_VMA(rwsem_is_locked(&vma->vm_lock.lock), vma); __vm_area_free(vma); } #endif @@ -1223,6 +1219,9 @@ static inline void mmap_init_lock(struct mm_struct *mm) { init_rwsem(&mm->mmap_lock); mm_lock_seqcount_init(mm); +#ifdef CONFIG_PER_VMA_LOCK + rcuwait_init(&mm->vma_writer_wait); +#endif } static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, diff --git a/mm/init-mm.c b/mm/init-mm.c index 6af3ad675930..4600e7605cab 100644 --- a/mm/init-mm.c +++ b/mm/init-mm.c @@ -40,6 +40,7 @@ struct mm_struct init_mm = { .arg_lock = __SPIN_LOCK_UNLOCKED(init_mm.arg_lock), .mmlist = LIST_HEAD_INIT(init_mm.mmlist), #ifdef CONFIG_PER_VMA_LOCK + .vma_writer_wait = __RCUWAIT_INITIALIZER(init_mm.vma_writer_wait), .mm_lock_seq = SEQCNT_ZERO(init_mm.mm_lock_seq), #endif .user_ns = &init_user_ns, diff --git a/mm/memory.c b/mm/memory.c index 236fdecd44d6..dc16b67beefa 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6328,9 +6328,47 @@ fail: #endif #ifdef CONFIG_PER_VMA_LOCK +static inline bool __vma_enter_locked(struct vm_area_struct *vma, bool detaching) +{ + unsigned int tgt_refcnt = VMA_LOCK_OFFSET; + + /* Additional refcnt if the vma is attached. */ + if (!detaching) + tgt_refcnt++; + + /* + * If vma is detached then only vma_mark_attached() can raise the + * vm_refcnt. mmap_write_lock prevents racing with vma_mark_attached(). 
+ */ + if (!refcount_add_not_zero(VMA_LOCK_OFFSET, &vma->vm_refcnt)) + return false; + + rwsem_acquire(&vma->vmlock_dep_map, 0, 0, _RET_IP_); + rcuwait_wait_event(&vma->vm_mm->vma_writer_wait, + refcount_read(&vma->vm_refcnt) == tgt_refcnt, + TASK_UNINTERRUPTIBLE); + lock_acquired(&vma->vmlock_dep_map, _RET_IP_); + + return true; +} + +static inline void __vma_exit_locked(struct vm_area_struct *vma, bool *detached) +{ + *detached = refcount_sub_and_test(VMA_LOCK_OFFSET, &vma->vm_refcnt); + rwsem_release(&vma->vmlock_dep_map, _RET_IP_); +} + void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq) { - down_write(&vma->vm_lock.lock); + bool locked; + + /* + * __vma_enter_locked() returns false immediately if the vma is not + * attached, otherwise it waits until refcnt is indicating that vma + * is attached with no readers. + */ + locked = __vma_enter_locked(vma, false); + /* * We should use WRITE_ONCE() here because we can have concurrent reads * from the early lockless pessimistic check in vma_start_read(). @@ -6338,10 +6376,40 @@ void __vma_start_write(struct vm_area_struct *vma, unsigned int mm_lock_seq) * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy. */ WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq); - up_write(&vma->vm_lock.lock); + + if (locked) { + bool detached; + + __vma_exit_locked(vma, &detached); + VM_BUG_ON_VMA(detached, vma); /* vma should remain attached */ + } } EXPORT_SYMBOL_GPL(__vma_start_write); +void vma_mark_detached(struct vm_area_struct *vma) +{ + vma_assert_write_locked(vma); + vma_assert_attached(vma); + + /* + * We are the only writer, so no need to use vma_refcount_put(). + * The condition below is unlikely because the vma has been already + * write-locked and readers can increment vm_refcnt only temporarily + * before they check vm_lock_seq, realize the vma is locked and drop + * back the vm_refcnt. That is a narrow window for observing a raised + * vm_refcnt. + */ + if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) { + /* Wait until vma is detached with no readers. */ + if (__vma_enter_locked(vma, true)) { + bool detached; + + __vma_exit_locked(vma, &detached); + VM_BUG_ON_VMA(!detached, vma); + } + } +} + /* * Lookup and lock a VMA under RCU protection. Returned VMA is guaranteed to be * stable and not isolated. If the VMA is not found or is being modified the @@ -6354,7 +6422,6 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, struct vm_area_struct *vma; rcu_read_lock(); -retry: vma = mas_walk(&mas); if (!vma) goto inval; @@ -6362,13 +6429,6 @@ retry: if (!vma_start_read(vma)) goto inval; - /* Check if the VMA got isolated after we found it */ - if (is_vma_detached(vma)) { - vma_end_read(vma); - count_vm_vma_lock_event(VMA_LOCK_MISS); - /* The area was replaced with another one */ - goto retry; - } /* * At this point, we have a stable reference to a VMA: The VMA is * locked and we know it hasn't already been isolated. 
diff --git a/tools/testing/vma/linux/atomic.h b/tools/testing/vma/linux/atomic.h index 3e1b6adc027b..788c597c4fde 100644 --- a/tools/testing/vma/linux/atomic.h +++ b/tools/testing/vma/linux/atomic.h @@ -9,4 +9,9 @@ #define atomic_set(x, y) uatomic_set(x, y) #define U8_MAX UCHAR_MAX +#ifndef atomic_cmpxchg_relaxed +#define atomic_cmpxchg_relaxed uatomic_cmpxchg +#define atomic_cmpxchg_release uatomic_cmpxchg +#endif /* atomic_cmpxchg_relaxed */ + #endif /* _LINUX_ATOMIC_H */ diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index 2a624f9304da..1e8cd2f013fa 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -25,7 +25,7 @@ #include #include #include -#include +#include extern unsigned long stack_guard_gap; #ifdef CONFIG_MMU @@ -132,10 +132,6 @@ typedef __bitwise unsigned int vm_fault_t; */ #define pr_warn_once pr_err -typedef struct refcount_struct { - atomic_t refs; -} refcount_t; - struct kref { refcount_t refcount; }; @@ -228,15 +224,12 @@ struct mm_struct { unsigned long def_flags; }; -struct vma_lock { - struct rw_semaphore lock; -}; - - struct file { struct address_space *f_mapping; }; +#define VMA_LOCK_OFFSET 0x40000000 + struct vm_area_struct { /* The first cache line has the info for VMA tree walking. */ @@ -264,16 +257,13 @@ struct vm_area_struct { }; #ifdef CONFIG_PER_VMA_LOCK - /* Flag to indicate areas detached from the mm->mm_mt tree */ - bool detached; - /* * Can only be written (using WRITE_ONCE()) while holding both: * - mmap_lock (in write mode) - * - vm_lock.lock (in write mode) + * - vm_refcnt bit at VMA_LOCK_OFFSET is set * Can be read reliably while holding one of: * - mmap_lock (in read or write mode) - * - vm_lock.lock (in read or write mode) + * - vm_refcnt bit at VMA_LOCK_OFFSET is set or vm_refcnt > 1 * Can be read unreliably (using READ_ONCE()) for pessimistic bailout * while holding nothing (except RCU to keep the VMA struct allocated). * @@ -282,7 +272,6 @@ struct vm_area_struct { * slowpath. */ unsigned int vm_lock_seq; - struct vma_lock vm_lock; #endif /* @@ -335,6 +324,10 @@ struct vm_area_struct { struct vma_numab_state *numab_state; /* NUMA Balancing state */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; +#ifdef CONFIG_PER_VMA_LOCK + /* Unstable RCU readers are allowed to read this. */ + refcount_t vm_refcnt; +#endif } __randomize_layout; struct vm_fault {}; @@ -459,23 +452,41 @@ static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi) return mas_find(&vmi->mas, ULONG_MAX); } -static inline void vma_lock_init(struct vm_area_struct *vma) +/* + * WARNING: to avoid racing with vma_mark_attached()/vma_mark_detached(), these + * assertions should be made either under mmap_write_lock or when the object + * has been isolated under mmap_write_lock, ensuring no competing writers. 
+ */ +static inline void vma_assert_attached(struct vm_area_struct *vma) { - init_rwsem(&vma->vm_lock.lock); - vma->vm_lock_seq = UINT_MAX; + VM_BUG_ON_VMA(!refcount_read(&vma->vm_refcnt), vma); } -static inline void vma_mark_attached(struct vm_area_struct *vma) +static inline void vma_assert_detached(struct vm_area_struct *vma) { - vma->detached = false; + VM_BUG_ON_VMA(refcount_read(&vma->vm_refcnt), vma); } static inline void vma_assert_write_locked(struct vm_area_struct *); +static inline void vma_mark_attached(struct vm_area_struct *vma) +{ + vma_assert_write_locked(vma); + vma_assert_detached(vma); + refcount_set(&vma->vm_refcnt, 1); +} + static inline void vma_mark_detached(struct vm_area_struct *vma) { - /* When detaching vma should be write-locked */ vma_assert_write_locked(vma); - vma->detached = true; + vma_assert_attached(vma); + + /* We are the only writer, so no need to use vma_refcount_put(). */ + if (unlikely(!refcount_dec_and_test(&vma->vm_refcnt))) { + /* + * Reader must have temporarily raised vm_refcnt but it will + * drop it without using the vma since vma is write-locked. + */ + } } extern const struct vm_operations_struct vma_dummy_vm_ops; @@ -488,9 +499,7 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) vma->vm_mm = mm; vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); - /* vma is not locked, can't use vma_mark_detached() */ - vma->detached = true; - vma_lock_init(vma); + vma->vm_lock_seq = UINT_MAX; } static inline struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) @@ -513,10 +522,9 @@ static inline struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) return NULL; memcpy(new, orig, sizeof(*new)); - vma_lock_init(new); + refcount_set(&new->vm_refcnt, 0); + new->vm_lock_seq = UINT_MAX; INIT_LIST_HEAD(&new->anon_vma_chain); - /* vma is not locked, can't use vma_mark_detached() */ - new->detached = true; return new; } From fb9130ab191d0167b87bbeb909c415200414b207 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:25:59 -0800 Subject: [PATCH 200/504] mm: move lesser used vma_area_struct members into the last cacheline Move several vma_area_struct members which are rarely or never used during page fault handling into the last cacheline to better pack vm_area_struct. As a result vm_area_struct will fit into 3 as opposed to 4 cachelines. 
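
[Editorial note] As a rough illustration of how such a packing claim can be locked in at build time, here is a small standalone C sketch, not kernel code: hot fields first, the contended counter forced onto its own cacheline, and a static assertion on the total size. The field names are a hypothetical, reduced subset of vm_area_struct; the real layout, as dumped with pahole, follows right after this sketch.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define CACHELINE 64

struct toy_vma {
	/* hot: fields touched on the page-fault path come first */
	unsigned long vm_start, vm_end;
	void *vm_mm;
	unsigned long vm_flags;
	unsigned int vm_lock_seq;
	/* cold: rarely used fields are pushed towards the tail */
	void *anon_name;
	void *vm_userfaultfd_ctx;
	/* contended: keep the refcount on its own cacheline */
	_Alignas(CACHELINE) unsigned int vm_refcnt;
} __attribute__((aligned(CACHELINE)));

static_assert(sizeof(struct toy_vma) <= 3 * CACHELINE,
	      "toy_vma must fit in three cachelines");

int main(void)
{
	printf("sizeof=%zu, vm_refcnt at offset %zu\n",
	       sizeof(struct toy_vma), offsetof(struct toy_vma, vm_refcnt));
	return 0;
}
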
New typical vm_area_struct layout: struct vm_area_struct { union { struct { long unsigned int vm_start; /* 0 8 */ long unsigned int vm_end; /* 8 8 */ }; /* 0 16 */ freeptr_t vm_freeptr; /* 0 8 */ }; /* 0 16 */ struct mm_struct * vm_mm; /* 16 8 */ pgprot_t vm_page_prot; /* 24 8 */ union { const vm_flags_t vm_flags; /* 32 8 */ vm_flags_t __vm_flags; /* 32 8 */ }; /* 32 8 */ unsigned int vm_lock_seq; /* 40 4 */ /* XXX 4 bytes hole, try to pack */ struct list_head anon_vma_chain; /* 48 16 */ /* --- cacheline 1 boundary (64 bytes) --- */ struct anon_vma * anon_vma; /* 64 8 */ const struct vm_operations_struct * vm_ops; /* 72 8 */ long unsigned int vm_pgoff; /* 80 8 */ struct file * vm_file; /* 88 8 */ void * vm_private_data; /* 96 8 */ atomic_long_t swap_readahead_info; /* 104 8 */ struct mempolicy * vm_policy; /* 112 8 */ struct vma_numab_state * numab_state; /* 120 8 */ /* --- cacheline 2 boundary (128 bytes) --- */ refcount_t vm_refcnt (__aligned__(64)); /* 128 4 */ /* XXX 4 bytes hole, try to pack */ struct { struct rb_node rb (__aligned__(8)); /* 136 24 */ long unsigned int rb_subtree_last; /* 160 8 */ } __attribute__((__aligned__(8))) shared; /* 136 32 */ struct anon_vma_name * anon_name; /* 168 8 */ struct vm_userfaultfd_ctx vm_userfaultfd_ctx; /* 176 8 */ /* size: 192, cachelines: 3, members: 18 */ /* sum members: 176, holes: 2, sum holes: 8 */ /* padding: 8 */ /* forced alignments: 2, forced holes: 1, sum forced holes: 4 */ } __attribute__((__aligned__(64))); Memory consumption per 1000 VMAs becomes 48 pages: slabinfo after vm_area_struct changes: ... : ... vm_area_struct ... 192 42 2 : ... Link: https://lkml.kernel.org/r/20250111042604.3230628-13-surenb@google.com Signed-off-by: Suren Baghdasaryan Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Vlastimil Babka Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mm_types.h | 38 ++++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 20 deletions(-) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 9228d19662c6..d902e6730654 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -725,17 +725,6 @@ struct vm_area_struct { */ unsigned int vm_lock_seq; #endif - - /* - * For areas with an address space and backing store, - * linkage into the address_space->i_mmap interval tree. - * - */ - struct { - struct rb_node rb; - unsigned long rb_subtree_last; - } shared; - /* * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma * list, after a COW of one of the file pages. A MAP_SHARED vma @@ -755,14 +744,6 @@ struct vm_area_struct { struct file * vm_file; /* File we map to (can be NULL). */ void * vm_private_data; /* was vm_pte (shared mem) */ -#ifdef CONFIG_ANON_VMA_NAME - /* - * For private and shared anonymous mappings, a pointer to a null - * terminated string containing the name given to the vma, or NULL if - * unnamed. Serialized by mmap_lock. Use anon_vma_name to access. 
- */ - struct anon_vma_name *anon_name; -#endif #ifdef CONFIG_SWAP atomic_long_t swap_readahead_info; #endif @@ -775,7 +756,6 @@ struct vm_area_struct { #ifdef CONFIG_NUMA_BALANCING struct vma_numab_state *numab_state; /* NUMA Balancing state */ #endif - struct vm_userfaultfd_ctx vm_userfaultfd_ctx; #ifdef CONFIG_PER_VMA_LOCK /* Unstable RCU readers are allowed to read this. */ refcount_t vm_refcnt ____cacheline_aligned_in_smp; @@ -783,6 +763,24 @@ struct vm_area_struct { struct lockdep_map vmlock_dep_map; #endif #endif + /* + * For areas with an address space and backing store, + * linkage into the address_space->i_mmap interval tree. + * + */ + struct { + struct rb_node rb; + unsigned long rb_subtree_last; + } shared; +#ifdef CONFIG_ANON_VMA_NAME + /* + * For private and shared anonymous mappings, a pointer to a null + * terminated string containing the name given to the vma, or NULL if + * unnamed. Serialized by mmap_lock. Use anon_vma_name to access. + */ + struct anon_vma_name *anon_name; +#endif + struct vm_userfaultfd_ctx vm_userfaultfd_ctx; } __randomize_layout; #ifdef CONFIG_NUMA From 4930d3b17a2a703a5dcdd8967fdfe84beec6a735 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:26:00 -0800 Subject: [PATCH 201/504] mm/debug: print vm_refcnt state when dumping the vma vm_refcnt encodes a number of useful states: - whether vma is attached or detached - the number of current vma readers - presence of a vma writer Let's include it in the vma dump. Link: https://lkml.kernel.org/r/20250111042604.3230628-14-surenb@google.com Signed-off-by: Suren Baghdasaryan Acked-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Signed-off-by: Andrew Morton --- mm/debug.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/mm/debug.c b/mm/debug.c index 95b6ab809c0e..9c4cbc3733b0 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -178,6 +178,17 @@ EXPORT_SYMBOL(dump_page); void dump_vma(const struct vm_area_struct *vma) { +#ifdef CONFIG_PER_VMA_LOCK + pr_emerg("vma %px start %px end %px mm %px\n" + "prot %lx anon_vma %px vm_ops %px\n" + "pgoff %lx file %px private_data %px\n" + "flags: %#lx(%pGv) refcnt %x\n", + vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_mm, + (unsigned long)pgprot_val(vma->vm_page_prot), + vma->anon_vma, vma->vm_ops, vma->vm_pgoff, + vma->vm_file, vma->vm_private_data, + vma->vm_flags, &vma->vm_flags, refcount_read(&vma->vm_refcnt)); +#else pr_emerg("vma %px start %px end %px mm %px\n" "prot %lx anon_vma %px vm_ops %px\n" "pgoff %lx file %px private_data %px\n" @@ -187,6 +198,7 @@ void dump_vma(const struct vm_area_struct *vma) vma->anon_vma, vma->vm_ops, vma->vm_pgoff, vma->vm_file, vma->vm_private_data, vma->vm_flags, &vma->vm_flags); +#endif } EXPORT_SYMBOL(dump_vma); From 0ac451e5d3b6611801421e31f44d3f837579412b Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:26:01 -0800 Subject: [PATCH 202/504] mm: remove extra vma_numab_state_init() call vma_init() already memset's the whole vm_area_struct to 0, so there is no need to an additional vma_numab_state_init(). 
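
[Editorial note] As a tiny standalone illustration of why the removed call is redundant (hypothetical toy_vma/toy_numab_state types, not the kernel structures): once the init helper clears the whole object, a separate zero-initialisation of one member is dead code.

#include <stdio.h>
#include <string.h>

struct toy_numab_state { long next_scan; };

struct toy_vma {
	unsigned long vm_start, vm_end;
	struct toy_numab_state *numab_state;	/* NULL means "no state yet" */
};

static void toy_vma_init(struct toy_vma *vma, unsigned long start, unsigned long end)
{
	memset(vma, 0, sizeof(*vma));	/* numab_state is already NULL here */
	vma->vm_start = start;
	vma->vm_end = end;
	/* a separate "vma->numab_state = NULL;" at this point would be dead code */
}

int main(void)
{
	struct toy_vma vma;

	toy_vma_init(&vma, 0x1000, 0x2000);
	printf("numab_state=%p\n", (void *)vma.numab_state);
	return 0;
}
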
Link: https://lkml.kernel.org/r/20250111042604.3230628-15-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mm.h | 1 - 1 file changed, 1 deletion(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index cac0877a04e1..0c132a22d7f4 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -948,7 +948,6 @@ static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm) vma->vm_mm = mm; vma->vm_ops = &vma_dummy_vm_ops; INIT_LIST_HEAD(&vma->anon_vma_chain); - vma_numab_state_init(vma); vma_lock_init(vma, false); } From 79fbc8d6d169b236ff07071599e8589ba0cc5317 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:26:02 -0800 Subject: [PATCH 203/504] mm: prepare lock_vma_under_rcu() for vma reuse possibility Once we make vma cache SLAB_TYPESAFE_BY_RCU, it will be possible for a vma to be reused and attached to another mm after lock_vma_under_rcu() locks the vma. lock_vma_under_rcu() should ensure that vma_start_read() is using the original mm and after locking the vma it should ensure that vma->vm_mm has not changed from under us. Link: https://lkml.kernel.org/r/20250111042604.3230628-16-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mm.h | 10 ++++++---- mm/memory.c | 7 ++++--- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 0c132a22d7f4..29befffda941 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -739,8 +739,10 @@ static inline void vma_refcount_put(struct vm_area_struct *vma) * Try to read-lock a vma. The function is allowed to occasionally yield false * locked result to avoid performance overhead, in which case we fall back to * using mmap_lock. The function should never yield false unlocked result. + * False locked result is possible if mm_lock_seq overflows or if vma gets + * reused and attached to a different mm before we lock it. */ -static inline bool vma_start_read(struct vm_area_struct *vma) +static inline bool vma_start_read(struct mm_struct *mm, struct vm_area_struct *vma) { int oldcnt; @@ -751,7 +753,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * we don't rely on for anything - the mm_lock_seq read against which we * need ordering is below. 
*/ - if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq.sequence)) + if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(mm->mm_lock_seq.sequence)) return false; /* @@ -774,7 +776,7 @@ static inline bool vma_start_read(struct vm_area_struct *vma) * after it has been unlocked. * This pairs with RELEASE semantics in vma_end_write_all(). */ - if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&vma->vm_mm->mm_lock_seq))) { + if (unlikely(vma->vm_lock_seq == raw_read_seqcount(&mm->mm_lock_seq))) { vma_refcount_put(vma); return false; } @@ -906,7 +908,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, #else /* CONFIG_PER_VMA_LOCK */ static inline void vma_lock_init(struct vm_area_struct *vma, bool reset_refcnt) {} -static inline bool vma_start_read(struct vm_area_struct *vma) +static inline bool vma_start_read(struct mm_struct *mm, struct vm_area_struct *vma) { return false; } static inline void vma_end_read(struct vm_area_struct *vma) {} static inline void vma_start_write(struct vm_area_struct *vma) {} diff --git a/mm/memory.c b/mm/memory.c index dc16b67beefa..67cfcebb0f94 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -6426,7 +6426,7 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, if (!vma) goto inval; - if (!vma_start_read(vma)) + if (!vma_start_read(mm, vma)) goto inval; /* @@ -6436,8 +6436,9 @@ struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, * fields are accessible for RCU readers. */ - /* Check since vm_start/vm_end might change before we lock the VMA */ - if (unlikely(address < vma->vm_start || address >= vma->vm_end)) + /* Check if the vma we locked is the right one. */ + if (unlikely(vma->vm_mm != mm || + address < vma->vm_start || address >= vma->vm_end)) goto inval_end_read; rcu_read_unlock(); From be96d5653b7abbda2de27cb27f09939637166e2e Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:26:03 -0800 Subject: [PATCH 204/504] mm: make vma cache SLAB_TYPESAFE_BY_RCU To enable SLAB_TYPESAFE_BY_RCU for vma cache we need to ensure that object reuse before RCU grace period is over will be detected by lock_vma_under_rcu(). Current checks are sufficient as long as vma is detached before it is freed. The only place this is not currently happening is in exit_mmap(). Add the missing vma_mark_detached() in exit_mmap(). Another issue which might trick lock_vma_under_rcu() during vma reuse is vm_area_dup(), which copies the entire content of the vma into a new one, overriding new vma's vm_refcnt and temporarily making it appear as attached. This might trick a racing lock_vma_under_rcu() to operate on a reused vma if it found the vma before it got reused. To prevent this situation, we should ensure that vm_refcnt stays at detached state (0) when it is copied and advances to attached state only after it is added into the vma tree. Introduce vm_area_init_from() which preserves new vma's vm_refcnt and use it in vm_area_dup(). Since all vmas are in detached state with no current readers when they are freed, lock_vma_under_rcu() will not be able to take vm_refcnt after vma got detached even if vma is reused. Finally, make vm_area_cachep SLAB_TYPESAFE_BY_RCU. This will facilitate vm_area_struct reuse and will minimize the number of call_rcu() calls. 
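
[Editorial note] The hazard described here is the classic SLAB_TYPESAFE_BY_RCU pattern: a reader can pin an object that has already been freed and recycled for a different owner, so it must revalidate identity after taking its reference. The sketch below is a simplified userspace model of that shape, with hypothetical toy_vma/toy_mm types, a plain C11 atomic instead of refcount_t and no real RCU; it illustrates the idea rather than the kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_mm { int id; };

struct toy_vma {
	struct toy_mm *vm_mm;
	unsigned long vm_start, vm_end;
	atomic_uint vm_refcnt;		/* 0 = detached / free for reuse */
};

/* Pin the vma only while it is attached (refcnt > 0). */
static bool toy_get_not_zero(struct toy_vma *vma)
{
	unsigned int old = atomic_load(&vma->vm_refcnt);

	do {
		if (old == 0)
			return false;
	} while (!atomic_compare_exchange_weak(&vma->vm_refcnt, &old, old + 1));

	return true;
}

/*
 * Lookup under (conceptual) RCU: even after pinning, the object may have been
 * recycled for another mm or another range, so recheck before trusting it.
 */
static struct toy_vma *toy_lock_vma(struct toy_mm *mm, struct toy_vma *candidate,
				    unsigned long addr)
{
	if (!toy_get_not_zero(candidate))
		return NULL;				/* detached or being freed */

	if (candidate->vm_mm != mm ||
	    addr < candidate->vm_start || addr >= candidate->vm_end) {
		atomic_fetch_sub(&candidate->vm_refcnt, 1);	/* reused: back off */
		return NULL;
	}

	return candidate;				/* stable, correctly owned vma */
}

int main(void)
{
	struct toy_mm mm = { .id = 1 }, other_mm = { .id = 2 };
	struct toy_vma vma = { .vm_mm = &mm, .vm_start = 0x1000,
			       .vm_end = 0x2000, .vm_refcnt = 1 };

	if (toy_lock_vma(&mm, &vma, 0x1800))
		atomic_fetch_sub(&vma.vm_refcnt, 1);	/* model of vma_end_read */

	/* A stale reader of another mm sees the same object but must reject it. */
	return toy_lock_vma(&other_mm, &vma, 0x1800) ? 1 : 0;
}

Keeping vm_refcnt at zero while the contents are copied in vm_area_dup() is what guarantees the first check above cannot succeed against a half-initialised, reused object.
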
Link: https://lkml.kernel.org/r/20250111042604.3230628-17-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Vlastimil Babka Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Liam R. Howlett Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mm.h | 2 - include/linux/mm_types.h | 13 ++++-- include/linux/slab.h | 6 --- kernel/fork.c | 73 ++++++++++++++++++++------------ mm/mmap.c | 3 +- mm/vma.c | 11 ++--- mm/vma.h | 2 +- tools/testing/vma/vma_internal.h | 7 +-- 8 files changed, 63 insertions(+), 54 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 29befffda941..7d3718e11047 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -258,8 +258,6 @@ void setup_initial_init_mm(void *start_code, void *end_code, struct vm_area_struct *vm_area_alloc(struct mm_struct *); struct vm_area_struct *vm_area_dup(struct vm_area_struct *); void vm_area_free(struct vm_area_struct *); -/* Use only if VMA has no other users */ -void __vm_area_free(struct vm_area_struct *vma); #ifndef CONFIG_MMU extern struct rb_root nommu_region_tree; diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index d902e6730654..d366ec6302e6 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -574,6 +574,12 @@ static inline void *folio_get_private(struct folio *folio) typedef unsigned long vm_flags_t; +/* + * freeptr_t represents a SLUB freelist pointer, which might be encoded + * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled. + */ +typedef struct { unsigned long v; } freeptr_t; + /* * A region containing a mapping of a non-memory backed file under NOMMU * conditions. These are held in a global tree and are pinned by the VMAs that @@ -677,6 +683,9 @@ struct vma_numab_state { * * Only explicitly marked struct members may be accessed by RCU readers before * getting a stable reference. + * + * WARNING: when adding new members, please update vm_area_init_from() to copy + * them during vm_area_struct content duplication. */ struct vm_area_struct { /* The first cache line has the info for VMA tree walking. */ @@ -687,9 +696,7 @@ struct vm_area_struct { unsigned long vm_start; unsigned long vm_end; }; -#ifdef CONFIG_PER_VMA_LOCK - struct rcu_head vm_rcu; /* Used for deferred freeing. */ -#endif + freeptr_t vm_freeptr; /* Pointer used by SLAB_TYPESAFE_BY_RCU */ }; /* diff --git a/include/linux/slab.h b/include/linux/slab.h index 10a971c2bde3..681b685b6c4e 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -234,12 +234,6 @@ enum _slab_flag_bits { #define SLAB_NO_OBJ_EXT __SLAB_FLAG_UNUSED #endif -/* - * freeptr_t represents a SLUB freelist pointer, which might be encoded - * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled. - */ -typedef struct { unsigned long v; } freeptr_t; - /* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. 
* diff --git a/kernel/fork.c b/kernel/fork.c index 9d9275783cf8..151b40627c14 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -449,6 +449,42 @@ struct vm_area_struct *vm_area_alloc(struct mm_struct *mm) return vma; } +static void vm_area_init_from(const struct vm_area_struct *src, + struct vm_area_struct *dest) +{ + dest->vm_mm = src->vm_mm; + dest->vm_ops = src->vm_ops; + dest->vm_start = src->vm_start; + dest->vm_end = src->vm_end; + dest->anon_vma = src->anon_vma; + dest->vm_pgoff = src->vm_pgoff; + dest->vm_file = src->vm_file; + dest->vm_private_data = src->vm_private_data; + vm_flags_init(dest, src->vm_flags); + memcpy(&dest->vm_page_prot, &src->vm_page_prot, + sizeof(dest->vm_page_prot)); + /* + * src->shared.rb may be modified concurrently when called from + * dup_mmap(), but the clone will reinitialize it. + */ + data_race(memcpy(&dest->shared, &src->shared, sizeof(dest->shared))); + memcpy(&dest->vm_userfaultfd_ctx, &src->vm_userfaultfd_ctx, + sizeof(dest->vm_userfaultfd_ctx)); +#ifdef CONFIG_ANON_VMA_NAME + dest->anon_name = src->anon_name; +#endif +#ifdef CONFIG_SWAP + memcpy(&dest->swap_readahead_info, &src->swap_readahead_info, + sizeof(dest->swap_readahead_info)); +#endif +#ifndef CONFIG_MMU + dest->vm_region = src->vm_region; +#endif +#ifdef CONFIG_NUMA + dest->vm_policy = src->vm_policy; +#endif +} + struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) { struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); @@ -458,11 +494,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) ASSERT_EXCLUSIVE_WRITER(orig->vm_flags); ASSERT_EXCLUSIVE_WRITER(orig->vm_file); - /* - * orig->shared.rb may be modified concurrently, but the clone - * will be reinitialized. - */ - data_race(memcpy(new, orig, sizeof(*new))); + vm_area_init_from(orig, new); vma_lock_init(new, true); INIT_LIST_HEAD(&new->anon_vma_chain); vma_numab_state_init(new); @@ -471,7 +503,7 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig) return new; } -void __vm_area_free(struct vm_area_struct *vma) +void vm_area_free(struct vm_area_struct *vma) { /* The vma should be detached while being destroyed. 
*/ vma_assert_detached(vma); @@ -480,25 +512,6 @@ void __vm_area_free(struct vm_area_struct *vma) kmem_cache_free(vm_area_cachep, vma); } -#ifdef CONFIG_PER_VMA_LOCK -static void vm_area_free_rcu_cb(struct rcu_head *head) -{ - struct vm_area_struct *vma = container_of(head, struct vm_area_struct, - vm_rcu); - - __vm_area_free(vma); -} -#endif - -void vm_area_free(struct vm_area_struct *vma) -{ -#ifdef CONFIG_PER_VMA_LOCK - call_rcu(&vma->vm_rcu, vm_area_free_rcu_cb); -#else - __vm_area_free(vma); -#endif -} - static void account_kernel_stack(struct task_struct *tsk, int account) { if (IS_ENABLED(CONFIG_VMAP_STACK)) { @@ -3144,6 +3157,11 @@ void __init mm_cache_init(void) void __init proc_caches_init(void) { + struct kmem_cache_args args = { + .use_freeptr_offset = true, + .freeptr_offset = offsetof(struct vm_area_struct, vm_freeptr), + }; + sighand_cachep = kmem_cache_create("sighand_cache", sizeof(struct sighand_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| @@ -3160,8 +3178,9 @@ void __init proc_caches_init(void) sizeof(struct fs_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); - vm_area_cachep = KMEM_CACHE(vm_area_struct, - SLAB_HWCACHE_ALIGN|SLAB_NO_MERGE|SLAB_PANIC| + vm_area_cachep = kmem_cache_create("vm_area_struct", + sizeof(struct vm_area_struct), &args, + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| SLAB_ACCOUNT); mmap_init(); nsproxy_cache_init(); diff --git a/mm/mmap.c b/mm/mmap.c index 3cc8de07411d..7fdc4207fe98 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1364,7 +1364,8 @@ void exit_mmap(struct mm_struct *mm) do { if (vma->vm_flags & VM_ACCOUNT) nr_accounted += vma_pages(vma); - remove_vma(vma, /* unreachable = */ true); + vma_mark_detached(vma); + remove_vma(vma); count++; cond_resched(); vma = vma_next(&vmi); diff --git a/mm/vma.c b/mm/vma.c index 9299fdf7ef39..87508abdd3cc 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -406,19 +406,14 @@ static bool can_vma_merge_right(struct vma_merge_struct *vmg, /* * Close a vm structure and free it. 
*/ -void remove_vma(struct vm_area_struct *vma, bool unreachable) +void remove_vma(struct vm_area_struct *vma) { might_sleep(); vma_close(vma); if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); - if (unreachable) { - vma_mark_detached(vma); - __vm_area_free(vma); - } else { - vm_area_free(vma); - } + vm_area_free(vma); } /* @@ -1200,7 +1195,7 @@ static void vms_complete_munmap_vmas(struct vma_munmap_struct *vms, /* Remove and clean up vmas */ mas_set(mas_detach, 0); mas_for_each(mas_detach, vma, ULONG_MAX) - remove_vma(vma, /* unreachable = */ false); + remove_vma(vma); vm_unacct_memory(vms->nr_accounted); validate_mm(mm); diff --git a/mm/vma.h b/mm/vma.h index bf2be39ab046..bd54f2245a86 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -170,7 +170,7 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, bool unlock); -void remove_vma(struct vm_area_struct *vma, bool unreachable); +void remove_vma(struct vm_area_struct *vma); void unmap_region(struct ma_state *mas, struct vm_area_struct *vma, struct vm_area_struct *prev, struct vm_area_struct *next); diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index 1e8cd2f013fa..c7c580ec9a2d 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -693,14 +693,9 @@ static inline void mpol_put(struct mempolicy *) { } -static inline void __vm_area_free(struct vm_area_struct *vma) -{ - free(vma); -} - static inline void vm_area_free(struct vm_area_struct *vma) { - __vm_area_free(vma); + free(vma); } static inline void lru_add_drain(void) From 76532c193eff0b366061169fe1df48dc7f63d90a Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Fri, 10 Jan 2025 20:26:04 -0800 Subject: [PATCH 205/504] docs/mm: document latest changes to vm_lock Change the documentation to reflect that vm_lock is integrated into vma and replaced with vm_refcnt. Document newly introduced vma_start_read_locked{_nested} functions. Link: https://lkml.kernel.org/r/20250111042604.3230628-18-surenb@google.com Signed-off-by: Suren Baghdasaryan Reviewed-by: Liam R. Howlett Cc: Christian Brauner Cc: David Hildenbrand Cc: David Howells Cc: Davidlohr Bueso Cc: Hillf Danton Cc: Hugh Dickens Cc: Jann Horn Cc: Johannes Weiner Cc: Jonathan Corbet Cc: kernel test robot Cc: Klara Modin Cc: Lokesh Gidra Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Mattew Wilcox Cc: Mel Gorman Cc: Michal Hocko Cc: Minchan Kim Cc: Oleg Nesterov Cc: Pasha Tatashin Cc: "Paul E . McKenney" Cc: Peter Xu Cc: Peter Zijlstra (Intel) Cc: Shakeel Butt Cc: Sourav Panda Cc: Vlastimil Babka Cc: Wei Yang Signed-off-by: Andrew Morton --- Documentation/mm/process_addrs.rst | 44 ++++++++++++++++++------------ 1 file changed, 26 insertions(+), 18 deletions(-) diff --git a/Documentation/mm/process_addrs.rst b/Documentation/mm/process_addrs.rst index 81417fa2ed20..f573de936b5d 100644 --- a/Documentation/mm/process_addrs.rst +++ b/Documentation/mm/process_addrs.rst @@ -716,9 +716,14 @@ calls :c:func:`!rcu_read_lock` to ensure that the VMA is looked up in an RCU critical section, then attempts to VMA lock it via :c:func:`!vma_start_read`, before releasing the RCU lock via :c:func:`!rcu_read_unlock`. -VMA read locks hold the read lock on the :c:member:`!vma->vm_lock` semaphore for -their duration and the caller of :c:func:`!lock_vma_under_rcu` must release it -via :c:func:`!vma_end_read`. 
+In cases when the user already holds mmap read lock, :c:func:`!vma_start_read_locked` +and :c:func:`!vma_start_read_locked_nested` can be used. These functions do not +fail due to lock contention but the caller should still check their return values +in case they fail for other reasons. + +VMA read locks increment :c:member:`!vma.vm_refcnt` reference counter for their +duration and the caller of :c:func:`!lock_vma_under_rcu` must drop it via +:c:func:`!vma_end_read`. VMA **write** locks are acquired via :c:func:`!vma_start_write` in instances where a VMA is about to be modified, unlike :c:func:`!vma_start_read` the lock is always @@ -726,9 +731,9 @@ acquired. An mmap write lock **must** be held for the duration of the VMA write lock, releasing or downgrading the mmap write lock also releases the VMA write lock so there is no :c:func:`!vma_end_write` function. -Note that a semaphore write lock is not held across a VMA lock. Rather, a -sequence number is used for serialisation, and the write semaphore is only -acquired at the point of write lock to update this. +Note that when write-locking a VMA lock, the :c:member:`!vma.vm_refcnt` is temporarily +modified so that readers can detect the presense of a writer. The reference counter is +restored once the vma sequence number used for serialisation is updated. This ensures the semantics we require - VMA write locks provide exclusive write access to the VMA. @@ -738,7 +743,7 @@ Implementation details The VMA lock mechanism is designed to be a lightweight means of avoiding the use of the heavily contended mmap lock. It is implemented using a combination of a -read/write semaphore and sequence numbers belonging to the containing +reference counter and sequence numbers belonging to the containing :c:struct:`!struct mm_struct` and the VMA. Read locks are acquired via :c:func:`!vma_start_read`, which is an optimistic @@ -779,28 +784,31 @@ release of any VMA locks on its release makes sense, as you would never want to keep VMAs locked across entirely separate write operations. It also maintains correct lock ordering. -Each time a VMA read lock is acquired, we acquire a read lock on the -:c:member:`!vma->vm_lock` read/write semaphore and hold it, while checking that -the sequence count of the VMA does not match that of the mm. +Each time a VMA read lock is acquired, we increment :c:member:`!vma.vm_refcnt` +reference counter and check that the sequence count of the VMA does not match +that of the mm. -If it does, the read lock fails. If it does not, we hold the lock, excluding -writers, but permitting other readers, who will also obtain this lock under RCU. +If it does, the read lock fails and :c:member:`!vma.vm_refcnt` is dropped. +If it does not, we keep the reference counter raised, excluding writers, but +permitting other readers, who can also obtain this lock under RCU. Importantly, maple tree operations performed in :c:func:`!lock_vma_under_rcu` are also RCU safe, so the whole read lock operation is guaranteed to function correctly. -On the write side, we acquire a write lock on the :c:member:`!vma->vm_lock` -read/write semaphore, before setting the VMA's sequence number under this lock, -also simultaneously holding the mmap write lock. +On the write side, we set a bit in :c:member:`!vma.vm_refcnt` which can't be +modified by readers and wait for all readers to drop their reference count. +Once there are no readers, VMA's sequence number is set to match that of the +mm. During this entire operation mmap write lock is held. 
This way, if any read locks are in effect, :c:func:`!vma_start_write` will sleep until these are finished and mutual exclusion is achieved. -After setting the VMA's sequence number, the lock is released, avoiding -complexity with a long-term held write lock. +After setting the VMA's sequence number, the bit in :c:member:`!vma.vm_refcnt` +indicating a writer is cleared. From this point on, VMA's sequence number will +indicate VMA's write-locked state until mmap write lock is dropped or downgraded. -This clever combination of a read/write semaphore and sequence count allows for +This clever combination of a reference counter and sequence count allows for fast RCU-based per-VMA lock acquisition (especially on page fault, though utilised elsewhere) with minimal complexity around lock ordering. From e2f060b344b16e229517bde304b5057a8bb2e44e Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 17 Dec 2024 00:04:32 +0900 Subject: [PATCH 206/504] mm/zsmalloc: add zpdesc memory descriptor for zswap.zpool Patch series "Add zpdesc memory descriptor for zswap.zpool", v9. This patch series introduces a new memory descriptor for zswap.zpool that currently overlaps with struct page for now. This is part of the effort to reduce the size of struct page and to enable dynamic allocation of memory descriptors [1]. This series does not bloat anything for zsmalloc and no functional change is intended (except for using zpdesc and folios). In the near future, the removal of page->index from struct page [2] will be addressed and the project also depends on this patch series. Thanks to everyone got involved in this series, especially, Alex who's been pushing it forward this year. [1] https://lore.kernel.org/linux-mm/ZvRKzKizOfEWBtJp@casper.infradead.org [2] https://lore.kernel.org/linux-mm/Z09hOy-UY9KC8WMb@casper.infradead.org This patch (of 18): The 1st patch introduces new memory descriptor zpdesc and renames zspage.first_page to zspage.first_zpdesc, with no functional change. We removed the comment about PG_owner_priv_1 since it is no longer used after commit a41ec880aa7b ("zsmalloc: move huge compressed obj from page to zspage"). [42.hyeyoo@gmail.com: rework comments a little bit] Link: https://lkml.kernel.org/r/20241216150450.1228021-1-42.hyeyoo@gmail.com Link: https://lkml.kernel.org/r/20241216150450.1228021-2-42.hyeyoo@gmail.com Originally-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Alex Shi Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zpdesc.h | 107 ++++++++++++++++++++++++++++++++++++++++++++++++++ mm/zsmalloc.c | 28 +++---------- 2 files changed, 112 insertions(+), 23 deletions(-) create mode 100644 mm/zpdesc.h diff --git a/mm/zpdesc.h b/mm/zpdesc.h new file mode 100644 index 000000000000..e0852498aecf --- /dev/null +++ b/mm/zpdesc.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* zpdesc.h: zswap.zpool memory descriptor + * + * Written by Alex Shi + * Hyeonggon Yoo <42.hyeyoo@gmail.com> + */ +#ifndef __MM_ZPDESC_H__ +#define __MM_ZPDESC_H__ + +/* + * struct zpdesc - Memory descriptor for zpool memory. + * @flags: Page flags, mostly unused by zsmalloc. + * @lru: Indirectly used by page migration. + * @movable_ops: Used by page migration. + * @next: Next zpdesc in a zspage in zsmalloc zpool. + * @handle: For huge zspage in zsmalloc zpool. + * @zspage: Points to the zspage this zpdesc is a part of. 
+ * @first_obj_offset: First object offset in zsmalloc zpool. + * @_refcount: The number of references to this zpdesc. + * + * This struct overlays struct page for now. Do not modify without a good + * understanding of the issues. In particular, do not expand into the overlap + * with memcg_data. + * + * Page flags used: + * * PG_private identifies the first component page. + * * PG_locked is used by page migration code. + */ +struct zpdesc { + unsigned long flags; + struct list_head lru; + unsigned long movable_ops; + union { + struct zpdesc *next; + unsigned long handle; + }; + struct zspage *zspage; + /* + * Only the lower 24 bits are available for offset, limiting a page + * to 16 MiB. The upper 8 bits are reserved for PGTY_zsmalloc. + * + * Do not access this field directly. + * Instead, use {get,set}_first_obj_offset() helpers. + */ + unsigned int first_obj_offset; + atomic_t _refcount; +}; +#define ZPDESC_MATCH(pg, zp) \ + static_assert(offsetof(struct page, pg) == offsetof(struct zpdesc, zp)) + +ZPDESC_MATCH(flags, flags); +ZPDESC_MATCH(lru, lru); +ZPDESC_MATCH(mapping, movable_ops); +ZPDESC_MATCH(index, next); +ZPDESC_MATCH(index, handle); +ZPDESC_MATCH(private, zspage); +ZPDESC_MATCH(page_type, first_obj_offset); +ZPDESC_MATCH(_refcount, _refcount); +#undef ZPDESC_MATCH +static_assert(sizeof(struct zpdesc) <= sizeof(struct page)); + +/* + * zpdesc_page - The first struct page allocated for a zpdesc + * @zp: The zpdesc. + * + * A convenience wrapper for converting zpdesc to the first struct page of the + * underlying folio, to communicate with code not yet converted to folio or + * struct zpdesc. + * + */ +#define zpdesc_page(zp) (_Generic((zp), \ + const struct zpdesc *: (const struct page *)(zp), \ + struct zpdesc *: (struct page *)(zp))) + +/** + * zpdesc_folio - The folio allocated for a zpdesc + * @zpdesc: The zpdesc. + * + * Zpdescs are descriptors for zpool memory. The zpool memory itself is + * allocated as folios that contain the zpool objects, and zpdesc uses specific + * fields in the first struct page of the folio - those fields are now accessed + * by struct zpdesc. + * + * It is occasionally necessary convert to back to a folio in order to + * communicate with the rest of the mm. Please use this helper function + * instead of casting yourself, as the implementation may change in the future. + */ +#define zpdesc_folio(zp) (_Generic((zp), \ + const struct zpdesc *: (const struct folio *)(zp), \ + struct zpdesc *: (struct folio *)(zp))) +/** + * page_zpdesc - Converts from first struct page to zpdesc. + * @p: The first (either head of compound or single) page of zpdesc. + * + * A temporary wrapper to convert struct page to struct zpdesc in situations + * where we know the page is the compound head, or single order-0 page. + * + * Long-term ideally everything would work with struct zpdesc directly or go + * through folio to struct zpdesc. + * + * Return: The zpdesc which contains this page + */ +#define page_zpdesc(p) (_Generic((p), \ + const struct page *: (const struct zpdesc *)(p), \ + struct page *: (struct zpdesc *)(p))) + +#endif diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 64b66a4d3e6e..00d111f011be 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -13,24 +13,6 @@ * Released under the terms of GNU General Public License Version 2.0 */ -/* - * Following is how we use various fields and flags of underlying - * struct page(s) to form a zspage. 
- * - * Usage of struct page fields: - * page->private: points to zspage - * page->index: links together all component pages of a zspage - * For the huge page, this is always 0, so we use this field - * to store handle. - * page->page_type: PGTY_zsmalloc, lower 24 bits locate the first object - * offset in a subpage of a zspage - * - * Usage of struct page flags: - * PG_private: identifies the first component page - * PG_owner_priv_1: identifies the huge component page - * - */ - #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* @@ -67,6 +49,7 @@ #include #include #include +#include "zpdesc.h" #define ZSPAGE_MAGIC 0x58 @@ -254,7 +237,7 @@ struct zspage { }; unsigned int inuse; unsigned int freeobj; - struct page *first_page; + struct zpdesc *first_zpdesc; struct list_head list; /* fullness list */ struct zs_pool *pool; rwlock_t lock; @@ -459,7 +442,7 @@ static inline void mod_zspage_inuse(struct zspage *zspage, int val) static inline struct page *get_first_page(struct zspage *zspage) { - struct page *first_page = zspage->first_page; + struct page *first_page = zpdesc_page(zspage->first_zpdesc); VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); return first_page; @@ -952,7 +935,7 @@ static void create_page_chain(struct size_class *class, struct zspage *zspage, set_page_private(page, (unsigned long)zspage); page->index = 0; if (i == 0) { - zspage->first_page = page; + zspage->first_zpdesc = page_zpdesc(page); SetPagePrivate(page); if (unlikely(class->objs_per_zspage == 1 && class->pages_per_zspage == 1)) @@ -1317,8 +1300,7 @@ static unsigned long obj_malloc(struct zs_pool *pool, /* record handle in the header of allocated chunk */ link->handle = handle | OBJ_ALLOCATED_TAG; else - /* record handle to page->index */ - zspage->first_page->index = handle | OBJ_ALLOCATED_TAG; + zspage->first_zpdesc->handle = handle | OBJ_ALLOCATED_TAG; kunmap_local(vaddr); mod_zspage_inuse(zspage, 1); From e6e20de0090cdf0123620279224f5136eb862b6a Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 17 Dec 2024 00:04:33 +0900 Subject: [PATCH 207/504] mm/zsmalloc: use zpdesc in trylock_zspage()/lock_zspage() Convert trylock_zspage() and lock_zspage() to use zpdesc. To achieve that, introduce a couple of helper functions: - zpdesc_lock() - zpdesc_unlock() - zpdesc_trylock() - zpdesc_wait_locked() - zpdesc_get() - zpdesc_put() Here we use the folio version of functions for 2 reasons. First, zswap.zpool currently only uses order-0 pages and using folio could save some compound_head checks. Second, folio_put could bypass devmap checking that we don't need. BTW, thanks Intel LKP found a build warning on the patch. 
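
[Editorial note] The helpers listed above are deliberately thin: call sites get to speak in terms of zpdesc while the real work is still done by the folio layer. Below is a standalone sketch of that wrapping pattern with made-up toy_desc/toy_folio names; in the sketch the descriptor simply contains the underlying object, whereas the real zpdesc overlays struct page, so treat it as an illustration of the calling convention only, not of the actual layout.

#include <stdbool.h>
#include <stdio.h>

/* Pretend "core" API, standing in for folio_trylock()/folio_put() and friends. */
struct toy_folio { int locked; int refs; };

static bool toy_folio_trylock(struct toy_folio *f)
{
	if (f->locked)
		return false;
	f->locked = 1;
	return true;
}

static void toy_folio_unlock(struct toy_folio *f) { f->locked = 0; }
static void toy_folio_get(struct toy_folio *f)    { f->refs++; }
static void toy_folio_put(struct toy_folio *f)    { f->refs--; }

/* New descriptor; in this toy it simply contains the old object. */
struct toy_desc { struct toy_folio folio; };
#define desc_folio(d) (&(d)->folio)

/* Descriptor-flavoured wrappers: call sites read naturally, zero overhead. */
static inline bool desc_trylock(struct toy_desc *d) { return toy_folio_trylock(desc_folio(d)); }
static inline void desc_unlock(struct toy_desc *d)  { toy_folio_unlock(desc_folio(d)); }
static inline void desc_get(struct toy_desc *d)     { toy_folio_get(desc_folio(d)); }
static inline void desc_put(struct toy_desc *d)     { toy_folio_put(desc_folio(d)); }

int main(void)
{
	struct toy_desc d = { { 0, 1 } };

	if (desc_trylock(&d)) {
		desc_get(&d);
		printf("locked=%d refs=%d\n", d.folio.locked, d.folio.refs);
		desc_put(&d);
		desc_unlock(&d);
	}
	return 0;
}

Because every wrapper is a static inline one-liner, a conversion like this changes how the code reads without changing what it compiles to.
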
Originally-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Link: https://lkml.kernel.org/r/20241216150450.1228021-3-42.hyeyoo@gmail.com Signed-off-by: Alex Shi Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zpdesc.h | 30 ++++++++++++++++++++++++ mm/zsmalloc.c | 64 ++++++++++++++++++++++++++++++++++----------------- 2 files changed, 73 insertions(+), 21 deletions(-) diff --git a/mm/zpdesc.h b/mm/zpdesc.h index e0852498aecf..c866758feec3 100644 --- a/mm/zpdesc.h +++ b/mm/zpdesc.h @@ -104,4 +104,34 @@ static_assert(sizeof(struct zpdesc) <= sizeof(struct page)); const struct page *: (const struct zpdesc *)(p), \ struct page *: (struct zpdesc *)(p))) +static inline void zpdesc_lock(struct zpdesc *zpdesc) +{ + folio_lock(zpdesc_folio(zpdesc)); +} + +static inline bool zpdesc_trylock(struct zpdesc *zpdesc) +{ + return folio_trylock(zpdesc_folio(zpdesc)); +} + +static inline void zpdesc_unlock(struct zpdesc *zpdesc) +{ + folio_unlock(zpdesc_folio(zpdesc)); +} + +static inline void zpdesc_wait_locked(struct zpdesc *zpdesc) +{ + folio_wait_locked(zpdesc_folio(zpdesc)); +} + +static inline void zpdesc_get(struct zpdesc *zpdesc) +{ + folio_get(zpdesc_folio(zpdesc)); +} + +static inline void zpdesc_put(struct zpdesc *zpdesc) +{ + folio_put(zpdesc_folio(zpdesc)); +} + #endif diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 00d111f011be..51f4a9b78023 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -428,13 +428,17 @@ static __maybe_unused int is_first_page(struct page *page) return PagePrivate(page); } +static inline bool is_first_zpdesc(struct zpdesc *zpdesc) +{ + return PagePrivate(zpdesc_page(zpdesc)); +} + /* Protected by class->lock */ static inline int get_zspage_inuse(struct zspage *zspage) { return zspage->inuse; } - static inline void mod_zspage_inuse(struct zspage *zspage, int val) { zspage->inuse += val; @@ -448,6 +452,14 @@ static inline struct page *get_first_page(struct zspage *zspage) return first_page; } +static struct zpdesc *get_first_zpdesc(struct zspage *zspage) +{ + struct zpdesc *first_zpdesc = zspage->first_zpdesc; + + VM_BUG_ON_PAGE(!is_first_zpdesc(first_zpdesc), zpdesc_page(first_zpdesc)); + return first_zpdesc; +} + #define FIRST_OBJ_PAGE_TYPE_MASK 0xffffff static inline unsigned int get_first_obj_offset(struct page *page) @@ -734,6 +746,16 @@ static struct page *get_next_page(struct page *page) return (struct page *)page->index; } +static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc) +{ + struct zspage *zspage = get_zspage(zpdesc_page(zpdesc)); + + if (unlikely(ZsHugePage(zspage))) + return NULL; + + return zpdesc->next; +} + /** * obj_to_location - get (, ) from encoded object value * @obj: the encoded object value @@ -803,11 +825,11 @@ static void reset_page(struct page *page) static int trylock_zspage(struct zspage *zspage) { - struct page *cursor, *fail; + struct zpdesc *cursor, *fail; - for (cursor = get_first_page(zspage); cursor != NULL; cursor = - get_next_page(cursor)) { - if (!trylock_page(cursor)) { + for (cursor = get_first_zpdesc(zspage); cursor != NULL; cursor = + get_next_zpdesc(cursor)) { + if (!zpdesc_trylock(cursor)) { fail = cursor; goto unlock; } @@ -815,9 +837,9 @@ static int trylock_zspage(struct zspage *zspage) return 1; unlock: - for (cursor = get_first_page(zspage); cursor != fail; cursor = - get_next_page(cursor)) - unlock_page(cursor); + for (cursor = get_first_zpdesc(zspage); cursor 
!= fail; cursor = + get_next_zpdesc(cursor)) + zpdesc_unlock(cursor); return 0; } @@ -1635,7 +1657,7 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage) */ static void lock_zspage(struct zspage *zspage) { - struct page *curr_page, *page; + struct zpdesc *curr_zpdesc, *zpdesc; /* * Pages we haven't locked yet can be migrated off the list while we're @@ -1647,24 +1669,24 @@ static void lock_zspage(struct zspage *zspage) */ while (1) { migrate_read_lock(zspage); - page = get_first_page(zspage); - if (trylock_page(page)) + zpdesc = get_first_zpdesc(zspage); + if (zpdesc_trylock(zpdesc)) break; - get_page(page); + zpdesc_get(zpdesc); migrate_read_unlock(zspage); - wait_on_page_locked(page); - put_page(page); + zpdesc_wait_locked(zpdesc); + zpdesc_put(zpdesc); } - curr_page = page; - while ((page = get_next_page(curr_page))) { - if (trylock_page(page)) { - curr_page = page; + curr_zpdesc = zpdesc; + while ((zpdesc = get_next_zpdesc(curr_zpdesc))) { + if (zpdesc_trylock(zpdesc)) { + curr_zpdesc = zpdesc; } else { - get_page(page); + zpdesc_get(zpdesc); migrate_read_unlock(zspage); - wait_on_page_locked(page); - put_page(page); + zpdesc_wait_locked(zpdesc); + zpdesc_put(zpdesc); migrate_read_lock(zspage); } } From 7bbafcdc7c49dc6a473a18cb6de2d8df0001f0ce Mon Sep 17 00:00:00 2001 From: Hyeonggon Yoo <42.hyeyoo@gmail.com> Date: Tue, 17 Dec 2024 00:04:34 +0900 Subject: [PATCH 208/504] mm/zsmalloc: convert __zs_map_object/__zs_unmap_object to use zpdesc These two functions take a pointer to an array of struct page. Make __zs_{map,unmap}_object() take pointer to an array of zpdesc instead of page. Add silly type casting when calling them. Casting will be removed later. Link: https://lkml.kernel.org/r/20241216150450.1228021-4-42.hyeyoo@gmail.com Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 51f4a9b78023..c038caaef3a8 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1049,7 +1049,7 @@ static inline void __zs_cpu_down(struct mapping_area *area) } static void *__zs_map_object(struct mapping_area *area, - struct page *pages[2], int off, int size) + struct zpdesc *zpdescs[2], int off, int size) { size_t sizes[2]; char *buf = area->vm_buf; @@ -1065,14 +1065,14 @@ static void *__zs_map_object(struct mapping_area *area, sizes[1] = size - sizes[0]; /* copy object to per-cpu buffer */ - memcpy_from_page(buf, pages[0], off, sizes[0]); - memcpy_from_page(buf + sizes[0], pages[1], 0, sizes[1]); + memcpy_from_page(buf, zpdesc_page(zpdescs[0]), off, sizes[0]); + memcpy_from_page(buf + sizes[0], zpdesc_page(zpdescs[1]), 0, sizes[1]); out: return area->vm_buf; } static void __zs_unmap_object(struct mapping_area *area, - struct page *pages[2], int off, int size) + struct zpdesc *zpdescs[2], int off, int size) { size_t sizes[2]; char *buf; @@ -1090,8 +1090,8 @@ static void __zs_unmap_object(struct mapping_area *area, sizes[1] = size - sizes[0]; /* copy per-cpu buffer to object */ - memcpy_to_page(pages[0], off, buf, sizes[0]); - memcpy_to_page(pages[1], 0, buf + sizes[0], sizes[1]); + memcpy_to_page(zpdesc_page(zpdescs[0]), off, buf, sizes[0]); + memcpy_to_page(zpdesc_page(zpdescs[1]), 0, buf + sizes[0], sizes[1]); out: /* enable page faults to match kunmap_local() return 
conditions */ @@ -1230,7 +1230,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, pages[1] = get_next_page(page); BUG_ON(!pages[1]); - ret = __zs_map_object(area, pages, off, class->size); + ret = __zs_map_object(area, (struct zpdesc **)pages, off, class->size); out: if (likely(!ZsHugePage(zspage))) ret += ZS_HANDLE_SIZE; @@ -1265,7 +1265,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) pages[1] = get_next_page(page); BUG_ON(!pages[1]); - __zs_unmap_object(area, pages, off, class->size); + __zs_unmap_object(area, (struct zpdesc **)pages, off, class->size); } local_unlock(&zs_map_area.lock); From fcdbe63c349812056c6fbf73f11582914d1f68ea Mon Sep 17 00:00:00 2001 From: Hyeonggon Yoo <42.hyeyoo@gmail.com> Date: Tue, 17 Dec 2024 00:04:35 +0900 Subject: [PATCH 209/504] mm/zsmalloc: add and use pfn/zpdesc seeking funcs Add pfn_zpdesc(), pfn_zpdesc() and kmap_local_zpdesc(). Convert obj_to_location() to take zpdesc and also convert its users to use zpdesc. Link: https://lkml.kernel.org/r/20241216150450.1228021-5-42.hyeyoo@gmail.com Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zpdesc.h | 14 ++++++++++ mm/zsmalloc.c | 77 ++++++++++++++++++++++++++------------------------- 2 files changed, 53 insertions(+), 38 deletions(-) diff --git a/mm/zpdesc.h b/mm/zpdesc.h index c866758feec3..223d0381a444 100644 --- a/mm/zpdesc.h +++ b/mm/zpdesc.h @@ -134,4 +134,18 @@ static inline void zpdesc_put(struct zpdesc *zpdesc) folio_put(zpdesc_folio(zpdesc)); } +static inline void *kmap_local_zpdesc(struct zpdesc *zpdesc) +{ + return kmap_local_page(zpdesc_page(zpdesc)); +} + +static inline unsigned long zpdesc_pfn(struct zpdesc *zpdesc) +{ + return page_to_pfn(zpdesc_page(zpdesc)); +} + +static inline struct zpdesc *pfn_zpdesc(unsigned long pfn) +{ + return page_zpdesc(pfn_to_page(pfn)); +} #endif diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index c038caaef3a8..e71da84ad73a 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -757,15 +757,15 @@ static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc) } /** - * obj_to_location - get (, ) from encoded object value + * obj_to_location - get (, ) from encoded object value * @obj: the encoded object value - * @page: page object resides in zspage + * @zpdesc: zpdesc object resides in zspage * @obj_idx: object index */ -static void obj_to_location(unsigned long obj, struct page **page, +static void obj_to_location(unsigned long obj, struct zpdesc **zpdesc, unsigned int *obj_idx) { - *page = pfn_to_page(obj >> OBJ_INDEX_BITS); + *zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS); *obj_idx = (obj & OBJ_INDEX_MASK); } @@ -1181,13 +1181,13 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, enum zs_mapmode mm) { struct zspage *zspage; - struct page *page; + struct zpdesc *zpdesc; unsigned long obj, off; unsigned int obj_idx; struct size_class *class; struct mapping_area *area; - struct page *pages[2]; + struct zpdesc *zpdescs[2]; void *ret; /* @@ -1200,8 +1200,8 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, /* It guarantees it can get zspage from handle safely */ read_lock(&pool->migrate_lock); obj = handle_to_obj(handle); - obj_to_location(obj, &page, &obj_idx); - zspage = get_zspage(page); + obj_to_location(obj, &zpdesc, &obj_idx); + zspage = get_zspage(zpdesc_page(zpdesc)); /* * migration cannot move any zpages in this 
zspage. Here, class->lock @@ -1220,17 +1220,17 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, area->vm_mm = mm; if (off + class->size <= PAGE_SIZE) { /* this object is contained entirely within a page */ - area->vm_addr = kmap_local_page(page); + area->vm_addr = kmap_local_zpdesc(zpdesc); ret = area->vm_addr + off; goto out; } /* this object spans two pages */ - pages[0] = page; - pages[1] = get_next_page(page); - BUG_ON(!pages[1]); + zpdescs[0] = zpdesc; + zpdescs[1] = get_next_zpdesc(zpdesc); + BUG_ON(!zpdescs[1]); - ret = __zs_map_object(area, (struct zpdesc **)pages, off, class->size); + ret = __zs_map_object(area, zpdescs, off, class->size); out: if (likely(!ZsHugePage(zspage))) ret += ZS_HANDLE_SIZE; @@ -1242,7 +1242,7 @@ EXPORT_SYMBOL_GPL(zs_map_object); void zs_unmap_object(struct zs_pool *pool, unsigned long handle) { struct zspage *zspage; - struct page *page; + struct zpdesc *zpdesc; unsigned long obj, off; unsigned int obj_idx; @@ -1250,8 +1250,8 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) struct mapping_area *area; obj = handle_to_obj(handle); - obj_to_location(obj, &page, &obj_idx); - zspage = get_zspage(page); + obj_to_location(obj, &zpdesc, &obj_idx); + zspage = get_zspage(zpdesc_page(zpdesc)); class = zspage_class(pool, zspage); off = offset_in_page(class->size * obj_idx); @@ -1259,13 +1259,13 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) if (off + class->size <= PAGE_SIZE) kunmap_local(area->vm_addr); else { - struct page *pages[2]; + struct zpdesc *zpdescs[2]; - pages[0] = page; - pages[1] = get_next_page(page); - BUG_ON(!pages[1]); + zpdescs[0] = zpdesc; + zpdescs[1] = get_next_zpdesc(zpdesc); + BUG_ON(!zpdescs[1]); - __zs_unmap_object(area, (struct zpdesc **)pages, off, class->size); + __zs_unmap_object(area, zpdescs, off, class->size); } local_unlock(&zs_map_area.lock); @@ -1406,23 +1406,24 @@ static void obj_free(int class_size, unsigned long obj) { struct link_free *link; struct zspage *zspage; - struct page *f_page; + struct zpdesc *f_zpdesc; unsigned long f_offset; unsigned int f_objidx; void *vaddr; - obj_to_location(obj, &f_page, &f_objidx); - f_offset = offset_in_page(class_size * f_objidx); - zspage = get_zspage(f_page); - vaddr = kmap_local_page(f_page); + obj_to_location(obj, &f_zpdesc, &f_objidx); + f_offset = offset_in_page(class_size * f_objidx); + zspage = get_zspage(zpdesc_page(f_zpdesc)); + + vaddr = kmap_local_zpdesc(f_zpdesc); link = (struct link_free *)(vaddr + f_offset); /* Insert this object in containing zspage's freelist */ if (likely(!ZsHugePage(zspage))) link->next = get_freeobj(zspage) << OBJ_TAG_BITS; else - f_page->index = 0; + f_zpdesc->handle = 0; set_freeobj(zspage, f_objidx); kunmap_local(vaddr); @@ -1467,7 +1468,7 @@ EXPORT_SYMBOL_GPL(zs_free); static void zs_object_copy(struct size_class *class, unsigned long dst, unsigned long src) { - struct page *s_page, *d_page; + struct zpdesc *s_zpdesc, *d_zpdesc; unsigned int s_objidx, d_objidx; unsigned long s_off, d_off; void *s_addr, *d_addr; @@ -1476,8 +1477,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst, s_size = d_size = class->size; - obj_to_location(src, &s_page, &s_objidx); - obj_to_location(dst, &d_page, &d_objidx); + obj_to_location(src, &s_zpdesc, &s_objidx); + obj_to_location(dst, &d_zpdesc, &d_objidx); s_off = offset_in_page(class->size * s_objidx); d_off = offset_in_page(class->size * d_objidx); @@ -1488,8 +1489,8 @@ static void zs_object_copy(struct size_class *class, unsigned long 
dst, if (d_off + class->size > PAGE_SIZE) d_size = PAGE_SIZE - d_off; - s_addr = kmap_local_page(s_page); - d_addr = kmap_local_page(d_page); + s_addr = kmap_local_zpdesc(s_zpdesc); + d_addr = kmap_local_zpdesc(d_zpdesc); while (1) { size = min(s_size, d_size); @@ -1514,17 +1515,17 @@ static void zs_object_copy(struct size_class *class, unsigned long dst, if (s_off >= PAGE_SIZE) { kunmap_local(d_addr); kunmap_local(s_addr); - s_page = get_next_page(s_page); - s_addr = kmap_local_page(s_page); - d_addr = kmap_local_page(d_page); + s_zpdesc = get_next_zpdesc(s_zpdesc); + s_addr = kmap_local_zpdesc(s_zpdesc); + d_addr = kmap_local_zpdesc(d_zpdesc); s_size = class->size - written; s_off = 0; } if (d_off >= PAGE_SIZE) { kunmap_local(d_addr); - d_page = get_next_page(d_page); - d_addr = kmap_local_page(d_page); + d_zpdesc = get_next_zpdesc(d_zpdesc); + d_addr = kmap_local_zpdesc(d_zpdesc); d_size = class->size - written; d_off = 0; } @@ -1763,7 +1764,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page, struct zs_pool *pool; struct size_class *class; struct zspage *zspage; - struct page *dummy; + struct zpdesc *dummy; void *s_addr, *d_addr, *addr; unsigned int offset; unsigned long handle; From 0219ee62b471e3ec6c371eabd40c345a2ec9fe2c Mon Sep 17 00:00:00 2001 From: Hyeonggon Yoo <42.hyeyoo@gmail.com> Date: Tue, 17 Dec 2024 00:04:36 +0900 Subject: [PATCH 210/504] mm/zsmalloc: convert obj_malloc() to use zpdesc Use get_first_zpdesc/get_next_zpdesc to replace get_first_page/get_next_page. no functional change. Link: https://lkml.kernel.org/r/20241216150450.1228021-6-42.hyeyoo@gmail.com Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index e71da84ad73a..b7fab2e28d87 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1295,12 +1295,12 @@ EXPORT_SYMBOL_GPL(zs_huge_class_size); static unsigned long obj_malloc(struct zs_pool *pool, struct zspage *zspage, unsigned long handle) { - int i, nr_page, offset; + int i, nr_zpdesc, offset; unsigned long obj; struct link_free *link; struct size_class *class; - struct page *m_page; + struct zpdesc *m_zpdesc; unsigned long m_offset; void *vaddr; @@ -1308,14 +1308,14 @@ static unsigned long obj_malloc(struct zs_pool *pool, obj = get_freeobj(zspage); offset = obj * class->size; - nr_page = offset >> PAGE_SHIFT; + nr_zpdesc = offset >> PAGE_SHIFT; m_offset = offset_in_page(offset); - m_page = get_first_page(zspage); + m_zpdesc = get_first_zpdesc(zspage); - for (i = 0; i < nr_page; i++) - m_page = get_next_page(m_page); + for (i = 0; i < nr_zpdesc; i++) + m_zpdesc = get_next_zpdesc(m_zpdesc); - vaddr = kmap_local_page(m_page); + vaddr = kmap_local_zpdesc(m_zpdesc); link = (struct link_free *)vaddr + m_offset / sizeof(*link); set_freeobj(zspage, link->next >> OBJ_TAG_BITS); if (likely(!ZsHugePage(zspage))) @@ -1327,7 +1327,7 @@ static unsigned long obj_malloc(struct zs_pool *pool, kunmap_local(vaddr); mod_zspage_inuse(zspage, 1); - obj = location_to_obj(m_page, obj); + obj = location_to_obj(zpdesc_page(m_zpdesc), obj); record_obj(handle, obj); return obj; From 7382c2664de6c1839f909146ace81e2465ba0187 Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 17 Dec 2024 00:04:37 +0900 Subject: [PATCH 211/504] mm/zsmalloc: convert create_page_chain() 
and its users to use zpdesc Introduce a few helper functions for conversion to convert create_page_chain() to use zpdesc, then use zpdesc in replace_sub_page(). Link: https://lkml.kernel.org/r/20241216150450.1228021-7-42.hyeyoo@gmail.com Originally-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zpdesc.h | 6 +++ mm/zsmalloc.c | 109 ++++++++++++++++++++++++++++++++------------------ 2 files changed, 76 insertions(+), 39 deletions(-) diff --git a/mm/zpdesc.h b/mm/zpdesc.h index 223d0381a444..9aca8d307796 100644 --- a/mm/zpdesc.h +++ b/mm/zpdesc.h @@ -148,4 +148,10 @@ static inline struct zpdesc *pfn_zpdesc(unsigned long pfn) { return page_zpdesc(pfn_to_page(pfn)); } + +static inline void __zpdesc_set_movable(struct zpdesc *zpdesc, + const struct movable_operations *mops) +{ + __SetPageMovable(zpdesc_page(zpdesc), mops); +} #endif diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index b7fab2e28d87..59a30c61160f 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -228,6 +228,35 @@ struct zs_pool { atomic_t compaction_in_progress; }; +static inline void zpdesc_set_first(struct zpdesc *zpdesc) +{ + SetPagePrivate(zpdesc_page(zpdesc)); +} + +static inline void zpdesc_inc_zone_page_state(struct zpdesc *zpdesc) +{ + inc_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES); +} + +static inline void zpdesc_dec_zone_page_state(struct zpdesc *zpdesc) +{ + dec_zone_page_state(zpdesc_page(zpdesc), NR_ZSPAGES); +} + +static inline struct zpdesc *alloc_zpdesc(gfp_t gfp) +{ + struct page *page = alloc_page(gfp); + + return page_zpdesc(page); +} + +static inline void free_zpdesc(struct zpdesc *zpdesc) +{ + struct page *page = zpdesc_page(zpdesc); + + __free_page(page); +} + struct zspage { struct { unsigned int huge:HUGE_BITS; @@ -937,35 +966,35 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) } static void create_page_chain(struct size_class *class, struct zspage *zspage, - struct page *pages[]) + struct zpdesc *zpdescs[]) { int i; - struct page *page; - struct page *prev_page = NULL; - int nr_pages = class->pages_per_zspage; + struct zpdesc *zpdesc; + struct zpdesc *prev_zpdesc = NULL; + int nr_zpdescs = class->pages_per_zspage; /* * Allocate individual pages and link them together as: - * 1. all pages are linked together using page->index - * 2. each sub-page point to zspage using page->private + * 1. all pages are linked together using zpdesc->next + * 2. each sub-page point to zspage using zpdesc->zspage * - * we set PG_private to identify the first page (i.e. no other sub-page + * we set PG_private to identify the first zpdesc (i.e. no other zpdesc * has this flag set). 
*/ - for (i = 0; i < nr_pages; i++) { - page = pages[i]; - set_page_private(page, (unsigned long)zspage); - page->index = 0; + for (i = 0; i < nr_zpdescs; i++) { + zpdesc = zpdescs[i]; + zpdesc->zspage = zspage; + zpdesc->next = NULL; if (i == 0) { - zspage->first_zpdesc = page_zpdesc(page); - SetPagePrivate(page); + zspage->first_zpdesc = zpdesc; + zpdesc_set_first(zpdesc); if (unlikely(class->objs_per_zspage == 1 && class->pages_per_zspage == 1)) SetZsHugePage(zspage); } else { - prev_page->index = (unsigned long)page; + prev_zpdesc->next = zpdesc; } - prev_page = page; + prev_zpdesc = zpdesc; } } @@ -977,7 +1006,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool, gfp_t gfp) { int i; - struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE]; + struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE]; struct zspage *zspage = cache_alloc_zspage(pool, gfp); if (!zspage) @@ -987,25 +1016,25 @@ static struct zspage *alloc_zspage(struct zs_pool *pool, migrate_lock_init(zspage); for (i = 0; i < class->pages_per_zspage; i++) { - struct page *page; + struct zpdesc *zpdesc; - page = alloc_page(gfp); - if (!page) { + zpdesc = alloc_zpdesc(gfp); + if (!zpdesc) { while (--i >= 0) { - dec_zone_page_state(pages[i], NR_ZSPAGES); - __ClearPageZsmalloc(pages[i]); - __free_page(pages[i]); + zpdesc_dec_zone_page_state(zpdescs[i]); + __ClearPageZsmalloc(zpdesc_page(zpdescs[i])); + free_zpdesc(zpdescs[i]); } cache_free_zspage(pool, zspage); return NULL; } - __SetPageZsmalloc(page); + __SetPageZsmalloc(zpdesc_page(zpdesc)); - inc_zone_page_state(page, NR_ZSPAGES); - pages[i] = page; + zpdesc_inc_zone_page_state(zpdesc); + zpdescs[i] = zpdesc; } - create_page_chain(class, zspage, pages); + create_page_chain(class, zspage, zpdescs); init_zspage(class, zspage); zspage->pool = pool; zspage->class = class->index; @@ -1725,26 +1754,28 @@ static void migrate_write_unlock(struct zspage *zspage) static const struct movable_operations zsmalloc_mops; static void replace_sub_page(struct size_class *class, struct zspage *zspage, - struct page *newpage, struct page *oldpage) + struct zpdesc *newzpdesc, struct zpdesc *oldzpdesc) { - struct page *page; - struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, }; + struct zpdesc *zpdesc; + struct zpdesc *zpdescs[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, }; + unsigned int first_obj_offset; int idx = 0; - page = get_first_page(zspage); + zpdesc = get_first_zpdesc(zspage); do { - if (page == oldpage) - pages[idx] = newpage; + if (zpdesc == oldzpdesc) + zpdescs[idx] = newzpdesc; else - pages[idx] = page; + zpdescs[idx] = zpdesc; idx++; - } while ((page = get_next_page(page)) != NULL); + } while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL); - create_page_chain(class, zspage, pages); - set_first_obj_offset(newpage, get_first_obj_offset(oldpage)); + create_page_chain(class, zspage, zpdescs); + first_obj_offset = get_first_obj_offset(zpdesc_page(oldzpdesc)); + set_first_obj_offset(zpdesc_page(newzpdesc), first_obj_offset); if (unlikely(ZsHugePage(zspage))) - newpage->index = oldpage->index; - __SetPageMovable(newpage, &zsmalloc_mops); + newzpdesc->handle = oldzpdesc->handle; + __zpdesc_set_movable(newzpdesc, &zsmalloc_mops); } static bool zs_page_isolate(struct page *page, isolate_mode_t mode) @@ -1817,7 +1848,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page, } kunmap_local(s_addr); - replace_sub_page(class, zspage, newpage, page); + replace_sub_page(class, zspage, page_zpdesc(newpage), page_zpdesc(page)); /* * Since we complete the data copy and set up new zspage structure, * it's 
okay to release migration_lock. From 12e27e4af99bc7abb37b6223c2cc1e980dd7bf99 Mon Sep 17 00:00:00 2001 From: Hyeonggon Yoo <42.hyeyoo@gmail.com> Date: Tue, 17 Dec 2024 00:04:38 +0900 Subject: [PATCH 212/504] mm/zsmalloc: convert obj_allocated() and related helpers to use zpdesc Convert obj_allocated(), and related helpers to take zpdesc. Also make its callers to cast (struct page *) to (struct zpdesc *) when calling them. The users will be converted gradually as there are many. Link: https://lkml.kernel.org/r/20241216150450.1228021-8-42.hyeyoo@gmail.com Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 59a30c61160f..a3e2e596b4f3 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -823,15 +823,15 @@ static unsigned long handle_to_obj(unsigned long handle) return *(unsigned long *)handle; } -static inline bool obj_allocated(struct page *page, void *obj, +static inline bool obj_allocated(struct zpdesc *zpdesc, void *obj, unsigned long *phandle) { unsigned long handle; - struct zspage *zspage = get_zspage(page); + struct zspage *zspage = get_zspage(zpdesc_page(zpdesc)); if (unlikely(ZsHugePage(zspage))) { - VM_BUG_ON_PAGE(!is_first_page(page), page); - handle = page->index; + VM_BUG_ON_PAGE(!is_first_zpdesc(zpdesc), zpdesc_page(zpdesc)); + handle = zpdesc->handle; } else handle = *(unsigned long *)obj; @@ -1569,18 +1569,18 @@ static void zs_object_copy(struct size_class *class, unsigned long dst, * return handle. */ static unsigned long find_alloced_obj(struct size_class *class, - struct page *page, int *obj_idx) + struct zpdesc *zpdesc, int *obj_idx) { unsigned int offset; int index = *obj_idx; unsigned long handle = 0; - void *addr = kmap_local_page(page); + void *addr = kmap_local_zpdesc(zpdesc); - offset = get_first_obj_offset(page); + offset = get_first_obj_offset(zpdesc_page(zpdesc)); offset += class->size * index; while (offset < PAGE_SIZE) { - if (obj_allocated(page, addr + offset, &handle)) + if (obj_allocated(zpdesc, addr + offset, &handle)) break; offset += class->size; @@ -1604,7 +1604,7 @@ static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage, struct size_class *class = pool->size_class[src_zspage->class]; while (1) { - handle = find_alloced_obj(class, s_page, &obj_idx); + handle = find_alloced_obj(class, page_zpdesc(s_page), &obj_idx); if (!handle) { s_page = get_next_page(s_page); if (!s_page) @@ -1837,7 +1837,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page, for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE; addr += class->size) { - if (obj_allocated(page, addr, &handle)) { + if (obj_allocated(page_zpdesc(page), addr, &handle)) { old_obj = handle_to_obj(handle); obj_to_location(old_obj, &dummy, &obj_idx); From b7b7d321bba520e96d2a6f143a0d34e4b64c0b91 Mon Sep 17 00:00:00 2001 From: Hyeonggon Yoo <42.hyeyoo@gmail.com> Date: Tue, 17 Dec 2024 00:04:39 +0900 Subject: [PATCH 213/504] mm/zsmalloc: convert init_zspage() to use zpdesc Replace get_first/next_page func series and kmap_atomic to new helper, no functional change. 
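For readers new to the layout init_zspage() is seeding, the following is a standalone model of a zsmalloc-style intrusive free list: every free slot stores the index of the next free slot, so no side metadata is needed. The single flat buffer and the sizes are invented for the example; the real code spreads the list across the zspage's chain of sub-pages via link_free and OBJ_TAG_BITS.

/*
 * Simplified model of the intrusive free list (illustrative only).
 */
#include <stdalign.h>
#include <stdio.h>
#include <string.h>

#define SLOT_SIZE 64
#define NR_SLOTS  16

static alignas(long) unsigned char pool[SLOT_SIZE * NR_SLOTS];
static long free_head;              /* index of first free slot, -1 if none */

static long *slot(long idx)
{
    return (long *)(pool + idx * SLOT_SIZE);
}

static void pool_init(void)
{
    for (long i = 0; i < NR_SLOTS - 1; i++)
        *slot(i) = i + 1;           /* each free slot points at the next one */
    *slot(NR_SLOTS - 1) = -1;       /* end of the free list */
    free_head = 0;
}

static void *pool_alloc(void)
{
    long idx = free_head;

    if (idx < 0)
        return NULL;
    free_head = *slot(idx);         /* pop: the next index lives in the slot itself */
    memset(slot(idx), 0, SLOT_SIZE);
    return slot(idx);
}

static void pool_free(void *p)
{
    long idx = ((unsigned char *)p - pool) / SLOT_SIZE;

    *slot(idx) = free_head;         /* push back onto the free list */
    free_head = idx;
}

int main(void)
{
    pool_init();
    void *a = pool_alloc(), *b = pool_alloc();

    pool_free(a);
    printf("reused first slot: %d\n", pool_alloc() == a);  /* LIFO reuse */
    (void)b;
    return 0;
}

The point of the layout is that a completely free slot costs nothing extra to track: the "next" link is written into the space the object will later occupy, which is exactly what init_zspage() does with link_free across each sub-page.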
Link: https://lkml.kernel.org/r/20241216150450.1228021-9-42.hyeyoo@gmail.com Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index a3e2e596b4f3..83d48cffe96f 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -925,16 +925,16 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) { unsigned int freeobj = 1; unsigned long off = 0; - struct page *page = get_first_page(zspage); + struct zpdesc *zpdesc = get_first_zpdesc(zspage); - while (page) { - struct page *next_page; + while (zpdesc) { + struct zpdesc *next_zpdesc; struct link_free *link; void *vaddr; - set_first_obj_offset(page, off); + set_first_obj_offset(zpdesc_page(zpdesc), off); - vaddr = kmap_local_page(page); + vaddr = kmap_local_zpdesc(zpdesc); link = (struct link_free *)vaddr + off / sizeof(*link); while ((off += class->size) < PAGE_SIZE) { @@ -947,8 +947,8 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) * page, which must point to the first object on the next * page (if present) */ - next_page = get_next_page(page); - if (next_page) { + next_zpdesc = get_next_zpdesc(zpdesc); + if (next_zpdesc) { link->next = freeobj++ << OBJ_TAG_BITS; } else { /* @@ -958,7 +958,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) link->next = -1UL << OBJ_TAG_BITS; } kunmap_local(vaddr); - page = next_page; + zpdesc = next_zpdesc; off %= PAGE_SIZE; } From 5ee1ea301cd542e892a063c33321ea79b468f420 Mon Sep 17 00:00:00 2001 From: Hyeonggon Yoo <42.hyeyoo@gmail.com> Date: Tue, 17 Dec 2024 00:04:40 +0900 Subject: [PATCH 214/504] mm/zsmalloc: convert obj_to_page() and zs_free() to use zpdesc Rename obj_to_page() to obj_to_zpdesc() and also convert it and its user zs_free() to use zpdesc. 
Link: https://lkml.kernel.org/r/20241216150450.1228021-10-42.hyeyoo@gmail.com Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 83d48cffe96f..112603f9449f 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -798,9 +798,9 @@ static void obj_to_location(unsigned long obj, struct zpdesc **zpdesc, *obj_idx = (obj & OBJ_INDEX_MASK); } -static void obj_to_page(unsigned long obj, struct page **page) +static void obj_to_zpdesc(unsigned long obj, struct zpdesc **zpdesc) { - *page = pfn_to_page(obj >> OBJ_INDEX_BITS); + *zpdesc = pfn_zpdesc(obj >> OBJ_INDEX_BITS); } /** @@ -1462,7 +1462,7 @@ static void obj_free(int class_size, unsigned long obj) void zs_free(struct zs_pool *pool, unsigned long handle) { struct zspage *zspage; - struct page *f_page; + struct zpdesc *f_zpdesc; unsigned long obj; struct size_class *class; int fullness; @@ -1476,8 +1476,8 @@ void zs_free(struct zs_pool *pool, unsigned long handle) */ read_lock(&pool->migrate_lock); obj = handle_to_obj(handle); - obj_to_page(obj, &f_page); - zspage = get_zspage(f_page); + obj_to_zpdesc(obj, &f_zpdesc); + zspage = get_zspage(zpdesc_page(f_zpdesc)); class = zspage_class(pool, zspage); spin_lock(&class->lock); read_unlock(&pool->migrate_lock); From f8f35af800eee33d2ce9036460cdcdf36e8bd987 Mon Sep 17 00:00:00 2001 From: Hyeonggon Yoo <42.hyeyoo@gmail.com> Date: Tue, 17 Dec 2024 00:04:41 +0900 Subject: [PATCH 215/504] mm/zsmalloc: add two helpers for zs_page_migrate() and make it use zpdesc To convert page to zpdesc in zs_page_migrate(), we added zpdesc_is_isolated()/zpdesc_zone() helpers. No functional change. Link: https://lkml.kernel.org/r/20241216150450.1228021-11-42.hyeyoo@gmail.com Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zpdesc.h | 11 +++++++++++ mm/zsmalloc.c | 30 ++++++++++++++++-------------- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/mm/zpdesc.h b/mm/zpdesc.h index 9aca8d307796..c7c52e05e737 100644 --- a/mm/zpdesc.h +++ b/mm/zpdesc.h @@ -154,4 +154,15 @@ static inline void __zpdesc_set_movable(struct zpdesc *zpdesc, { __SetPageMovable(zpdesc_page(zpdesc), mops); } + +static inline bool zpdesc_is_isolated(struct zpdesc *zpdesc) +{ + return PageIsolated(zpdesc_page(zpdesc)); +} + +static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc) +{ + return page_zone(zpdesc_page(zpdesc)); +} + #endif diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 112603f9449f..432e78e61d2e 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1796,19 +1796,21 @@ static int zs_page_migrate(struct page *newpage, struct page *page, struct size_class *class; struct zspage *zspage; struct zpdesc *dummy; + struct zpdesc *newzpdesc = page_zpdesc(newpage); + struct zpdesc *zpdesc = page_zpdesc(page); void *s_addr, *d_addr, *addr; unsigned int offset; unsigned long handle; unsigned long old_obj, new_obj; unsigned int obj_idx; - VM_BUG_ON_PAGE(!PageIsolated(page), page); + VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc)); /* We're committed, tell the world that this is a Zsmalloc page. 
*/ - __SetPageZsmalloc(newpage); + __SetPageZsmalloc(zpdesc_page(newzpdesc)); /* The page is locked, so this pointer must remain valid */ - zspage = get_zspage(page); + zspage = get_zspage(zpdesc_page(zpdesc)); pool = zspage->pool; /* @@ -1825,30 +1827,30 @@ static int zs_page_migrate(struct page *newpage, struct page *page, /* the migrate_write_lock protects zpage access via zs_map_object */ migrate_write_lock(zspage); - offset = get_first_obj_offset(page); - s_addr = kmap_local_page(page); + offset = get_first_obj_offset(zpdesc_page(zpdesc)); + s_addr = kmap_local_zpdesc(zpdesc); /* * Here, any user cannot access all objects in the zspage so let's move. */ - d_addr = kmap_local_page(newpage); + d_addr = kmap_local_zpdesc(newzpdesc); copy_page(d_addr, s_addr); kunmap_local(d_addr); for (addr = s_addr + offset; addr < s_addr + PAGE_SIZE; addr += class->size) { - if (obj_allocated(page_zpdesc(page), addr, &handle)) { + if (obj_allocated(zpdesc, addr, &handle)) { old_obj = handle_to_obj(handle); obj_to_location(old_obj, &dummy, &obj_idx); - new_obj = (unsigned long)location_to_obj(newpage, + new_obj = (unsigned long)location_to_obj(zpdesc_page(newzpdesc), obj_idx); record_obj(handle, new_obj); } } kunmap_local(s_addr); - replace_sub_page(class, zspage, page_zpdesc(newpage), page_zpdesc(page)); + replace_sub_page(class, zspage, newzpdesc, zpdesc); /* * Since we complete the data copy and set up new zspage structure, * it's okay to release migration_lock. @@ -1857,14 +1859,14 @@ static int zs_page_migrate(struct page *newpage, struct page *page, spin_unlock(&class->lock); migrate_write_unlock(zspage); - get_page(newpage); - if (page_zone(newpage) != page_zone(page)) { - dec_zone_page_state(page, NR_ZSPAGES); - inc_zone_page_state(newpage, NR_ZSPAGES); + zpdesc_get(newzpdesc); + if (zpdesc_zone(newzpdesc) != zpdesc_zone(zpdesc)) { + zpdesc_dec_zone_page_state(zpdesc); + zpdesc_inc_zone_page_state(newzpdesc); } reset_page(page); - put_page(page); + zpdesc_put(zpdesc); return MIGRATEPAGE_SUCCESS; } From ef47ed47e0c31a8d9f0194198896f84016e8193e Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 17 Dec 2024 00:04:42 +0900 Subject: [PATCH 216/504] mm/zsmalloc: convert reset_page to reset_zpdesc zpdesc.zspage matches with page.private, zpdesc.next matches with page.index. They will be reset in reset_page() which is called prior to free base pages of a zspage. Since the fields that need to be initialized are independent of the order in struct zpdesc, Keep it to use struct page to ensure robustness against potential rearrangements of struct zpdesc fields in the future. 
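To make the aliasing argument above concrete, here is a standalone model of a descriptor overlay. The struct layouts and field names are invented for illustration and are not the kernel's struct page / struct zpdesc; the sketch only shows the idea that the reset helper writes through the underlying page view, with compile-time asserts documenting the fields that must keep lining up.

/*
 * Standalone overlay model (illustrative only).
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct fake_page {                  /* stand-in for struct page */
    unsigned long flags;
    unsigned long private;          /* zsmalloc: pointer to the zspage */
    unsigned long index;            /* zsmalloc: link to the next sub-page */
};

struct fake_zpdesc {                /* stand-in for the zpdesc overlay */
    unsigned long flags;
    void *zspage;                   /* must alias fake_page.private */
    void *next;                     /* must alias fake_page.index */
};

/* The overlay is only sound if the aliased fields line up exactly. */
static_assert(sizeof(struct fake_zpdesc) == sizeof(struct fake_page),
              "size mismatch");
static_assert(offsetof(struct fake_zpdesc, zspage) ==
              offsetof(struct fake_page, private), "zspage/private mismatch");
static_assert(offsetof(struct fake_zpdesc, next) ==
              offsetof(struct fake_page, index), "next/index mismatch");

/*
 * Reset through the underlying "page" view: the helper keeps working even if
 * fields inside the overlay are reordered later, as long as the asserts above
 * are kept in sync.
 */
static void reset_fake_zpdesc(struct fake_zpdesc *zpdesc)
{
    struct fake_page *page = (struct fake_page *)zpdesc;

    page->flags = 0;
    page->private = 0;
    page->index = 0;
}

int main(void)
{
    struct fake_page p = { .flags = 1, .private = 0xdead, .index = 42 };

    reset_fake_zpdesc((struct fake_zpdesc *)&p);
    printf("flags=%lu private=%lu index=%lu\n", p.flags, p.private, p.index);
    return 0;
}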
[42.hyeyoo@gmail.com: keep reset_zpdesc() to use struct page fields] Link: https://lkml.kernel.org/r/20241216150450.1228021-12-42.hyeyoo@gmail.com Signed-off-by: Alex Shi Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 432e78e61d2e..dded6d1f3b7a 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -843,8 +843,10 @@ static inline bool obj_allocated(struct zpdesc *zpdesc, void *obj, return true; } -static void reset_page(struct page *page) +static void reset_zpdesc(struct zpdesc *zpdesc) { + struct page *page = zpdesc_page(zpdesc); + __ClearPageMovable(page); ClearPagePrivate(page); set_page_private(page, 0); @@ -887,7 +889,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class, do { VM_BUG_ON_PAGE(!PageLocked(page), page); next = get_next_page(page); - reset_page(page); + reset_zpdesc(page_zpdesc(page)); unlock_page(page); dec_zone_page_state(page, NR_ZSPAGES); put_page(page); @@ -1865,7 +1867,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page, zpdesc_inc_zone_page_state(newzpdesc); } - reset_page(page); + reset_zpdesc(zpdesc); zpdesc_put(zpdesc); return MIGRATEPAGE_SUCCESS; From 916004d8f49d2ab504b407d311ae160d07aca34a Mon Sep 17 00:00:00 2001 From: Hyeonggon Yoo <42.hyeyoo@gmail.com> Date: Tue, 17 Dec 2024 00:04:43 +0900 Subject: [PATCH 217/504] mm/zsmalloc: convert __free_zspage() to use zpdesc Introduce zpdesc_is_locked() and convert __free_zspage() to use zpdesc. Link: https://lkml.kernel.org/r/20241216150450.1228021-13-42.hyeyoo@gmail.com Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zpdesc.h | 4 ++++ mm/zsmalloc.c | 20 ++++++++++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/mm/zpdesc.h b/mm/zpdesc.h index c7c52e05e737..fa80c50993c6 100644 --- a/mm/zpdesc.h +++ b/mm/zpdesc.h @@ -165,4 +165,8 @@ static inline struct zone *zpdesc_zone(struct zpdesc *zpdesc) return page_zone(zpdesc_page(zpdesc)); } +static inline bool zpdesc_is_locked(struct zpdesc *zpdesc) +{ + return folio_test_locked(zpdesc_folio(zpdesc)); +} #endif diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index dded6d1f3b7a..e1f501d51226 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -878,23 +878,23 @@ unlock: static void __free_zspage(struct zs_pool *pool, struct size_class *class, struct zspage *zspage) { - struct page *page, *next; + struct zpdesc *zpdesc, *next; assert_spin_locked(&class->lock); VM_BUG_ON(get_zspage_inuse(zspage)); VM_BUG_ON(zspage->fullness != ZS_INUSE_RATIO_0); - next = page = get_first_page(zspage); + next = zpdesc = get_first_zpdesc(zspage); do { - VM_BUG_ON_PAGE(!PageLocked(page), page); - next = get_next_page(page); - reset_zpdesc(page_zpdesc(page)); - unlock_page(page); - dec_zone_page_state(page, NR_ZSPAGES); - put_page(page); - page = next; - } while (page != NULL); + VM_BUG_ON_PAGE(!zpdesc_is_locked(zpdesc), zpdesc_page(zpdesc)); + next = get_next_zpdesc(zpdesc); + reset_zpdesc(zpdesc); + zpdesc_unlock(zpdesc); + zpdesc_dec_zone_page_state(zpdesc); + zpdesc_put(zpdesc); + zpdesc = next; + } while (zpdesc != NULL); cache_free_zspage(pool, zspage); From 
7861e318347137084430a21ca0a02b4db3c1f4ba Mon Sep 17 00:00:00 2001 From: Hyeonggon Yoo <42.hyeyoo@gmail.com> Date: Tue, 17 Dec 2024 00:04:44 +0900 Subject: [PATCH 218/504] mm/zsmalloc: convert location_to_obj() to take zpdesc As all users of location_to_obj() now use zpdesc, convert location_to_obj() to take zpdesc. Link: https://lkml.kernel.org/r/20241216150450.1228021-14-42.hyeyoo@gmail.com Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index e1f501d51226..37212964a365 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -804,15 +804,15 @@ static void obj_to_zpdesc(unsigned long obj, struct zpdesc **zpdesc) } /** - * location_to_obj - get obj value encoded from (, ) - * @page: page object resides in zspage + * location_to_obj - get obj value encoded from (, ) + * @zpdesc: zpdesc object resides in zspage * @obj_idx: object index */ -static unsigned long location_to_obj(struct page *page, unsigned int obj_idx) +static unsigned long location_to_obj(struct zpdesc *zpdesc, unsigned int obj_idx) { unsigned long obj; - obj = page_to_pfn(page) << OBJ_INDEX_BITS; + obj = zpdesc_pfn(zpdesc) << OBJ_INDEX_BITS; obj |= obj_idx & OBJ_INDEX_MASK; return obj; @@ -1358,7 +1358,7 @@ static unsigned long obj_malloc(struct zs_pool *pool, kunmap_local(vaddr); mod_zspage_inuse(zspage, 1); - obj = location_to_obj(zpdesc_page(m_zpdesc), obj); + obj = location_to_obj(m_zpdesc, obj); record_obj(handle, obj); return obj; @@ -1845,8 +1845,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page, old_obj = handle_to_obj(handle); obj_to_location(old_obj, &dummy, &obj_idx); - new_obj = (unsigned long)location_to_obj(zpdesc_page(newzpdesc), - obj_idx); + new_obj = (unsigned long)location_to_obj(newzpdesc, obj_idx); record_obj(handle, new_obj); } } From 6de1a049da160ef6f4021bbf46590e0126585bbe Mon Sep 17 00:00:00 2001 From: Hyeonggon Yoo <42.hyeyoo@gmail.com> Date: Tue, 17 Dec 2024 00:04:45 +0900 Subject: [PATCH 219/504] mm/zsmalloc: convert migrate_zspage() to use zpdesc Use get_first_zpdesc/get_next_zpdesc to replace get_first/next_page. No functional change. 
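The obj value that location_to_obj() builds and obj_to_location() unpacks in the patches above is simply a page frame number and an object index packed into one word. A standalone sketch of that encoding follows; the bit width here is made up, whereas the kernel derives OBJ_INDEX_BITS from the page size and tag bits.

/*
 * Standalone sketch of the (pfn, obj_idx) handle encoding (illustrative only).
 */
#include <assert.h>
#include <stdio.h>

#define OBJ_INDEX_BITS 10
#define OBJ_INDEX_MASK ((1UL << OBJ_INDEX_BITS) - 1)

static unsigned long pack_obj(unsigned long pfn, unsigned int obj_idx)
{
    return (pfn << OBJ_INDEX_BITS) | (obj_idx & OBJ_INDEX_MASK);
}

static void unpack_obj(unsigned long obj, unsigned long *pfn,
                       unsigned int *obj_idx)
{
    *pfn = obj >> OBJ_INDEX_BITS;
    *obj_idx = obj & OBJ_INDEX_MASK;
}

int main(void)
{
    unsigned long pfn;
    unsigned int idx;

    unpack_obj(pack_obj(123456, 7), &pfn, &idx);
    assert(pfn == 123456 && idx == 7);
    printf("pfn=%lu idx=%u\n", pfn, idx);
    return 0;
}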
Link: https://lkml.kernel.org/r/20241216150450.1228021-15-42.hyeyoo@gmail.com Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 37212964a365..19c1ca3957f2 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1602,14 +1602,14 @@ static void migrate_zspage(struct zs_pool *pool, struct zspage *src_zspage, unsigned long used_obj, free_obj; unsigned long handle; int obj_idx = 0; - struct page *s_page = get_first_page(src_zspage); + struct zpdesc *s_zpdesc = get_first_zpdesc(src_zspage); struct size_class *class = pool->size_class[src_zspage->class]; while (1) { - handle = find_alloced_obj(class, page_zpdesc(s_page), &obj_idx); + handle = find_alloced_obj(class, s_zpdesc, &obj_idx); if (!handle) { - s_page = get_next_page(s_page); - if (!s_page) + s_zpdesc = get_next_zpdesc(s_zpdesc); + if (!s_zpdesc) break; obj_idx = 0; continue; From b979fd9b1efbee3699bbb5cfea6ef60691fc4063 Mon Sep 17 00:00:00 2001 From: Hyeonggon Yoo <42.hyeyoo@gmail.com> Date: Tue, 17 Dec 2024 00:04:46 +0900 Subject: [PATCH 220/504] mm/zsmalloc: convert get_zspage() to take zpdesc Now that all users except get_next_page() (which will be removed in later patch) use zpdesc, convert get_zspage() to take zpdesc instead of page. Link: https://lkml.kernel.org/r/20241216150450.1228021-16-42.hyeyoo@gmail.com Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 19c1ca3957f2..a1a620192596 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -757,9 +757,9 @@ out: return newfg; } -static struct zspage *get_zspage(struct page *page) +static struct zspage *get_zspage(struct zpdesc *zpdesc) { - struct zspage *zspage = (struct zspage *)page_private(page); + struct zspage *zspage = zpdesc->zspage; BUG_ON(zspage->magic != ZSPAGE_MAGIC); return zspage; @@ -767,7 +767,7 @@ static struct zspage *get_zspage(struct page *page) static struct page *get_next_page(struct page *page) { - struct zspage *zspage = get_zspage(page); + struct zspage *zspage = get_zspage(page_zpdesc(page)); if (unlikely(ZsHugePage(zspage))) return NULL; @@ -777,7 +777,7 @@ static struct page *get_next_page(struct page *page) static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc) { - struct zspage *zspage = get_zspage(zpdesc_page(zpdesc)); + struct zspage *zspage = get_zspage(zpdesc); if (unlikely(ZsHugePage(zspage))) return NULL; @@ -827,7 +827,7 @@ static inline bool obj_allocated(struct zpdesc *zpdesc, void *obj, unsigned long *phandle) { unsigned long handle; - struct zspage *zspage = get_zspage(zpdesc_page(zpdesc)); + struct zspage *zspage = get_zspage(zpdesc); if (unlikely(ZsHugePage(zspage))) { VM_BUG_ON_PAGE(!is_first_zpdesc(zpdesc), zpdesc_page(zpdesc)); @@ -1232,7 +1232,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, read_lock(&pool->migrate_lock); obj = handle_to_obj(handle); obj_to_location(obj, &zpdesc, &obj_idx); - zspage = get_zspage(zpdesc_page(zpdesc)); + zspage = get_zspage(zpdesc); /* * migration cannot move any 
zpages in this zspage. Here, class->lock @@ -1282,7 +1282,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) obj = handle_to_obj(handle); obj_to_location(obj, &zpdesc, &obj_idx); - zspage = get_zspage(zpdesc_page(zpdesc)); + zspage = get_zspage(zpdesc); class = zspage_class(pool, zspage); off = offset_in_page(class->size * obj_idx); @@ -1445,7 +1445,7 @@ static void obj_free(int class_size, unsigned long obj) obj_to_location(obj, &f_zpdesc, &f_objidx); f_offset = offset_in_page(class_size * f_objidx); - zspage = get_zspage(zpdesc_page(f_zpdesc)); + zspage = get_zspage(f_zpdesc); vaddr = kmap_local_zpdesc(f_zpdesc); link = (struct link_free *)(vaddr + f_offset); @@ -1479,7 +1479,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle) read_lock(&pool->migrate_lock); obj = handle_to_obj(handle); obj_to_zpdesc(obj, &f_zpdesc); - zspage = get_zspage(zpdesc_page(f_zpdesc)); + zspage = get_zspage(f_zpdesc); class = zspage_class(pool, zspage); spin_lock(&class->lock); read_unlock(&pool->migrate_lock); @@ -1812,7 +1812,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page, __SetPageZsmalloc(zpdesc_page(newzpdesc)); /* The page is locked, so this pointer must remain valid */ - zspage = get_zspage(zpdesc_page(zpdesc)); + zspage = get_zspage(zpdesc); pool = zspage->pool; /* From fdeac51e55d152d6dcd38beda01125cf71f2f828 Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 17 Dec 2024 00:04:47 +0900 Subject: [PATCH 221/504] mm/zsmalloc: convert SetZsPageMovable and remove unused funcs Convert SetZsPageMovable() to use zpdesc, and then remove unused funcs: get_next_page()/get_first_page()/is_first_page(). Link: https://lkml.kernel.org/r/20241216150450.1228021-17-42.hyeyoo@gmail.com Originally-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Signed-off-by: Alex Shi Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 33 +++++---------------------------- 1 file changed, 5 insertions(+), 28 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index a1a620192596..1801dce2f7ca 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -452,11 +452,6 @@ static DEFINE_PER_CPU(struct mapping_area, zs_map_area) = { .lock = INIT_LOCAL_LOCK(lock), }; -static __maybe_unused int is_first_page(struct page *page) -{ - return PagePrivate(page); -} - static inline bool is_first_zpdesc(struct zpdesc *zpdesc) { return PagePrivate(zpdesc_page(zpdesc)); @@ -473,14 +468,6 @@ static inline void mod_zspage_inuse(struct zspage *zspage, int val) zspage->inuse += val; } -static inline struct page *get_first_page(struct zspage *zspage) -{ - struct page *first_page = zpdesc_page(zspage->first_zpdesc); - - VM_BUG_ON_PAGE(!is_first_page(first_page), first_page); - return first_page; -} - static struct zpdesc *get_first_zpdesc(struct zspage *zspage) { struct zpdesc *first_zpdesc = zspage->first_zpdesc; @@ -765,16 +752,6 @@ static struct zspage *get_zspage(struct zpdesc *zpdesc) return zspage; } -static struct page *get_next_page(struct page *page) -{ - struct zspage *zspage = get_zspage(page_zpdesc(page)); - - if (unlikely(ZsHugePage(zspage))) - return NULL; - - return (struct page *)page->index; -} - static struct zpdesc *get_next_zpdesc(struct zpdesc *zpdesc) { struct zspage *zspage = get_zspage(zpdesc); @@ -1936,13 +1913,13 @@ static void init_deferred_free(struct zs_pool *pool) static void SetZsPageMovable(struct zs_pool *pool, 
struct zspage *zspage) { - struct page *page = get_first_page(zspage); + struct zpdesc *zpdesc = get_first_zpdesc(zspage); do { - WARN_ON(!trylock_page(page)); - __SetPageMovable(page, &zsmalloc_mops); - unlock_page(page); - } while ((page = get_next_page(page)) != NULL); + WARN_ON(!zpdesc_trylock(zpdesc)); + __zpdesc_set_movable(zpdesc, &zsmalloc_mops); + zpdesc_unlock(zpdesc); + } while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL); } #else static inline void zs_flush_migration(struct zs_pool *pool) { } From b3fdd1188ba164c3d61475ea1781f8bf89d51ebf Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 17 Dec 2024 00:04:48 +0900 Subject: [PATCH 222/504] mm/zsmalloc: convert get/set_first_obj_offset() to take zpdesc Now that all users of get/set_first_obj_offset() are converted to use zpdesc, convert them to take zpdesc. Link: https://lkml.kernel.org/r/20241216150450.1228021-18-42.hyeyoo@gmail.com Signed-off-by: Alex Shi Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 1801dce2f7ca..3a841e16746e 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -478,20 +478,20 @@ static struct zpdesc *get_first_zpdesc(struct zspage *zspage) #define FIRST_OBJ_PAGE_TYPE_MASK 0xffffff -static inline unsigned int get_first_obj_offset(struct page *page) +static inline unsigned int get_first_obj_offset(struct zpdesc *zpdesc) { - VM_WARN_ON_ONCE(!PageZsmalloc(page)); - return page->page_type & FIRST_OBJ_PAGE_TYPE_MASK; + VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc))); + return zpdesc->first_obj_offset & FIRST_OBJ_PAGE_TYPE_MASK; } -static inline void set_first_obj_offset(struct page *page, unsigned int offset) +static inline void set_first_obj_offset(struct zpdesc *zpdesc, unsigned int offset) { /* With 24 bits available, we can support offsets into 16 MiB pages. 
*/ BUILD_BUG_ON(PAGE_SIZE > SZ_16M); - VM_WARN_ON_ONCE(!PageZsmalloc(page)); + VM_WARN_ON_ONCE(!PageZsmalloc(zpdesc_page(zpdesc))); VM_WARN_ON_ONCE(offset & ~FIRST_OBJ_PAGE_TYPE_MASK); - page->page_type &= ~FIRST_OBJ_PAGE_TYPE_MASK; - page->page_type |= offset & FIRST_OBJ_PAGE_TYPE_MASK; + zpdesc->first_obj_offset &= ~FIRST_OBJ_PAGE_TYPE_MASK; + zpdesc->first_obj_offset |= offset & FIRST_OBJ_PAGE_TYPE_MASK; } static inline unsigned int get_freeobj(struct zspage *zspage) @@ -911,7 +911,7 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) struct link_free *link; void *vaddr; - set_first_obj_offset(zpdesc_page(zpdesc), off); + set_first_obj_offset(zpdesc, off); vaddr = kmap_local_zpdesc(zpdesc); link = (struct link_free *)vaddr + off / sizeof(*link); @@ -1555,7 +1555,7 @@ static unsigned long find_alloced_obj(struct size_class *class, unsigned long handle = 0; void *addr = kmap_local_zpdesc(zpdesc); - offset = get_first_obj_offset(zpdesc_page(zpdesc)); + offset = get_first_obj_offset(zpdesc); offset += class->size * index; while (offset < PAGE_SIZE) { @@ -1750,8 +1750,8 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage, } while ((zpdesc = get_next_zpdesc(zpdesc)) != NULL); create_page_chain(class, zspage, zpdescs); - first_obj_offset = get_first_obj_offset(zpdesc_page(oldzpdesc)); - set_first_obj_offset(zpdesc_page(newzpdesc), first_obj_offset); + first_obj_offset = get_first_obj_offset(oldzpdesc); + set_first_obj_offset(newzpdesc, first_obj_offset); if (unlikely(ZsHugePage(zspage))) newzpdesc->handle = oldzpdesc->handle; __zpdesc_set_movable(newzpdesc, &zsmalloc_mops); @@ -1806,7 +1806,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page, /* the migrate_write_lock protects zpage access via zs_map_object */ migrate_write_lock(zspage); - offset = get_first_obj_offset(zpdesc_page(zpdesc)); + offset = get_first_obj_offset(zpdesc); s_addr = kmap_local_zpdesc(zpdesc); /* From 54cacb6447fde67d287118c2ba48fd264999171c Mon Sep 17 00:00:00 2001 From: Alex Shi Date: Tue, 17 Dec 2024 00:04:49 +0900 Subject: [PATCH 223/504] mm/zsmalloc: introduce __zpdesc_clear/set_zsmalloc() Add helper __zpdesc_clear_zsmalloc() for __ClearPageZsmalloc(), __zpdesc_set_zsmalloc() for __SetPageZsmalloc(), and use them in callers. 
[42.hyeyoo@gmail.com: keep reset_zpdesc() to use struct page] Link: https://lkml.kernel.org/r/20241216150450.1228021-19-42.hyeyoo@gmail.com Signed-off-by: Alex Shi Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Acked-by: Sergey Senozhatsky Tested-by: Sergey Senozhatsky Cc: Matthew Wilcox (Oracle) Cc: Minchan Kim Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/zpdesc.h | 10 ++++++++++ mm/zsmalloc.c | 6 +++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/mm/zpdesc.h b/mm/zpdesc.h index fa80c50993c6..2da58339ac5b 100644 --- a/mm/zpdesc.h +++ b/mm/zpdesc.h @@ -155,6 +155,16 @@ static inline void __zpdesc_set_movable(struct zpdesc *zpdesc, __SetPageMovable(zpdesc_page(zpdesc), mops); } +static inline void __zpdesc_set_zsmalloc(struct zpdesc *zpdesc) +{ + __SetPageZsmalloc(zpdesc_page(zpdesc)); +} + +static inline void __zpdesc_clear_zsmalloc(struct zpdesc *zpdesc) +{ + __ClearPageZsmalloc(zpdesc_page(zpdesc)); +} + static inline bool zpdesc_is_isolated(struct zpdesc *zpdesc) { return PageIsolated(zpdesc_page(zpdesc)); diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 3a841e16746e..dae32e051779 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1001,13 +1001,13 @@ static struct zspage *alloc_zspage(struct zs_pool *pool, if (!zpdesc) { while (--i >= 0) { zpdesc_dec_zone_page_state(zpdescs[i]); - __ClearPageZsmalloc(zpdesc_page(zpdescs[i])); + __zpdesc_clear_zsmalloc(zpdescs[i]); free_zpdesc(zpdescs[i]); } cache_free_zspage(pool, zspage); return NULL; } - __SetPageZsmalloc(zpdesc_page(zpdesc)); + __zpdesc_set_zsmalloc(zpdesc); zpdesc_inc_zone_page_state(zpdesc); zpdescs[i] = zpdesc; @@ -1786,7 +1786,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page, VM_BUG_ON_PAGE(!zpdesc_is_isolated(zpdesc), zpdesc_page(zpdesc)); /* We're committed, tell the world that this is a Zsmalloc page. */ - __SetPageZsmalloc(zpdesc_page(newzpdesc)); + __zpdesc_set_zsmalloc(newzpdesc); /* The page is locked, so this pointer must remain valid */ zspage = get_zspage(zpdesc); From 65c5adb81587e5810dffd90964dd08fa87b2b66e Mon Sep 17 00:00:00 2001 From: Chen Ridong Date: Tue, 24 Dec 2024 02:52:38 +0000 Subject: [PATCH 224/504] memcg: fix soft lockup in the OOM process MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A soft lockup issue was found in the product with about 56,000 tasks were in the OOM cgroup, it was traversing them when the soft lockup was triggered. watchdog: BUG: soft lockup - CPU#2 stuck for 23s! 
[VM Thread:1503066] CPU: 2 PID: 1503066 Comm: VM Thread Kdump: loaded Tainted: G Hardware name: Huawei Cloud OpenStack Nova, BIOS RIP: 0010:console_unlock+0x343/0x540 RSP: 0000:ffffb751447db9a0 EFLAGS: 00000247 ORIG_RAX: ffffffffffffff13 RAX: 0000000000000001 RBX: 0000000000000000 RCX: 00000000ffffffff RDX: 0000000000000000 RSI: 0000000000000004 RDI: 0000000000000247 RBP: ffffffffafc71f90 R08: 0000000000000000 R09: 0000000000000040 R10: 0000000000000080 R11: 0000000000000000 R12: ffffffffafc74bd0 R13: ffffffffaf60a220 R14: 0000000000000247 R15: 0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f2fe6ad91f0 CR3: 00000004b2076003 CR4: 0000000000360ee0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: vprintk_emit+0x193/0x280 printk+0x52/0x6e dump_task+0x114/0x130 mem_cgroup_scan_tasks+0x76/0x100 dump_header+0x1fe/0x210 oom_kill_process+0xd1/0x100 out_of_memory+0x125/0x570 mem_cgroup_out_of_memory+0xb5/0xd0 try_charge+0x720/0x770 mem_cgroup_try_charge+0x86/0x180 mem_cgroup_try_charge_delay+0x1c/0x40 do_anonymous_page+0xb5/0x390 handle_mm_fault+0xc4/0x1f0 This is because thousands of processes are in the OOM cgroup, it takes a long time to traverse all of them. As a result, this lead to soft lockup in the OOM process. To fix this issue, call 'cond_resched' in the 'mem_cgroup_scan_tasks' function per 1000 iterations. For global OOM, call 'touch_softlockup_watchdog' per 1000 iterations to avoid this issue. Link: https://lkml.kernel.org/r/20241224025238.3768787-1-chenridong@huaweicloud.com Fixes: 9cbb78bb3143 ("mm, memcg: introduce own oom handler to iterate only over its own threads") Signed-off-by: Chen Ridong Acked-by: Michal Hocko Cc: Roman Gushchin Cc: Johannes Weiner Cc: Shakeel Butt Cc: Muchun Song Cc: Michal Koutný Cc: Signed-off-by: Andrew Morton --- mm/memcontrol.c | 7 ++++++- mm/oom_kill.c | 8 +++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 65fb5eee1466..46f8b372d212 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1161,6 +1161,7 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, { struct mem_cgroup *iter; int ret = 0; + int i = 0; BUG_ON(mem_cgroup_is_root(memcg)); @@ -1169,8 +1170,12 @@ void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, struct task_struct *task; css_task_iter_start(&iter->css, CSS_TASK_ITER_PROCS, &it); - while (!ret && (task = css_task_iter_next(&it))) + while (!ret && (task = css_task_iter_next(&it))) { + /* Avoid potential softlockup warning */ + if ((++i & 1023) == 0) + cond_resched(); ret = fn(task, arg); + } css_task_iter_end(&it); if (ret) { mem_cgroup_iter_break(memcg, iter); diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 1c485beb0b93..044ebab2c941 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -44,6 +44,7 @@ #include #include #include +#include #include #include "internal.h" @@ -430,10 +431,15 @@ static void dump_tasks(struct oom_control *oc) mem_cgroup_scan_tasks(oc->memcg, dump_task, oc); else { struct task_struct *p; + int i = 0; rcu_read_lock(); - for_each_process(p) + for_each_process(p) { + /* Avoid potential softlockup warning */ + if ((++i & 1023) == 0) + touch_softlockup_watchdog(); dump_task(p, oc); + } rcu_read_unlock(); } } From f4b45be6de39813817b247e7b9a1fd08c9853345 Mon Sep 17 00:00:00 2001 From: Suren Baghdasaryan Date: Thu, 26 Dec 2024 13:16:38 -0800 Subject: [PATCH 225/504] alloc_tag: avoid current->alloc_tag manipulations when profiling is 
disabled When memory allocation profiling is disabled there is no need to update current->alloc_tag and these manipulations add unnecessary overhead. Fix the overhead by skipping these extra updates. Link: https://lkml.kernel.org/r/20241226211639.1357704-1-surenb@google.com Fixes: b951aaff5035 ("mm: enable page allocation tagging") Signed-off-by: Suren Baghdasaryan Cc: David Wang <00107082@163.com> Cc: Kent Overstreet Cc: Yu Zhao Cc: Zhenhua Huang Signed-off-by: Andrew Morton --- include/linux/alloc_tag.h | 11 ++++++++--- lib/alloc_tag.c | 2 ++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index 0bbbe537c5f9..a946e0203e6d 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -224,9 +224,14 @@ static inline void alloc_tag_sub(union codetag_ref *ref, size_t bytes) {} #define alloc_hooks_tag(_tag, _do_alloc) \ ({ \ - struct alloc_tag * __maybe_unused _old = alloc_tag_save(_tag); \ - typeof(_do_alloc) _res = _do_alloc; \ - alloc_tag_restore(_tag, _old); \ + typeof(_do_alloc) _res; \ + if (mem_alloc_profiling_enabled()) { \ + struct alloc_tag * __maybe_unused _old; \ + _old = alloc_tag_save(_tag); \ + _res = _do_alloc; \ + alloc_tag_restore(_tag, _old); \ + } else \ + _res = _do_alloc; \ _res; \ }) diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 65e706e1bc19..4e5d7af3eaa2 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -29,6 +29,8 @@ EXPORT_SYMBOL(_shared_alloc_tag); DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT, mem_alloc_profiling_key); +EXPORT_SYMBOL(mem_alloc_profiling_key); + DEFINE_STATIC_KEY_FALSE(mem_profiling_compressed); struct alloc_tag_kernel_section kernel_tags = { NULL, 0 }; From 6ac0eedfce7422c18b3d89ab5dd51eb3ae265474 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 31 Dec 2024 01:46:09 +0800 Subject: [PATCH 226/504] mm, swap: minor clean up for swap entry allocation Patch series "mm, swap: rework of swap allocator locks", v3. This series greatly improves swap performance by reworking the locking design and simplify a lot of code path. Test showed a up to 400% vm-scalability improvement with pmem as SWAP, and up to 37% reduce of kernel compile real time with ZRAM as SWAP (up to 60% improvement in system time). This is part of the new swap allocator discussed during the "Swap Abstraction" discussion at LSF/MM 2024, and "mTHP and swap allocator" discussion at LPC 2024. This is a follow up of previous swap cluster allocator series: https://lore.kernel.org/linux-mm/20240730-swap-allocator-v5-0-cb9c148b9297@kernel.org/ Also enables further optimizations which will come later. Previous series introduced a fully cluster based allocator, this series completely get rid of the old allocator and makes the new allocator avoid touching the si->lock unless needed. This bring huge performance gain and get rid of slot cache for freeing path. Currently, swap locking is mainly composed of two locks, cluster lock (ci->lock) and device lock (si->lock). The device lock is widely used to protect many things, causing it to be the main bottleneck for SWAP. Cluster lock is much more fine-grained, so it will be best to use ci->lock instead of si->lock as much as possible. `perf lock` indicates this issue clearly. 
Doing linux kernel build using tmpfs and ZRAM with limited memory (make -j64 with 1G memcg and 4k pages), result of "perf lock contention -ab sleep 3" shows: contended total wait max wait avg wait type caller 34948 53.63 s 7.11 ms 1.53 ms spinlock free_swap_and_cache_nr+0x350 16569 40.05 s 6.45 ms 2.42 ms spinlock get_swap_pages+0x231 11191 28.41 s 7.03 ms 2.54 ms spinlock swapcache_free_entries+0x59 4147 22.78 s 122.66 ms 5.49 ms spinlock page_vma_mapped_walk+0x6f3 4595 7.17 s 6.79 ms 1.56 ms spinlock swapcache_free_entries+0x59 406027 2.74 s 2.59 ms 6.74 us spinlock list_lru_add+0x39 ...snip... The top 5 caller are all users of si->lock, total wait time sums to several minutes in the 3 seconds time window. Following the new allocator design, many operation doesn't need to touch si->lock at all. We only need to take si->lock when doing operations across multiple clusters (changing the cluster list). So ideally allocator should always take ci->lock first, then take si->lock only if needed. But due to historical reasons, ci->lock is used inside si->lock critical section, causing lock inversion if we simply try to acquire si->lock after acquiring ci->lock. This series audited all si->lock usage, clean up legacy codes, eliminate usage of si->lock as much as possible by introducing new designs based on the new cluster allocator. Old HDD allocation codes are removed, cluster allocator is adapted with small changes for HDD usage, test is looking OK. And this also removed slot cache for freeing path. The performance is even better without it now, and this enables other clean up and optimizations as discussed before: https://lore.kernel.org/all/CAMgjq7ACohT_uerSz8E_994ZZCv709Zor+43hdmesW_59W1BWw@mail.gmail.com/ After this series, lock contention on si->lock is nearly unobservable with `perf lock` with the same test above: contended total wait max wait avg wait type caller ... snip ... 91 204.62 us 4.51 us 2.25 us spinlock cluster_move+0x2e ... snip ... 47 125.62 us 4.47 us 2.67 us spinlock cluster_move+0x2e ... snip ... 23 63.15 us 3.95 us 2.74 us spinlock cluster_move+0x2e ... snip ... 17 41.26 us 4.58 us 2.43 us spinlock cluster_isolate_lock+0x1d ... snip ... `cluster_move` and `cluster_isolate_lock` (two new introduced helper) are basically the only users of si->lock now, performance gain is huge, and LOC is reduced. Tests Results: vm-scalability ============== Running `usemem --init-time -O -y -x -R -31 1G` from vm-scalability in a 12G memory cgroup using simulated pmem as SWAP backend (32G pmem, 32 CPUs). Using 4K folio by default, 64k mTHP and sequential access (!-R) results are also provided. 6 test runs for each case, Total Throughput: Test Before (KB/s) (stdev) After (KB/s) (stdev) Delta --------------------------------------------------------------------------- Random (4K): 69937.11 (16449.77) 369816.17 (24476.68) +428.78% Random (64k): 123442.83 (13207.51) 216379.00 (25024.83) +75.28% Sequential (4K): 6313909.83 (148856.12) 6419860.66 (183563.38) +1.7% Sequential access will cause lower stress for the allocator so the gain is limited, but with random access (which is much closer to real workloads) the performance gain is huge. Build kernel with defconfig on tmpfs with ZRAM ============================================== Below results shows a test matrix using different memory cgroup limit and job numbets, and scaled up progressive for a intuitive result. Done on a 48c96t system. 
6 test run for each case, it can be seen clearly that as concurrent job number goes higher the performance gain is higher, but even -j6 is showing slight improvement. make -j | System Time (seconds) | Total Time (seconds) (NR / Mem / ZRAM) | (Before / After / Delta) | (Before / After / Delta) With 4k pages only: 6 / 192M / 3G | 1533 / 1522 / -0.7% | 1420 / 1414 / -0.3% 12 / 256M / 4G | 2275 / 2226 / -2.2% | 758 / 742 / -2.1% 24 / 384M / 5G | 3596 / 3154 / -12.3% | 476 / 422 / -11.3% 48 / 768M / 7G | 8159 / 3605 / -55.8% | 330 / 221 / -33.0% 96 / 1.5G / 10G | 18541 / 6462 / -65.1% | 283 / 180 / -36.4% With 64k mTHP: 24 / 512M / 5G | 3585 / 3469 / -3.2% | 293 / 290 / -0.1% 48 / 1G / 7G | 8173 / 3607 / -55.9% | 251 / 158 / -37.0% 96 / 2G / 10G | 16305 / 7791 / -52.2% | 226 / 144 / -36.3% The fragmentation are reduced too: With: make -j96 / 1152M memcg, 64K mTHP: (avg of 4 test run) Before: hugepages-64kB/stats/swpout: 1696184 hugepages-64kB/stats/swpout_fallback: 414318 After: (-63.2% mTHP swapout failure) hugepages-64kB/stats/swpout: 1866267 hugepages-64kB/stats/swpout_fallback: 158330 There is a up to 65.1% improvement in sys time for build kernel test, and lower fragmentation rate. Build kernel with tinyconfig on tmpfs with HDD as swap: ======================================================= This test is similar to above, but HDD test is very noisy and slow, the deviation is huge, so just use tinyconfig instead and take the median test result of 3 test run, which looks OK: Before this series: 114.44user 29.11system 39:42.90elapsed 6%CPU 2901232inputs+0outputs (238877major+4227640minor)pagefaults After this commit: 113.90user 23.81system 38:11.77elapsed 6%CPU 2548728inputs+0outputs (235471major+4238110minor)pagefaults Single thread SWAP: =================== Sequential SWAP should also be slightly faster as we removed a lot of unnecessary parts. Test using micro benchmark for swapout/in 4G zero memory using ZRAM, 10 test runs: Swapout Before (avg. 3359304): 3353796 3358551 3371305 3356043 3367524 3355303 3355924 3354513 3360776 Swapin Before (avg. 1928698): 1920283 1927183 1934105 1921373 1926562 1938261 1927726 1928636 1934155 Swapout After (avg. 3347511, -0.4%): 3337863 3347948 3355235 3339081 3333134 3353006 3354917 3346055 3360359 Swapin After (avg. 1922290, -0.3%): 1919101 1925743 1916810 1917007 1923930 1935152 1917403 1923549 1921913 The gain is limited at noise level but seems slightly better. This patch (of 13): Direct reclaim can skip the whole folio after reclaimed a set of folio based slots. Also simplify the code for allocation, reduce indention. 
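A toy model of the locking split this series works toward is sketched below: a per-cluster lock protects slot state and is taken first, while the device-wide lock is taken only on the slow path that moves a cluster between lists. All names and types are invented for the example; this is not the kernel code, just the lock-ordering idea from the cover letter.

/*
 * Toy model of the intended ci->lock / si->lock split (illustrative only).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_device {
    pthread_spinlock_t lock;        /* protects only the cluster lists */
};

struct toy_cluster {
    pthread_spinlock_t lock;        /* protects this cluster's slots only */
    int free_slots;
    bool on_full_list;
};

/* Fast path touches only the cluster lock; lock order is cluster, then device. */
static int toy_cluster_alloc(struct toy_device *dev, struct toy_cluster *ci)
{
    int slot = -1;

    pthread_spin_lock(&ci->lock);
    if (ci->free_slots > 0)
        slot = --ci->free_slots;

    if (ci->free_slots == 0 && !ci->on_full_list) {
        /* Cross-cluster bookkeeping: the only place the device lock is needed. */
        pthread_spin_lock(&dev->lock);
        ci->on_full_list = true;    /* "move" the cluster to the full list */
        pthread_spin_unlock(&dev->lock);
    }
    pthread_spin_unlock(&ci->lock);
    return slot;
}

int main(void)
{
    struct toy_device dev;
    struct toy_cluster ci = { .free_slots = 2 };

    pthread_spin_init(&dev.lock, PTHREAD_PROCESS_PRIVATE);
    pthread_spin_init(&ci.lock, PTHREAD_PROCESS_PRIVATE);

    int a = toy_cluster_alloc(&dev, &ci);
    int b = toy_cluster_alloc(&dev, &ci);
    int c = toy_cluster_alloc(&dev, &ci);

    printf("%d %d %d on_full_list=%d\n", a, b, c, ci.on_full_list);
    return 0;
}

Because the fast path never touches the device-wide lock, contention on it drops to the rare list-movement operations, which is the effect the `perf lock` numbers above show.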
Link: https://lkml.kernel.org/r/20241230174621.61185-1-ryncsn@gmail.com Link: https://lkml.kernel.org/r/20241230174621.61185-2-ryncsn@gmail.com Signed-off-by: Kairui Song Reviewed-by: Baoquan He Cc: Barry Song Cc: Chris Li Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Nhat Pham Cc: Ryan Roberts Cc: Yosry Ahmed Cc: "Huang, Ying" Signed-off-by: Andrew Morton --- mm/swapfile.c | 59 +++++++++++++++++++++++++-------------------------- 1 file changed, 29 insertions(+), 30 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index b0a9071cfe1d..f8002f110104 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -604,23 +604,28 @@ static bool cluster_reclaim_range(struct swap_info_struct *si, unsigned long start, unsigned long end) { unsigned char *map = si->swap_map; - unsigned long offset; + unsigned long offset = start; + int nr_reclaim; spin_unlock(&ci->lock); spin_unlock(&si->lock); - for (offset = start; offset < end; offset++) { + do { switch (READ_ONCE(map[offset])) { case 0: - continue; + offset++; + break; case SWAP_HAS_CACHE: - if (__try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT) > 0) - continue; - goto out; + nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT); + if (nr_reclaim > 0) + offset += nr_reclaim; + else + goto out; + break; default: goto out; } - } + } while (offset < end); out: spin_lock(&si->lock); spin_lock(&ci->lock); @@ -838,35 +843,30 @@ new_cluster: &found, order, usage); frags++; if (found) - break; + goto done; } - if (!found) { + /* + * Nonfull clusters are moved to frag tail if we reached + * here, count them too, don't over scan the frag list. + */ + while (frags < si->frag_cluster_nr[order]) { + ci = list_first_entry(&si->frag_clusters[order], + struct swap_cluster_info, list); /* - * Nonfull clusters are moved to frag tail if we reached - * here, count them too, don't over scan the frag list. + * Rotate the frag list to iterate, they were all failing + * high order allocation or moved here due to per-CPU usage, + * this help keeping usable cluster ahead. */ - while (frags < si->frag_cluster_nr[order]) { - ci = list_first_entry(&si->frag_clusters[order], - struct swap_cluster_info, list); - /* - * Rotate the frag list to iterate, they were all failing - * high order allocation or moved here due to per-CPU usage, - * this help keeping usable cluster ahead. - */ - list_move_tail(&ci->list, &si->frag_clusters[order]); - offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), - &found, order, usage); - frags++; - if (found) - break; - } + list_move_tail(&ci->list, &si->frag_clusters[order]); + offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), + &found, order, usage); + frags++; + if (found) + goto done; } } - if (found) - goto done; - if (!list_empty(&si->discard_clusters)) { /* * we don't have free cluster but have some clusters in @@ -904,7 +904,6 @@ new_cluster: goto done; } } - done: cluster->next[order] = offset; return found; From f38b2f642114a9f1610c466cbaeab5049236d5b7 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 31 Dec 2024 01:46:10 +0800 Subject: [PATCH 227/504] mm, swap: fold swap_info_get_cont in the only caller The name of the function is confusing, and the code is much easier to follow after folding, also rename the confusing naming "p" to more meaningful "si". 
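The folded loop keeps swapcache_free_entries()'s trick of sorting the batch by swap device so each device's lock is taken at most once per run. A standalone userspace sketch of that sort-then-batch pattern follows; the types and names are invented, and pthread mutexes stand in for the kernel locks.

/*
 * Sort-then-batch lock pattern (illustrative only).
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_dev {
    pthread_mutex_t lock;
    int freed;
};

struct toy_entry {
    struct toy_dev *dev;
    int slot;
};

static int cmp_dev(const void *a, const void *b)
{
    uintptr_t da = (uintptr_t)((const struct toy_entry *)a)->dev;
    uintptr_t db = (uintptr_t)((const struct toy_entry *)b)->dev;

    return (da > db) - (da < db);
}

static void free_batch(struct toy_entry *entries, int n)
{
    struct toy_dev *prev = NULL;

    qsort(entries, n, sizeof(*entries), cmp_dev);
    for (int i = 0; i < n; i++) {
        struct toy_dev *dev = entries[i].dev;

        if (dev != prev) {              /* device changed: switch locks */
            if (prev)
                pthread_mutex_unlock(&prev->lock);
            pthread_mutex_lock(&dev->lock);
        }
        dev->freed++;                   /* "free" the slot under its lock */
        prev = dev;
    }
    if (prev)
        pthread_mutex_unlock(&prev->lock);
}

int main(void)
{
    struct toy_dev a = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct toy_dev b = { .lock = PTHREAD_MUTEX_INITIALIZER };
    struct toy_entry batch[] = { { &a, 1 }, { &b, 2 }, { &a, 3 }, { &b, 4 } };

    free_batch(batch, 4);
    printf("a freed %d, b freed %d\n", a.freed, b.freed);
    return 0;
}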
Link: https://lkml.kernel.org/r/20241230174621.61185-3-ryncsn@gmail.com Signed-off-by: Kairui Song Reviewed-by: Baoquan He Cc: Barry Song Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Nhat Pham Cc: Ryan Roberts Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/swapfile.c | 39 +++++++++++++++------------------------ 1 file changed, 15 insertions(+), 24 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index f8002f110104..574059158627 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1375,22 +1375,6 @@ out: return NULL; } -static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry, - struct swap_info_struct *q) -{ - struct swap_info_struct *p; - - p = _swap_info_get(entry); - - if (p != q) { - if (q != NULL) - spin_unlock(&q->lock); - if (p != NULL) - spin_lock(&p->lock); - } - return p; -} - static unsigned char __swap_entry_free_locked(struct swap_info_struct *si, unsigned long offset, unsigned char usage) @@ -1687,14 +1671,14 @@ static int swp_entry_cmp(const void *ent1, const void *ent2) void swapcache_free_entries(swp_entry_t *entries, int n) { - struct swap_info_struct *p, *prev; + struct swap_info_struct *si, *prev; int i; if (n <= 0) return; prev = NULL; - p = NULL; + si = NULL; /* * Sort swap entries by swap device, so each lock is only taken once. @@ -1704,13 +1688,20 @@ void swapcache_free_entries(swp_entry_t *entries, int n) if (nr_swapfiles > 1) sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL); for (i = 0; i < n; ++i) { - p = swap_info_get_cont(entries[i], prev); - if (p) - swap_entry_range_free(p, entries[i], 1); - prev = p; + si = _swap_info_get(entries[i]); + + if (si != prev) { + if (prev != NULL) + spin_unlock(&prev->lock); + if (si != NULL) + spin_lock(&si->lock); + } + if (si) + swap_entry_range_free(si, entries[i], 1); + prev = si; } - if (p) - spin_unlock(&p->lock); + if (si) + spin_unlock(&si->lock); } int __swap_count(swp_entry_t entry) From 943f963296deb86c91bd3c766eada8bd875e4721 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 31 Dec 2024 01:46:11 +0800 Subject: [PATCH 228/504] mm, swap: remove old allocation path for HDD We are currently using different swap allocation algorithm for HDD and non-HDD. This leads to the existence of a different set of locks, and the code path is heavily bloated, causing difficulties for further optimization and maintenance. This commit removes all HDD swap allocation and related dead code, and uses the cluster allocation algorithm instead. The performance may drop temporarily, but this should be negligible: The main advantage of the legacy HDD allocation algorithm is that it tends to use continuous slots, but swap device gets fragmented quickly anyway, and the attempt to use continuous slots will fail easily. This commit also enables mTHP swap on HDD, which is expected to be beneficial, and following commits will adapt and optimize the cluster allocator for HDD. 
Link: https://lkml.kernel.org/r/20241230174621.61185-4-ryncsn@gmail.com Signed-off-by: Kairui Song Suggested-by: Chris Li Suggested-by: "Huang, Ying" Reviewed-by: Baoquan He Cc: Barry Song Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Nhat Pham Cc: Ryan Roberts Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap.h | 3 - mm/swapfile.c | 235 ++----------------------------------------- 2 files changed, 9 insertions(+), 229 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 187715eec3cb..0c681aa5cb98 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -310,9 +310,6 @@ struct swap_info_struct { unsigned int highest_bit; /* index of last free in swap_map */ unsigned int pages; /* total of usable pages of swap */ unsigned int inuse_pages; /* number of those currently in use */ - unsigned int cluster_next; /* likely index for next allocation */ - unsigned int cluster_nr; /* countdown to next cluster search */ - unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ struct block_device *bdev; /* swap device or bdev of swap file */ diff --git a/mm/swapfile.c b/mm/swapfile.c index 574059158627..fca58d43b836 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1001,49 +1001,6 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset, WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries); } -static void set_cluster_next(struct swap_info_struct *si, unsigned long next) -{ - unsigned long prev; - - if (!(si->flags & SWP_SOLIDSTATE)) { - si->cluster_next = next; - return; - } - - prev = this_cpu_read(*si->cluster_next_cpu); - /* - * Cross the swap address space size aligned trunk, choose - * another trunk randomly to avoid lock contention on swap - * address space if possible. - */ - if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) != - (next >> SWAP_ADDRESS_SPACE_SHIFT)) { - /* No free swap slots available */ - if (si->highest_bit <= si->lowest_bit) - return; - next = get_random_u32_inclusive(si->lowest_bit, si->highest_bit); - next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES); - next = max_t(unsigned int, next, si->lowest_bit); - } - this_cpu_write(*si->cluster_next_cpu, next); -} - -static bool swap_offset_available_and_locked(struct swap_info_struct *si, - unsigned long offset) -{ - if (data_race(!si->swap_map[offset])) { - spin_lock(&si->lock); - return true; - } - - if (vm_swap_full() && READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { - spin_lock(&si->lock); - return true; - } - - return false; -} - static int cluster_alloc_swap(struct swap_info_struct *si, unsigned char usage, int nr, swp_entry_t slots[], int order) @@ -1071,13 +1028,7 @@ static int scan_swap_map_slots(struct swap_info_struct *si, unsigned char usage, int nr, swp_entry_t slots[], int order) { - unsigned long offset; - unsigned long scan_base; - unsigned long last_in_cluster = 0; - int latency_ration = LATENCY_LIMIT; unsigned int nr_pages = 1 << order; - int n_ret = 0; - bool scanned_many = false; /* * We try to cluster swap pages by allocating them sequentially @@ -1089,7 +1040,6 @@ static int scan_swap_map_slots(struct swap_info_struct *si, * But we do now try to find an empty cluster. -Andrea * And we let swap pages go all over an SSD partition. 
Hugh */ - if (order > 0) { /* * Should not even be attempting large allocations when huge @@ -1109,158 +1059,7 @@ static int scan_swap_map_slots(struct swap_info_struct *si, return 0; } - if (si->cluster_info) - return cluster_alloc_swap(si, usage, nr, slots, order); - - si->flags += SWP_SCANNING; - - /* For HDD, sequential access is more important. */ - scan_base = si->cluster_next; - offset = scan_base; - - if (unlikely(!si->cluster_nr--)) { - if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) { - si->cluster_nr = SWAPFILE_CLUSTER - 1; - goto checks; - } - - spin_unlock(&si->lock); - - /* - * If seek is expensive, start searching for new cluster from - * start of partition, to minimize the span of allocated swap. - */ - scan_base = offset = si->lowest_bit; - last_in_cluster = offset + SWAPFILE_CLUSTER - 1; - - /* Locate the first empty (unaligned) cluster */ - for (; last_in_cluster <= READ_ONCE(si->highest_bit); offset++) { - if (si->swap_map[offset]) - last_in_cluster = offset + SWAPFILE_CLUSTER; - else if (offset == last_in_cluster) { - spin_lock(&si->lock); - offset -= SWAPFILE_CLUSTER - 1; - si->cluster_next = offset; - si->cluster_nr = SWAPFILE_CLUSTER - 1; - goto checks; - } - if (unlikely(--latency_ration < 0)) { - cond_resched(); - latency_ration = LATENCY_LIMIT; - } - } - - offset = scan_base; - spin_lock(&si->lock); - si->cluster_nr = SWAPFILE_CLUSTER - 1; - } - -checks: - if (!(si->flags & SWP_WRITEOK)) - goto no_page; - if (!si->highest_bit) - goto no_page; - if (offset > si->highest_bit) - scan_base = offset = si->lowest_bit; - - /* reuse swap entry of cache-only swap if not busy. */ - if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { - int swap_was_freed; - spin_unlock(&si->lock); - swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT); - spin_lock(&si->lock); - /* entry was freed successfully, try to use this again */ - if (swap_was_freed > 0) - goto checks; - goto scan; /* check next one */ - } - - if (si->swap_map[offset]) { - if (!n_ret) - goto scan; - else - goto done; - } - memset(si->swap_map + offset, usage, nr_pages); - - swap_range_alloc(si, offset, nr_pages); - slots[n_ret++] = swp_entry(si->type, offset); - - /* got enough slots or reach max slots? */ - if ((n_ret == nr) || (offset >= si->highest_bit)) - goto done; - - /* search for next available slot */ - - /* time to take a break? */ - if (unlikely(--latency_ration < 0)) { - if (n_ret) - goto done; - spin_unlock(&si->lock); - cond_resched(); - spin_lock(&si->lock); - latency_ration = LATENCY_LIMIT; - } - - if (si->cluster_nr && !si->swap_map[++offset]) { - /* non-ssd case, still more slots in cluster? */ - --si->cluster_nr; - goto checks; - } - - /* - * Even if there's no free clusters available (fragmented), - * try to scan a little more quickly with lock held unless we - * have scanned too many slots already. 
- */ - if (!scanned_many) { - unsigned long scan_limit; - - if (offset < scan_base) - scan_limit = scan_base; - else - scan_limit = si->highest_bit; - for (; offset <= scan_limit && --latency_ration > 0; - offset++) { - if (!si->swap_map[offset]) - goto checks; - } - } - -done: - if (order == 0) - set_cluster_next(si, offset + 1); - si->flags -= SWP_SCANNING; - return n_ret; - -scan: - VM_WARN_ON(order > 0); - spin_unlock(&si->lock); - while (++offset <= READ_ONCE(si->highest_bit)) { - if (unlikely(--latency_ration < 0)) { - cond_resched(); - latency_ration = LATENCY_LIMIT; - scanned_many = true; - } - if (swap_offset_available_and_locked(si, offset)) - goto checks; - } - offset = si->lowest_bit; - while (offset < scan_base) { - if (unlikely(--latency_ration < 0)) { - cond_resched(); - latency_ration = LATENCY_LIMIT; - scanned_many = true; - } - if (swap_offset_available_and_locked(si, offset)) - goto checks; - offset++; - } - spin_lock(&si->lock); - -no_page: - si->flags -= SWP_SCANNING; - return n_ret; + return cluster_alloc_swap(si, usage, nr, slots, order); } int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) @@ -2871,8 +2670,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) mutex_unlock(&swapon_mutex); free_percpu(p->percpu_cluster); p->percpu_cluster = NULL; - free_percpu(p->cluster_next_cpu); - p->cluster_next_cpu = NULL; vfree(swap_map); kvfree(zeromap); kvfree(cluster_info); @@ -3184,8 +2981,6 @@ static unsigned long read_swap_header(struct swap_info_struct *si, } si->lowest_bit = 1; - si->cluster_next = 1; - si->cluster_nr = 0; maxpages = swapfile_maximum_size; last_page = swap_header->info.last_page; @@ -3271,7 +3066,6 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, unsigned long maxpages) { unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); - unsigned long col = si->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS; struct swap_cluster_info *cluster_info; unsigned long i, j, k, idx; int cpu, err = -ENOMEM; @@ -3283,15 +3077,6 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, for (i = 0; i < nr_clusters; i++) spin_lock_init(&cluster_info[i].lock); - si->cluster_next_cpu = alloc_percpu(unsigned int); - if (!si->cluster_next_cpu) - goto err_free; - - /* Random start position to help with wear leveling */ - for_each_possible_cpu(cpu) - per_cpu(*si->cluster_next_cpu, cpu) = - get_random_u32_inclusive(1, si->highest_bit); - si->percpu_cluster = alloc_percpu(struct percpu_cluster); if (!si->percpu_cluster) goto err_free; @@ -3333,7 +3118,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, * sharing same address space. 
*/ for (k = 0; k < SWAP_CLUSTER_COLS; k++) { - j = (k + col) % SWAP_CLUSTER_COLS; + j = k % SWAP_CLUSTER_COLS; for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) { struct swap_cluster_info *ci; idx = i * SWAP_CLUSTER_COLS + j; @@ -3483,18 +3268,18 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) if (si->bdev && bdev_nonrot(si->bdev)) { si->flags |= SWP_SOLIDSTATE; - - cluster_info = setup_clusters(si, swap_header, maxpages); - if (IS_ERR(cluster_info)) { - error = PTR_ERR(cluster_info); - cluster_info = NULL; - goto bad_swap_unlock_inode; - } } else { atomic_inc(&nr_rotate_swap); inced_nr_rotate_swap = true; } + cluster_info = setup_clusters(si, swap_header, maxpages); + if (IS_ERR(cluster_info)) { + error = PTR_ERR(cluster_info); + cluster_info = NULL; + goto bad_swap_unlock_inode; + } + if ((swap_flags & SWAP_FLAG_DISCARD) && si->bdev && bdev_max_discard_sectors(si->bdev)) { /* @@ -3575,8 +3360,6 @@ bad_swap_unlock_inode: bad_swap: free_percpu(si->percpu_cluster); si->percpu_cluster = NULL; - free_percpu(si->cluster_next_cpu); - si->cluster_next_cpu = NULL; inode = NULL; destroy_swap_extents(si); swap_cgroup_swapoff(si->type); From 5b3bd90e0be11aaacb6906804271a561940c2b62 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 31 Dec 2024 01:46:12 +0800 Subject: [PATCH 229/504] mm, swap: use cluster lock for HDD Cluster lock (ci->lock) was introduced to reduce contention for certain operations. Using cluster lock for HDD is not helpful as HDD have a poor performance, so locking isn't the bottleneck. But having different set of locks for HDD / non-HDD prevents further rework of device lock (si->lock). This commit just changed all lock_cluster_or_swap_info to lock_cluster, which is a safe and straight conversion since cluster info is always allocated now, also removed all cluster_info related checks. Link: https://lkml.kernel.org/r/20241230174621.61185-5-ryncsn@gmail.com Signed-off-by: Kairui Song Suggested-by: Chris Li Reviewed-by: Baoquan He Cc: Barry Song Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Nhat Pham Cc: Ryan Roberts Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/swapfile.c | 107 ++++++++++++++++---------------------------------- 1 file changed, 34 insertions(+), 73 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index fca58d43b836..d0e5b9fa0c48 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -58,10 +58,9 @@ static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset, unsigned int nr_entries); static bool folio_swapcache_freeable(struct folio *folio); -static struct swap_cluster_info *lock_cluster_or_swap_info( - struct swap_info_struct *si, unsigned long offset); -static void unlock_cluster_or_swap_info(struct swap_info_struct *si, - struct swap_cluster_info *ci); +static struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, + unsigned long offset); +static void unlock_cluster(struct swap_cluster_info *ci); static DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; @@ -222,9 +221,9 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, * swap_map is HAS_CACHE only, which means the slots have no page table * reference or pending writeback, and can't be allocated to others. 
*/ - ci = lock_cluster_or_swap_info(si, offset); + ci = lock_cluster(si, offset); need_reclaim = swap_is_has_cache(si, offset, nr_pages); - unlock_cluster_or_swap_info(si, ci); + unlock_cluster(ci); if (!need_reclaim) goto out_unlock; @@ -404,45 +403,15 @@ static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si { struct swap_cluster_info *ci; - ci = si->cluster_info; - if (ci) { - ci += offset / SWAPFILE_CLUSTER; - spin_lock(&ci->lock); - } + ci = &si->cluster_info[offset / SWAPFILE_CLUSTER]; + spin_lock(&ci->lock); + return ci; } static inline void unlock_cluster(struct swap_cluster_info *ci) { - if (ci) - spin_unlock(&ci->lock); -} - -/* - * Determine the locking method in use for this device. Return - * swap_cluster_info if SSD-style cluster-based locking is in place. - */ -static inline struct swap_cluster_info *lock_cluster_or_swap_info( - struct swap_info_struct *si, unsigned long offset) -{ - struct swap_cluster_info *ci; - - /* Try to use fine-grained SSD-style locking if available: */ - ci = lock_cluster(si, offset); - /* Otherwise, fall back to traditional, coarse locking: */ - if (!ci) - spin_lock(&si->lock); - - return ci; -} - -static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si, - struct swap_cluster_info *ci) -{ - if (ci) - unlock_cluster(ci); - else - spin_unlock(&si->lock); + spin_unlock(&ci->lock); } /* Add a cluster to discard list and schedule it to do discard */ @@ -558,9 +527,6 @@ static void inc_cluster_info_page(struct swap_info_struct *si, unsigned long idx = page_nr / SWAPFILE_CLUSTER; struct swap_cluster_info *ci; - if (!cluster_info) - return; - ci = cluster_info + idx; ci->count++; @@ -576,9 +542,6 @@ static void inc_cluster_info_page(struct swap_info_struct *si, static void dec_cluster_info_page(struct swap_info_struct *si, struct swap_cluster_info *ci, int nr_pages) { - if (!si->cluster_info) - return; - VM_BUG_ON(ci->count < nr_pages); VM_BUG_ON(cluster_is_free(ci)); lockdep_assert_held(&si->lock); @@ -1007,8 +970,6 @@ static int cluster_alloc_swap(struct swap_info_struct *si, { int n_ret = 0; - VM_BUG_ON(!si->cluster_info); - si->flags += SWP_SCANNING; while (n_ret < nr) { @@ -1052,10 +1013,10 @@ static int scan_swap_map_slots(struct swap_info_struct *si, } /* - * Swapfile is not block device or not using clusters so unable + * Swapfile is not block device so unable * to allocate large entries. 
*/ - if (!(si->flags & SWP_BLKDEV) || !si->cluster_info) + if (!(si->flags & SWP_BLKDEV)) return 0; } @@ -1295,9 +1256,9 @@ static unsigned char __swap_entry_free(struct swap_info_struct *si, unsigned long offset = swp_offset(entry); unsigned char usage; - ci = lock_cluster_or_swap_info(si, offset); + ci = lock_cluster(si, offset); usage = __swap_entry_free_locked(si, offset, 1); - unlock_cluster_or_swap_info(si, ci); + unlock_cluster(ci); if (!usage) free_swap_slot(entry); @@ -1320,14 +1281,14 @@ static bool __swap_entries_free(struct swap_info_struct *si, if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER) goto fallback; - ci = lock_cluster_or_swap_info(si, offset); + ci = lock_cluster(si, offset); if (!swap_is_last_map(si, offset, nr, &has_cache)) { - unlock_cluster_or_swap_info(si, ci); + unlock_cluster(ci); goto fallback; } for (i = 0; i < nr; i++) WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE); - unlock_cluster_or_swap_info(si, ci); + unlock_cluster(ci); if (!has_cache) { for (i = 0; i < nr; i++) @@ -1383,7 +1344,7 @@ static void cluster_swap_free_nr(struct swap_info_struct *si, DECLARE_BITMAP(to_free, BITS_PER_LONG) = { 0 }; int i, nr; - ci = lock_cluster_or_swap_info(si, offset); + ci = lock_cluster(si, offset); while (nr_pages) { nr = min(BITS_PER_LONG, nr_pages); for (i = 0; i < nr; i++) { @@ -1391,18 +1352,18 @@ static void cluster_swap_free_nr(struct swap_info_struct *si, bitmap_set(to_free, i, 1); } if (!bitmap_empty(to_free, BITS_PER_LONG)) { - unlock_cluster_or_swap_info(si, ci); + unlock_cluster(ci); for_each_set_bit(i, to_free, BITS_PER_LONG) free_swap_slot(swp_entry(si->type, offset + i)); if (nr == nr_pages) return; bitmap_clear(to_free, 0, BITS_PER_LONG); - ci = lock_cluster_or_swap_info(si, offset); + ci = lock_cluster(si, offset); } offset += nr; nr_pages -= nr; } - unlock_cluster_or_swap_info(si, ci); + unlock_cluster(ci); } /* @@ -1441,9 +1402,9 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) if (!si) return; - ci = lock_cluster_or_swap_info(si, offset); + ci = lock_cluster(si, offset); if (size > 1 && swap_is_has_cache(si, offset, size)) { - unlock_cluster_or_swap_info(si, ci); + unlock_cluster(ci); spin_lock(&si->lock); swap_entry_range_free(si, entry, size); spin_unlock(&si->lock); @@ -1451,14 +1412,14 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) } for (int i = 0; i < size; i++, entry.val++) { if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) { - unlock_cluster_or_swap_info(si, ci); + unlock_cluster(ci); free_swap_slot(entry); if (i == size - 1) return; - lock_cluster_or_swap_info(si, offset); + lock_cluster(si, offset); } } - unlock_cluster_or_swap_info(si, ci); + unlock_cluster(ci); } static int swp_entry_cmp(const void *ent1, const void *ent2) @@ -1522,9 +1483,9 @@ int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) struct swap_cluster_info *ci; int count; - ci = lock_cluster_or_swap_info(si, offset); + ci = lock_cluster(si, offset); count = swap_count(si->swap_map[offset]); - unlock_cluster_or_swap_info(si, ci); + unlock_cluster(ci); return count; } @@ -1547,7 +1508,7 @@ int swp_swapcount(swp_entry_t entry) offset = swp_offset(entry); - ci = lock_cluster_or_swap_info(si, offset); + ci = lock_cluster(si, offset); count = swap_count(si->swap_map[offset]); if (!(count & COUNT_CONTINUED)) @@ -1570,7 +1531,7 @@ int swp_swapcount(swp_entry_t entry) n *= (SWAP_CONT_MAX + 1); } while (tmp_count & COUNT_CONTINUED); out: - unlock_cluster_or_swap_info(si, ci); + unlock_cluster(ci); return count; } @@ 
-1585,8 +1546,8 @@ static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, int i; bool ret = false; - ci = lock_cluster_or_swap_info(si, offset); - if (!ci || nr_pages == 1) { + ci = lock_cluster(si, offset); + if (nr_pages == 1) { if (swap_count(map[roffset])) ret = true; goto unlock_out; @@ -1598,7 +1559,7 @@ static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, } } unlock_out: - unlock_cluster_or_swap_info(si, ci); + unlock_cluster(ci); return ret; } @@ -3428,7 +3389,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr) offset = swp_offset(entry); VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER); VM_WARN_ON(usage == 1 && nr > 1); - ci = lock_cluster_or_swap_info(si, offset); + ci = lock_cluster(si, offset); err = 0; for (i = 0; i < nr; i++) { @@ -3483,7 +3444,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr) } unlock_out: - unlock_cluster_or_swap_info(si, ci); + unlock_cluster(ci); return err; } From bf9ce0f9ef676a2768322383df20331ebd95e556 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 31 Dec 2024 01:46:13 +0800 Subject: [PATCH 230/504] mm, swap: clean up device availability check Remove highest_bit and lowest_bit. After the HDD allocation path has been removed, the only purpose of these two fields is to determine whether the device is full or not, which can instead be determined by checking the inuse_pages. Link: https://lkml.kernel.org/r/20241230174621.61185-6-ryncsn@gmail.com Signed-off-by: Kairui Song Reviewed-by: Baoquan He Cc: Barry Song Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Nhat Pham Cc: Ryan Roberts Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- fs/btrfs/inode.c | 1 - fs/f2fs/data.c | 1 - fs/iomap/swapfile.c | 1 - include/linux/swap.h | 2 -- mm/page_io.c | 1 - mm/swapfile.c | 38 ++++++++------------------------------ 6 files changed, 8 insertions(+), 36 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 27b2fe7f735d..3b99b1e19371 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -10110,7 +10110,6 @@ out_unlock_mmap: *span = bsi.highest_ppage - bsi.lowest_ppage + 1; sis->max = bsi.nr_pages; sis->pages = bsi.nr_pages - 1; - sis->highest_bit = bsi.nr_pages - 1; return bsi.nr_extents; } #else diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c index a2478c2afb3a..a9eddd782dbc 100644 --- a/fs/f2fs/data.c +++ b/fs/f2fs/data.c @@ -4043,7 +4043,6 @@ retry: cur_lblock = 1; /* force Empty message */ sis->max = cur_lblock; sis->pages = cur_lblock - 1; - sis->highest_bit = cur_lblock - 1; out: if (not_aligned) f2fs_warn(sbi, "Swapfile (%u) is not align to section: 1) creat(), 2) ioctl(F2FS_IOC_SET_PIN_FILE), 3) fallocate(%lu * N)", diff --git a/fs/iomap/swapfile.c b/fs/iomap/swapfile.c index 5fc0ac36dee3..b90d0eda9e51 100644 --- a/fs/iomap/swapfile.c +++ b/fs/iomap/swapfile.c @@ -189,7 +189,6 @@ int iomap_swapfile_activate(struct swap_info_struct *sis, *pagespan = 1 + isi.highest_ppage - isi.lowest_ppage; sis->max = isi.nr_pages; sis->pages = isi.nr_pages - 1; - sis->highest_bit = isi.nr_pages - 1; return isi.nr_extents; } EXPORT_SYMBOL_GPL(iomap_swapfile_activate); diff --git a/include/linux/swap.h b/include/linux/swap.h index 0c681aa5cb98..0c222017b5c6 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -306,8 +306,6 @@ struct swap_info_struct { struct list_head frag_clusters[SWAP_NR_ORDERS]; /* list of cluster that are fragmented or contented */ unsigned int frag_cluster_nr[SWAP_NR_ORDERS]; - unsigned 
int lowest_bit; /* index of first free in swap_map */ - unsigned int highest_bit; /* index of last free in swap_map */ unsigned int pages; /* total of usable pages of swap */ unsigned int inuse_pages; /* number of those currently in use */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ diff --git a/mm/page_io.c b/mm/page_io.c index 4b4ea8e49cf6..9b983de351f9 100644 --- a/mm/page_io.c +++ b/mm/page_io.c @@ -163,7 +163,6 @@ reprobe: page_no = 1; /* force Empty message */ sis->max = page_no; sis->pages = page_no - 1; - sis->highest_bit = page_no - 1; out: return ret; bad_bmap: diff --git a/mm/swapfile.c b/mm/swapfile.c index d0e5b9fa0c48..7963a0c646a4 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -55,7 +55,7 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t, static void free_swap_count_continuations(struct swap_info_struct *); static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry, unsigned int nr_pages); -static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset, +static void swap_range_alloc(struct swap_info_struct *si, unsigned int nr_entries); static bool folio_swapcache_freeable(struct folio *folio); static struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, @@ -650,7 +650,7 @@ static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster } memset(si->swap_map + start, usage, nr_pages); - swap_range_alloc(si, start, nr_pages); + swap_range_alloc(si, nr_pages); ci->count += nr_pages; if (ci->count == SWAPFILE_CLUSTER) { @@ -888,19 +888,11 @@ static void del_from_avail_list(struct swap_info_struct *si) spin_unlock(&swap_avail_lock); } -static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset, +static void swap_range_alloc(struct swap_info_struct *si, unsigned int nr_entries) { - unsigned int end = offset + nr_entries - 1; - - if (offset == si->lowest_bit) - si->lowest_bit += nr_entries; - if (end == si->highest_bit) - WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries); WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries); if (si->inuse_pages == si->pages) { - si->lowest_bit = si->max; - si->highest_bit = 0; del_from_avail_list(si); if (si->cluster_info && vm_swap_full()) @@ -933,15 +925,8 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset, for (i = 0; i < nr_entries; i++) clear_bit(offset + i, si->zeromap); - if (offset < si->lowest_bit) - si->lowest_bit = offset; - if (end > si->highest_bit) { - bool was_full = !si->highest_bit; - - WRITE_ONCE(si->highest_bit, end); - if (was_full && (si->flags & SWP_WRITEOK)) - add_to_avail_list(si); - } + if (si->inuse_pages == si->pages) + add_to_avail_list(si); if (si->flags & SWP_BLKDEV) swap_slot_free_notify = si->bdev->bd_disk->fops->swap_slot_free_notify; @@ -1051,15 +1036,12 @@ start_over: plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); spin_unlock(&swap_avail_lock); spin_lock(&si->lock); - if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) { + if ((si->inuse_pages == si->pages) || !(si->flags & SWP_WRITEOK)) { spin_lock(&swap_avail_lock); if (plist_node_empty(&si->avail_lists[node])) { spin_unlock(&si->lock); goto nextsi; } - WARN(!si->highest_bit, - "swap_info %d in list but !highest_bit\n", - si->type); WARN(!(si->flags & SWP_WRITEOK), "swap_info %d in list but !SWP_WRITEOK\n", si->type); @@ -2441,8 +2423,8 @@ static void _enable_swap_info(struct swap_info_struct *si) */ plist_add(&si->list, &swap_active_head); - /* add to 
available list iff swap device is not full */ - if (si->highest_bit) + /* add to available list if swap device is not full */ + if (si->inuse_pages < si->pages) add_to_avail_list(si); } @@ -2606,7 +2588,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) drain_mmlist(); /* wait for anyone still in scan_swap_map_slots */ - p->highest_bit = 0; /* cuts scans short */ while (p->flags >= SWP_SCANNING) { spin_unlock(&p->lock); spin_unlock(&swap_lock); @@ -2941,8 +2922,6 @@ static unsigned long read_swap_header(struct swap_info_struct *si, return 0; } - si->lowest_bit = 1; - maxpages = swapfile_maximum_size; last_page = swap_header->info.last_page; if (!last_page) { @@ -2959,7 +2938,6 @@ static unsigned long read_swap_header(struct swap_info_struct *si, if ((unsigned int)maxpages == 0) maxpages = UINT_MAX; } - si->highest_bit = maxpages - 1; if (!maxpages) return 0; From d765475fecc54a7e9273c4e5f373f70da621dd95 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 31 Dec 2024 01:46:14 +0800 Subject: [PATCH 231/504] mm, swap: clean up plist removal and adding When the swap device is full (inuse_pages == pages), it should be removed from the allocation available plist. If any slot is freed, the swap device should be added back to the plist. Additionally, during swapon or swapoff, the swap device is forcefully added or removed. Currently, the condition (inuse_pages == pages) is checked after every counter update, then remove or add the device accordingly. This is serialized by si->lock. This commit decouples it from the protection of si->lock and reworked plist removal and adding, making it possible to get rid of the hard dependency on si->lock in allocation path in later commits. To achieve this, simply using another lock is not an optimal approach, as the overhead is observable for a hot counter, and may cause complex locking issues. Thus, this commit manages to make it a lock-free atomic operation, by embedding the plist state into the second highest bit of the atomic counter. Simply making the counter an atomic will not work, if the update and plist status check are not performed atomically, we may miss an addition or removal. With the embedded info we can update the counter and check the plist status with single atomic operations, and avoid any extra overheads: If the counter is full (inuse_pages == pages) and the off-list bit is unset, we attempt to remove it from the plist. If the counter is not full (inuse_pages != pages) and the off-list bit is set, we attempt to add it to the plist. Removing, adding and bit update is serialized with a lock, which is a cold path. Ordinary counter updates will be lock-free. 
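A minimal user-space sketch of the embedded off-list bit described above may help make the scheme concrete. The names here (struct dev, usage_add, usage_sub) and the use of an unsigned long counter are illustrative only; the kernel defines SWAP_USAGE_OFFLIST_BIT relative to BITS_PER_TYPE(atomic_t) on an atomic_long_t, and it rechecks the state under swap_avail_lock before actually touching the plist.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Second highest bit of the counter doubles as the "off the plist" marker. */
#define OFFLIST_BIT  (1UL << (sizeof(unsigned long) * 8 - 2))
#define COUNTER_MASK (~OFFLIST_BIT)

struct dev {
        _Atomic unsigned long inuse;    /* usage counter + embedded off-list bit */
        unsigned long pages;            /* total usable slots */
};

static unsigned long usage(struct dev *d)
{
        return atomic_load(&d->inuse) & COUNTER_MASK;
}

/* Called after allocating nr slots; true means "remove device from plist". */
static bool usage_add(struct dev *d, unsigned long nr)
{
        unsigned long full = d->pages;
        unsigned long val = atomic_fetch_add(&d->inuse, nr) + nr;

        /* Device just became full and is still on-list: try to mark it off-list. */
        if (val == full &&
            atomic_compare_exchange_strong(&d->inuse, &full, full | OFFLIST_BIT))
                return true;
        return false;
}

/* Called after freeing nr slots; true means "put device back on the plist". */
static bool usage_sub(struct dev *d, unsigned long nr)
{
        unsigned long val = atomic_fetch_sub(&d->inuse, nr) - nr;

        /* Off-list but no longer full: clear the bit and re-add it. */
        if (val & OFFLIST_BIT) {
                atomic_fetch_and(&d->inuse, COUNTER_MASK);
                return true;
        }
        return false;
}

int main(void)
{
        struct dev d = { .pages = 4 };

        atomic_init(&d.inuse, 0);
        usage_add(&d, 4);                       /* fills the device, sets the bit */
        printf("in use: %lu\n", usage(&d));     /* 4, flag masked out */
        usage_sub(&d, 1);                       /* frees a slot, clears the bit */
        printf("in use: %lu\n", usage(&d));     /* 3 */
        return 0;
}

The kernel version additionally serializes the actual plist add/remove (and the bit flip on the add-back path) with swap_avail_lock, so only the hot counter updates are lock-free.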
Link: https://lkml.kernel.org/r/20241230174621.61185-7-ryncsn@gmail.com Signed-off-by: Kairui Song Cc: Barry Song Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Nhat Pham Cc: Ryan Roberts Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap.h | 2 +- mm/swapfile.c | 190 +++++++++++++++++++++++++++++++------------ 2 files changed, 140 insertions(+), 52 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 0c222017b5c6..e1eeea6307cd 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -307,7 +307,7 @@ struct swap_info_struct { /* list of cluster that are fragmented or contented */ unsigned int frag_cluster_nr[SWAP_NR_ORDERS]; unsigned int pages; /* total of usable pages of swap */ - unsigned int inuse_pages; /* number of those currently in use */ + atomic_long_t inuse_pages; /* number of those currently in use */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ struct block_device *bdev; /* swap device or bdev of swap file */ diff --git a/mm/swapfile.c b/mm/swapfile.c index 7963a0c646a4..e6e58cfb5178 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -128,6 +128,26 @@ static inline unsigned char swap_count(unsigned char ent) return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */ } +/* + * Use the second highest bit of inuse_pages counter as the indicator + * of if one swap device is on the available plist, so the atomic can + * still be updated arithmetic while having special data embedded. + * + * inuse_pages counter is the only thing indicating if a device should + * be on avail_lists or not (except swapon / swapoff). By embedding the + * on-list bit in the atomic counter, updates no longer need any lock + * to check the list status. + * + * This bit will be set if the device is not on the plist and not + * usable, will be cleared if the device is on the plist. + */ +#define SWAP_USAGE_OFFLIST_BIT (1UL << (BITS_PER_TYPE(atomic_t) - 2)) +#define SWAP_USAGE_COUNTER_MASK (~SWAP_USAGE_OFFLIST_BIT) +static long swap_usage_in_pages(struct swap_info_struct *si) +{ + return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_COUNTER_MASK; +} + /* Reclaim the swap entry anyway if possible */ #define TTRS_ANYWAY 0x1 /* @@ -717,7 +737,7 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) int nr_reclaim; if (force) - to_scan = si->inuse_pages / SWAPFILE_CLUSTER; + to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER; while (!list_empty(&si->full_clusters)) { ci = list_first_entry(&si->full_clusters, struct swap_cluster_info, list); @@ -872,44 +892,130 @@ done: return found; } -static void __del_from_avail_list(struct swap_info_struct *si) +/* SWAP_USAGE_OFFLIST_BIT can only be cleared by this helper. */ +static void del_from_avail_list(struct swap_info_struct *si, bool swapoff) { int nid; + unsigned long pages; + + spin_lock(&swap_avail_lock); + + if (swapoff) { + /* + * Forcefully remove it. Clear the SWP_WRITEOK flags for + * swapoff here so it's synchronized by both si->lock and + * swap_avail_lock, to ensure the result can be seen by + * add_to_avail_list. 
+ */ + lockdep_assert_held(&si->lock); + si->flags &= ~SWP_WRITEOK; + atomic_long_or(SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); + } else { + /* + * If not called by swapoff, take it off-list only if it's + * full and SWAP_USAGE_OFFLIST_BIT is not set (strictly + * si->inuse_pages == pages), any concurrent slot freeing, + * or device already removed from plist by someone else + * will make this return false. + */ + pages = si->pages; + if (!atomic_long_try_cmpxchg(&si->inuse_pages, &pages, + pages | SWAP_USAGE_OFFLIST_BIT)) + goto skip; + } - assert_spin_locked(&si->lock); for_each_node(nid) plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]); + +skip: + spin_unlock(&swap_avail_lock); } -static void del_from_avail_list(struct swap_info_struct *si) +/* SWAP_USAGE_OFFLIST_BIT can only be set by this helper. */ +static void add_to_avail_list(struct swap_info_struct *si, bool swapon) { + int nid; + long val; + unsigned long pages; + spin_lock(&swap_avail_lock); - __del_from_avail_list(si); + + /* Corresponding to SWP_WRITEOK clearing in del_from_avail_list */ + if (swapon) { + lockdep_assert_held(&si->lock); + si->flags |= SWP_WRITEOK; + } else { + if (!(READ_ONCE(si->flags) & SWP_WRITEOK)) + goto skip; + } + + if (!(atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT)) + goto skip; + + val = atomic_long_fetch_and_relaxed(~SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); + + /* + * When device is full and device is on the plist, only one updater will + * see (inuse_pages == si->pages) and will call del_from_avail_list. If + * that updater happen to be here, just skip adding. + */ + pages = si->pages; + if (val == pages) { + /* Just like the cmpxchg in del_from_avail_list */ + if (atomic_long_try_cmpxchg(&si->inuse_pages, &pages, + pages | SWAP_USAGE_OFFLIST_BIT)) + goto skip; + } + + for_each_node(nid) + plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]); + +skip: spin_unlock(&swap_avail_lock); } +/* + * swap_usage_add / swap_usage_sub of each slot are serialized by ci->lock + * within each cluster, so the total contribution to the global counter should + * always be positive and cannot exceed the total number of usable slots. + */ +static bool swap_usage_add(struct swap_info_struct *si, unsigned int nr_entries) +{ + long val = atomic_long_add_return_relaxed(nr_entries, &si->inuse_pages); + + /* + * If device is full, and SWAP_USAGE_OFFLIST_BIT is not set, + * remove it from the plist. + */ + if (unlikely(val == si->pages)) { + del_from_avail_list(si, false); + return true; + } + + return false; +} + +static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries) +{ + long val = atomic_long_sub_return_relaxed(nr_entries, &si->inuse_pages); + + /* + * If device is not full, and SWAP_USAGE_OFFLIST_BIT is set, + * remove it from the plist. 
+ */ + if (unlikely(val & SWAP_USAGE_OFFLIST_BIT)) + add_to_avail_list(si, false); +} + static void swap_range_alloc(struct swap_info_struct *si, unsigned int nr_entries) { - WRITE_ONCE(si->inuse_pages, si->inuse_pages + nr_entries); - if (si->inuse_pages == si->pages) { - del_from_avail_list(si); - + if (swap_usage_add(si, nr_entries)) { if (si->cluster_info && vm_swap_full()) schedule_work(&si->reclaim_work); } } -static void add_to_avail_list(struct swap_info_struct *si) -{ - int nid; - - spin_lock(&swap_avail_lock); - for_each_node(nid) - plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]); - spin_unlock(&swap_avail_lock); -} - static void swap_range_free(struct swap_info_struct *si, unsigned long offset, unsigned int nr_entries) { @@ -925,8 +1031,6 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset, for (i = 0; i < nr_entries; i++) clear_bit(offset + i, si->zeromap); - if (si->inuse_pages == si->pages) - add_to_avail_list(si); if (si->flags & SWP_BLKDEV) swap_slot_free_notify = si->bdev->bd_disk->fops->swap_slot_free_notify; @@ -946,7 +1050,7 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset, */ smp_wmb(); atomic_long_add(nr_entries, &nr_swap_pages); - WRITE_ONCE(si->inuse_pages, si->inuse_pages - nr_entries); + swap_usage_sub(si, nr_entries); } static int cluster_alloc_swap(struct swap_info_struct *si, @@ -1036,19 +1140,6 @@ start_over: plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); spin_unlock(&swap_avail_lock); spin_lock(&si->lock); - if ((si->inuse_pages == si->pages) || !(si->flags & SWP_WRITEOK)) { - spin_lock(&swap_avail_lock); - if (plist_node_empty(&si->avail_lists[node])) { - spin_unlock(&si->lock); - goto nextsi; - } - WARN(!(si->flags & SWP_WRITEOK), - "swap_info %d in list but !SWP_WRITEOK\n", - si->type); - __del_from_avail_list(si); - spin_unlock(&si->lock); - goto nextsi; - } n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, n_goal, swp_entries, order); spin_unlock(&si->lock); @@ -1057,7 +1148,6 @@ start_over: cond_resched(); spin_lock(&swap_avail_lock); -nextsi: /* * if we got here, it's likely that si was almost full before, * and since scan_swap_map_slots() can drop the si->lock, @@ -1789,7 +1879,7 @@ unsigned int count_swap_pages(int type, int free) if (sis->flags & SWP_WRITEOK) { n = sis->pages; if (free) - n -= sis->inuse_pages; + n -= swap_usage_in_pages(sis); } spin_unlock(&sis->lock); } @@ -2124,7 +2214,7 @@ static int try_to_unuse(unsigned int type) swp_entry_t entry; unsigned int i; - if (!READ_ONCE(si->inuse_pages)) + if (!swap_usage_in_pages(si)) goto success; retry: @@ -2137,7 +2227,7 @@ retry: spin_lock(&mmlist_lock); p = &init_mm.mmlist; - while (READ_ONCE(si->inuse_pages) && + while (swap_usage_in_pages(si) && !signal_pending(current) && (p = p->next) != &init_mm.mmlist) { @@ -2165,7 +2255,7 @@ retry: mmput(prev_mm); i = 0; - while (READ_ONCE(si->inuse_pages) && + while (swap_usage_in_pages(si) && !signal_pending(current) && (i = find_next_to_unuse(si, i)) != 0) { @@ -2200,7 +2290,7 @@ retry: * folio_alloc_swap(), temporarily hiding that swap. It's easy * and robust (though cpu-intensive) just to keep retrying. */ - if (READ_ONCE(si->inuse_pages)) { + if (swap_usage_in_pages(si)) { if (!signal_pending(current)) goto retry; return -EINTR; @@ -2209,7 +2299,7 @@ retry: success: /* * Make sure that further cleanups after try_to_unuse() returns happen - * after swap_range_free() reduces si->inuse_pages to 0. + * after swap_range_free() reduces inuse_pages to 0. 
*/ smp_mb(); return 0; @@ -2227,7 +2317,7 @@ static void drain_mmlist(void) unsigned int type; for (type = 0; type < nr_swapfiles; type++) - if (swap_info[type]->inuse_pages) + if (swap_usage_in_pages(swap_info[type])) return; spin_lock(&mmlist_lock); list_for_each_safe(p, next, &init_mm.mmlist) @@ -2406,7 +2496,6 @@ static void setup_swap_info(struct swap_info_struct *si, int prio, static void _enable_swap_info(struct swap_info_struct *si) { - si->flags |= SWP_WRITEOK; atomic_long_add(si->pages, &nr_swap_pages); total_swap_pages += si->pages; @@ -2423,9 +2512,8 @@ static void _enable_swap_info(struct swap_info_struct *si) */ plist_add(&si->list, &swap_active_head); - /* add to available list if swap device is not full */ - if (si->inuse_pages < si->pages) - add_to_avail_list(si); + /* Add back to available list */ + add_to_avail_list(si, true); } static void enable_swap_info(struct swap_info_struct *si, int prio, @@ -2523,7 +2611,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) goto out_dput; } spin_lock(&p->lock); - del_from_avail_list(p); + del_from_avail_list(p, true); if (p->prio < 0) { struct swap_info_struct *si = p; int nid; @@ -2541,7 +2629,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) plist_del(&p->list, &swap_active_head); atomic_long_sub(p->pages, &nr_swap_pages); total_swap_pages -= p->pages; - p->flags &= ~SWP_WRITEOK; spin_unlock(&p->lock); spin_unlock(&swap_lock); @@ -2721,7 +2808,7 @@ static int swap_show(struct seq_file *swap, void *v) } bytes = K(si->pages); - inuse = K(READ_ONCE(si->inuse_pages)); + inuse = K(swap_usage_in_pages(si)); file = si->swap_file; len = seq_file_path(swap, file, " \t\n\\"); @@ -2838,6 +2925,7 @@ static struct swap_info_struct *alloc_swap_info(void) } spin_lock_init(&p->lock); spin_lock_init(&p->cont_lock); + atomic_long_set(&p->inuse_pages, SWAP_USAGE_OFFLIST_BIT); init_completion(&p->comp); return p; @@ -3335,7 +3423,7 @@ void si_swapinfo(struct sysinfo *val) struct swap_info_struct *si = swap_info[type]; if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) - nr_to_be_unused += READ_ONCE(si->inuse_pages); + nr_to_be_unused += swap_usage_in_pages(si); } val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused; val->totalswap = total_swap_pages + nr_to_be_unused; From 86b2f5cb95ef0a8e4fa4e5ef6ce250ccd94adbd4 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 31 Dec 2024 01:46:15 +0800 Subject: [PATCH 232/504] mm, swap: hold a reference during scan and cleanup flag usage The flag SWP_SCANNING was used as an indicator of whether a device is being scanned for allocation, and prevents swapoff. Combined with SWP_WRITEOK, they work as a set of barriers for a clean swapoff: 1. Swapoff clears SWP_WRITEOK, allocation requests will see ~SWP_WRITEOK and abort as it's serialized by si->lock. 2. Swapoff unuses all allocated entries. 3. Swapoff waits for SWP_SCANNING flag to be cleared, so ongoing allocations will stop, preventing UAF. 4. Now swapoff can free everything safely. This will make the allocation path have a hard dependency on si->lock. Allocation always have to acquire si->lock first for setting SWP_SCANNING and checking SWP_WRITEOK. This commit removes this flag, and just uses the existing per-CPU refcount instead to prevent UAF in step 3, which serves well for such usage without dependency on si->lock, and scales very well too. Just hold a reference during the whole scan and allocation process. Swapoff will kill and wait for the counter. 
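Condensing the four steps above into the swapoff-side ordering gives roughly the following sketch. This is only an outline: the real swapoff path does considerably more work between these calls, and the helpers named here are the ones added or already used by this series.

        del_from_avail_list(p, true);   /* 1. clear SWP_WRITEOK, new allocations abort   */
        wait_for_allocation(p);         /*    take each ci->lock once so in-flight       */
                                        /*    allocators observe ~SWP_WRITEOK            */
        try_to_unuse(p->type);          /* 2. bring every allocated entry back in        */
        percpu_ref_kill(&p->users);     /* 3. stop new references to the device          */
        wait_for_completion(&p->comp);  /*    and wait for scanners still holding one    */
        /* 4. now the swap_info_struct can be torn down and freed safely                 */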
And for preventing any allocation from happening after step 1 so the unuse in step 2 can ensure all slots are free, swapoff will acquire the ci->lock of each cluster one by one to ensure all allocations see ~SWP_WRITEOK and abort. This way these dependences on si->lock are gone. And worth noting we can't kill the refcount as the first step for swapoff as the unuse process have to acquire the refcount. Link: https://lkml.kernel.org/r/20241230174621.61185-8-ryncsn@gmail.com Signed-off-by: Kairui Song Cc: Barry Song Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Nhat Pham Cc: Ryan Roberts Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap.h | 1 - mm/swapfile.c | 90 ++++++++++++++++++++++++++++---------------- 2 files changed, 57 insertions(+), 34 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index e1eeea6307cd..02120f1005d5 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -219,7 +219,6 @@ enum { SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */ SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */ /* add others here before... */ - SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */ }; #define SWAP_CLUSTER_MAX 32UL diff --git a/mm/swapfile.c b/mm/swapfile.c index e6e58cfb5178..99fd0b0d84a2 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -658,6 +658,8 @@ static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster { unsigned int nr_pages = 1 << order; + lockdep_assert_held(&ci->lock); + if (!(si->flags & SWP_WRITEOK)) return false; @@ -1059,8 +1061,6 @@ static int cluster_alloc_swap(struct swap_info_struct *si, { int n_ret = 0; - si->flags += SWP_SCANNING; - while (n_ret < nr) { unsigned long offset = cluster_alloc_swap_entry(si, order, usage); @@ -1069,8 +1069,6 @@ static int cluster_alloc_swap(struct swap_info_struct *si, slots[n_ret++] = swp_entry(si->type, offset); } - si->flags -= SWP_SCANNING; - return n_ret; } @@ -1112,6 +1110,22 @@ static int scan_swap_map_slots(struct swap_info_struct *si, return cluster_alloc_swap(si, usage, nr, slots, order); } +static bool get_swap_device_info(struct swap_info_struct *si) +{ + if (!percpu_ref_tryget_live(&si->users)) + return false; + /* + * Guarantee the si->users are checked before accessing other + * fields of swap_info_struct, and si->flags (SWP_WRITEOK) is + * up to dated. + * + * Paired with the spin_unlock() after setup_swap_info() in + * enable_swap_info(), and smp_wmb() in swapoff. 
+ */ + smp_rmb(); + return true; +} + int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order) { int order = swap_entry_order(entry_order); @@ -1139,13 +1153,16 @@ start_over: /* requeue si to after same-priority siblings */ plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); spin_unlock(&swap_avail_lock); - spin_lock(&si->lock); - n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, - n_goal, swp_entries, order); - spin_unlock(&si->lock); - if (n_ret || size > 1) - goto check_out; - cond_resched(); + if (get_swap_device_info(si)) { + spin_lock(&si->lock); + n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, + n_goal, swp_entries, order); + spin_unlock(&si->lock); + put_swap_device(si); + if (n_ret || size > 1) + goto check_out; + cond_resched(); + } spin_lock(&swap_avail_lock); /* @@ -1296,16 +1313,8 @@ struct swap_info_struct *get_swap_device(swp_entry_t entry) si = swp_swap_info(entry); if (!si) goto bad_nofile; - if (!percpu_ref_tryget_live(&si->users)) + if (!get_swap_device_info(si)) goto out; - /* - * Guarantee the si->users are checked before accessing other - * fields of swap_info_struct. - * - * Paired with the spin_unlock() after setup_swap_info() in - * enable_swap_info(). - */ - smp_rmb(); offset = swp_offset(entry); if (offset >= si->max) goto put_out; @@ -1785,10 +1794,13 @@ swp_entry_t get_swap_page_of_type(int type) goto fail; /* This is called for allocating swap entry, not cache */ - spin_lock(&si->lock); - if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0)) - atomic_long_dec(&nr_swap_pages); - spin_unlock(&si->lock); + if (get_swap_device_info(si)) { + spin_lock(&si->lock); + if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0)) + atomic_long_dec(&nr_swap_pages); + spin_unlock(&si->lock); + put_swap_device(si); + } fail: return entry; } @@ -2562,6 +2574,25 @@ bool has_usable_swap(void) return ret; } +/* + * Called after clearing SWP_WRITEOK, ensures cluster_alloc_range + * see the updated flags, so there will be no more allocations. + */ +static void wait_for_allocation(struct swap_info_struct *si) +{ + unsigned long offset; + unsigned long end = ALIGN(si->max, SWAPFILE_CLUSTER); + struct swap_cluster_info *ci; + + BUG_ON(si->flags & SWP_WRITEOK); + + for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) { + ci = lock_cluster(si, offset); + unlock_cluster(ci); + offset += SWAPFILE_CLUSTER; + } +} + SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) { struct swap_info_struct *p = NULL; @@ -2632,6 +2663,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) spin_unlock(&p->lock); spin_unlock(&swap_lock); + wait_for_allocation(p); + disable_swap_slots_cache_lock(); set_current_oom_origin(); @@ -2674,15 +2707,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) spin_lock(&p->lock); drain_mmlist(); - /* wait for anyone still in scan_swap_map_slots */ - while (p->flags >= SWP_SCANNING) { - spin_unlock(&p->lock); - spin_unlock(&swap_lock); - schedule_timeout_uninterruptible(1); - spin_lock(&swap_lock); - spin_lock(&p->lock); - } - swap_file = p->swap_file; p->swap_file = NULL; p->max = 0; From 5842e9fc2456516378f8a539e45d9caf0422386e Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 31 Dec 2024 01:46:16 +0800 Subject: [PATCH 233/504] mm, swap: use an enum to define all cluster flags and wrap flags changes Currently, we are only using flags to indicate which list the cluster is on. 
Using one bit for each list type might be a waste, as the list type grows, we will consume too many bits. Additionally, the current mixed usage of '&' and '==' is a bit confusing. Make it clean by using an enum to define all possible cluster statuses. Only an off-list cluster will have the NONE (0) flag. And use a wrapper to annotate and sanitize all flag settings and list movements. Link: https://lkml.kernel.org/r/20241230174621.61185-9-ryncsn@gmail.com Suggested-by: Chris Li Signed-off-by: Kairui Song Cc: Barry Song Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Nhat Pham Cc: Ryan Roberts Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap.h | 17 +++++++--- mm/swapfile.c | 75 +++++++++++++++++++++++--------------------- 2 files changed, 52 insertions(+), 40 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 02120f1005d5..339d7f0192ff 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -257,10 +257,19 @@ struct swap_cluster_info { u8 order; struct list_head list; }; -#define CLUSTER_FLAG_FREE 1 /* This cluster is free */ -#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */ -#define CLUSTER_FLAG_FRAG 4 /* This cluster is on nonfull list */ -#define CLUSTER_FLAG_FULL 8 /* This cluster is on full list */ + +/* All on-list cluster must have a non-zero flag. */ +enum swap_cluster_flags { + CLUSTER_FLAG_NONE = 0, /* For temporary off-list cluster */ + CLUSTER_FLAG_FREE, + CLUSTER_FLAG_NONFULL, + CLUSTER_FLAG_FRAG, + /* Clusters with flags above are allocatable */ + CLUSTER_FLAG_USABLE = CLUSTER_FLAG_FRAG, + CLUSTER_FLAG_FULL, + CLUSTER_FLAG_DISCARD, + CLUSTER_FLAG_MAX, +}; /* * The first page in the swap file is the swap header, which is always marked diff --git a/mm/swapfile.c b/mm/swapfile.c index 99fd0b0d84a2..7795a3d27273 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -403,7 +403,7 @@ static void discard_swap_cluster(struct swap_info_struct *si, static inline bool cluster_is_free(struct swap_cluster_info *info) { - return info->flags & CLUSTER_FLAG_FREE; + return info->flags == CLUSTER_FLAG_FREE; } static inline unsigned int cluster_index(struct swap_info_struct *si, @@ -434,6 +434,27 @@ static inline void unlock_cluster(struct swap_cluster_info *ci) spin_unlock(&ci->lock); } +static void cluster_move(struct swap_info_struct *si, + struct swap_cluster_info *ci, struct list_head *list, + enum swap_cluster_flags new_flags) +{ + VM_WARN_ON(ci->flags == new_flags); + BUILD_BUG_ON(1 << sizeof(ci->flags) * BITS_PER_BYTE < CLUSTER_FLAG_MAX); + + if (ci->flags == CLUSTER_FLAG_NONE) { + list_add_tail(&ci->list, list); + } else { + if (ci->flags == CLUSTER_FLAG_FRAG) { + VM_WARN_ON(!si->frag_cluster_nr[ci->order]); + si->frag_cluster_nr[ci->order]--; + } + list_move_tail(&ci->list, list); + } + ci->flags = new_flags; + if (new_flags == CLUSTER_FLAG_FRAG) + si->frag_cluster_nr[ci->order]++; +} + /* Add a cluster to discard list and schedule it to do discard */ static void swap_cluster_schedule_discard(struct swap_info_struct *si, struct swap_cluster_info *ci) @@ -447,10 +468,8 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si, */ memset(si->swap_map + idx * SWAPFILE_CLUSTER, SWAP_MAP_BAD, SWAPFILE_CLUSTER); - - VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE); - list_move_tail(&ci->list, &si->discard_clusters); - ci->flags = 0; + VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE); + cluster_move(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD); schedule_work(&si->discard_work); } @@ -458,12 
+477,7 @@ static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info { lockdep_assert_held(&si->lock); lockdep_assert_held(&ci->lock); - - if (ci->flags) - list_move_tail(&ci->list, &si->free_clusters); - else - list_add_tail(&ci->list, &si->free_clusters); - ci->flags = CLUSTER_FLAG_FREE; + cluster_move(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE); ci->order = 0; } @@ -479,6 +493,8 @@ static void swap_do_scheduled_discard(struct swap_info_struct *si) while (!list_empty(&si->discard_clusters)) { ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list); list_del(&ci->list); + /* Must clear flag when taking a cluster off-list */ + ci->flags = CLUSTER_FLAG_NONE; idx = cluster_index(si, ci); spin_unlock(&si->lock); @@ -519,9 +535,6 @@ static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info * lockdep_assert_held(&si->lock); lockdep_assert_held(&ci->lock); - if (ci->flags & CLUSTER_FLAG_FRAG) - si->frag_cluster_nr[ci->order]--; - /* * If the swap is discardable, prepare discard the cluster * instead of free it immediately. The cluster will be freed @@ -573,13 +586,9 @@ static void dec_cluster_info_page(struct swap_info_struct *si, return; } - if (!(ci->flags & CLUSTER_FLAG_NONFULL)) { - VM_BUG_ON(ci->flags & CLUSTER_FLAG_FREE); - if (ci->flags & CLUSTER_FLAG_FRAG) - si->frag_cluster_nr[ci->order]--; - list_move_tail(&ci->list, &si->nonfull_clusters[ci->order]); - ci->flags = CLUSTER_FLAG_NONFULL; - } + if (ci->flags != CLUSTER_FLAG_NONFULL) + cluster_move(si, ci, &si->nonfull_clusters[ci->order], + CLUSTER_FLAG_NONFULL); } static bool cluster_reclaim_range(struct swap_info_struct *si, @@ -663,11 +672,13 @@ static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster if (!(si->flags & SWP_WRITEOK)) return false; + VM_BUG_ON(ci->flags == CLUSTER_FLAG_NONE); + VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE); + if (cluster_is_free(ci)) { - if (nr_pages < SWAPFILE_CLUSTER) { - list_move_tail(&ci->list, &si->nonfull_clusters[order]); - ci->flags = CLUSTER_FLAG_NONFULL; - } + if (nr_pages < SWAPFILE_CLUSTER) + cluster_move(si, ci, &si->nonfull_clusters[order], + CLUSTER_FLAG_NONFULL); ci->order = order; } @@ -675,14 +686,8 @@ static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster swap_range_alloc(si, nr_pages); ci->count += nr_pages; - if (ci->count == SWAPFILE_CLUSTER) { - VM_BUG_ON(!(ci->flags & - (CLUSTER_FLAG_FREE | CLUSTER_FLAG_NONFULL | CLUSTER_FLAG_FRAG))); - if (ci->flags & CLUSTER_FLAG_FRAG) - si->frag_cluster_nr[ci->order]--; - list_move_tail(&ci->list, &si->full_clusters); - ci->flags = CLUSTER_FLAG_FULL; - } + if (ci->count == SWAPFILE_CLUSTER) + cluster_move(si, ci, &si->full_clusters, CLUSTER_FLAG_FULL); return true; } @@ -821,9 +826,7 @@ new_cluster: while (!list_empty(&si->nonfull_clusters[order])) { ci = list_first_entry(&si->nonfull_clusters[order], struct swap_cluster_info, list); - list_move_tail(&ci->list, &si->frag_clusters[order]); - ci->flags = CLUSTER_FLAG_FRAG; - si->frag_cluster_nr[order]++; + cluster_move(si, ci, &si->frag_clusters[order], CLUSTER_FLAG_FRAG); offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage); frags++; From 21baf72a9826d4aa9995208c8f33b2ffea818b95 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 31 Dec 2024 01:46:17 +0800 Subject: [PATCH 234/504] mm, swap: reduce contention on device lock Currently, swap locking is mainly composed of two locks: the cluster lock (ci->lock) and the device lock (si->lock). 
The cluster lock is much more fine-grained, so it is best to use ci->lock instead of si->lock as much as possible. We have cleaned up other hard dependencies on si->lock. Following the new cluster allocator design, most operations don't need to touch si->lock at all. In practice, we only need to take si->lock when moving clusters between lists. To achieve this, this commit reworks the locking pattern of all si->lock and ci->lock users, eliminates all usage of ci->lock inside si->lock, and introduces a new design to avoid touching si->lock unless needed. For minimal contention and easier understanding of the system, two ideas are introduced with the corresponding helpers: isolation and relocation. - Clusters will be `isolated` from the list when iterating the list to search for an allocatable cluster. This ensures other CPUs won't walk into the same cluster easily, and it releases si->lock after acquiring ci->lock, providing the only place that handles the inversion of two locks, and avoids contention. Iterating the cluster list almost always moves the cluster (free -> nonfull, nonfull -> frag, frag -> frag tail), but it doesn't know where the cluster should be moved to until scanning is done. So keeping the cluster off-list is a good option with low overhead. The off-list time window of a cluster is also minimal. In the worst case, one CPU will return the cluster after scanning the 512 entries on it, which we used to busy wait with a spin lock. This is done with the new helper `cluster_isolate_lock`. - Clusters will be `relocated` after allocation or freeing, according to their usage count and status. Allocations no longer hold si->lock now, and may drop ci->lock for reclaim, so the cluster could be moved to any location while no lock is held. Besides, isolation clears all flags when it takes the cluster off the list (the flags must be in sync with the list status, so cluster users don't need to touch si->lock for checking its list status). So the cluster has to be relocated to the right list according to its usage after allocation or freeing. Relocation is optional, if the cluster flags indicate it's already on the right list, it will skip touching the list or si->lock. This is done with relocate_cluster after allocation or with [partial_]free_cluster after freeing. This handled usage of all kinds of clusters in a clean way. Scanning and allocation by iterating the cluster list is handled by "isolate - - relocate". Scanning and allocation of per-CPU clusters will only involve " - relocate", as it knows which cluster to lock and use. Freeing will only involve "relocate". Each CPU will keep using its per-CPU cluster until the 512 entries are all consumed. Freeing also has to free 512 entries to trigger cluster movement in the best case, so si->lock is rarely touched. 
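A condensed sketch of the isolation step follows. The full helper, cluster_isolate_lock() in the diff further below, additionally checks SWP_WRITEOK and carries debug assertions; isolate_one is a hypothetical name used only for this illustration.

static struct swap_cluster_info *isolate_one(struct swap_info_struct *si,
                                             struct list_head *list)
{
        struct swap_cluster_info *ci, *ret = NULL;

        spin_lock(&si->lock);                   /* held only to walk the list   */
        list_for_each_entry(ci, list, list) {
                if (!spin_trylock(&ci->lock))   /* contended: skip, don't wait  */
                        continue;
                list_del(&ci->list);            /* off-list while it is in use  */
                ci->flags = CLUSTER_FLAG_NONE;  /* flags stay in sync with list */
                ret = ci;
                break;
        }
        spin_unlock(&si->lock);
        return ret;                             /* returned with ci->lock held  */
}

Once the caller is done, relocation simply moves the cluster to the list matching its current count and order (free, nonfull, frag or full), which is the only remaining reason to take si->lock.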
Testing by building the Linux kernel with defconfig showed a huge improvement: time make -j96 / 768M memcg, 4K pages, 10G ZRAM, on Intel 8255C: Before: Sys time: 73578.30, Real time: 864.05 After: (-50.7% sys time, -44.8% real time) Sys time: 36227.49, Real time: 476.66 time make -j96 / 1152M memcg, 64K mTHP, 10G ZRAM, on Intel 8255C: (avg of 4 test runs) Before: Sys time: 74044.85, Real time: 846.51 hugepages-64kB/stats/swpout: 1735216 hugepages-64kB/stats/swpout_fallback: 430333 After: (-40.4% sys time, -37.1% real time) Sys time: 44160.56, Real time: 532.07 hugepages-64kB/stats/swpout: 1786288 hugepages-64kB/stats/swpout_fallback: 243384 time make -j32 / 512M memcg, 4K pages, 5G ZRAM, on AMD 7K62: Before: Sys time: 8098.21, Real time: 401.3 After: (-22.6% sys time, -12.8% real time) Sys time: 6265.02, Real time: 349.83 The allocation success rate also slightly improved as we sanitized the usage of clusters with the newly defined helpers; previously, dropping si->lock or ci->lock during a scan could shuffle the cluster order. Link: https://lkml.kernel.org/r/20241230174621.61185-10-ryncsn@gmail.com Signed-off-by: Kairui Song Suggested-by: Chris Li Cc: Barry Song Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Nhat Pham Cc: Ryan Roberts Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap.h | 3 +- mm/swapfile.c | 447 ++++++++++++++++++++++++------------------- 2 files changed, 252 insertions(+), 198 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 339d7f0192ff..c4ff31cb6bde 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -291,6 +291,7 @@ enum swap_cluster_flags { * throughput. */ struct percpu_cluster { + local_lock_t lock; /* Protect the percpu_cluster above */ unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */ }; @@ -313,7 +314,7 @@ struct swap_info_struct { /* list of cluster that contains at least one free slot */ struct list_head frag_clusters[SWAP_NR_ORDERS]; /* list of cluster that are fragmented or contented */ - unsigned int frag_cluster_nr[SWAP_NR_ORDERS]; + atomic_long_t frag_cluster_nr[SWAP_NR_ORDERS]; unsigned int pages; /* total of usable pages of swap */ atomic_long_t inuse_pages; /* number of those currently in use */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ diff --git a/mm/swapfile.c b/mm/swapfile.c index 7795a3d27273..dadd4fead689 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -261,12 +261,10 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, folio_ref_sub(folio, nr_pages); folio_set_dirty(folio); - spin_lock(&si->lock); /* Only sinple page folio can be backed by zswap */ if (nr_pages == 1) zswap_invalidate(entry); swap_entry_range_free(si, entry, nr_pages); - spin_unlock(&si->lock); ret = nr_pages; out_unlock: folio_unlock(folio); @@ -403,7 +401,21 @@ static void discard_swap_cluster(struct swap_info_struct *si, static inline bool cluster_is_free(struct swap_cluster_info *info) { - return info->flags == CLUSTER_FLAG_FREE; + return info->count == 0; +} + +static inline bool cluster_is_discard(struct swap_cluster_info *info) +{ + return info->flags == CLUSTER_FLAG_DISCARD; +} + +static inline bool cluster_is_usable(struct swap_cluster_info *ci, int order) +{ + if (unlikely(ci->flags > CLUSTER_FLAG_USABLE)) + return false; + if (!order) + return true; + return cluster_is_free(ci) || order == ci->order; } static inline unsigned int cluster_index(struct swap_info_struct *si, @@ -440,19 +452,20 @@ static void
cluster_move(struct swap_info_struct *si, { VM_WARN_ON(ci->flags == new_flags); BUILD_BUG_ON(1 << sizeof(ci->flags) * BITS_PER_BYTE < CLUSTER_FLAG_MAX); + lockdep_assert_held(&ci->lock); - if (ci->flags == CLUSTER_FLAG_NONE) { + spin_lock(&si->lock); + if (ci->flags == CLUSTER_FLAG_NONE) list_add_tail(&ci->list, list); - } else { - if (ci->flags == CLUSTER_FLAG_FRAG) { - VM_WARN_ON(!si->frag_cluster_nr[ci->order]); - si->frag_cluster_nr[ci->order]--; - } + else list_move_tail(&ci->list, list); - } + spin_unlock(&si->lock); + + if (ci->flags == CLUSTER_FLAG_FRAG) + atomic_long_dec(&si->frag_cluster_nr[ci->order]); + else if (new_flags == CLUSTER_FLAG_FRAG) + atomic_long_inc(&si->frag_cluster_nr[ci->order]); ci->flags = new_flags; - if (new_flags == CLUSTER_FLAG_FRAG) - si->frag_cluster_nr[ci->order]++; } /* Add a cluster to discard list and schedule it to do discard */ @@ -475,39 +488,90 @@ static void swap_cluster_schedule_discard(struct swap_info_struct *si, static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { - lockdep_assert_held(&si->lock); lockdep_assert_held(&ci->lock); cluster_move(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE); ci->order = 0; } +/* + * Isolate and lock the first cluster that is not contented on a list, + * clean its flag before taken off-list. Cluster flag must be in sync + * with list status, so cluster updaters can always know the cluster + * list status without touching si lock. + * + * Note it's possible that all clusters on a list are contented so + * this returns NULL for an non-empty list. + */ +static struct swap_cluster_info *cluster_isolate_lock( + struct swap_info_struct *si, struct list_head *list) +{ + struct swap_cluster_info *ci, *ret = NULL; + + spin_lock(&si->lock); + + if (unlikely(!(si->flags & SWP_WRITEOK))) + goto out; + + list_for_each_entry(ci, list, list) { + if (!spin_trylock(&ci->lock)) + continue; + + /* We may only isolate and clear flags of following lists */ + VM_BUG_ON(!ci->flags); + VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE && + ci->flags != CLUSTER_FLAG_FULL); + + list_del(&ci->list); + ci->flags = CLUSTER_FLAG_NONE; + ret = ci; + break; + } +out: + spin_unlock(&si->lock); + + return ret; +} + /* * Doing discard actually. After a cluster discard is finished, the cluster - * will be added to free cluster list. caller should hold si->lock. -*/ -static void swap_do_scheduled_discard(struct swap_info_struct *si) + * will be added to free cluster list. Discard cluster is a bit special as + * they don't participate in allocation or reclaim, so clusters marked as + * CLUSTER_FLAG_DISCARD must remain off-list or on discard list. + */ +static bool swap_do_scheduled_discard(struct swap_info_struct *si) { struct swap_cluster_info *ci; + bool ret = false; unsigned int idx; + spin_lock(&si->lock); while (!list_empty(&si->discard_clusters)) { ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list); + /* + * Delete the cluster from list but don't clear its flags until + * discard is done, so isolation and relocation will skip it. + */ list_del(&ci->list); - /* Must clear flag when taking a cluster off-list */ - ci->flags = CLUSTER_FLAG_NONE; idx = cluster_index(si, ci); spin_unlock(&si->lock); - discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, SWAPFILE_CLUSTER); - spin_lock(&si->lock); spin_lock(&ci->lock); - __free_cluster(si, ci); + /* + * Discard is done, clear its flags as it's now off-list, + * then return the cluster to allocation list. 
+ */ + ci->flags = CLUSTER_FLAG_NONE; memset(si->swap_map + idx * SWAPFILE_CLUSTER, 0, SWAPFILE_CLUSTER); + __free_cluster(si, ci); spin_unlock(&ci->lock); + ret = true; + spin_lock(&si->lock); } + spin_unlock(&si->lock); + return ret; } static void swap_discard_work(struct work_struct *work) @@ -516,9 +580,7 @@ static void swap_discard_work(struct work_struct *work) si = container_of(work, struct swap_info_struct, discard_work); - spin_lock(&si->lock); swap_do_scheduled_discard(si); - spin_unlock(&si->lock); } static void swap_users_ref_free(struct percpu_ref *ref) @@ -529,10 +591,14 @@ static void swap_users_ref_free(struct percpu_ref *ref) complete(&si->comp); } +/* + * Must be called after freeing if ci->count == 0, moves the cluster to free + * or discard list. + */ static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) { VM_BUG_ON(ci->count != 0); - lockdep_assert_held(&si->lock); + VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE); lockdep_assert_held(&ci->lock); /* @@ -549,6 +615,48 @@ static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info * __free_cluster(si, ci); } +/* + * Must be called after freeing if ci->count != 0, moves the cluster to + * nonfull list. + */ +static void partial_free_cluster(struct swap_info_struct *si, + struct swap_cluster_info *ci) +{ + VM_BUG_ON(!ci->count || ci->count == SWAPFILE_CLUSTER); + lockdep_assert_held(&ci->lock); + + if (ci->flags != CLUSTER_FLAG_NONFULL) + cluster_move(si, ci, &si->nonfull_clusters[ci->order], + CLUSTER_FLAG_NONFULL); +} + +/* + * Must be called after allocation, moves the cluster to full or frag list. + * Note: allocation doesn't acquire si lock, and may drop the ci lock for + * reclaim, so the cluster could be any where when called. + */ +static void relocate_cluster(struct swap_info_struct *si, + struct swap_cluster_info *ci) +{ + lockdep_assert_held(&ci->lock); + + /* Discard cluster must remain off-list or on discard list */ + if (cluster_is_discard(ci)) + return; + + if (!ci->count) { + free_cluster(si, ci); + } else if (ci->count != SWAPFILE_CLUSTER) { + if (ci->flags != CLUSTER_FLAG_FRAG) + cluster_move(si, ci, &si->frag_clusters[ci->order], + CLUSTER_FLAG_FRAG); + } else { + if (ci->flags != CLUSTER_FLAG_FULL) + cluster_move(si, ci, &si->full_clusters, + CLUSTER_FLAG_FULL); + } +} + /* * The cluster corresponding to page_nr will be used. The cluster will not be * added to free cluster list and its usage counter will be increased by 1. @@ -567,30 +675,6 @@ static void inc_cluster_info_page(struct swap_info_struct *si, VM_BUG_ON(ci->flags); } -/* - * The cluster ci decreases @nr_pages usage. If the usage counter becomes 0, - * which means no page in the cluster is in use, we can optionally discard - * the cluster and add it to free cluster list. 
- */ -static void dec_cluster_info_page(struct swap_info_struct *si, - struct swap_cluster_info *ci, int nr_pages) -{ - VM_BUG_ON(ci->count < nr_pages); - VM_BUG_ON(cluster_is_free(ci)); - lockdep_assert_held(&si->lock); - lockdep_assert_held(&ci->lock); - ci->count -= nr_pages; - - if (!ci->count) { - free_cluster(si, ci); - return; - } - - if (ci->flags != CLUSTER_FLAG_NONFULL) - cluster_move(si, ci, &si->nonfull_clusters[ci->order], - CLUSTER_FLAG_NONFULL); -} - static bool cluster_reclaim_range(struct swap_info_struct *si, struct swap_cluster_info *ci, unsigned long start, unsigned long end) @@ -600,8 +684,6 @@ static bool cluster_reclaim_range(struct swap_info_struct *si, int nr_reclaim; spin_unlock(&ci->lock); - spin_unlock(&si->lock); - do { switch (READ_ONCE(map[offset])) { case 0: @@ -619,9 +701,7 @@ static bool cluster_reclaim_range(struct swap_info_struct *si, } } while (offset < end); out: - spin_lock(&si->lock); spin_lock(&ci->lock); - /* * Recheck the range no matter reclaim succeeded or not, the slot * could have been be freed while we are not holding the lock. @@ -635,11 +715,11 @@ out: static bool cluster_scan_range(struct swap_info_struct *si, struct swap_cluster_info *ci, - unsigned long start, unsigned int nr_pages) + unsigned long start, unsigned int nr_pages, + bool *need_reclaim) { unsigned long offset, end = start + nr_pages; unsigned char *map = si->swap_map; - bool need_reclaim = false; for (offset = start; offset < end; offset++) { switch (READ_ONCE(map[offset])) { @@ -648,16 +728,13 @@ static bool cluster_scan_range(struct swap_info_struct *si, case SWAP_HAS_CACHE: if (!vm_swap_full()) return false; - need_reclaim = true; + *need_reclaim = true; continue; default: return false; } } - if (need_reclaim) - return cluster_reclaim_range(si, ci, start, end); - return true; } @@ -672,23 +749,13 @@ static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster if (!(si->flags & SWP_WRITEOK)) return false; - VM_BUG_ON(ci->flags == CLUSTER_FLAG_NONE); - VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE); - - if (cluster_is_free(ci)) { - if (nr_pages < SWAPFILE_CLUSTER) - cluster_move(si, ci, &si->nonfull_clusters[order], - CLUSTER_FLAG_NONFULL); + if (cluster_is_free(ci)) ci->order = order; - } memset(si->swap_map + start, usage, nr_pages); swap_range_alloc(si, nr_pages); ci->count += nr_pages; - if (ci->count == SWAPFILE_CLUSTER) - cluster_move(si, ci, &si->full_clusters, CLUSTER_FLAG_FULL); - return true; } @@ -699,37 +766,55 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigne unsigned long start = offset & ~(SWAPFILE_CLUSTER - 1); unsigned long end = min(start + SWAPFILE_CLUSTER, si->max); unsigned int nr_pages = 1 << order; + bool need_reclaim, ret; struct swap_cluster_info *ci; - if (end < nr_pages) - return SWAP_NEXT_INVALID; - end -= nr_pages; + ci = &si->cluster_info[offset / SWAPFILE_CLUSTER]; + lockdep_assert_held(&ci->lock); - ci = lock_cluster(si, offset); - if (ci->count + nr_pages > SWAPFILE_CLUSTER) { + if (end < nr_pages || ci->count + nr_pages > SWAPFILE_CLUSTER) { offset = SWAP_NEXT_INVALID; - goto done; + goto out; } - while (offset <= end) { - if (cluster_scan_range(si, ci, offset, nr_pages)) { - if (!cluster_alloc_range(si, ci, offset, usage, order)) { + for (end -= nr_pages; offset <= end; offset += nr_pages) { + need_reclaim = false; + if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim)) + continue; + if (need_reclaim) { + ret = cluster_reclaim_range(si, ci, start, end); + /* + * Reclaim drops 
ci->lock and cluster could be used + * by another order. Not checking flag as off-list + * cluster has no flag set, and change of list + * won't cause fragmentation. + */ + if (!cluster_is_usable(ci, order)) { offset = SWAP_NEXT_INVALID; - goto done; + goto out; } - *foundp = offset; - if (ci->count == SWAPFILE_CLUSTER) { - offset = SWAP_NEXT_INVALID; - goto done; - } - offset += nr_pages; - break; + if (cluster_is_free(ci)) + offset = start; + /* Reclaim failed but cluster is usable, try next */ + if (!ret) + continue; + } + if (!cluster_alloc_range(si, ci, offset, usage, order)) { + offset = SWAP_NEXT_INVALID; + goto out; + } + *foundp = offset; + if (ci->count == SWAPFILE_CLUSTER) { + offset = SWAP_NEXT_INVALID; + goto out; } offset += nr_pages; + break; } if (offset > end) offset = SWAP_NEXT_INVALID; -done: +out: + relocate_cluster(si, ci); unlock_cluster(ci); return offset; } @@ -746,18 +831,17 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) if (force) to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER; - while (!list_empty(&si->full_clusters)) { - ci = list_first_entry(&si->full_clusters, struct swap_cluster_info, list); - list_move_tail(&ci->list, &si->full_clusters); + while ((ci = cluster_isolate_lock(si, &si->full_clusters))) { offset = cluster_offset(si, ci); end = min(si->max, offset + SWAPFILE_CLUSTER); to_scan--; - spin_unlock(&si->lock); while (offset < end) { if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) { + spin_unlock(&ci->lock); nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT); + spin_lock(&ci->lock); if (nr_reclaim) { offset += abs(nr_reclaim); continue; @@ -765,8 +849,8 @@ static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) } offset++; } - spin_lock(&si->lock); + unlock_cluster(ci); if (to_scan <= 0) break; } @@ -778,9 +862,7 @@ static void swap_reclaim_work(struct work_struct *work) si = container_of(work, struct swap_info_struct, reclaim_work); - spin_lock(&si->lock); swap_reclaim_full_clusters(si, true); - spin_unlock(&si->lock); } /* @@ -791,29 +873,34 @@ static void swap_reclaim_work(struct work_struct *work) static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order, unsigned char usage) { - struct percpu_cluster *cluster; struct swap_cluster_info *ci; unsigned int offset, found = 0; -new_cluster: - lockdep_assert_held(&si->lock); - cluster = this_cpu_ptr(si->percpu_cluster); - offset = cluster->next[order]; + /* Fast path using per CPU cluster */ + local_lock(&si->percpu_cluster->lock); + offset = __this_cpu_read(si->percpu_cluster->next[order]); if (offset) { - offset = alloc_swap_scan_cluster(si, offset, &found, order, usage); + ci = lock_cluster(si, offset); + /* Cluster could have been used by another order */ + if (cluster_is_usable(ci, order)) { + if (cluster_is_free(ci)) + offset = cluster_offset(si, ci); + offset = alloc_swap_scan_cluster(si, offset, &found, + order, usage); + } else { + unlock_cluster(ci); + } if (found) goto done; } - if (!list_empty(&si->free_clusters)) { - ci = list_first_entry(&si->free_clusters, struct swap_cluster_info, list); - offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage); - /* - * Either we didn't touch the cluster due to swapoff, - * or the allocation must success. 
- */ - VM_BUG_ON((si->flags & SWP_WRITEOK) && !found); - goto done; +new_cluster: + ci = cluster_isolate_lock(si, &si->free_clusters); + if (ci) { + offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), + &found, order, usage); + if (found) + goto done; } /* Try reclaim from full clusters if free clusters list is drained */ @@ -821,49 +908,45 @@ new_cluster: swap_reclaim_full_clusters(si, false); if (order < PMD_ORDER) { - unsigned int frags = 0; + unsigned int frags = 0, frags_existing; - while (!list_empty(&si->nonfull_clusters[order])) { - ci = list_first_entry(&si->nonfull_clusters[order], - struct swap_cluster_info, list); - cluster_move(si, ci, &si->frag_clusters[order], CLUSTER_FLAG_FRAG); + while ((ci = cluster_isolate_lock(si, &si->nonfull_clusters[order]))) { offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), &found, order, usage); - frags++; - if (found) - goto done; - } - - /* - * Nonfull clusters are moved to frag tail if we reached - * here, count them too, don't over scan the frag list. - */ - while (frags < si->frag_cluster_nr[order]) { - ci = list_first_entry(&si->frag_clusters[order], - struct swap_cluster_info, list); /* - * Rotate the frag list to iterate, they were all failing - * high order allocation or moved here due to per-CPU usage, - * this help keeping usable cluster ahead. + * With `fragmenting` set to true, it will surely take + * the cluster off nonfull list */ - list_move_tail(&ci->list, &si->frag_clusters[order]); - offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), - &found, order, usage); - frags++; if (found) goto done; + frags++; + } + + frags_existing = atomic_long_read(&si->frag_cluster_nr[order]); + while (frags < frags_existing && + (ci = cluster_isolate_lock(si, &si->frag_clusters[order]))) { + atomic_long_dec(&si->frag_cluster_nr[order]); + /* + * Rotate the frag list to iterate, they were all + * failing high order allocation or moved here due to + * per-CPU usage, but they could contain newly released + * reclaimable (eg. lazy-freed swap cache) slots. + */ + offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), + &found, order, usage); + if (found) + goto done; + frags++; } } - if (!list_empty(&si->discard_clusters)) { - /* - * we don't have free cluster but have some clusters in - * discarding, do discard now and reclaim them, then - * reread cluster_next_cpu since we dropped si->lock - */ - swap_do_scheduled_discard(si); + /* + * We don't have free cluster but have some clusters in + * discarding, do discard now and reclaim them, then + * reread cluster_next_cpu since we dropped si->lock + */ + if ((si->flags & SWP_PAGE_DISCARD) && swap_do_scheduled_discard(si)) goto new_cluster; - } if (order) goto done; @@ -874,26 +957,25 @@ new_cluster: * Clusters here have at least one usable slots and can't fail order 0 * allocation, but reclaim may drop si->lock and race with another user. 
*/ - while (!list_empty(&si->frag_clusters[o])) { - ci = list_first_entry(&si->frag_clusters[o], - struct swap_cluster_info, list); + while ((ci = cluster_isolate_lock(si, &si->frag_clusters[o]))) { + atomic_long_dec(&si->frag_cluster_nr[o]); offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), - &found, 0, usage); + &found, order, usage); if (found) goto done; } - while (!list_empty(&si->nonfull_clusters[o])) { - ci = list_first_entry(&si->nonfull_clusters[o], - struct swap_cluster_info, list); + while ((ci = cluster_isolate_lock(si, &si->nonfull_clusters[o]))) { offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), - &found, 0, usage); + &found, order, usage); if (found) goto done; } } done: - cluster->next[order] = offset; + __this_cpu_write(si->percpu_cluster->next[order], offset); + local_unlock(&si->percpu_cluster->lock); + return found; } @@ -1157,14 +1239,11 @@ start_over: plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); spin_unlock(&swap_avail_lock); if (get_swap_device_info(si)) { - spin_lock(&si->lock); n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, n_goal, swp_entries, order); - spin_unlock(&si->lock); put_swap_device(si); if (n_ret || size > 1) goto check_out; - cond_resched(); } spin_lock(&swap_avail_lock); @@ -1377,9 +1456,7 @@ static bool __swap_entries_free(struct swap_info_struct *si, if (!has_cache) { for (i = 0; i < nr; i++) zswap_invalidate(swp_entry(si->type, offset + i)); - spin_lock(&si->lock); swap_entry_range_free(si, entry, nr); - spin_unlock(&si->lock); } return has_cache; @@ -1408,16 +1485,27 @@ static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry unsigned char *map_end = map + nr_pages; struct swap_cluster_info *ci; + /* It should never free entries across different clusters */ + VM_BUG_ON((offset / SWAPFILE_CLUSTER) != ((offset + nr_pages - 1) / SWAPFILE_CLUSTER)); + ci = lock_cluster(si, offset); + VM_BUG_ON(cluster_is_free(ci)); + VM_BUG_ON(ci->count < nr_pages); + + ci->count -= nr_pages; do { VM_BUG_ON(*map != SWAP_HAS_CACHE); *map = 0; } while (++map < map_end); - dec_cluster_info_page(si, ci, nr_pages); - unlock_cluster(ci); mem_cgroup_uncharge_swap(entry, nr_pages); swap_range_free(si, offset, nr_pages); + + if (!ci->count) + free_cluster(si, ci); + else + partial_free_cluster(si, ci); + unlock_cluster(ci); } static void cluster_swap_free_nr(struct swap_info_struct *si, @@ -1489,9 +1577,7 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) ci = lock_cluster(si, offset); if (size > 1 && swap_is_has_cache(si, offset, size)) { unlock_cluster(ci); - spin_lock(&si->lock); swap_entry_range_free(si, entry, size); - spin_unlock(&si->lock); return; } for (int i = 0; i < size; i++, entry.val++) { @@ -1506,46 +1592,19 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) unlock_cluster(ci); } -static int swp_entry_cmp(const void *ent1, const void *ent2) -{ - const swp_entry_t *e1 = ent1, *e2 = ent2; - - return (int)swp_type(*e1) - (int)swp_type(*e2); -} - void swapcache_free_entries(swp_entry_t *entries, int n) { - struct swap_info_struct *si, *prev; int i; + struct swap_info_struct *si = NULL; if (n <= 0) return; - prev = NULL; - si = NULL; - - /* - * Sort swap entries by swap device, so each lock is only taken once. - * nr_swapfiles isn't absolutely correct, but the overhead of sort() is - * so low that it isn't necessary to optimize further. 
- */ - if (nr_swapfiles > 1) - sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL); for (i = 0; i < n; ++i) { si = _swap_info_get(entries[i]); - - if (si != prev) { - if (prev != NULL) - spin_unlock(&prev->lock); - if (si != NULL) - spin_lock(&si->lock); - } if (si) swap_entry_range_free(si, entries[i], 1); - prev = si; } - if (si) - spin_unlock(&si->lock); } int __swap_count(swp_entry_t entry) @@ -1797,13 +1856,8 @@ swp_entry_t get_swap_page_of_type(int type) goto fail; /* This is called for allocating swap entry, not cache */ - if (get_swap_device_info(si)) { - spin_lock(&si->lock); - if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0)) - atomic_long_dec(&nr_swap_pages); - spin_unlock(&si->lock); - put_swap_device(si); - } + if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0)) + atomic_long_dec(&nr_swap_pages); fail: return entry; } @@ -3141,6 +3195,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, cluster = per_cpu_ptr(si->percpu_cluster, cpu); for (i = 0; i < SWAP_NR_ORDERS; i++) cluster->next[i] = SWAP_NEXT_INVALID; + local_lock_init(&cluster->lock); } /* @@ -3164,7 +3219,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, for (i = 0; i < SWAP_NR_ORDERS; i++) { INIT_LIST_HEAD(&si->nonfull_clusters[i]); INIT_LIST_HEAD(&si->frag_clusters[i]); - si->frag_cluster_nr[i] = 0; + atomic_long_set(&si->frag_cluster_nr[i], 0); } /* @@ -3646,7 +3701,6 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) */ goto outer; } - spin_lock(&si->lock); offset = swp_offset(entry); @@ -3711,7 +3765,6 @@ out_unlock_cont: spin_unlock(&si->cont_lock); out: unlock_cluster(ci); - spin_unlock(&si->lock); put_swap_device(si); outer: if (page) From 243dd93e678dd4638b1005a13b085c3c5439447c Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 31 Dec 2024 01:46:18 +0800 Subject: [PATCH 235/504] mm, swap: simplify percpu cluster updating Instead of using a returning argument, we can simply store the next cluster offset to the fixed percpu location, which reduce the stack usage and simplify the function: Object size: ./scripts/bloat-o-meter mm/swapfile.o mm/swapfile.o.new add/remove: 0/0 grow/shrink: 0/2 up/down: 0/-271 (-271) Function old new delta get_swap_pages 2847 2733 -114 alloc_swap_scan_cluster 894 737 -157 Total: Before=30833, After=30562, chg -0.88% Stack usage: Before: swapfile.c:1190:5:get_swap_pages 240 static After: swapfile.c:1185:5:get_swap_pages 216 static Link: https://lkml.kernel.org/r/20241230174621.61185-11-ryncsn@gmail.com Signed-off-by: Kairui Song Cc: Barry Song Cc: Chris Li Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Nhat Pham Cc: Ryan Roberts Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap.h | 4 +-- mm/swapfile.c | 66 +++++++++++++++++++------------------------- 2 files changed, 31 insertions(+), 39 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index c4ff31cb6bde..4c1d2e69689f 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -275,9 +275,9 @@ enum swap_cluster_flags { * The first page in the swap file is the swap header, which is always marked * bad to prevent it from being allocated as an entry. This also prevents the * cluster to which it belongs being marked free. Therefore 0 is safe to use as - * a sentinel to indicate next is not valid in percpu_cluster. + * a sentinel to indicate an entry is not valid. 
*/ -#define SWAP_NEXT_INVALID 0 +#define SWAP_ENTRY_INVALID 0 #ifdef CONFIG_THP_SWAP #define SWAP_NR_ORDERS (PMD_ORDER + 1) diff --git a/mm/swapfile.c b/mm/swapfile.c index dadd4fead689..60a650ba88fd 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -759,23 +759,23 @@ static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster return true; } -static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigned long offset, - unsigned int *foundp, unsigned int order, +/* Try use a new cluster for current CPU and allocate from it. */ +static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, + struct swap_cluster_info *ci, + unsigned long offset, + unsigned int order, unsigned char usage) { - unsigned long start = offset & ~(SWAPFILE_CLUSTER - 1); + unsigned int next = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID; + unsigned long start = ALIGN_DOWN(offset, SWAPFILE_CLUSTER); unsigned long end = min(start + SWAPFILE_CLUSTER, si->max); unsigned int nr_pages = 1 << order; bool need_reclaim, ret; - struct swap_cluster_info *ci; - ci = &si->cluster_info[offset / SWAPFILE_CLUSTER]; lockdep_assert_held(&ci->lock); - if (end < nr_pages || ci->count + nr_pages > SWAPFILE_CLUSTER) { - offset = SWAP_NEXT_INVALID; + if (end < nr_pages || ci->count + nr_pages > SWAPFILE_CLUSTER) goto out; - } for (end -= nr_pages; offset <= end; offset += nr_pages) { need_reclaim = false; @@ -789,34 +789,27 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, unsigne * cluster has no flag set, and change of list * won't cause fragmentation. */ - if (!cluster_is_usable(ci, order)) { - offset = SWAP_NEXT_INVALID; + if (!cluster_is_usable(ci, order)) goto out; - } if (cluster_is_free(ci)) offset = start; /* Reclaim failed but cluster is usable, try next */ if (!ret) continue; } - if (!cluster_alloc_range(si, ci, offset, usage, order)) { - offset = SWAP_NEXT_INVALID; - goto out; - } - *foundp = offset; - if (ci->count == SWAPFILE_CLUSTER) { - offset = SWAP_NEXT_INVALID; - goto out; - } + if (!cluster_alloc_range(si, ci, offset, usage, order)) + break; + found = offset; offset += nr_pages; + if (ci->count < SWAPFILE_CLUSTER && offset <= end) + next = offset; break; } - if (offset > end) - offset = SWAP_NEXT_INVALID; out: relocate_cluster(si, ci); unlock_cluster(ci); - return offset; + __this_cpu_write(si->percpu_cluster->next[order], next); + return found; } /* Return true if reclaimed a whole cluster */ @@ -885,8 +878,8 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o if (cluster_is_usable(ci, order)) { if (cluster_is_free(ci)) offset = cluster_offset(si, ci); - offset = alloc_swap_scan_cluster(si, offset, &found, - order, usage); + found = alloc_swap_scan_cluster(si, ci, offset, + order, usage); } else { unlock_cluster(ci); } @@ -897,8 +890,8 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o new_cluster: ci = cluster_isolate_lock(si, &si->free_clusters); if (ci) { - offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), - &found, order, usage); + found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), + order, usage); if (found) goto done; } @@ -911,8 +904,8 @@ new_cluster: unsigned int frags = 0, frags_existing; while ((ci = cluster_isolate_lock(si, &si->nonfull_clusters[order]))) { - offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), - &found, order, usage); + found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), + order, usage); /* * With 
`fragmenting` set to true, it will surely take * the cluster off nonfull list @@ -932,8 +925,8 @@ new_cluster: * per-CPU usage, but they could contain newly released * reclaimable (eg. lazy-freed swap cache) slots. */ - offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), - &found, order, usage); + found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), + order, usage); if (found) goto done; frags++; @@ -959,21 +952,20 @@ new_cluster: */ while ((ci = cluster_isolate_lock(si, &si->frag_clusters[o]))) { atomic_long_dec(&si->frag_cluster_nr[o]); - offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), - &found, order, usage); + found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), + 0, usage); if (found) goto done; } while ((ci = cluster_isolate_lock(si, &si->nonfull_clusters[o]))) { - offset = alloc_swap_scan_cluster(si, cluster_offset(si, ci), - &found, order, usage); + found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci), + 0, usage); if (found) goto done; } } done: - __this_cpu_write(si->percpu_cluster->next[order], offset); local_unlock(&si->percpu_cluster->lock); return found; @@ -3194,7 +3186,7 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, cluster = per_cpu_ptr(si->percpu_cluster, cpu); for (i = 0; i < SWAP_NR_ORDERS; i++) - cluster->next[i] = SWAP_NEXT_INVALID; + cluster->next[i] = SWAP_ENTRY_INVALID; local_lock_init(&cluster->lock); } From e714e1ab956da559206fbf07a7f6382f811afaa8 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 31 Dec 2024 01:46:19 +0800 Subject: [PATCH 236/504] mm, swap: introduce a helper for retrieving cluster from offset It's a common operation to retrieve the cluster info from offset, introduce a helper for this. Link: https://lkml.kernel.org/r/20241230174621.61185-12-ryncsn@gmail.com Signed-off-by: Kairui Song Suggested-by: Chris Li Cc: Barry Song Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Nhat Pham Cc: Ryan Roberts Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/swapfile.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 60a650ba88fd..a3d1239d944b 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -424,6 +424,12 @@ static inline unsigned int cluster_index(struct swap_info_struct *si, return ci - si->cluster_info; } +static inline struct swap_cluster_info *offset_to_cluster(struct swap_info_struct *si, + unsigned long offset) +{ + return &si->cluster_info[offset / SWAPFILE_CLUSTER]; +} + static inline unsigned int cluster_offset(struct swap_info_struct *si, struct swap_cluster_info *ci) { @@ -435,7 +441,7 @@ static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si { struct swap_cluster_info *ci; - ci = &si->cluster_info[offset / SWAPFILE_CLUSTER]; + ci = offset_to_cluster(si, offset); spin_lock(&ci->lock); return ci; @@ -1477,10 +1483,10 @@ static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry unsigned char *map_end = map + nr_pages; struct swap_cluster_info *ci; - /* It should never free entries across different clusters */ - VM_BUG_ON((offset / SWAPFILE_CLUSTER) != ((offset + nr_pages - 1) / SWAPFILE_CLUSTER)); - ci = lock_cluster(si, offset); + + /* It should never free entries across different clusters */ + VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1)); VM_BUG_ON(cluster_is_free(ci)); VM_BUG_ON(ci->count < nr_pages); From 635d3ad5159870a6c8e9aeffcdafd1cff36897d7 Mon Sep 17 00:00:00 2001 From: Kairui Song 
Date: Tue, 31 Dec 2024 01:46:20 +0800 Subject: [PATCH 237/504] mm, swap: use a global swap cluster for non-rotation devices Non-rotational devices (SSD / ZRAM) can tolerate fragmentation, so the goal of the SWAP allocator is to avoid contention for clusters. It uses a per-CPU cluster design, and each CPU will use a different cluster as much as possible. However, HDDs are very sensitive to fragmentation, while contention is trivial in comparison. Therefore, we use one global cluster instead. This ensures that each order will be written to the same cluster as much as possible, which helps make the I/O more continuous. It also ensures that the performance of the cluster allocator is as good as that of the old allocator. Tests after this commit compared to those before this series, using 'make -j32' with tinyconfig, a 1G memcg limit, and HDD swap: Before this series: 114.44user 29.11system 39:42.90elapsed 6%CPU (0avgtext+0avgdata 157284maxresident)k 2901232inputs+0outputs (238877major+4227640minor)pagefaults After this commit: 113.90user 23.81system 38:11.77elapsed 6%CPU (0avgtext+0avgdata 157260maxresident)k 2548728inputs+0outputs (235471major+4238110minor)pagefaults Link: https://lkml.kernel.org/r/20241230174621.61185-13-ryncsn@gmail.com Signed-off-by: Kairui Song Suggested-by: Chris Li Cc: Barry Song Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Nhat Pham Cc: Ryan Roberts Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap.h | 2 ++ mm/swapfile.c | 51 ++++++++++++++++++++++++++++++------------ 2 files changed, 39 insertions(+), 14 deletions(-) diff --git a/include/linux/swap.h b/include/linux/swap.h index 4c1d2e69689f..b13b72645db3 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -318,6 +318,8 @@ struct swap_info_struct { unsigned int pages; /* total of usable pages of swap */ atomic_long_t inuse_pages; /* number of those currently in use */ struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */ + struct percpu_cluster *global_cluster; /* Use one global cluster for rotating device */ + spinlock_t global_cluster_lock; /* Serialize usage of global cluster */ struct rb_root swap_extent_root;/* root of the swap extent rbtree */ struct block_device *bdev; /* swap device or bdev of swap file */ struct file *swap_file; /* seldom referenced */ diff --git a/mm/swapfile.c b/mm/swapfile.c index a3d1239d944b..e57e5453a25b 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -814,7 +814,10 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, out: relocate_cluster(si, ci); unlock_cluster(ci); - __this_cpu_write(si->percpu_cluster->next[order], next); + if (si->flags & SWP_SOLIDSTATE) + __this_cpu_write(si->percpu_cluster->next[order], next); + else + si->global_cluster->next[order] = next; return found; } @@ -875,9 +878,16 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o struct swap_cluster_info *ci; unsigned int offset, found = 0; - /* Fast path using per CPU cluster */ - local_lock(&si->percpu_cluster->lock); - offset = __this_cpu_read(si->percpu_cluster->next[order]); + if (si->flags & SWP_SOLIDSTATE) { + /* Fast path using per CPU cluster */ + local_lock(&si->percpu_cluster->lock); + offset = __this_cpu_read(si->percpu_cluster->next[order]); + } else { + /* Serialize HDD SWAP allocation for each device.
*/ + spin_lock(&si->global_cluster_lock); + offset = si->global_cluster->next[order]; + } + if (offset) { ci = lock_cluster(si, offset); /* Cluster could have been used by another order */ @@ -972,8 +982,10 @@ new_cluster: } } done: - local_unlock(&si->percpu_cluster->lock); - + if (si->flags & SWP_SOLIDSTATE) + local_unlock(&si->percpu_cluster->lock); + else + spin_unlock(&si->global_cluster_lock); return found; } @@ -2778,6 +2790,8 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) mutex_unlock(&swapon_mutex); free_percpu(p->percpu_cluster); p->percpu_cluster = NULL; + kfree(p->global_cluster); + p->global_cluster = NULL; vfree(swap_map); kvfree(zeromap); kvfree(cluster_info); @@ -3183,17 +3197,24 @@ static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, for (i = 0; i < nr_clusters; i++) spin_lock_init(&cluster_info[i].lock); - si->percpu_cluster = alloc_percpu(struct percpu_cluster); - if (!si->percpu_cluster) - goto err_free; + if (si->flags & SWP_SOLIDSTATE) { + si->percpu_cluster = alloc_percpu(struct percpu_cluster); + if (!si->percpu_cluster) + goto err_free; - for_each_possible_cpu(cpu) { - struct percpu_cluster *cluster; + for_each_possible_cpu(cpu) { + struct percpu_cluster *cluster; - cluster = per_cpu_ptr(si->percpu_cluster, cpu); + cluster = per_cpu_ptr(si->percpu_cluster, cpu); + for (i = 0; i < SWAP_NR_ORDERS; i++) + cluster->next[i] = SWAP_ENTRY_INVALID; + local_lock_init(&cluster->lock); + } + } else { + si->global_cluster = kmalloc(sizeof(*si->global_cluster), GFP_KERNEL); for (i = 0; i < SWAP_NR_ORDERS; i++) - cluster->next[i] = SWAP_ENTRY_INVALID; - local_lock_init(&cluster->lock); + si->global_cluster->next[i] = SWAP_ENTRY_INVALID; + spin_lock_init(&si->global_cluster_lock); } /* @@ -3467,6 +3488,8 @@ bad_swap_unlock_inode: bad_swap: free_percpu(si->percpu_cluster); si->percpu_cluster = NULL; + kfree(si->global_cluster); + si->global_cluster = NULL; inode = NULL; destroy_swap_extents(si); swap_cgroup_swapoff(si->type); From bb579b48ddb84a674523c0df973e895da17005ba Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Tue, 31 Dec 2024 01:46:21 +0800 Subject: [PATCH 238/504] mm, swap_slots: remove slot cache for freeing path The slot cache for the freeing path is mostly for reducing the overhead of si->lock. As we have basically eliminated the si->lock usage for the freeing path, it can be removed. This helps simplify the code, and avoids swap entries being held in the cache upon freeing. The delayed freeing of entries has been causing trouble for further optimizations for zswap [1] and in theory will also cause more fragmentation and extra overhead. Tests with building the Linux kernel showed both performance and fragmentation are better without the cache: time make -j96 / 768M memcg, 4K pages, 10G ZRAM, avg of 4 test runs: Before: Sys time: 36047.78, Real time: 472.43 After: (-7.6% sys time, -7.3% real time) Sys time: 33314.76, Real time: 437.67 time make -j96 / 1152M memcg, 64K mTHP, 10G ZRAM, avg of 4 test runs: Before: Sys time: 46859.04, Real time: 562.63 hugepages-64kB/stats/swpout: 1783392 hugepages-64kB/stats/swpout_fallback: 240875 After: (-23.3% sys time, -21.3% real time) Sys time: 35958.87, Real time: 442.69 hugepages-64kB/stats/swpout: 1866267 hugepages-64kB/stats/swpout_fallback: 158330 Sequential SWAP should also be slightly faster; tests didn't show a measurable difference, but at least there is no regression: Swapin 4G zero page on ZRAM (time in us): Before (avg.
1923756) 1912391 1927023 1927957 1916527 1918263 1914284 1934753 1940813 1921791 After (avg. 1922290): 1919101 1925743 1916810 1917007 1923930 1935152 1917403 1923549 1921913 Link: https://lore.kernel.org/all/CAMgjq7ACohT_uerSz8E_994ZZCv709Zor+43hdmesW_59W1BWw@mail.gmail.com/[1] Link: https://lkml.kernel.org/r/20241230174621.61185-14-ryncsn@gmail.com Signed-off-by: Kairui Song Suggested-by: Chris Li Cc: Barry Song Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Johannes Weiner Cc: Kalesh Singh Cc: Nhat Pham Cc: Ryan Roberts Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap_slots.h | 3 -- mm/swap_slots.c | 78 +++++---------------------------- mm/swapfile.c | 89 +++++++++++++++----------------------- 3 files changed, 44 insertions(+), 126 deletions(-) diff --git a/include/linux/swap_slots.h b/include/linux/swap_slots.h index 15adfb8c813a..840aec3523b2 100644 --- a/include/linux/swap_slots.h +++ b/include/linux/swap_slots.h @@ -16,15 +16,12 @@ struct swap_slots_cache { swp_entry_t *slots; int nr; int cur; - spinlock_t free_lock; /* protects slots_ret, n_ret */ - swp_entry_t *slots_ret; int n_ret; }; void disable_swap_slots_cache_lock(void); void reenable_swap_slots_cache_unlock(void); void enable_swap_slots_cache(void); -void free_swap_slot(swp_entry_t entry); extern bool swap_slot_cache_enabled; diff --git a/mm/swap_slots.c b/mm/swap_slots.c index 13ab3b771409..9c7c171df7ba 100644 --- a/mm/swap_slots.c +++ b/mm/swap_slots.c @@ -43,17 +43,15 @@ static DEFINE_MUTEX(swap_slots_cache_mutex); /* Serialize swap slots cache enable/disable operations */ static DEFINE_MUTEX(swap_slots_cache_enable_mutex); -static void __drain_swap_slots_cache(unsigned int type); +static void __drain_swap_slots_cache(void); #define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled) -#define SLOTS_CACHE 0x1 -#define SLOTS_CACHE_RET 0x2 static void deactivate_swap_slots_cache(void) { mutex_lock(&swap_slots_cache_mutex); swap_slot_cache_active = false; - __drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET); + __drain_swap_slots_cache(); mutex_unlock(&swap_slots_cache_mutex); } @@ -72,7 +70,7 @@ void disable_swap_slots_cache_lock(void) if (swap_slot_cache_initialized) { /* serialize with cpu hotplug operations */ cpus_read_lock(); - __drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET); + __drain_swap_slots_cache(); cpus_read_unlock(); } } @@ -113,7 +111,7 @@ out: static int alloc_swap_slot_cache(unsigned int cpu) { struct swap_slots_cache *cache; - swp_entry_t *slots, *slots_ret; + swp_entry_t *slots; /* * Do allocation outside swap_slots_cache_mutex @@ -125,28 +123,19 @@ static int alloc_swap_slot_cache(unsigned int cpu) if (!slots) return -ENOMEM; - slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t), - GFP_KERNEL); - if (!slots_ret) { - kvfree(slots); - return -ENOMEM; - } - mutex_lock(&swap_slots_cache_mutex); cache = &per_cpu(swp_slots, cpu); - if (cache->slots || cache->slots_ret) { + if (cache->slots) { /* cache already allocated */ mutex_unlock(&swap_slots_cache_mutex); kvfree(slots); - kvfree(slots_ret); return 0; } if (!cache->lock_initialized) { mutex_init(&cache->alloc_lock); - spin_lock_init(&cache->free_lock); cache->lock_initialized = true; } cache->nr = 0; @@ -160,19 +149,16 @@ static int alloc_swap_slot_cache(unsigned int cpu) */ mb(); cache->slots = slots; - cache->slots_ret = slots_ret; mutex_unlock(&swap_slots_cache_mutex); return 0; } -static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type, - bool free_slots) +static void 
drain_slots_cache_cpu(unsigned int cpu, bool free_slots) { struct swap_slots_cache *cache; - swp_entry_t *slots = NULL; cache = &per_cpu(swp_slots, cpu); - if ((type & SLOTS_CACHE) && cache->slots) { + if (cache->slots) { mutex_lock(&cache->alloc_lock); swapcache_free_entries(cache->slots + cache->cur, cache->nr); cache->cur = 0; @@ -183,20 +169,9 @@ static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type, } mutex_unlock(&cache->alloc_lock); } - if ((type & SLOTS_CACHE_RET) && cache->slots_ret) { - spin_lock_irq(&cache->free_lock); - swapcache_free_entries(cache->slots_ret, cache->n_ret); - cache->n_ret = 0; - if (free_slots && cache->slots_ret) { - slots = cache->slots_ret; - cache->slots_ret = NULL; - } - spin_unlock_irq(&cache->free_lock); - kvfree(slots); - } } -static void __drain_swap_slots_cache(unsigned int type) +static void __drain_swap_slots_cache(void) { unsigned int cpu; @@ -224,13 +199,13 @@ static void __drain_swap_slots_cache(unsigned int type) * There are no slots on such cpu that need to be drained. */ for_each_online_cpu(cpu) - drain_slots_cache_cpu(cpu, type, false); + drain_slots_cache_cpu(cpu, false); } static int free_slot_cache(unsigned int cpu) { mutex_lock(&swap_slots_cache_mutex); - drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true); + drain_slots_cache_cpu(cpu, true); mutex_unlock(&swap_slots_cache_mutex); return 0; } @@ -269,39 +244,6 @@ static int refill_swap_slots_cache(struct swap_slots_cache *cache) return cache->nr; } -void free_swap_slot(swp_entry_t entry) -{ - struct swap_slots_cache *cache; - - /* Large folio swap slot is not covered. */ - zswap_invalidate(entry); - - cache = raw_cpu_ptr(&swp_slots); - if (likely(use_swap_slot_cache && cache->slots_ret)) { - spin_lock_irq(&cache->free_lock); - /* Swap slots cache may be deactivated before acquiring lock */ - if (!use_swap_slot_cache || !cache->slots_ret) { - spin_unlock_irq(&cache->free_lock); - goto direct_free; - } - if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) { - /* - * Return slots to global pool. - * The current swap_map value is SWAP_HAS_CACHE. 
- * Set it to 0 to indicate it is available for - * allocation in global pool - */ - swapcache_free_entries(cache->slots_ret, cache->n_ret); - cache->n_ret = 0; - } - cache->slots_ret[cache->n_ret++] = entry; - spin_unlock_irq(&cache->free_lock); - } else { -direct_free: - swapcache_free_entries(&entry, 1); - } -} - swp_entry_t folio_alloc_swap(struct folio *folio) { swp_entry_t entry; diff --git a/mm/swapfile.c b/mm/swapfile.c index e57e5453a25b..d623f5b6dc4c 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -53,14 +53,15 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t, unsigned char); static void free_swap_count_continuations(struct swap_info_struct *); -static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry, - unsigned int nr_pages); +static void swap_entry_range_free(struct swap_info_struct *si, + struct swap_cluster_info *ci, + swp_entry_t entry, unsigned int nr_pages); static void swap_range_alloc(struct swap_info_struct *si, unsigned int nr_entries); static bool folio_swapcache_freeable(struct folio *folio); static struct swap_cluster_info *lock_cluster(struct swap_info_struct *si, unsigned long offset); -static void unlock_cluster(struct swap_cluster_info *ci); +static inline void unlock_cluster(struct swap_cluster_info *ci); static DEFINE_SPINLOCK(swap_lock); static unsigned int nr_swapfiles; @@ -261,10 +262,9 @@ static int __try_to_reclaim_swap(struct swap_info_struct *si, folio_ref_sub(folio, nr_pages); folio_set_dirty(folio); - /* Only sinple page folio can be backed by zswap */ - if (nr_pages == 1) - zswap_invalidate(entry); - swap_entry_range_free(si, entry, nr_pages); + ci = lock_cluster(si, offset); + swap_entry_range_free(si, ci, entry, nr_pages); + unlock_cluster(ci); ret = nr_pages; out_unlock: folio_unlock(folio); @@ -1125,8 +1125,10 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset, * Use atomic clear_bit operations only on zeromap instead of non-atomic * bitmap_clear to prevent adjacent bits corruption due to simultaneous writes. */ - for (i = 0; i < nr_entries; i++) + for (i = 0; i < nr_entries; i++) { clear_bit(offset + i, si->zeromap); + zswap_invalidate(swp_entry(si->type, offset + i)); + } if (si->flags & SWP_BLKDEV) swap_slot_free_notify = @@ -1431,9 +1433,9 @@ static unsigned char __swap_entry_free(struct swap_info_struct *si, ci = lock_cluster(si, offset); usage = __swap_entry_free_locked(si, offset, 1); - unlock_cluster(ci); if (!usage) - free_swap_slot(entry); + swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1); + unlock_cluster(ci); return usage; } @@ -1461,13 +1463,10 @@ static bool __swap_entries_free(struct swap_info_struct *si, } for (i = 0; i < nr; i++) WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE); + if (!has_cache) + swap_entry_range_free(si, ci, entry, nr); unlock_cluster(ci); - if (!has_cache) { - for (i = 0; i < nr; i++) - zswap_invalidate(swp_entry(si->type, offset + i)); - swap_entry_range_free(si, entry, nr); - } return has_cache; fallback: @@ -1487,15 +1486,13 @@ fallback: * Drop the last HAS_CACHE flag of swap entries, caller have to * ensure all entries belong to the same cgroup. 
*/ -static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry, - unsigned int nr_pages) +static void swap_entry_range_free(struct swap_info_struct *si, + struct swap_cluster_info *ci, + swp_entry_t entry, unsigned int nr_pages) { unsigned long offset = swp_offset(entry); unsigned char *map = si->swap_map + offset; unsigned char *map_end = map + nr_pages; - struct swap_cluster_info *ci; - - ci = lock_cluster(si, offset); /* It should never free entries across different clusters */ VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1)); @@ -1515,7 +1512,6 @@ static void swap_entry_range_free(struct swap_info_struct *si, swp_entry_t entry free_cluster(si, ci); else partial_free_cluster(si, ci); - unlock_cluster(ci); } static void cluster_swap_free_nr(struct swap_info_struct *si, @@ -1523,28 +1519,13 @@ static void cluster_swap_free_nr(struct swap_info_struct *si, unsigned char usage) { struct swap_cluster_info *ci; - DECLARE_BITMAP(to_free, BITS_PER_LONG) = { 0 }; - int i, nr; + unsigned long end = offset + nr_pages; ci = lock_cluster(si, offset); - while (nr_pages) { - nr = min(BITS_PER_LONG, nr_pages); - for (i = 0; i < nr; i++) { - if (!__swap_entry_free_locked(si, offset + i, usage)) - bitmap_set(to_free, i, 1); - } - if (!bitmap_empty(to_free, BITS_PER_LONG)) { - unlock_cluster(ci); - for_each_set_bit(i, to_free, BITS_PER_LONG) - free_swap_slot(swp_entry(si->type, offset + i)); - if (nr == nr_pages) - return; - bitmap_clear(to_free, 0, BITS_PER_LONG); - ci = lock_cluster(si, offset); - } - offset += nr; - nr_pages -= nr; - } + do { + if (!__swap_entry_free_locked(si, offset, usage)) + swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1); + } while (++offset < end); unlock_cluster(ci); } @@ -1585,18 +1566,12 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) return; ci = lock_cluster(si, offset); - if (size > 1 && swap_is_has_cache(si, offset, size)) { - unlock_cluster(ci); - swap_entry_range_free(si, entry, size); - return; - } - for (int i = 0; i < size; i++, entry.val++) { - if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) { - unlock_cluster(ci); - free_swap_slot(entry); - if (i == size - 1) - return; - lock_cluster(si, offset); + if (swap_is_has_cache(si, offset, size)) + swap_entry_range_free(si, ci, entry, size); + else { + for (int i = 0; i < size; i++, entry.val++) { + if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) + swap_entry_range_free(si, ci, entry, 1); } } unlock_cluster(ci); @@ -1605,6 +1580,7 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry) void swapcache_free_entries(swp_entry_t *entries, int n) { int i; + struct swap_cluster_info *ci; struct swap_info_struct *si = NULL; if (n <= 0) @@ -1612,8 +1588,11 @@ void swapcache_free_entries(swp_entry_t *entries, int n) for (i = 0; i < n; ++i) { si = _swap_info_get(entries[i]); - if (si) - swap_entry_range_free(si, entries[i], 1); + if (si) { + ci = lock_cluster(si, swp_offset(entries[i])); + swap_entry_range_free(si, ci, entries[i], 1); + unlock_cluster(ci); + } } } From 15fdc79c59c862894b6ef4fb63ab52611e9ad2cf Mon Sep 17 00:00:00 2001 From: Maninder Singh Date: Mon, 30 Dec 2024 15:40:43 +0530 Subject: [PATCH 239/504] lib/list_debug.c: add object information in case of invalid object As of now, during linked list corruption it prints the culprit address and its wrong value, but sometimes that is not enough to catch the actual issue point.
If it prints allocation and free path of that corrupted node, it will be a lot easier to find and fix the issues. Adding the same information when data mismatch is found in link list debug data: [ 14.243055] slab kmalloc-32 start ffff0000cda19320 data offset 32 pointer offset 8 size 32 allocated at add_to_list+0x28/0xb0 [ 14.245259] __kmalloc_cache_noprof+0x1c4/0x358 [ 14.245572] add_to_list+0x28/0xb0 ... [ 14.248632] do_el0_svc_compat+0x1c/0x34 [ 14.249018] el0_svc_compat+0x2c/0x80 [ 14.249244] Free path: [ 14.249410] kfree+0x24c/0x2f0 [ 14.249724] do_force_corruption+0xbc/0x100 ... [ 14.252266] el0_svc_common.constprop.0+0x40/0xe0 [ 14.252540] do_el0_svc_compat+0x1c/0x34 [ 14.252763] el0_svc_compat+0x2c/0x80 [ 14.253071] ------------[ cut here ]------------ [ 14.253303] list_del corruption. next->prev should be ffff0000cda192a8, but was 6b6b6b6b6b6b6b6b. (next=ffff0000cda19348) [ 14.254255] WARNING: CPU: 3 PID: 84 at lib/list_debug.c:65 __list_del_entry_valid_or_report+0x158/0x164 Moved prototype of mem_dump_obj() to bug.h, as mm.h can not be included in bug.h. Link: https://lkml.kernel.org/r/20241230101043.53773-1-maninder1.s@samsung.com Signed-off-by: Maninder Singh Acked-by: Jan Kara Cc: Al Viro Cc: Christian Brauner Cc: Marco Elver Cc: Rohit Thapliyal Signed-off-by: Andrew Morton --- fs/open.c | 2 +- fs/super.c | 2 +- include/linux/bug.h | 10 +++++++++- include/linux/mm.h | 6 ------ lib/list_debug.c | 22 +++++++++++----------- 5 files changed, 22 insertions(+), 20 deletions(-) diff --git a/fs/open.c b/fs/open.c index e6911101fe71..0f75e220b700 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1504,7 +1504,7 @@ static int filp_flush(struct file *filp, fl_owner_t id) { int retval = 0; - if (CHECK_DATA_CORRUPTION(file_count(filp) == 0, + if (CHECK_DATA_CORRUPTION(file_count(filp) == 0, filp, "VFS: Close: file count is 0 (f_op=%ps)", filp->f_op)) { return 0; diff --git a/fs/super.c b/fs/super.c index c9c7223bc2a2..5a7db4a556e3 100644 --- a/fs/super.c +++ b/fs/super.c @@ -647,7 +647,7 @@ void generic_shutdown_super(struct super_block *sb) */ fscrypt_destroy_keyring(sb); - if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), + if (CHECK_DATA_CORRUPTION(!list_empty(&sb->s_inodes), NULL, "VFS: Busy inodes after unmount of %s (%s)", sb->s_id, sb->s_type->name)) { /* diff --git a/include/linux/bug.h b/include/linux/bug.h index 348acf2558f3..a9948a9f1093 100644 --- a/include/linux/bug.h +++ b/include/linux/bug.h @@ -73,15 +73,23 @@ static inline void generic_bug_clear_once(void) {} #endif /* CONFIG_GENERIC_BUG */ +#ifdef CONFIG_PRINTK +void mem_dump_obj(void *object); +#else +static inline void mem_dump_obj(void *object) {} +#endif + /* * Since detected data corruption should stop operation on the affected * structures. Return value must be checked and sanely acted on by caller. */ static inline __must_check bool check_data_corruption(bool v) { return v; } -#define CHECK_DATA_CORRUPTION(condition, fmt, ...) \ +#define CHECK_DATA_CORRUPTION(condition, addr, fmt, ...) 
\ check_data_corruption(({ \ bool corruption = unlikely(condition); \ if (corruption) { \ + if (addr) \ + mem_dump_obj(addr); \ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) { \ pr_err(fmt, ##__VA_ARGS__); \ BUG(); \ diff --git a/include/linux/mm.h b/include/linux/mm.h index 7d3718e11047..3e6e5e6cee4f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -4168,12 +4168,6 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping, extern int sysctl_nr_trim_pages; -#ifdef CONFIG_PRINTK -void mem_dump_obj(void *object); -#else -static inline void mem_dump_obj(void *object) {} -#endif - #ifdef CONFIG_ANON_VMA_NAME int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, unsigned long len_in, diff --git a/lib/list_debug.c b/lib/list_debug.c index db602417febf..ee7eeeb8f92c 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c @@ -22,17 +22,17 @@ __list_valid_slowpath bool __list_add_valid_or_report(struct list_head *new, struct list_head *prev, struct list_head *next) { - if (CHECK_DATA_CORRUPTION(prev == NULL, + if (CHECK_DATA_CORRUPTION(prev == NULL, NULL, "list_add corruption. prev is NULL.\n") || - CHECK_DATA_CORRUPTION(next == NULL, + CHECK_DATA_CORRUPTION(next == NULL, NULL, "list_add corruption. next is NULL.\n") || - CHECK_DATA_CORRUPTION(next->prev != prev, + CHECK_DATA_CORRUPTION(next->prev != prev, next, "list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n", prev, next->prev, next) || - CHECK_DATA_CORRUPTION(prev->next != next, + CHECK_DATA_CORRUPTION(prev->next != next, prev, "list_add corruption. prev->next should be next (%px), but was %px. (prev=%px).\n", next, prev->next, prev) || - CHECK_DATA_CORRUPTION(new == prev || new == next, + CHECK_DATA_CORRUPTION(new == prev || new == next, NULL, "list_add double add: new=%px, prev=%px, next=%px.\n", new, prev, next)) return false; @@ -49,20 +49,20 @@ bool __list_del_entry_valid_or_report(struct list_head *entry) prev = entry->prev; next = entry->next; - if (CHECK_DATA_CORRUPTION(next == NULL, + if (CHECK_DATA_CORRUPTION(next == NULL, NULL, "list_del corruption, %px->next is NULL\n", entry) || - CHECK_DATA_CORRUPTION(prev == NULL, + CHECK_DATA_CORRUPTION(prev == NULL, NULL, "list_del corruption, %px->prev is NULL\n", entry) || - CHECK_DATA_CORRUPTION(next == LIST_POISON1, + CHECK_DATA_CORRUPTION(next == LIST_POISON1, next, "list_del corruption, %px->next is LIST_POISON1 (%px)\n", entry, LIST_POISON1) || - CHECK_DATA_CORRUPTION(prev == LIST_POISON2, + CHECK_DATA_CORRUPTION(prev == LIST_POISON2, prev, "list_del corruption, %px->prev is LIST_POISON2 (%px)\n", entry, LIST_POISON2) || - CHECK_DATA_CORRUPTION(prev->next != entry, + CHECK_DATA_CORRUPTION(prev->next != entry, prev, "list_del corruption. prev->next should be %px, but was %px. (prev=%px)\n", entry, prev->next, prev) || - CHECK_DATA_CORRUPTION(next->prev != entry, + CHECK_DATA_CORRUPTION(next->prev != entry, next, "list_del corruption. next->prev should be %px, but was %px. (next=%px)\n", entry, next->prev, next)) return false; From 7ffdbd7b2f3f8feeb7df2be54701d95369b13197 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 2 Jan 2025 12:10:51 +0000 Subject: [PATCH 240/504] mips: vdso: prefer do_mmap() to mmap_region() Patch series "mm: update mips to use do_mmap(), make mmap_region() internal". Currently the only user of mmap_region() outside of the memory management code is the MIPS VDSO implementation. 
This uses mmap_region() to map a 'delay slot emulation page' at the top of the stack which is read-only and executable. This mapping requires that an already-acquired mmap write lock is utilised and that uffd and populate logic is ignored. This rules out vm_mmap(), however do_mmap() fits the bill. Adapt this code to use do_mmap() and then once done, make mmap_region() internal and userland testable, and avoid any other uses of mmap_region(), which is absolutely and strictly an internal mm function which bypasses a great number of checks and logic. This patch (of 2): mmap_region() is an internal memory management implementation detail that is not intended to be used outside of the memory management subsystem. Map the delay slot emulation page using do_mmap() which makes use of the already-held mmap write lock and bypasses unneeded populate and userfaultfd logic. This should have the precise same behaviour as the existing logic. Link: https://lkml.kernel.org/r/cover.1735819274.git.lorenzo.stoakes@oracle.com Link: https://lkml.kernel.org/r/ef076e381570f709e5c2c142dc030ec5b3309a0e.1735819274.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Liam R. Howlett Cc: Jann Horn Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- arch/mips/kernel/vdso.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c index 4c8e3c0aa210..75c9d3618f58 100644 --- a/arch/mips/kernel/vdso.c +++ b/arch/mips/kernel/vdso.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -97,11 +98,12 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) return -EINTR; if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) { + unsigned long unused; + /* Map delay slot emulation page */ - base = mmap_region(NULL, STACK_TOP, PAGE_SIZE, - VM_READ | VM_EXEC | - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, - 0, NULL); + base = do_mmap(NULL, STACK_TOP, PAGE_SIZE, PROT_READ | PROT_EXEC, + MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, 0, 0, &unused, + NULL); if (IS_ERR_VALUE(base)) { ret = base; goto out; From a73b3c582af746431c9619dae868dfbca5844921 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 2 Jan 2025 12:10:52 +0000 Subject: [PATCH 241/504] mm: make mmap_region() internal Now that we have removed the one user of mmap_region() outside of mm, make it internal and add it to vma.c so it can be userland tested. This ensures that all external memory mappings are performed using the appropriate interfaces and allows us to modify memory mapping logic as we see fit. Additionally expand test stubs to allow for the mmap_region() code to compile and be userland testable. Link: https://lkml.kernel.org/r/de5a3c574d35c26237edf20a1d8652d7305709c9.1735819274.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Liam R. 
Howlett Cc: Jann Horn Cc: Thomas Bogendoerfer Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- include/linux/mm.h | 3 -- mm/mmap.c | 59 ----------------------------- mm/vma.c | 61 +++++++++++++++++++++++++++++- mm/vma.h | 2 +- tools/testing/vma/vma_internal.h | 65 ++++++++++++++++++++++++++++++++ 5 files changed, 126 insertions(+), 64 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 3e6e5e6cee4f..78ec9cc909d2 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3446,9 +3446,6 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, return __get_unmapped_area(file, addr, len, pgoff, flags, 0); } -extern unsigned long mmap_region(struct file *file, unsigned long addr, - unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, - struct list_head *uf); extern unsigned long do_mmap(struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate, diff --git a/mm/mmap.c b/mm/mmap.c index 7fdc4207fe98..7aa36216ecc0 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1072,65 +1072,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, return do_vmi_munmap(&vmi, mm, start, len, uf, false); } -/** - * mmap_region() - Actually perform the userland mapping of a VMA into - * current->mm with known, aligned and overflow-checked @addr and @len, and - * correctly determined VMA flags @vm_flags and page offset @pgoff. - * - * This is an internal memory management function, and should not be used - * directly. - * - * The caller must write-lock current->mm->mmap_lock. - * - * @file: If a file-backed mapping, a pointer to the struct file describing the - * file to be mapped, otherwise NULL. - * @addr: The page-aligned address at which to perform the mapping. - * @len: The page-aligned, non-zero, length of the mapping. - * @vm_flags: The VMA flags which should be applied to the mapping. - * @pgoff: If @file is specified, the page offset into the file, if not then - * the virtual page offset in memory of the anonymous mapping. - * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap - * events. - * - * Returns: Either an error, or the address at which the requested mapping has - * been performed. - */ -unsigned long mmap_region(struct file *file, unsigned long addr, - unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, - struct list_head *uf) -{ - unsigned long ret; - bool writable_file_mapping = false; - - mmap_assert_write_locked(current->mm); - - /* Check to see if MDWE is applicable. */ - if (map_deny_write_exec(vm_flags, vm_flags)) - return -EACCES; - - /* Allow architectures to sanity-check the vm_flags. */ - if (!arch_validate_flags(vm_flags)) - return -EINVAL; - - /* Map writable and ensure this isn't a sealed memfd. */ - if (file && is_shared_maywrite(vm_flags)) { - int error = mapping_map_writable(file->f_mapping); - - if (error) - return error; - writable_file_mapping = true; - } - - ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf); - - /* Clear our write mapping regardless of error. 
*/ - if (writable_file_mapping) - mapping_unmap_writable(file->f_mapping); - - validate_mm(current->mm); - return ret; -} - int vm_munmap(unsigned long start, size_t len) { return __vm_munmap(start, len, false); diff --git a/mm/vma.c b/mm/vma.c index 87508abdd3cc..2ed118296164 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -2427,7 +2427,7 @@ static void __mmap_complete(struct mmap_state *map, struct vm_area_struct *vma) vma_set_page_prot(vma); } -unsigned long __mmap_region(struct file *file, unsigned long addr, +static unsigned long __mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, struct list_head *uf) { @@ -2479,6 +2479,65 @@ abort_munmap: return error; } +/** + * mmap_region() - Actually perform the userland mapping of a VMA into + * current->mm with known, aligned and overflow-checked @addr and @len, and + * correctly determined VMA flags @vm_flags and page offset @pgoff. + * + * This is an internal memory management function, and should not be used + * directly. + * + * The caller must write-lock current->mm->mmap_lock. + * + * @file: If a file-backed mapping, a pointer to the struct file describing the + * file to be mapped, otherwise NULL. + * @addr: The page-aligned address at which to perform the mapping. + * @len: The page-aligned, non-zero, length of the mapping. + * @vm_flags: The VMA flags which should be applied to the mapping. + * @pgoff: If @file is specified, the page offset into the file, if not then + * the virtual page offset in memory of the anonymous mapping. + * @uf: Optionally, a pointer to a list head used for tracking userfaultfd unmap + * events. + * + * Returns: Either an error, or the address at which the requested mapping has + * been performed. + */ +unsigned long mmap_region(struct file *file, unsigned long addr, + unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, + struct list_head *uf) +{ + unsigned long ret; + bool writable_file_mapping = false; + + mmap_assert_write_locked(current->mm); + + /* Check to see if MDWE is applicable. */ + if (map_deny_write_exec(vm_flags, vm_flags)) + return -EACCES; + + /* Allow architectures to sanity-check the vm_flags. */ + if (!arch_validate_flags(vm_flags)) + return -EINVAL; + + /* Map writable and ensure this isn't a sealed memfd. */ + if (file && is_shared_maywrite(vm_flags)) { + int error = mapping_map_writable(file->f_mapping); + + if (error) + return error; + writable_file_mapping = true; + } + + ret = __mmap_region(file, addr, len, vm_flags, pgoff, uf); + + /* Clear our write mapping regardless of error. */ + if (writable_file_mapping) + mapping_unmap_writable(file->f_mapping); + + validate_mm(current->mm); + return ret; +} + /* * do_brk_flags() - Increase the brk vma if the flags match. 
* @vmi: The vma iterator diff --git a/mm/vma.h b/mm/vma.h index bd54f2245a86..f51005b95b39 100644 --- a/mm/vma.h +++ b/mm/vma.h @@ -242,7 +242,7 @@ bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); int mm_take_all_locks(struct mm_struct *mm); void mm_drop_all_locks(struct mm_struct *mm); -unsigned long __mmap_region(struct file *file, unsigned long addr, +unsigned long mmap_region(struct file *file, unsigned long addr, unsigned long len, vm_flags_t vm_flags, unsigned long pgoff, struct list_head *uf); diff --git a/tools/testing/vma/vma_internal.h b/tools/testing/vma/vma_internal.h index c7c580ec9a2d..49a85ce0d45a 100644 --- a/tools/testing/vma/vma_internal.h +++ b/tools/testing/vma/vma_internal.h @@ -41,6 +41,8 @@ extern unsigned long dac_mmap_min_addr; #define VM_BUG_ON(_expr) (BUG_ON(_expr)) #define VM_BUG_ON_VMA(_expr, _vma) (BUG_ON(_expr)) +#define MMF_HAS_MDWE 28 + #define VM_NONE 0x00000000 #define VM_READ 0x00000001 #define VM_WRITE 0x00000002 @@ -222,6 +224,8 @@ struct mm_struct { unsigned long stack_vm; /* VM_STACK */ unsigned long def_flags; + + unsigned long flags; /* Must use atomic bitops to access */ }; struct file { @@ -1176,4 +1180,65 @@ static inline void userfaultfd_unmap_complete(struct mm_struct *mm, { } +/* + * Denies creating a writable executable mapping or gaining executable permissions. + * + * This denies the following: + * + * a) mmap(PROT_WRITE | PROT_EXEC) + * + * b) mmap(PROT_WRITE) + * mprotect(PROT_EXEC) + * + * c) mmap(PROT_WRITE) + * mprotect(PROT_READ) + * mprotect(PROT_EXEC) + * + * But allows the following: + * + * d) mmap(PROT_READ | PROT_EXEC) + * mmap(PROT_READ | PROT_EXEC | PROT_BTI) + * + * This is only applicable if the user has set the Memory-Deny-Write-Execute + * (MDWE) protection mask for the current process. + * + * @old specifies the VMA flags the VMA originally possessed, and @new the ones + * we propose to set. + * + * Return: false if proposed change is OK, true if not ok and should be denied. + */ +static inline bool map_deny_write_exec(unsigned long old, unsigned long new) +{ + /* If MDWE is disabled, we have nothing to deny. */ + if (!test_bit(MMF_HAS_MDWE, ¤t->mm->flags)) + return false; + + /* If the new VMA is not executable, we have nothing to deny. */ + if (!(new & VM_EXEC)) + return false; + + /* Under MDWE we do not accept newly writably executable VMAs... */ + if (new & VM_WRITE) + return true; + + /* ...nor previously non-executable VMAs becoming executable. */ + if (!(old & VM_EXEC)) + return true; + + return false; +} + +static inline int mapping_map_writable(struct address_space *mapping) +{ + int c = atomic_read(&mapping->i_mmap_writable); + + /* Derived from the raw_atomic_inc_unless_negative() implementation. */ + do { + if (c < 0) + return -EPERM; + } while (!__sync_bool_compare_and_swap(&mapping->i_mmap_writable, c, c+1)); + + return 0; +} + #endif /* __MM_VMA_INTERNAL_H */ From 7f9387a7d5f24011310fd05b9bb2df641033909a Mon Sep 17 00:00:00 2001 From: Guo Weikang Date: Thu, 2 Jan 2025 15:25:28 +0800 Subject: [PATCH 242/504] mm/memblock: add memblock_alloc_or_panic interface Before SLUB initialization, various subsystems used memblock_alloc to allocate memory. In most cases, when memory allocation fails, an immediate panic is required. To simplify this behavior and reduce repetitive checks, introduce `memblock_alloc_or_panic`. 
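(A rough before/after sketch, mirroring the call sites converted later in this patch rather than introducing anything new: the repeated open-coded pattern

    ptr = memblock_alloc(size, SMP_CACHE_BYTES);
    if (!ptr)
            panic("%s: Failed to allocate %zu bytes\n", __func__, size);

collapses into a single call

    ptr = memblock_alloc_or_panic(size, SMP_CACHE_BYTES);

with the failure check and the panic performed inside the helper.)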
This function ensures that memory allocation failures result in a panic automatically, improving code readability and consistency across subsystems that require this behavior. Link: https://lkml.kernel.org/r/20250102072528.650926-1-guoweikang.kernel@gmail.com Signed-off-by: Guo Weikang Acked-by: Geert Uytterhoeven [m68k] Reviewed-by: Alexander Gordeev [s390] Cc: Mike Rapoport (Microsoft) Cc: Alexander Gordeev Signed-off-by: Andrew Morton --- arch/alpha/kernel/core_cia.c | 5 +- arch/alpha/kernel/core_marvel.c | 10 +--- arch/alpha/kernel/pci.c | 13 +---- arch/alpha/kernel/pci_iommu.c | 10 +--- arch/arm/kernel/setup.c | 10 +--- arch/arm/mm/mmu.c | 17 ++---- arch/arm/mm/nommu.c | 5 +- arch/arm64/kernel/setup.c | 4 +- arch/loongarch/kernel/setup.c | 2 +- arch/loongarch/mm/init.c | 13 ++--- arch/m68k/mm/init.c | 5 +- arch/m68k/mm/mcfmmu.c | 10 +--- arch/m68k/mm/motorola.c | 5 +- arch/m68k/mm/sun3mmu.c | 10 +--- arch/m68k/sun3/sun3dvma.c | 6 +-- arch/mips/kernel/setup.c | 5 +- arch/openrisc/mm/ioremap.c | 5 +- arch/parisc/mm/init.c | 20 ++----- arch/powerpc/kernel/dt_cpu_ftrs.c | 10 ++-- arch/powerpc/kernel/pci_32.c | 5 +- arch/powerpc/kernel/setup-common.c | 5 +- arch/powerpc/kernel/setup_32.c | 8 +-- arch/powerpc/mm/book3s32/mmu.c | 5 +- arch/powerpc/mm/book3s64/pgtable.c | 6 +-- arch/powerpc/mm/kasan/init_book3e_64.c | 8 +-- arch/powerpc/mm/kasan/init_book3s_64.c | 2 +- arch/powerpc/mm/nohash/mmu_context.c | 16 ++---- arch/powerpc/mm/pgtable_32.c | 7 +-- arch/powerpc/platforms/powermac/nvram.c | 5 +- arch/powerpc/platforms/powernv/opal.c | 5 +- arch/powerpc/platforms/ps3/setup.c | 5 +- arch/powerpc/sysdev/msi_bitmap.c | 5 +- arch/riscv/kernel/setup.c | 4 +- arch/riscv/mm/kasan_init.c | 14 ++--- arch/s390/kernel/numa.c | 5 +- arch/s390/kernel/setup.c | 20 ++----- arch/s390/kernel/smp.c | 9 ++-- arch/s390/kernel/topology.c | 10 +--- arch/sh/mm/init.c | 10 +--- arch/sparc/kernel/prom_32.c | 4 +- arch/sparc/mm/srmmu.c | 14 ++--- arch/um/drivers/net_kern.c | 5 +- arch/um/drivers/vector_kern.c | 5 +- arch/um/kernel/load_file.c | 4 +- arch/x86/coco/sev/core.c | 4 +- arch/x86/kernel/acpi/boot.c | 5 +- arch/x86/kernel/apic/io_apic.c | 9 +--- arch/x86/kernel/e820.c | 5 +- arch/x86/platform/olpc/olpc_dt.c | 6 +-- arch/x86/xen/p2m.c | 8 +-- arch/xtensa/mm/kasan_init.c | 6 +-- drivers/clk/ti/clk.c | 5 +- drivers/macintosh/smu.c | 6 +-- drivers/of/fdt.c | 8 +-- drivers/of/unittest.c | 8 +-- include/linux/memblock.h | 6 +++ init/main.c | 18 ++----- kernel/power/snapshot.c | 5 +- lib/cpumask.c | 5 +- mm/kmsan/shadow.c | 8 +-- mm/memblock.c | 20 +++++++ mm/numa.c | 8 +-- mm/percpu.c | 70 +++++-------------------- mm/sparse.c | 5 +- 64 files changed, 143 insertions(+), 423 deletions(-) diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c index ca3d9c732b61..6e577228e175 100644 --- a/arch/alpha/kernel/core_cia.c +++ b/arch/alpha/kernel/core_cia.c @@ -331,10 +331,7 @@ cia_prepare_tbia_workaround(int window) long i; /* Use minimal 1K map. 
*/ - ppte = memblock_alloc(CIA_BROKEN_TBIA_SIZE, 32768); - if (!ppte) - panic("%s: Failed to allocate %u bytes align=0x%x\n", - __func__, CIA_BROKEN_TBIA_SIZE, 32768); + ppte = memblock_alloc_or_panic(CIA_BROKEN_TBIA_SIZE, 32768); pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1; for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i) diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c index b22248044bf0..b1bfbd11980d 100644 --- a/arch/alpha/kernel/core_marvel.c +++ b/arch/alpha/kernel/core_marvel.c @@ -81,10 +81,7 @@ mk_resource_name(int pe, int port, char *str) char *name; sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port); - name = memblock_alloc(strlen(tmp) + 1, SMP_CACHE_BYTES); - if (!name) - panic("%s: Failed to allocate %zu bytes\n", __func__, - strlen(tmp) + 1); + name = memblock_alloc_or_panic(strlen(tmp) + 1, SMP_CACHE_BYTES); strcpy(name, tmp); return name; @@ -119,10 +116,7 @@ alloc_io7(unsigned int pe) return NULL; } - io7 = memblock_alloc(sizeof(*io7), SMP_CACHE_BYTES); - if (!io7) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(*io7)); + io7 = memblock_alloc_or_panic(sizeof(*io7), SMP_CACHE_BYTES); io7->pe = pe; raw_spin_lock_init(&io7->irq_lock); diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c index 4458eb7f44f0..8e9b4ac86b7e 100644 --- a/arch/alpha/kernel/pci.c +++ b/arch/alpha/kernel/pci.c @@ -391,10 +391,7 @@ alloc_pci_controller(void) { struct pci_controller *hose; - hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); - if (!hose) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(*hose)); + hose = memblock_alloc_or_panic(sizeof(*hose), SMP_CACHE_BYTES); *hose_tail = hose; hose_tail = &hose->next; @@ -405,13 +402,7 @@ alloc_pci_controller(void) struct resource * __init alloc_resource(void) { - void *ptr = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); - - if (!ptr) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(struct resource)); - - return ptr; + return memblock_alloc_or_panic(sizeof(struct resource), SMP_CACHE_BYTES); } diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index 7fcf3e9b7103..681f56089d9c 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -71,14 +71,8 @@ iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base, if (align < mem_size) align = mem_size; - arena = memblock_alloc(sizeof(*arena), SMP_CACHE_BYTES); - if (!arena) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(*arena)); - arena->ptes = memblock_alloc(mem_size, align); - if (!arena->ptes) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, mem_size, align); + arena = memblock_alloc_or_panic(sizeof(*arena), SMP_CACHE_BYTES); + arena->ptes = memblock_alloc_or_panic(mem_size, align); spin_lock_init(&arena->lock); arena->hose = hose; diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index e6a857bf0ce6..a41c93988d2c 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -880,10 +880,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) */ boot_alias_start = phys_to_idmap(start); if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) { - res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); - if (!res) - panic("%s: Failed to allocate %zu bytes\n", - __func__, sizeof(*res)); + res = memblock_alloc_or_panic(sizeof(*res), SMP_CACHE_BYTES); res->name = "System RAM (boot alias)"; res->start = boot_alias_start; 
res->end = phys_to_idmap(res_end); @@ -891,10 +888,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc) request_resource(&iomem_resource, res); } - res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); - if (!res) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(*res)); + res = memblock_alloc_or_panic(sizeof(*res), SMP_CACHE_BYTES); res->name = "System RAM"; res->start = start; res->end = res_end; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index f5b7a16c5803..f02f872ea8a9 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -726,13 +726,8 @@ EXPORT_SYMBOL(phys_mem_access_prot); static void __init *early_alloc(unsigned long sz) { - void *ptr = memblock_alloc(sz, sz); + return memblock_alloc_or_panic(sz, sz); - if (!ptr) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, sz, sz); - - return ptr; } static void *__init late_alloc(unsigned long sz) @@ -1027,10 +1022,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr) if (!nr) return; - svm = memblock_alloc(sizeof(*svm) * nr, __alignof__(*svm)); - if (!svm) - panic("%s: Failed to allocate %zu bytes align=0x%zx\n", - __func__, sizeof(*svm) * nr, __alignof__(*svm)); + svm = memblock_alloc_or_panic(sizeof(*svm) * nr, __alignof__(*svm)); for (md = io_desc; nr; md++, nr--) { create_mapping(md); @@ -1052,10 +1044,7 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size, struct vm_struct *vm; struct static_vm *svm; - svm = memblock_alloc(sizeof(*svm), __alignof__(*svm)); - if (!svm) - panic("%s: Failed to allocate %zu bytes align=0x%zx\n", - __func__, sizeof(*svm), __alignof__(*svm)); + svm = memblock_alloc_or_panic(sizeof(*svm), __alignof__(*svm)); vm = &svm->vm; vm->addr = (void *)addr; diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index c415f3859b20..1a8f6914ee59 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -162,10 +162,7 @@ void __init paging_init(const struct machine_desc *mdesc) mpu_setup(); /* allocate the zero page. 
*/ - zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!zero_page) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, PAGE_SIZE, PAGE_SIZE); + zero_page = (void *)memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); bootmem_init(); diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 4f613e8e0745..85104587f849 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -223,9 +223,7 @@ static void __init request_standard_resources(void) num_standard_resources = memblock.memory.cnt; res_size = num_standard_resources * sizeof(*standard_resources); - standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES); - if (!standard_resources) - panic("%s: Failed to allocate %zu bytes\n", __func__, res_size); + standard_resources = memblock_alloc_or_panic(res_size, SMP_CACHE_BYTES); for_each_mem_region(region) { res = &standard_resources[i++]; diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index 56934fe58170..edcfdfcad7d2 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -431,7 +431,7 @@ static void __init resource_init(void) num_standard_resources = memblock.memory.cnt; res_size = num_standard_resources * sizeof(*standard_resources); - standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES); + standard_resources = memblock_alloc_or_panic(res_size, SMP_CACHE_BYTES); for_each_mem_region(region) { res = &standard_resources[i++]; diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c index 188b52bbb254..ca5aa5f46a9f 100644 --- a/arch/loongarch/mm/init.c +++ b/arch/loongarch/mm/init.c @@ -174,9 +174,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr) pmd_t *pmd; if (p4d_none(p4dp_get(p4d))) { - pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!pud) - panic("%s: Failed to allocate memory\n", __func__); + pud = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); p4d_populate(&init_mm, p4d, pud); #ifndef __PAGETABLE_PUD_FOLDED pud_init(pud); @@ -185,9 +183,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr) pud = pud_offset(p4d, addr); if (pud_none(pudp_get(pud))) { - pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!pmd) - panic("%s: Failed to allocate memory\n", __func__); + pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); pud_populate(&init_mm, pud, pmd); #ifndef __PAGETABLE_PMD_FOLDED pmd_init(pmd); @@ -198,10 +194,7 @@ pte_t * __init populate_kernel_pte(unsigned long addr) if (!pmd_present(pmdp_get(pmd))) { pte_t *pte; - pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!pte) - panic("%s: Failed to allocate memory\n", __func__); - + pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); pmd_populate_kernel(&init_mm, pmd, pte); kernel_pte_init(pte); } diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 1b47bec15832..8b11d0d545aa 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -68,10 +68,7 @@ void __init paging_init(void) high_memory = (void *) end_mem; - empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!empty_zero_page) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, PAGE_SIZE, PAGE_SIZE); + empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); max_zone_pfn[ZONE_DMA] = end_mem >> PAGE_SHIFT; free_area_init(max_zone_pfn); } diff --git a/arch/m68k/mm/mcfmmu.c b/arch/m68k/mm/mcfmmu.c index 9a6fa342e872..19a75029036c 100644 --- a/arch/m68k/mm/mcfmmu.c +++ b/arch/m68k/mm/mcfmmu.c @@ -42,20 +42,14 @@ void __init paging_init(void) unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 }; int i; - 
empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!empty_zero_page) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, PAGE_SIZE, PAGE_SIZE); + empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); pg_dir = swapper_pg_dir; memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir)); size = num_pages * sizeof(pte_t); size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1); - next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE); - if (!next_pgtable) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, size, PAGE_SIZE); + next_pgtable = (unsigned long) memblock_alloc_or_panic(size, PAGE_SIZE); pg_dir += PAGE_OFFSET >> PGDIR_SHIFT; diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 81715cece70c..eab50dda14ee 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -491,10 +491,7 @@ void __init paging_init(void) * initialize the bad page table and bad page to point * to a couple of allocated pages */ - empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!empty_zero_page) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, PAGE_SIZE, PAGE_SIZE); + empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); /* * Set up SFC/DFC registers diff --git a/arch/m68k/mm/sun3mmu.c b/arch/m68k/mm/sun3mmu.c index 494739c1783e..1ecf6bdd08bf 100644 --- a/arch/m68k/mm/sun3mmu.c +++ b/arch/m68k/mm/sun3mmu.c @@ -44,10 +44,7 @@ void __init paging_init(void) unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, }; unsigned long size; - empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!empty_zero_page) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, PAGE_SIZE, PAGE_SIZE); + empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); address = PAGE_OFFSET; pg_dir = swapper_pg_dir; @@ -57,10 +54,7 @@ void __init paging_init(void) size = num_pages * sizeof(pte_t); size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1); - next_pgtable = (unsigned long)memblock_alloc(size, PAGE_SIZE); - if (!next_pgtable) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, size, PAGE_SIZE); + next_pgtable = (unsigned long)memblock_alloc_or_panic(size, PAGE_SIZE); bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK; /* Map whole memory from PAGE_OFFSET (0x0E000000) */ diff --git a/arch/m68k/sun3/sun3dvma.c b/arch/m68k/sun3/sun3dvma.c index 6ebf52740ad7..225fc735e466 100644 --- a/arch/m68k/sun3/sun3dvma.c +++ b/arch/m68k/sun3/sun3dvma.c @@ -252,12 +252,8 @@ void __init dvma_init(void) list_add(&(hole->list), &hole_list); - iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long), + iommu_use = memblock_alloc_or_panic(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long), SMP_CACHE_BYTES); - if (!iommu_use) - panic("%s: Failed to allocate %zu bytes\n", __func__, - IOMMU_TOTAL_ENTRIES * sizeof(unsigned long)); - dvma_unmap_iommu(DVMA_START, DVMA_SIZE); sun3_dvma_init(); diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index 12a1a4ffb602..fbfe0771317e 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -704,10 +704,7 @@ static void __init resource_init(void) for_each_mem_range(i, &start, &end) { struct resource *res; - res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); - if (!res) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(struct resource)); + res = memblock_alloc_or_panic(sizeof(struct resource), SMP_CACHE_BYTES); res->start = start; /* diff --git a/arch/openrisc/mm/ioremap.c b/arch/openrisc/mm/ioremap.c index 
f59ea4c10b0f..8e63e86251ca 100644 --- a/arch/openrisc/mm/ioremap.c +++ b/arch/openrisc/mm/ioremap.c @@ -38,10 +38,7 @@ pte_t __ref *pte_alloc_one_kernel(struct mm_struct *mm) if (likely(mem_init_done)) { pte = (pte_t *)get_zeroed_page(GFP_KERNEL); } else { - pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!pte) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, PAGE_SIZE, PAGE_SIZE); + pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); } return pte; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 96970fa75e4a..61c0a2477072 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -377,10 +377,8 @@ static void __ref map_pages(unsigned long start_vaddr, #if CONFIG_PGTABLE_LEVELS == 3 if (pud_none(*pud)) { - pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER, + pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER, PAGE_SIZE << PMD_TABLE_ORDER); - if (!pmd) - panic("pmd allocation failed.\n"); pud_populate(NULL, pud, pmd); } #endif @@ -388,9 +386,7 @@ static void __ref map_pages(unsigned long start_vaddr, pmd = pmd_offset(pud, vaddr); for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) { if (pmd_none(*pmd)) { - pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!pg_table) - panic("page table allocation failed\n"); + pg_table = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); pmd_populate_kernel(NULL, pmd, pg_table); } @@ -648,9 +644,7 @@ static void __init pagetable_init(void) } #endif - empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!empty_zero_page) - panic("zero page allocation failed.\n"); + empty_zero_page = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); } @@ -687,19 +681,15 @@ static void __init fixmap_init(void) #if CONFIG_PGTABLE_LEVELS == 3 if (pud_none(*pud)) { - pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER, + pmd = memblock_alloc_or_panic(PAGE_SIZE << PMD_TABLE_ORDER, PAGE_SIZE << PMD_TABLE_ORDER); - if (!pmd) - panic("fixmap: pmd allocation failed.\n"); pud_populate(NULL, pud, pmd); } #endif pmd = pmd_offset(pud, addr); do { - pte_t *pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!pte) - panic("fixmap: pte allocation failed.\n"); + pte_t *pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); pmd_populate_kernel(&init_mm, pmd, pte); diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c index 1bee15c013e7..3af6c06af02f 100644 --- a/arch/powerpc/kernel/dt_cpu_ftrs.c +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c @@ -1087,12 +1087,10 @@ static int __init dt_cpu_ftrs_scan_callback(unsigned long node, const char /* Count and allocate space for cpu features */ of_scan_flat_dt_subnodes(node, count_cpufeatures_subnodes, &nr_dt_cpu_features); - dt_cpu_features = memblock_alloc(sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, PAGE_SIZE); - if (!dt_cpu_features) - panic("%s: Failed to allocate %zu bytes align=0x%lx\n", - __func__, - sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, - PAGE_SIZE); + dt_cpu_features = + memblock_alloc_or_panic( + sizeof(struct dt_cpu_feature) * nr_dt_cpu_features, + PAGE_SIZE); cpufeatures_setup_start(isa); diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index ce0c8623e563..f8a3bd8cfae4 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c @@ -213,11 +213,8 @@ pci_create_OF_bus_map(void) struct property* of_prop; struct device_node *dn; - of_prop = memblock_alloc(sizeof(struct property) + 256, + of_prop = memblock_alloc_or_panic(sizeof(struct property) + 256, SMP_CACHE_BYTES); - if (!of_prop) - panic("%s: 
Failed to allocate %zu bytes\n", __func__, - sizeof(struct property) + 256); dn = of_find_node_by_path("/"); if (dn) { memset(of_prop, -1, sizeof(struct property) + 256); diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 6fa179448c33..f3ea1329c566 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -458,11 +458,8 @@ void __init smp_setup_cpu_maps(void) DBG("smp_setup_cpu_maps()\n"); - cpu_to_phys_id = memblock_alloc(nr_cpu_ids * sizeof(u32), + cpu_to_phys_id = memblock_alloc_or_panic(nr_cpu_ids * sizeof(u32), __alignof__(u32)); - if (!cpu_to_phys_id) - panic("%s: Failed to allocate %zu bytes align=0x%zx\n", - __func__, nr_cpu_ids * sizeof(u32), __alignof__(u32)); for_each_node_by_type(dn, "cpu") { const __be32 *intserv; diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 75dbf3e0d9c4..5a1bf501fbe1 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -140,13 +140,7 @@ arch_initcall(ppc_init); static void *__init alloc_stack(void) { - void *ptr = memblock_alloc(THREAD_SIZE, THREAD_ALIGN); - - if (!ptr) - panic("cannot allocate %d bytes for stack at %pS\n", - THREAD_SIZE, (void *)_RET_IP_); - - return ptr; + return memblock_alloc_or_panic(THREAD_SIZE, THREAD_ALIGN); } void __init irqstack_early_init(void) diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c index 6978344edcb4..be9c4106e22f 100644 --- a/arch/powerpc/mm/book3s32/mmu.c +++ b/arch/powerpc/mm/book3s32/mmu.c @@ -377,10 +377,7 @@ void __init MMU_init_hw(void) * Find some memory for the hash table. */ if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); - Hash = memblock_alloc(Hash_size, Hash_size); - if (!Hash) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, Hash_size, Hash_size); + Hash = memblock_alloc_or_panic(Hash_size, Hash_size); _SDR1 = __pa(Hash) | SDR1_LOW_BITS; pr_info("Total memory = %lldMB; using %ldkB for hash table\n", diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c index 3f28e4acd920..ce64abea9e3e 100644 --- a/arch/powerpc/mm/book3s64/pgtable.c +++ b/arch/powerpc/mm/book3s64/pgtable.c @@ -330,11 +330,7 @@ void __init mmu_partition_table_init(void) unsigned long ptcr; /* Initialize the Partition Table with no entries */ - partition_tb = memblock_alloc(patb_size, patb_size); - if (!partition_tb) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, patb_size, patb_size); - + partition_tb = memblock_alloc_or_panic(patb_size, patb_size); ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12); set_ptcr_when_no_uv(ptcr); powernv_set_nmmu_ptcr(ptcr); diff --git a/arch/powerpc/mm/kasan/init_book3e_64.c b/arch/powerpc/mm/kasan/init_book3e_64.c index 43c03b84ff32..60c78aac0f63 100644 --- a/arch/powerpc/mm/kasan/init_book3e_64.c +++ b/arch/powerpc/mm/kasan/init_book3e_64.c @@ -40,19 +40,19 @@ static int __init kasan_map_kernel_page(unsigned long ea, unsigned long pa, pgpr pgdp = pgd_offset_k(ea); p4dp = p4d_offset(pgdp, ea); if (kasan_pud_table(*p4dp)) { - pudp = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE); + pudp = memblock_alloc_or_panic(PUD_TABLE_SIZE, PUD_TABLE_SIZE); memcpy(pudp, kasan_early_shadow_pud, PUD_TABLE_SIZE); p4d_populate(&init_mm, p4dp, pudp); } pudp = pud_offset(p4dp, ea); if (kasan_pmd_table(*pudp)) { - pmdp = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE); + pmdp = memblock_alloc_or_panic(PMD_TABLE_SIZE, PMD_TABLE_SIZE); memcpy(pmdp, kasan_early_shadow_pmd, 
PMD_TABLE_SIZE); pud_populate(&init_mm, pudp, pmdp); } pmdp = pmd_offset(pudp, ea); if (kasan_pte_table(*pmdp)) { - ptep = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE); + ptep = memblock_alloc_or_panic(PTE_TABLE_SIZE, PTE_TABLE_SIZE); memcpy(ptep, kasan_early_shadow_pte, PTE_TABLE_SIZE); pmd_populate_kernel(&init_mm, pmdp, ptep); } @@ -74,7 +74,7 @@ static void __init kasan_init_phys_region(void *start, void *end) k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE); k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE); - va = memblock_alloc(k_end - k_start, PAGE_SIZE); + va = memblock_alloc_or_panic(k_end - k_start, PAGE_SIZE); for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE) kasan_map_kernel_page(k_cur, __pa(va), PAGE_KERNEL); } diff --git a/arch/powerpc/mm/kasan/init_book3s_64.c b/arch/powerpc/mm/kasan/init_book3s_64.c index 3fb5ce4f48f4..7d959544c077 100644 --- a/arch/powerpc/mm/kasan/init_book3s_64.c +++ b/arch/powerpc/mm/kasan/init_book3s_64.c @@ -32,7 +32,7 @@ static void __init kasan_init_phys_region(void *start, void *end) k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE); k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE); - va = memblock_alloc(k_end - k_start, PAGE_SIZE); + va = memblock_alloc_or_panic(k_end - k_start, PAGE_SIZE); for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE) map_kernel_page(k_cur, __pa(va), PAGE_KERNEL); } diff --git a/arch/powerpc/mm/nohash/mmu_context.c b/arch/powerpc/mm/nohash/mmu_context.c index 0b181da40ddb..a1a4e697251a 100644 --- a/arch/powerpc/mm/nohash/mmu_context.c +++ b/arch/powerpc/mm/nohash/mmu_context.c @@ -385,21 +385,11 @@ void __init mmu_context_init(void) /* * Allocate the maps used by context management */ - context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); - if (!context_map) - panic("%s: Failed to allocate %zu bytes\n", __func__, - CTX_MAP_SIZE); - context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1), + context_map = memblock_alloc_or_panic(CTX_MAP_SIZE, SMP_CACHE_BYTES); + context_mm = memblock_alloc_or_panic(sizeof(void *) * (LAST_CONTEXT + 1), SMP_CACHE_BYTES); - if (!context_mm) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(void *) * (LAST_CONTEXT + 1)); if (IS_ENABLED(CONFIG_SMP)) { - stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); - if (!stale_map[boot_cpuid]) - panic("%s: Failed to allocate %zu bytes\n", __func__, - CTX_MAP_SIZE); - + stale_map[boot_cpuid] = memblock_alloc_or_panic(CTX_MAP_SIZE, SMP_CACHE_BYTES); cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE, "powerpc/mmu/ctx:prepare", mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead); diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 787b22206386..15276068f657 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c @@ -50,13 +50,8 @@ notrace void __init early_ioremap_init(void) void __init *early_alloc_pgtable(unsigned long size) { - void *ptr = memblock_alloc(size, size); + return memblock_alloc_or_panic(size, size); - if (!ptr) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, size, size); - - return ptr; } pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va) diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index fe2e0249cbc2..a112d26185a0 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c @@ -514,10 +514,7 @@ 
static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr) printk(KERN_ERR "nvram: no address\n"); return -EINVAL; } - nvram_image = memblock_alloc(NVRAM_SIZE, SMP_CACHE_BYTES); - if (!nvram_image) - panic("%s: Failed to allocate %u bytes\n", __func__, - NVRAM_SIZE); + nvram_image = memblock_alloc_or_panic(NVRAM_SIZE, SMP_CACHE_BYTES); nvram_data = ioremap(addr, NVRAM_SIZE*2); nvram_naddrs = 1; /* Make sure we get the correct case */ diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 5d0f35bb917e..09bd93464b4f 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -180,10 +180,7 @@ int __init early_init_dt_scan_recoverable_ranges(unsigned long node, /* * Allocate a buffer to hold the MC recoverable ranges. */ - mc_recoverable_range = memblock_alloc(size, __alignof__(u64)); - if (!mc_recoverable_range) - panic("%s: Failed to allocate %u bytes align=0x%lx\n", - __func__, size, __alignof__(u64)); + mc_recoverable_range = memblock_alloc_or_panic(size, __alignof__(u64)); for (i = 0; i < mc_recoverable_range_len; i++) { mc_recoverable_range[i].start_addr = diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c index 5144f11359f7..150c09b58ae8 100644 --- a/arch/powerpc/platforms/ps3/setup.c +++ b/arch/powerpc/platforms/ps3/setup.c @@ -115,10 +115,7 @@ static void __init prealloc(struct ps3_prealloc *p) if (!p->size) return; - p->address = memblock_alloc(p->size, p->align); - if (!p->address) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, p->size, p->align); + p->address = memblock_alloc_or_panic(p->size, p->align); printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size, p->address); diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c index 0b6e37f3ffb8..456a4f64ae0a 100644 --- a/arch/powerpc/sysdev/msi_bitmap.c +++ b/arch/powerpc/sysdev/msi_bitmap.c @@ -124,10 +124,7 @@ int __ref msi_bitmap_alloc(struct msi_bitmap *bmp, unsigned int irq_count, if (bmp->bitmap_from_slab) bmp->bitmap = kzalloc(size, GFP_KERNEL); else { - bmp->bitmap = memblock_alloc(size, SMP_CACHE_BYTES); - if (!bmp->bitmap) - panic("%s: Failed to allocate %u bytes\n", __func__, - size); + bmp->bitmap = memblock_alloc_or_panic(size, SMP_CACHE_BYTES); /* the bitmap won't be freed from memblock allocator */ kmemleak_not_leak(bmp->bitmap); } diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 45010e71df86..f1793630fc51 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -147,9 +147,7 @@ static void __init init_resources(void) res_idx = num_resources - 1; mem_res_sz = num_resources * sizeof(*mem_res); - mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES); - if (!mem_res) - panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz); + mem_res = memblock_alloc_or_panic(mem_res_sz, SMP_CACHE_BYTES); /* * Start by adding the reserved regions, if they overlap diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index c301c8d291d2..41c635d6aca4 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -32,7 +32,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned pte_t *ptep, *p; if (pmd_none(pmdp_get(pmd))) { - p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE); + p = memblock_alloc_or_panic(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE); set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE)); } @@ -54,7 +54,7 @@ static void 
__init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned unsigned long next; if (pud_none(pudp_get(pud))) { - p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE); + p = memblock_alloc_or_panic(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE); set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE)); } @@ -85,7 +85,7 @@ static void __init kasan_populate_pud(p4d_t *p4d, unsigned long next; if (p4d_none(p4dp_get(p4d))) { - p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE); + p = memblock_alloc_or_panic(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE); set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE)); } @@ -116,7 +116,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd, unsigned long next; if (pgd_none(pgdp_get(pgd))) { - p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE); + p = memblock_alloc_or_panic(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE); set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE)); } @@ -385,7 +385,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d, next = pud_addr_end(vaddr, end); if (pud_none(pudp_get(pud_k))) { - p = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE)); continue; } @@ -405,7 +405,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd, next = p4d_addr_end(vaddr, end); if (p4d_none(p4dp_get(p4d_k))) { - p = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE)); continue; } @@ -424,7 +424,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long next = pgd_addr_end(vaddr, end); if (pgd_none(pgdp_get(pgd_k))) { - p = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + p = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE)); continue; } diff --git a/arch/s390/kernel/numa.c b/arch/s390/kernel/numa.c index ddc1448ea2e1..a33e20f73330 100644 --- a/arch/s390/kernel/numa.c +++ b/arch/s390/kernel/numa.c @@ -22,10 +22,7 @@ void __init numa_setup(void) node_set(0, node_possible_map); node_set_online(0); for (nid = 0; nid < MAX_NUMNODES; nid++) { - NODE_DATA(nid) = memblock_alloc(sizeof(pg_data_t), 8); - if (!NODE_DATA(nid)) - panic("%s: Failed to allocate %zu bytes align=0x%x\n", - __func__, sizeof(pg_data_t), 8); + NODE_DATA(nid) = memblock_alloc_or_panic(sizeof(pg_data_t), 8); } NODE_DATA(0)->node_spanned_pages = memblock_end_of_DRAM() >> PAGE_SHIFT; NODE_DATA(0)->node_id = 0; diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index a3fea683b227..f873535eddd2 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -384,11 +384,7 @@ static unsigned long __init stack_alloc_early(void) { unsigned long stack; - stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE); - if (!stack) { - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, THREAD_SIZE, THREAD_SIZE); - } + stack = (unsigned long)memblock_alloc_or_panic(THREAD_SIZE, THREAD_SIZE); return stack; } @@ -512,10 +508,7 @@ static void __init setup_resources(void) bss_resource.end = __pa_symbol(__bss_stop) - 1; for_each_mem_range(i, &start, &end) { - res = memblock_alloc(sizeof(*res), 8); - if (!res) - panic("%s: Failed to allocate %zu bytes align=0x%x\n", - __func__, sizeof(*res), 8); + res = memblock_alloc_or_panic(sizeof(*res), 8); res->flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM; res->name = "System RAM"; @@ -534,10 +527,7 @@ static void __init setup_resources(void) 
std_res->start > res->end) continue; if (std_res->end > res->end) { - sub_res = memblock_alloc(sizeof(*sub_res), 8); - if (!sub_res) - panic("%s: Failed to allocate %zu bytes align=0x%x\n", - __func__, sizeof(*sub_res), 8); + sub_res = memblock_alloc_or_panic(sizeof(*sub_res), 8); *sub_res = *std_res; sub_res->end = res->end; std_res->start = res->end + 1; @@ -824,9 +814,7 @@ static void __init setup_randomness(void) { struct sysinfo_3_2_2 *vmms; - vmms = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!vmms) - panic("Failed to allocate memory for sysinfo structure\n"); + vmms = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); if (stsi(vmms, 3, 2, 2) == 0 && vmms->count) add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count); memblock_free(vmms, PAGE_SIZE); diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 822d8e6f8717..d77aaefb59bd 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -611,9 +611,9 @@ void __init smp_save_dump_ipl_cpu(void) if (!dump_available()) return; sa = save_area_alloc(true); - regs = memblock_alloc(512, 8); - if (!sa || !regs) + if (!sa) panic("could not allocate memory for boot CPU save area\n"); + regs = memblock_alloc_or_panic(512, 8); copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512); save_area_add_regs(sa, regs); memblock_free(regs, 512); @@ -792,10 +792,7 @@ void __init smp_detect_cpus(void) u16 address; /* Get CPU information */ - info = memblock_alloc(sizeof(*info), 8); - if (!info) - panic("%s: Failed to allocate %zu bytes align=0x%x\n", - __func__, sizeof(*info), 8); + info = memblock_alloc_or_panic(sizeof(*info), 8); smp_get_core_info(info, 1); /* Find boot CPU type */ if (sclp.has_core_type) { diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 4f9c301a705b..45e220bfce75 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -548,10 +548,7 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info, nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; nr_masks = max(nr_masks, 1); for (i = 0; i < nr_masks; i++) { - mask->next = memblock_alloc(sizeof(*mask->next), 8); - if (!mask->next) - panic("%s: Failed to allocate %zu bytes align=0x%x\n", - __func__, sizeof(*mask->next), 8); + mask->next = memblock_alloc_or_panic(sizeof(*mask->next), 8); mask = mask->next; } } @@ -569,10 +566,7 @@ void __init topology_init_early(void) } if (!MACHINE_HAS_TOPOLOGY) goto out; - tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!tl_info) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, PAGE_SIZE, PAGE_SIZE); + tl_info = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); info = tl_info; store_topology(info); pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n", diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 2a88b0c9e70f..289a2fecebef 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -137,10 +137,7 @@ static pmd_t * __init one_md_table_init(pud_t *pud) if (pud_none(*pud)) { pmd_t *pmd; - pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!pmd) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, PAGE_SIZE, PAGE_SIZE); + pmd = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); pud_populate(&init_mm, pud, pmd); BUG_ON(pmd != pmd_offset(pud, 0)); } @@ -153,10 +150,7 @@ static pte_t * __init one_page_table_init(pmd_t *pmd) if (pmd_none(*pmd)) { pte_t *pte; - pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - if (!pte) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, PAGE_SIZE, 
PAGE_SIZE); + pte = memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); pmd_populate_kernel(&init_mm, pmd, pte); BUG_ON(pte != pte_offset_kernel(pmd, 0)); } diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c index 3df960c137f7..a67dd67f10c8 100644 --- a/arch/sparc/kernel/prom_32.c +++ b/arch/sparc/kernel/prom_32.c @@ -28,9 +28,7 @@ void * __init prom_early_alloc(unsigned long size) { void *ret; - ret = memblock_alloc(size, SMP_CACHE_BYTES); - if (!ret) - panic("%s: Failed to allocate %lu bytes\n", __func__, size); + ret = memblock_alloc_or_panic(size, SMP_CACHE_BYTES); prom_early_allocated += size; diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c index e3a72c884b86..dd32711022f5 100644 --- a/arch/sparc/mm/srmmu.c +++ b/arch/sparc/mm/srmmu.c @@ -277,19 +277,13 @@ static void __init srmmu_nocache_init(void) bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT; - srmmu_nocache_pool = memblock_alloc(srmmu_nocache_size, + srmmu_nocache_pool = memblock_alloc_or_panic(srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX); - if (!srmmu_nocache_pool) - panic("%s: Failed to allocate %lu bytes align=0x%x\n", - __func__, srmmu_nocache_size, SRMMU_NOCACHE_ALIGN_MAX); memset(srmmu_nocache_pool, 0, srmmu_nocache_size); srmmu_nocache_bitmap = - memblock_alloc(BITS_TO_LONGS(bitmap_bits) * sizeof(long), + memblock_alloc_or_panic(BITS_TO_LONGS(bitmap_bits) * sizeof(long), SMP_CACHE_BYTES); - if (!srmmu_nocache_bitmap) - panic("%s: Failed to allocate %zu bytes\n", __func__, - BITS_TO_LONGS(bitmap_bits) * sizeof(long)); bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits); srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE); @@ -452,9 +446,7 @@ static void __init sparc_context_init(int numctx) unsigned long size; size = numctx * sizeof(struct ctx_list); - ctx_list_pool = memblock_alloc(size, SMP_CACHE_BYTES); - if (!ctx_list_pool) - panic("%s: Failed to allocate %lu bytes\n", __func__, size); + ctx_list_pool = memblock_alloc_or_panic(size, SMP_CACHE_BYTES); for (ctx = 0; ctx < numctx; ctx++) { struct ctx_list *clist; diff --git a/arch/um/drivers/net_kern.c b/arch/um/drivers/net_kern.c index 75d04fb4994a..d5a9c5aabaec 100644 --- a/arch/um/drivers/net_kern.c +++ b/arch/um/drivers/net_kern.c @@ -636,10 +636,7 @@ static int __init eth_setup(char *str) return 1; } - new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); - if (!new) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(*new)); + new = memblock_alloc_or_panic(sizeof(*new), SMP_CACHE_BYTES); INIT_LIST_HEAD(&new->list); new->index = n; diff --git a/arch/um/drivers/vector_kern.c b/arch/um/drivers/vector_kern.c index 64c09db392c1..85b129e2b70b 100644 --- a/arch/um/drivers/vector_kern.c +++ b/arch/um/drivers/vector_kern.c @@ -1694,10 +1694,7 @@ static int __init vector_setup(char *str) str, error); return 1; } - new = memblock_alloc(sizeof(*new), SMP_CACHE_BYTES); - if (!new) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(*new)); + new = memblock_alloc_or_panic(sizeof(*new), SMP_CACHE_BYTES); INIT_LIST_HEAD(&new->list); new->unit = n; new->arguments = str; diff --git a/arch/um/kernel/load_file.c b/arch/um/kernel/load_file.c index 5cecd0e291fb..cb9d178ab7d8 100644 --- a/arch/um/kernel/load_file.c +++ b/arch/um/kernel/load_file.c @@ -48,9 +48,7 @@ void *uml_load_file(const char *filename, unsigned long long *size) return NULL; } - area = memblock_alloc(*size, SMP_CACHE_BYTES); - if (!area) - panic("%s: Failed to allocate %llu bytes\n", __func__, *size); + 
area = memblock_alloc_or_panic(*size, SMP_CACHE_BYTES); if (__uml_load_file(filename, area, *size)) { memblock_free(area, *size); diff --git a/arch/x86/coco/sev/core.c b/arch/x86/coco/sev/core.c index c5b0148b8c0a..a3c9b7c67640 100644 --- a/arch/x86/coco/sev/core.c +++ b/arch/x86/coco/sev/core.c @@ -1572,9 +1572,7 @@ static void __init alloc_runtime_data(int cpu) struct svsm_ca *caa; /* Allocate the SVSM CA page if an SVSM is present */ - caa = memblock_alloc(sizeof(*caa), PAGE_SIZE); - if (!caa) - panic("Can't allocate SVSM CA page\n"); + caa = memblock_alloc_or_panic(sizeof(*caa), PAGE_SIZE); per_cpu(svsm_caa, cpu) = caa; per_cpu(svsm_caa_pa, cpu) = __pa(caa); diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 3a44a9dc3fb7..7c15d6e83c37 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -911,11 +911,8 @@ static int __init acpi_parse_hpet(struct acpi_table_header *table) * the resource tree during the lateinit timeframe. */ #define HPET_RESOURCE_NAME_SIZE 9 - hpet_res = memblock_alloc(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE, + hpet_res = memblock_alloc_or_panic(sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE, SMP_CACHE_BYTES); - if (!hpet_res) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(*hpet_res) + HPET_RESOURCE_NAME_SIZE); hpet_res->name = (void *)&hpet_res[1]; hpet_res->flags = IORESOURCE_MEM; diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 1029ea4ac8ba..a57d3fa7c6b6 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2503,9 +2503,7 @@ static struct resource * __init ioapic_setup_resources(void) n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource); n *= nr_ioapics; - mem = memblock_alloc(n, SMP_CACHE_BYTES); - if (!mem) - panic("%s: Failed to allocate %lu bytes\n", __func__, n); + mem = memblock_alloc_or_panic(n, SMP_CACHE_BYTES); res = (void *)mem; mem += sizeof(struct resource) * nr_ioapics; @@ -2564,11 +2562,8 @@ void __init io_apic_init_mappings(void) #ifdef CONFIG_X86_32 fake_ioapic_page: #endif - ioapic_phys = (unsigned long)memblock_alloc(PAGE_SIZE, + ioapic_phys = (unsigned long)memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); - if (!ioapic_phys) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, PAGE_SIZE, PAGE_SIZE); ioapic_phys = __pa(ioapic_phys); } io_apic_set_fixmap(idx, ioapic_phys); diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 4893d30ce438..82b96ed9890a 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -1146,11 +1146,8 @@ void __init e820__reserve_resources(void) struct resource *res; u64 end; - res = memblock_alloc(sizeof(*res) * e820_table->nr_entries, + res = memblock_alloc_or_panic(sizeof(*res) * e820_table->nr_entries, SMP_CACHE_BYTES); - if (!res) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(*res) * e820_table->nr_entries); e820_res = res; for (i = 0; i < e820_table->nr_entries; i++) { diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c index 74ebd6882690..cf5dca2dbb91 100644 --- a/arch/x86/platform/olpc/olpc_dt.c +++ b/arch/x86/platform/olpc/olpc_dt.c @@ -136,11 +136,7 @@ void * __init prom_early_alloc(unsigned long size) * fast enough on the platforms we care about while minimizing * wasted bootmem) and hand off chunks of it to callers. 
*/ - res = memblock_alloc(chunk_size, SMP_CACHE_BYTES); - if (!res) - panic("%s: Failed to allocate %zu bytes\n", __func__, - chunk_size); - BUG_ON(!res); + res = memblock_alloc_or_panic(chunk_size, SMP_CACHE_BYTES); prom_early_allocated += chunk_size; memset(res, 0, chunk_size); free_mem = chunk_size; diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index b52d3e17e2c1..56914e21e303 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -178,13 +178,7 @@ static void p2m_init_identity(unsigned long *p2m, unsigned long pfn) static void * __ref alloc_p2m_page(void) { if (unlikely(!slab_is_available())) { - void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE); - - if (!ptr) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, PAGE_SIZE, PAGE_SIZE); - - return ptr; + return memblock_alloc_or_panic(PAGE_SIZE, PAGE_SIZE); } return (void *)__get_free_page(GFP_KERNEL); diff --git a/arch/xtensa/mm/kasan_init.c b/arch/xtensa/mm/kasan_init.c index f00d122aa806..f39c4d83173a 100644 --- a/arch/xtensa/mm/kasan_init.c +++ b/arch/xtensa/mm/kasan_init.c @@ -39,11 +39,7 @@ static void __init populate(void *start, void *end) unsigned long i, j; unsigned long vaddr = (unsigned long)start; pmd_t *pmd = pmd_off_k(vaddr); - pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); - - if (!pte) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, n_pages * sizeof(pte_t), PAGE_SIZE); + pte_t *pte = memblock_alloc_or_panic(n_pages * sizeof(pte_t), PAGE_SIZE); pr_debug("%s: %p - %p\n", __func__, start, end); diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index f2117fef7c7d..9c75dcc9a534 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -449,10 +449,7 @@ void __init omap2_clk_legacy_provider_init(int index, void __iomem *mem) { struct clk_iomap *io; - io = memblock_alloc(sizeof(*io), SMP_CACHE_BYTES); - if (!io) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(*io)); + io = memblock_alloc_or_panic(sizeof(*io), SMP_CACHE_BYTES); io->mem = mem; diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index a01bc5090cdf..a1534cc6c641 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -492,11 +492,7 @@ int __init smu_init (void) goto fail_np; } - smu = memblock_alloc(sizeof(struct smu_device), SMP_CACHE_BYTES); - if (!smu) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(struct smu_device)); - + smu = memblock_alloc_or_panic(sizeof(struct smu_device), SMP_CACHE_BYTES); spin_lock_init(&smu->lock); INIT_LIST_HEAD(&smu->cmd_list); INIT_LIST_HEAD(&smu->cmd_i2c_list); diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 0121100372b4..2eb718fbeffd 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -1126,13 +1126,7 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size) static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) { - void *ptr = memblock_alloc(size, align); - - if (!ptr) - panic("%s: Failed to allocate %llu bytes align=0x%llx\n", - __func__, size, align); - - return ptr; + return memblock_alloc_or_panic(size, align); } bool __init early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys) diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 438fd70fa995..6e8561dba537 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -3666,13 +3666,7 @@ static struct device_node *overlay_base_root; static void * __init dt_alloc_memory(u64 size, u64 align) { - void *ptr = memblock_alloc(size, align); - - if (!ptr) - panic("%s: Failed to 
allocate %llu bytes align=0x%llx\n", - __func__, size, align); - - return ptr; + return memblock_alloc_or_panic(size, align); } /* diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 673d5cae7c81..dee628350cd1 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -417,6 +417,12 @@ static __always_inline void *memblock_alloc(phys_addr_t size, phys_addr_t align) MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); } +void *__memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align, + const char *func); + +#define memblock_alloc_or_panic(size, align) \ + __memblock_alloc_or_panic(size, align, __func__) + static inline void *memblock_alloc_raw(phys_addr_t size, phys_addr_t align) { diff --git a/init/main.c b/init/main.c index 00fac1170294..4bae539ebc05 100644 --- a/init/main.c +++ b/init/main.c @@ -640,15 +640,11 @@ static void __init setup_command_line(char *command_line) len = xlen + strlen(boot_command_line) + ilen + 1; - saved_command_line = memblock_alloc(len, SMP_CACHE_BYTES); - if (!saved_command_line) - panic("%s: Failed to allocate %zu bytes\n", __func__, len); + saved_command_line = memblock_alloc_or_panic(len, SMP_CACHE_BYTES); len = xlen + strlen(command_line) + 1; - static_command_line = memblock_alloc(len, SMP_CACHE_BYTES); - if (!static_command_line) - panic("%s: Failed to allocate %zu bytes\n", __func__, len); + static_command_line = memblock_alloc_or_panic(len, SMP_CACHE_BYTES); if (xlen) { /* @@ -1145,16 +1141,10 @@ static int __init initcall_blacklist(char *str) str_entry = strsep(&str, ","); if (str_entry) { pr_debug("blacklisting initcall %s\n", str_entry); - entry = memblock_alloc(sizeof(*entry), + entry = memblock_alloc_or_panic(sizeof(*entry), SMP_CACHE_BYTES); - if (!entry) - panic("%s: Failed to allocate %zu bytes\n", - __func__, sizeof(*entry)); - entry->buf = memblock_alloc(strlen(str_entry) + 1, + entry->buf = memblock_alloc_or_panic(strlen(str_entry) + 1, SMP_CACHE_BYTES); - if (!entry->buf) - panic("%s: Failed to allocate %zu bytes\n", - __func__, strlen(str_entry) + 1); strcpy(entry->buf, str_entry); list_add(&entry->next, &blacklisted_initcalls); } diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 30894d8f0a78..c9fb559a6399 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1011,11 +1011,8 @@ void __init register_nosave_region(unsigned long start_pfn, unsigned long end_pf } } /* This allocation cannot fail */ - region = memblock_alloc(sizeof(struct nosave_region), + region = memblock_alloc_or_panic(sizeof(struct nosave_region), SMP_CACHE_BYTES); - if (!region) - panic("%s: Failed to allocate %zu bytes\n", __func__, - sizeof(struct nosave_region)); region->start_pfn = start_pfn; region->end_pfn = end_pfn; list_add_tail(®ion->list, &nosave_regions); diff --git a/lib/cpumask.c b/lib/cpumask.c index e77ee9d46f71..57274ba8b6d9 100644 --- a/lib/cpumask.c +++ b/lib/cpumask.c @@ -83,10 +83,7 @@ EXPORT_SYMBOL(alloc_cpumask_var_node); */ void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask) { - *mask = memblock_alloc(cpumask_size(), SMP_CACHE_BYTES); - if (!*mask) - panic("%s: Failed to allocate %u bytes\n", __func__, - cpumask_size()); + *mask = memblock_alloc_or_panic(cpumask_size(), SMP_CACHE_BYTES); } /** diff --git a/mm/kmsan/shadow.c b/mm/kmsan/shadow.c index 9c58f081d84f..1bb505a08415 100644 --- a/mm/kmsan/shadow.c +++ b/mm/kmsan/shadow.c @@ -280,12 +280,8 @@ void __init kmsan_init_alloc_meta_for_range(void *start, void *end) start = (void *)PAGE_ALIGN_DOWN((u64)start); size = 
PAGE_ALIGN((u64)end - (u64)start); - shadow = memblock_alloc(size, PAGE_SIZE); - origin = memblock_alloc(size, PAGE_SIZE); - - if (!shadow || !origin) - panic("%s: Failed to allocate metadata memory for early boot range of size %llu", - __func__, size); + shadow = memblock_alloc_or_panic(size, PAGE_SIZE); + origin = memblock_alloc_or_panic(size, PAGE_SIZE); for (u64 addr = 0; addr < size; addr += PAGE_SIZE) { page = virt_to_page_or_null((char *)start + addr); diff --git a/mm/memblock.c b/mm/memblock.c index 095c18b5c430..95af35fd1389 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -1691,6 +1691,26 @@ void * __init memblock_alloc_try_nid( return ptr; } +/** + * __memblock_alloc_or_panic - Try to allocate memory and panic on failure + * @size: size of memory block to be allocated in bytes + * @align: alignment of the region and block's size + * @func: caller func name + * + * This function attempts to allocate memory using memblock_alloc, + * and in case of failure, it calls panic with the formatted message. + * This function should not be used directly, please use the macro memblock_alloc_or_panic. + */ +void *__init __memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align, + const char *func) +{ + void *addr = memblock_alloc(size, align); + + if (unlikely(!addr)) + panic("%s: Failed to allocate %pap bytes\n", func, &size); + return addr; +} + /** * memblock_free_late - free pages directly to buddy allocator * @base: phys starting address of the boot memory block diff --git a/mm/numa.c b/mm/numa.c index e2eec07707d1..f1787d7713a6 100644 --- a/mm/numa.c +++ b/mm/numa.c @@ -37,13 +37,7 @@ void __init alloc_node_data(int nid) void __init alloc_offline_node_data(int nid) { pg_data_t *pgdat; - - pgdat = memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); - if (!pgdat) - panic("Cannot allocate %zuB for node %d.\n", - sizeof(*pgdat), nid); - - node_data[nid] = pgdat; + node_data[nid] = memblock_alloc_or_panic(sizeof(*pgdat), SMP_CACHE_BYTES); } /* Stub functions: */ diff --git a/mm/percpu.c b/mm/percpu.c index d8dd31a2e407..ac61e3fc5f15 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1359,10 +1359,7 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, /* allocate chunk */ alloc_size = struct_size(chunk, populated, BITS_TO_LONGS(region_size >> PAGE_SHIFT)); - chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES); - if (!chunk) - panic("%s: Failed to allocate %zu bytes\n", __func__, - alloc_size); + chunk = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); INIT_LIST_HEAD(&chunk->list); @@ -1374,24 +1371,14 @@ static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr, region_bits = pcpu_chunk_map_bits(chunk); alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]); - chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); - if (!chunk->alloc_map) - panic("%s: Failed to allocate %zu bytes\n", __func__, - alloc_size); + chunk->alloc_map = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); alloc_size = BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]); - chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); - if (!chunk->bound_map) - panic("%s: Failed to allocate %zu bytes\n", __func__, - alloc_size); + chunk->bound_map = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]); - chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES); - if (!chunk->md_blocks) - panic("%s: Failed to allocate %zu bytes\n", __func__, - 
alloc_size); - + chunk->md_blocks = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); #ifdef NEED_PCPUOBJ_EXT /* first chunk is free to use */ chunk->obj_exts = NULL; @@ -2595,28 +2582,16 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, /* process group information and build config tables accordingly */ alloc_size = ai->nr_groups * sizeof(group_offsets[0]); - group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES); - if (!group_offsets) - panic("%s: Failed to allocate %zu bytes\n", __func__, - alloc_size); + group_offsets = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); alloc_size = ai->nr_groups * sizeof(group_sizes[0]); - group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES); - if (!group_sizes) - panic("%s: Failed to allocate %zu bytes\n", __func__, - alloc_size); + group_sizes = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); alloc_size = nr_cpu_ids * sizeof(unit_map[0]); - unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES); - if (!unit_map) - panic("%s: Failed to allocate %zu bytes\n", __func__, - alloc_size); + unit_map = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); alloc_size = nr_cpu_ids * sizeof(unit_off[0]); - unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES); - if (!unit_off) - panic("%s: Failed to allocate %zu bytes\n", __func__, - alloc_size); + unit_off = memblock_alloc_or_panic(alloc_size, SMP_CACHE_BYTES); for (cpu = 0; cpu < nr_cpu_ids; cpu++) unit_map[cpu] = UINT_MAX; @@ -2685,12 +2660,9 @@ void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, pcpu_free_slot = pcpu_sidelined_slot + 1; pcpu_to_depopulate_slot = pcpu_free_slot + 1; pcpu_nr_slots = pcpu_to_depopulate_slot + 1; - pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots * + pcpu_chunk_lists = memblock_alloc_or_panic(pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]), SMP_CACHE_BYTES); - if (!pcpu_chunk_lists) - panic("%s: Failed to allocate %zu bytes\n", __func__, - pcpu_nr_slots * sizeof(pcpu_chunk_lists[0])); for (i = 0; i < pcpu_nr_slots; i++) INIT_LIST_HEAD(&pcpu_chunk_lists[i]); @@ -3155,25 +3127,19 @@ void __init __weak pcpu_populate_pte(unsigned long addr) pmd_t *pmd; if (pgd_none(*pgd)) { - p4d = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE); - if (!p4d) - goto err_alloc; + p4d = memblock_alloc_or_panic(P4D_TABLE_SIZE, P4D_TABLE_SIZE); pgd_populate(&init_mm, pgd, p4d); } p4d = p4d_offset(pgd, addr); if (p4d_none(*p4d)) { - pud = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE); - if (!pud) - goto err_alloc; + pud = memblock_alloc_or_panic(PUD_TABLE_SIZE, PUD_TABLE_SIZE); p4d_populate(&init_mm, p4d, pud); } pud = pud_offset(p4d, addr); if (pud_none(*pud)) { - pmd = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE); - if (!pmd) - goto err_alloc; + pmd = memblock_alloc_or_panic(PMD_TABLE_SIZE, PMD_TABLE_SIZE); pud_populate(&init_mm, pud, pmd); } @@ -3181,16 +3147,11 @@ void __init __weak pcpu_populate_pte(unsigned long addr) if (!pmd_present(*pmd)) { pte_t *new; - new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE); - if (!new) - goto err_alloc; + new = memblock_alloc_or_panic(PTE_TABLE_SIZE, PTE_TABLE_SIZE); pmd_populate_kernel(&init_mm, pmd, new); } return; - -err_alloc: - panic("%s: Failed to allocate memory\n", __func__); } /** @@ -3237,10 +3198,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t /* unaligned allocations can't be freed, round up to page size */ pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * sizeof(pages[0])); - pages = memblock_alloc(pages_size, SMP_CACHE_BYTES); - if (!pages) - 
panic("%s: Failed to allocate %zu bytes\n", __func__, - pages_size); + pages = memblock_alloc_or_panic(pages_size, SMP_CACHE_BYTES); /* allocate pages */ j = 0; diff --git a/mm/sparse.c b/mm/sparse.c index 13b6624d3562..133b033d0cba 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -257,10 +257,7 @@ static void __init memblocks_present(void) size = sizeof(struct mem_section *) * NR_SECTION_ROOTS; align = 1 << (INTERNODE_CACHE_SHIFT); - mem_section = memblock_alloc(size, align); - if (!mem_section) - panic("%s: Failed to allocate %lu bytes align=0x%lx\n", - __func__, size, align); + mem_section = memblock_alloc_or_panic(size, align); } #endif From 813fd59c168fc4d659fa5dde84f8b182a6d30663 Mon Sep 17 00:00:00 2001 From: Guo Weikang Date: Thu, 9 Jan 2025 11:31:36 +0800 Subject: [PATCH 243/504] arch/s390: save_area_alloc default failure behavior changed to panic Now with the memblock_alloc_or_panic interface, save_area_alloc no longer needs to handle panic itself. Link: https://lkml.kernel.org/r/20250109033136.2845676-1-guoweikang.kernel@gmail.com Link: https://lore.kernel.org/lkml/Z2fknmnNtiZbCc7x@kernel.org/ Signed-off-by: Guo Weikang Acked-by: Mike Rapoport (Microsoft) Cc: Alexander Gordeev Cc: Geert Uytterhoeven Signed-off-by: Andrew Morton --- arch/s390/kernel/crash_dump.c | 4 +--- arch/s390/kernel/numa.c | 3 +-- arch/s390/kernel/smp.c | 4 ---- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c index cd0c93a8fb8b..dc7328fd2ec4 100644 --- a/arch/s390/kernel/crash_dump.c +++ b/arch/s390/kernel/crash_dump.c @@ -63,9 +63,7 @@ struct save_area * __init save_area_alloc(bool is_boot_cpu) { struct save_area *sa; - sa = memblock_alloc(sizeof(*sa), 8); - if (!sa) - return NULL; + sa = memblock_alloc_or_panic(sizeof(*sa), 8); if (is_boot_cpu) list_add(&sa->list, &dump_save_areas); diff --git a/arch/s390/kernel/numa.c b/arch/s390/kernel/numa.c index a33e20f73330..2fc40f97c0ad 100644 --- a/arch/s390/kernel/numa.c +++ b/arch/s390/kernel/numa.c @@ -21,9 +21,8 @@ void __init numa_setup(void) nodes_clear(node_possible_map); node_set(0, node_possible_map); node_set_online(0); - for (nid = 0; nid < MAX_NUMNODES; nid++) { + for (nid = 0; nid < MAX_NUMNODES; nid++) NODE_DATA(nid) = memblock_alloc_or_panic(sizeof(pg_data_t), 8); - } NODE_DATA(0)->node_spanned_pages = memblock_end_of_DRAM() >> PAGE_SHIFT; NODE_DATA(0)->node_id = 0; } diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index d77aaefb59bd..7b08399b0846 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -611,8 +611,6 @@ void __init smp_save_dump_ipl_cpu(void) if (!dump_available()) return; sa = save_area_alloc(true); - if (!sa) - panic("could not allocate memory for boot CPU save area\n"); regs = memblock_alloc_or_panic(512, 8); copy_oldmem_kernel(regs, __LC_FPREGS_SAVE_AREA, 512); save_area_add_regs(sa, regs); @@ -646,8 +644,6 @@ void __init smp_save_dump_secondary_cpus(void) SIGP_CC_NOT_OPERATIONAL) continue; sa = save_area_alloc(false); - if (!sa) - panic("could not allocate memory for save area\n"); __pcpu_sigp_relax(addr, SIGP_STORE_STATUS_AT_ADDRESS, __pa(page)); save_area_add_regs(sa, page); if (cpu_has_vx()) { From 4b7d6efb13e7259317051908550ddf72906673d3 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Mon, 30 Dec 2024 21:35:32 -0700 Subject: [PATCH 244/504] mm/mglru: clean up workingset Patch series "mm/mglru: performance optimizations", v4. This series improves performance for some previously reported test cases. 
Most of the code changes gathered here have been floating on the mailing list [1][2]. They are now properly organized and have gone through various benchmarks on client and server devices, including Android, FIO, memcached, multiple VMs and MongoDB. In addition to the syzbot regressions fixed in v2 [3] and v3 [4], this version fixes two more regressions: one reported by Oliver Sang [5] and the other by Barry Song. [1] https://lore.kernel.org/CAOUHufahuWcKf5f1Sg3emnqX+cODuR=2TQo7T4Gr-QYLujn4RA@mail.gmail.com/ [2] https://lore.kernel.org/CAOUHufawNerxqLm7L9Yywp3HJFiYVrYO26ePUb1jH-qxNGWzyA@mail.gmail.com/ [3] https://lore.kernel.org/67294349.050a0220.701a.0010.GAE@google.com/ [4] https://lore.kernel.org/67549eca.050a0220.2477f.001b.GAE@google.com/ [5] https://lore.kernel.org/202412231601.f1eb8f84-lkp@intel.com/ This patch (of 7): Move VM_BUG_ON_FOLIO() to cover both the default and MGLRU paths. Also use a pair of rcu_read_lock() and rcu_read_unlock() within each path, to improve readability. This change should not have any side effects. Link: https://lkml.kernel.org/r/20241231043538.4075764-1-yuzhao@google.com Link: https://lkml.kernel.org/r/20241231043538.4075764-2-yuzhao@google.com Signed-off-by: Yu Zhao Tested-by: Kalesh Singh Cc: Barry Song Cc: Bharata B Rao Cc: David Stevens Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/workingset.c | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/mm/workingset.c b/mm/workingset.c index a4705e196545..ad181d1b8cf1 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -428,17 +428,17 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset, struct pglist_data *pgdat; unsigned long eviction; - rcu_read_lock(); - if (lru_gen_enabled()) { - bool recent = lru_gen_test_recent(shadow, file, - &eviction_lruvec, &eviction, workingset); + bool recent; + rcu_read_lock(); + recent = lru_gen_test_recent(shadow, file, &eviction_lruvec, + &eviction, workingset); rcu_read_unlock(); return recent; } - + rcu_read_lock(); unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset); eviction <<= bucket_order; @@ -459,14 +459,12 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset, * configurations instead. */ eviction_memcg = mem_cgroup_from_id(memcgid); - if (!mem_cgroup_disabled() && - (!eviction_memcg || !mem_cgroup_tryget(eviction_memcg))) { - rcu_read_unlock(); - return false; - } - + if (!mem_cgroup_tryget(eviction_memcg)) + eviction_memcg = NULL; rcu_read_unlock(); + if (!mem_cgroup_disabled() && !eviction_memcg) + return false; /* * Flush stats (and potentially sleep) outside the RCU read section. * @@ -544,6 +542,8 @@ void workingset_refault(struct folio *folio, void *shadow) bool workingset; long nr; + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); + if (lru_gen_enabled()) { lru_gen_refault(folio, shadow); return; @@ -558,7 +558,6 @@ void workingset_refault(struct folio *folio, void *shadow) * is actually experiencing the refault event. Make sure the folio is * locked to guarantee folio_memcg() stability throughout. */ - VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); nr = folio_nr_pages(folio); memcg = folio_memcg(folio); pgdat = folio_pgdat(folio); From e50ac92961622328883d68e77ad359fa0840118a Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Mon, 30 Dec 2024 21:35:33 -0700 Subject: [PATCH 245/504] mm/mglru: optimize deactivation Do not shuffle a folio in the deactivation paths if it is already in the oldest generation. This reduces the LRU lock contention.
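To make the bailout concrete, here is a condensed sketch of the check this change relies on (modeled on the lru_gen_clear_refs() helper added below; simplified, not the exact kernel code):

	/*
	 * Sketch: deactivation can skip the LRU lock when the folio is already
	 * in the oldest generation of its type, because moving it there again
	 * would be a no-op.
	 */
	static bool already_in_oldest_gen(struct folio *folio)
	{
		struct lru_gen_folio *lrugen = &folio_lruvec(folio)->lrugen;
		int gen = folio_lru_gen(folio);
		int type = folio_is_file_lru(folio);

		if (gen < 0)	/* not on an MGLRU list */
			return true;

		return gen == lru_gen_from_seq(READ_ONCE(lrugen->min_seq[type]));
	}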
Before this patch, the contention is reproducible by FIO, e.g., fio -filename=/dev/nvme1n1p2 -direct=0 -thread -size=1024G \ -rwmixwrite=30 --norandommap --randrepeat=0 -ioengine=sync \ -bs=4k -numjobs=400 -runtime=25000 --time_based \ -group_reporting -name=mglru 98.96%--_raw_spin_lock_irqsave folio_lruvec_lock_irqsave | --98.78%--folio_batch_move_lru | --98.63%--deactivate_file_folio mapping_try_invalidate invalidate_mapping_pages invalidate_bdev blkdev_common_ioctl blkdev_ioctl After this patch, deactivate_file_folio() bails out early without taking the LRU lock. A side effect is that a folio can be left at the head of the oldest generation, rather than the tail. If reclaim happens at the same time, it cannot reclaim this folio immediately. Since there is no known correlation between truncation and reclaim, this side effect is considered insignificant. Link: https://lkml.kernel.org/r/20241231043538.4075764-3-yuzhao@google.com Reported-by: Bharata B Rao Closes: https://lore.kernel.org/CAOUHufawNerxqLm7L9Yywp3HJFiYVrYO26ePUb1jH-qxNGWzyA@mail.gmail.com/ Signed-off-by: Yu Zhao Tested-by: Kalesh Singh Cc: Barry Song Cc: David Stevens Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/swap.c | 48 +++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 7 deletions(-) diff --git a/mm/swap.c b/mm/swap.c index 3a01acfd5a89..649ef7f2b74b 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -379,7 +379,8 @@ static void __lru_cache_activate_folio(struct folio *folio) } #ifdef CONFIG_LRU_GEN -static void folio_inc_refs(struct folio *folio) + +static void lru_gen_inc_refs(struct folio *folio) { unsigned long new_flags, old_flags = READ_ONCE(folio->flags); @@ -406,10 +407,34 @@ static void folio_inc_refs(struct folio *folio) new_flags |= old_flags & ~LRU_REFS_MASK; } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); } -#else -static void folio_inc_refs(struct folio *folio) + +static bool lru_gen_clear_refs(struct folio *folio) +{ + struct lru_gen_folio *lrugen; + int gen = folio_lru_gen(folio); + int type = folio_is_file_lru(folio); + + if (gen < 0) + return true; + + set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); + + lrugen = &folio_lruvec(folio)->lrugen; + /* whether can do without shuffling under the LRU lock */ + return gen == lru_gen_from_seq(READ_ONCE(lrugen->min_seq[type])); +} + +#else /* !CONFIG_LRU_GEN */ + +static void lru_gen_inc_refs(struct folio *folio) { } + +static bool lru_gen_clear_refs(struct folio *folio) +{ + return false; +} + #endif /* CONFIG_LRU_GEN */ /** @@ -428,7 +453,7 @@ static void folio_inc_refs(struct folio *folio) void folio_mark_accessed(struct folio *folio) { if (lru_gen_enabled()) { - folio_inc_refs(folio); + lru_gen_inc_refs(folio); return; } @@ -524,7 +549,7 @@ void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma) */ static void lru_deactivate_file(struct lruvec *lruvec, struct folio *folio) { - bool active = folio_test_active(folio); + bool active = folio_test_active(folio) || lru_gen_enabled(); long nr_pages = folio_nr_pages(folio); if (folio_test_unevictable(folio)) @@ -589,7 +614,10 @@ static void lru_lazyfree(struct lruvec *lruvec, struct folio *folio) lruvec_del_folio(lruvec, folio); folio_clear_active(folio); - folio_clear_referenced(folio); + if (lru_gen_enabled()) + lru_gen_clear_refs(folio); + else + folio_clear_referenced(folio); /* * Lazyfree folios are clean anonymous folios. 
They have * the swapbacked flag cleared, to distinguish them from normal @@ -657,6 +685,9 @@ void deactivate_file_folio(struct folio *folio) if (folio_test_unevictable(folio)) return; + if (lru_gen_enabled() && lru_gen_clear_refs(folio)) + return; + folio_batch_add_and_move(folio, lru_deactivate_file, true); } @@ -670,7 +701,10 @@ void deactivate_file_folio(struct folio *folio) */ void folio_deactivate(struct folio *folio) { - if (folio_test_unevictable(folio) || !(folio_test_active(folio) || lru_gen_enabled())) + if (folio_test_unevictable(folio)) + return; + + if (lru_gen_enabled() ? lru_gen_clear_refs(folio) : !folio_test_active(folio)) return; folio_batch_add_and_move(folio, lru_deactivate, true); From d91ee134eb4523fd64d1b59a451cb5492f6f3e88 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Mon, 30 Dec 2024 21:35:34 -0700 Subject: [PATCH 246/504] mm/mglru: rework aging feedback The aging feedback is based on both the number of generations and the distribution of folios in each generation. The number of generations is currently the distance between max_seq and anon min_seq. This is because anon min_seq is not allowed to move past file min_seq. The rationale for that is that file is always evictable whereas anon is not. However, for use cases where anon is a lot cheaper than file: 1. Anon in the second oldest generation can be a better choice than file in the oldest generation. 2. A large amount of file in the oldest generation can skew the distribution, making should_run_aging() return a false negative. Allow anon and file min_seq to move independently, and use solely the number of generations as the feedback for aging. Specifically, when both anon and file are evictable, anon min_seq can now be greater than file min_seq, and therefore the number of generations becomes the distance between max_seq and min(min_seq[0],min_seq[1]). And should_run_aging() returns true if and only if the number of generations is less than MAX_NR_GENS. As the first step to the final optimization, this change by itself should not have userspace-visible effects beyond performance. The next two patches will take advantage of this change; the last patch in this series will better distribute folios across MAX_NR_GENS. Link: https://lkml.kernel.org/r/20241231043538.4075764-4-yuzhao@google.com Signed-off-by: Yu Zhao Reported-by: David Stevens Tested-by: Kalesh Singh Cc: Barry Song Cc: Bharata B Rao Cc: Kairui Song Signed-off-by: Andrew Morton --- include/linux/mmzone.h | 17 ++-- mm/vmscan.c | 200 ++++++++++++++++++----------------- 2 files changed, 96 insertions(+), 121 deletions(-) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index b36124145a16..8245ecb0400b 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -421,12 +421,11 @@ enum { /* * The youngest generation number is stored in max_seq for both anon and file * types as they are aged on an equal footing. The oldest generation numbers are - * stored in min_seq[] separately for anon and file types as clean file pages - * can be evicted regardless of swap constraints. - * - * Normally anon and file min_seq are in sync. But if swapping is constrained, - * e.g., out of swap space, file min_seq is allowed to advance and leave anon - * min_seq behind. + * stored in min_seq[] separately for anon and file types so that they can be + * incremented independently. Ideally min_seq[] are kept in sync when both anon + * and file types are evictable.
However, to adapt to situations like extreme + * swappiness, they are allowed to be out of sync by at most + * MAX_NR_GENS-MIN_NR_GENS-1. * * The number of pages in each generation is eventually consistent and therefore * can be transiently negative when reset_batch_size() is pending. @@ -446,8 +445,8 @@ struct lru_gen_folio { unsigned long avg_refaulted[ANON_AND_FILE][MAX_NR_TIERS]; /* the exponential moving average of evicted+protected */ unsigned long avg_total[ANON_AND_FILE][MAX_NR_TIERS]; - /* the first tier doesn't need protection, hence the minus one */ - unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS - 1]; + /* can only be modified under the LRU lock */ + unsigned long protected[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; /* can be modified without holding the LRU lock */ atomic_long_t evicted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; atomic_long_t refaulted[NR_HIST_GENS][ANON_AND_FILE][MAX_NR_TIERS]; @@ -498,7 +497,7 @@ struct lru_gen_mm_walk { int mm_stats[NR_MM_STATS]; /* total batched items */ int batched; - bool can_swap; + int swappiness; bool force_scan; }; diff --git a/mm/vmscan.c b/mm/vmscan.c index 4f669fc2bd7c..51dc162a1493 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2628,11 +2628,17 @@ static bool should_clear_pmd_young(void) READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \ } +#define evictable_min_seq(min_seq, swappiness) \ + min((min_seq)[!(swappiness)], (min_seq)[(swappiness) != MAX_SWAPPINESS]) + #define for_each_gen_type_zone(gen, type, zone) \ for ((gen) = 0; (gen) < MAX_NR_GENS; (gen)++) \ for ((type) = 0; (type) < ANON_AND_FILE; (type)++) \ for ((zone) = 0; (zone) < MAX_NR_ZONES; (zone)++) +#define for_each_evictable_type(type, swappiness) \ + for ((type) = !(swappiness); (type) <= ((swappiness) != MAX_SWAPPINESS); (type)++) + #define get_memcg_gen(seq) ((seq) % MEMCG_NR_GENS) #define get_memcg_bin(bin) ((bin) % MEMCG_NR_BINS) @@ -2678,10 +2684,16 @@ static int get_nr_gens(struct lruvec *lruvec, int type) static bool __maybe_unused seq_is_valid(struct lruvec *lruvec) { - /* see the comment on lru_gen_folio */ - return get_nr_gens(lruvec, LRU_GEN_FILE) >= MIN_NR_GENS && - get_nr_gens(lruvec, LRU_GEN_FILE) <= get_nr_gens(lruvec, LRU_GEN_ANON) && - get_nr_gens(lruvec, LRU_GEN_ANON) <= MAX_NR_GENS; + int type; + + for (type = 0; type < ANON_AND_FILE; type++) { + int n = get_nr_gens(lruvec, type); + + if (n < MIN_NR_GENS || n > MAX_NR_GENS) + return false; + } + + return true; } /****************************************************************************** @@ -3088,9 +3100,8 @@ static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain, pos->refaulted = lrugen->avg_refaulted[type][tier] + atomic_long_read(&lrugen->refaulted[hist][type][tier]); pos->total = lrugen->avg_total[type][tier] + + lrugen->protected[hist][type][tier] + atomic_long_read(&lrugen->evicted[hist][type][tier]); - if (tier) - pos->total += lrugen->protected[hist][type][tier - 1]; pos->gain = gain; } @@ -3117,17 +3128,15 @@ static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover) WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); sum = lrugen->avg_total[type][tier] + + lrugen->protected[hist][type][tier] + atomic_long_read(&lrugen->evicted[hist][type][tier]); - if (tier) - sum += lrugen->protected[hist][type][tier - 1]; WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); } if (clear) { atomic_long_set(&lrugen->refaulted[hist][type][tier], 0); atomic_long_set(&lrugen->evicted[hist][type][tier], 0); - if (tier) - 
WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0); + WRITE_ONCE(lrugen->protected[hist][type][tier], 0); } } } @@ -3262,7 +3271,7 @@ static int should_skip_vma(unsigned long start, unsigned long end, struct mm_wal return true; if (vma_is_anonymous(vma)) - return !walk->can_swap; + return !walk->swappiness; if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) return true; @@ -3272,7 +3281,10 @@ static int should_skip_vma(unsigned long start, unsigned long end, struct mm_wal return true; if (shmem_mapping(mapping)) - return !walk->can_swap; + return !walk->swappiness; + + if (walk->swappiness == MAX_SWAPPINESS) + return true; /* to exclude special mappings like dax, etc. */ return !mapping->a_ops->read_folio; @@ -3360,7 +3372,7 @@ static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned } static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg, - struct pglist_data *pgdat, bool can_swap) + struct pglist_data *pgdat) { struct folio *folio; @@ -3371,10 +3383,6 @@ static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg, if (folio_memcg(folio) != memcg) return NULL; - /* file VMAs can contain anon pages from COW */ - if (!folio_is_file_lru(folio) && !can_swap) - return NULL; - return folio; } @@ -3430,7 +3438,7 @@ restart: if (pfn == -1) continue; - folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); + folio = get_pfn_folio(pfn, memcg, pgdat); if (!folio) continue; @@ -3515,7 +3523,7 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area if (pfn == -1) goto next; - folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); + folio = get_pfn_folio(pfn, memcg, pgdat); if (!folio) goto next; @@ -3727,22 +3735,26 @@ static void clear_mm_walk(void) kfree(walk); } -static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap) +static bool inc_min_seq(struct lruvec *lruvec, int type, int swappiness) { int zone; int remaining = MAX_LRU_BATCH; struct lru_gen_folio *lrugen = &lruvec->lrugen; + int hist = lru_hist_from_seq(lrugen->min_seq[type]); int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); - if (type == LRU_GEN_ANON && !can_swap) + if (type ? 
swappiness == MAX_SWAPPINESS : !swappiness) goto done; - /* prevent cold/hot inversion if force_scan is true */ + /* prevent cold/hot inversion if the type is evictable */ for (zone = 0; zone < MAX_NR_ZONES; zone++) { struct list_head *head = &lrugen->folios[old_gen][type][zone]; while (!list_empty(head)) { struct folio *folio = lru_to_folio(head); + int refs = folio_lru_refs(folio); + int tier = lru_tier_from_refs(refs); + int delta = folio_nr_pages(folio); VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); @@ -3752,6 +3764,9 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, bool can_swap) new_gen = folio_inc_gen(lruvec, folio, false); list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); + WRITE_ONCE(lrugen->protected[hist][type][tier], + lrugen->protected[hist][type][tier] + delta); + if (!--remaining) return false; } @@ -3763,7 +3778,7 @@ done: return true; } -static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) +static bool try_to_inc_min_seq(struct lruvec *lruvec, int swappiness) { int gen, type, zone; bool success = false; @@ -3773,7 +3788,7 @@ static bool try_to_inc_min_seq(struct lruvec *lruvec, bool can_swap) VM_WARN_ON_ONCE(!seq_is_valid(lruvec)); /* find the oldest populated generation */ - for (type = !can_swap; type < ANON_AND_FILE; type++) { + for_each_evictable_type(type, swappiness) { while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { gen = lru_gen_from_seq(min_seq[type]); @@ -3789,13 +3804,17 @@ next: } /* see the comment on lru_gen_folio */ - if (can_swap) { - min_seq[LRU_GEN_ANON] = min(min_seq[LRU_GEN_ANON], min_seq[LRU_GEN_FILE]); - min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]); + if (swappiness && swappiness != MAX_SWAPPINESS) { + unsigned long seq = lrugen->max_seq - MIN_NR_GENS; + + if (min_seq[LRU_GEN_ANON] > seq && min_seq[LRU_GEN_FILE] < seq) + min_seq[LRU_GEN_ANON] = seq; + else if (min_seq[LRU_GEN_FILE] > seq && min_seq[LRU_GEN_ANON] < seq) + min_seq[LRU_GEN_FILE] = seq; } - for (type = !can_swap; type < ANON_AND_FILE; type++) { - if (min_seq[type] == lrugen->min_seq[type]) + for_each_evictable_type(type, swappiness) { + if (min_seq[type] <= lrugen->min_seq[type]) continue; reset_ctrl_pos(lruvec, type, true); @@ -3806,8 +3825,7 @@ next: return success; } -static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, - bool can_swap, bool force_scan) +static bool inc_max_seq(struct lruvec *lruvec, unsigned long seq, int swappiness) { bool success; int prev, next; @@ -3825,13 +3843,11 @@ restart: if (!success) goto unlock; - for (type = ANON_AND_FILE - 1; type >= 0; type--) { + for (type = 0; type < ANON_AND_FILE; type++) { if (get_nr_gens(lruvec, type) != MAX_NR_GENS) continue; - VM_WARN_ON_ONCE(!force_scan && (type == LRU_GEN_FILE || can_swap)); - - if (inc_min_seq(lruvec, type, can_swap)) + if (inc_min_seq(lruvec, type, swappiness)) continue; spin_unlock_irq(&lruvec->lru_lock); @@ -3875,7 +3891,7 @@ unlock: } static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq, - bool can_swap, bool force_scan) + int swappiness, bool force_scan) { bool success; struct lru_gen_mm_walk *walk; @@ -3886,7 +3902,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq, VM_WARN_ON_ONCE(seq > READ_ONCE(lrugen->max_seq)); if (!mm_state) - return inc_max_seq(lruvec, seq, can_swap, force_scan); + return inc_max_seq(lruvec, seq, swappiness); /* see the comment in iterate_mm_list() */ if (seq <= 
READ_ONCE(mm_state->seq)) @@ -3911,7 +3927,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq, walk->lruvec = lruvec; walk->seq = seq; - walk->can_swap = can_swap; + walk->swappiness = swappiness; walk->force_scan = force_scan; do { @@ -3921,7 +3937,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long seq, } while (mm); done: if (success) { - success = inc_max_seq(lruvec, seq, can_swap, force_scan); + success = inc_max_seq(lruvec, seq, swappiness); WARN_ON_ONCE(!success); } @@ -3962,13 +3978,13 @@ static bool lruvec_is_sizable(struct lruvec *lruvec, struct scan_control *sc) { int gen, type, zone; unsigned long total = 0; - bool can_swap = get_swappiness(lruvec, sc); + int swappiness = get_swappiness(lruvec, sc); struct lru_gen_folio *lrugen = &lruvec->lrugen; struct mem_cgroup *memcg = lruvec_memcg(lruvec); DEFINE_MAX_SEQ(lruvec); DEFINE_MIN_SEQ(lruvec); - for (type = !can_swap; type < ANON_AND_FILE; type++) { + for_each_evictable_type(type, swappiness) { unsigned long seq; for (seq = min_seq[type]; seq <= max_seq; seq++) { @@ -3988,6 +4004,7 @@ static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc { int gen; unsigned long birth; + int swappiness = get_swappiness(lruvec, sc); struct mem_cgroup *memcg = lruvec_memcg(lruvec); DEFINE_MIN_SEQ(lruvec); @@ -3997,8 +4014,7 @@ static bool lruvec_is_reclaimable(struct lruvec *lruvec, struct scan_control *sc if (!lruvec_is_sizable(lruvec, sc)) return false; - /* see the comment on lru_gen_folio */ - gen = lru_gen_from_seq(min_seq[LRU_GEN_FILE]); + gen = lru_gen_from_seq(evictable_min_seq(min_seq, swappiness)); birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); return time_is_before_jiffies(birth + min_ttl); @@ -4065,7 +4081,6 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw) unsigned long addr = pvmw->address; struct vm_area_struct *vma = pvmw->vma; struct folio *folio = pfn_folio(pvmw->pfn); - bool can_swap = !folio_is_file_lru(folio); struct mem_cgroup *memcg = folio_memcg(folio); struct pglist_data *pgdat = folio_pgdat(folio); struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); @@ -4118,7 +4133,7 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw) if (pfn == -1) continue; - folio = get_pfn_folio(pfn, memcg, pgdat, can_swap); + folio = get_pfn_folio(pfn, memcg, pgdat); if (!folio) continue; @@ -4334,8 +4349,8 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c gen = folio_inc_gen(lruvec, folio, false); list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); - WRITE_ONCE(lrugen->protected[hist][type][tier - 1], - lrugen->protected[hist][type][tier - 1] + delta); + WRITE_ONCE(lrugen->protected[hist][type][tier], + lrugen->protected[hist][type][tier] + delta); return true; } @@ -4534,7 +4549,6 @@ static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int sw { int i; int type; - int scanned; int tier = -1; DEFINE_MIN_SEQ(lruvec); @@ -4559,21 +4573,23 @@ static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int sw else type = get_type_to_scan(lruvec, swappiness, &tier); - for (i = !swappiness; i < ANON_AND_FILE; i++) { + for_each_evictable_type(i, swappiness) { + int scanned; + if (tier < 0) tier = get_tier_idx(lruvec, type); + *type_scanned = type; + scanned = scan_folios(lruvec, sc, type, tier, list); if (scanned) - break; + return scanned; type = !type; tier = -1; } - *type_scanned = type; - - return scanned; + return 0; } static int evict_folios(struct lruvec *lruvec, 
struct scan_control *sc, int swappiness) @@ -4589,6 +4605,7 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap struct reclaim_stat stat; struct lru_gen_mm_walk *walk; bool skip_retry = false; + struct lru_gen_folio *lrugen = &lruvec->lrugen; struct mem_cgroup *memcg = lruvec_memcg(lruvec); struct pglist_data *pgdat = lruvec_pgdat(lruvec); @@ -4598,7 +4615,7 @@ static int evict_folios(struct lruvec *lruvec, struct scan_control *sc, int swap scanned += try_to_inc_min_seq(lruvec, swappiness); - if (get_nr_gens(lruvec, !swappiness) == MIN_NR_GENS) + if (evictable_min_seq(lrugen->min_seq, swappiness) + MIN_NR_GENS > lrugen->max_seq) scanned = 0; spin_unlock_irq(&lruvec->lru_lock); @@ -4675,63 +4692,32 @@ retry: } static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, - bool can_swap, unsigned long *nr_to_scan) + int swappiness, unsigned long *nr_to_scan) { int gen, type, zone; - unsigned long old = 0; - unsigned long young = 0; - unsigned long total = 0; + unsigned long size = 0; struct lru_gen_folio *lrugen = &lruvec->lrugen; DEFINE_MIN_SEQ(lruvec); - /* whether this lruvec is completely out of cold folios */ - if (min_seq[!can_swap] + MIN_NR_GENS > max_seq) { - *nr_to_scan = 0; + *nr_to_scan = 0; + /* have to run aging, since eviction is not possible anymore */ + if (evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS > max_seq) return true; - } - for (type = !can_swap; type < ANON_AND_FILE; type++) { + for_each_evictable_type(type, swappiness) { unsigned long seq; for (seq = min_seq[type]; seq <= max_seq; seq++) { - unsigned long size = 0; - gen = lru_gen_from_seq(seq); for (zone = 0; zone < MAX_NR_ZONES; zone++) size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); - - total += size; - if (seq == max_seq) - young += size; - else if (seq + MIN_NR_GENS == max_seq) - old += size; } } - *nr_to_scan = total; - - /* - * The aging tries to be lazy to reduce the overhead, while the eviction - * stalls when the number of generations reaches MIN_NR_GENS. Hence, the - * ideal number of generations is MIN_NR_GENS+1. - */ - if (min_seq[!can_swap] + MIN_NR_GENS < max_seq) - return false; - - /* - * It's also ideal to spread pages out evenly, i.e., 1/(MIN_NR_GENS+1) - * of the total number of pages for each generation. A reasonable range - * for this average portion is [1/MIN_NR_GENS, 1/(MIN_NR_GENS+2)]. The - * aging cares about the upper bound of hot pages, while the eviction - * cares about the lower bound of cold pages. - */ - if (young * MIN_NR_GENS > total) - return true; - if (old * (MIN_NR_GENS + 2) < total) - return true; - - return false; + *nr_to_scan = size; + /* better to run aging even though eviction is still possible */ + return evictable_min_seq(min_seq, swappiness) + MIN_NR_GENS == max_seq; } /* @@ -4739,7 +4725,7 @@ static bool should_run_aging(struct lruvec *lruvec, unsigned long max_seq, * 1. Defer try_to_inc_max_seq() to workqueues to reduce latency for memcg * reclaim. 
-static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool can_swap) +static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, int swappiness) { bool success; unsigned long nr_to_scan; @@ -4749,7 +4735,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) return -1; - success = should_run_aging(lruvec, max_seq, can_swap, &nr_to_scan); + success = should_run_aging(lruvec, max_seq, swappiness, &nr_to_scan); /* try to scrape all its memory if this memcg was deleted */ if (nr_to_scan && !mem_cgroup_online(memcg)) @@ -4760,7 +4746,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool return nr_to_scan >> sc->priority; /* stop scanning this lruvec as it's low on cold folios */ - return try_to_inc_max_seq(lruvec, max_seq, can_swap, false) ? -1 : 0; + return try_to_inc_max_seq(lruvec, max_seq, swappiness, false) ? -1 : 0; } static bool should_abort_scan(struct lruvec *lruvec, struct scan_control *sc) @@ -5304,8 +5290,7 @@ static void lru_gen_seq_show_full(struct seq_file *m, struct lruvec *lruvec, s = "rep"; n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]); n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]); - if (tier) - n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]); + n[2] = READ_ONCE(lrugen->protected[hist][type][tier]); } for (i = 0; i < 3; i++) @@ -5360,7 +5345,7 @@ static int lru_gen_seq_show(struct seq_file *m, void *v) seq_printf(m, " node %5d\n", nid); if (!full) - seq = min_seq[LRU_GEN_ANON]; + seq = evictable_min_seq(min_seq, MAX_SWAPPINESS / 2); else if (max_seq >= MAX_NR_GENS) seq = max_seq - MAX_NR_GENS + 1; else @@ -5400,23 +5385,14 @@ static const struct seq_operations lru_gen_seq_ops = { }; static int run_aging(struct lruvec *lruvec, unsigned long seq, - bool can_swap, bool force_scan) + int swappiness, bool force_scan) { DEFINE_MAX_SEQ(lruvec); - DEFINE_MIN_SEQ(lruvec); - - if (seq < max_seq) - return 0; if (seq > max_seq) return -EINVAL; - if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq) - return -ERANGE; - - try_to_inc_max_seq(lruvec, max_seq, can_swap, force_scan); - - return 0; + return try_to_inc_max_seq(lruvec, max_seq, swappiness, force_scan) ? 0 : -EEXIST; } static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_control *sc, @@ -5432,7 +5408,7 @@ static int run_eviction(struct lruvec *lruvec, unsigned long seq, struct scan_co while (!signal_pending(current)) { DEFINE_MIN_SEQ(lruvec); - if (seq < min_seq[!swappiness]) + if (seq < evictable_min_seq(min_seq, swappiness)) return 0; if (sc->nr_reclaimed >= nr_to_reclaim) From e73348bd163c01ffcd4b23d9854fc7d6560ffa19 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Mon, 30 Dec 2024 21:35:35 -0700 Subject: [PATCH 247/504] mm/mglru: rework type selection With anon and file min_seq being able to move independently, rework type selection so that it is based on the total refaults from all tiers of each type. Also allow a type to be selected until that type reaches MIN_NR_GENS, regardless of whether that type has a larger min_seq or not, to accommodate extreme swappiness. Since some tiers of a selected type can have higher refaults than the first tier of the other type, use a milder gain factor of 2:3 instead of 1:2, in order for those tiers in the selected type to be better protected.
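A rough sketch of what the milder gain factor means in practice (a simplified restatement of the comparison done via positive_ctrl_err(); the in-tree helper is more careful about small sample sizes):

	/*
	 * A tier is treated as hot, and therefore protected, once its refault
	 * ratio beats the first tier's ratio scaled by the gains:
	 *
	 *   pv->refaulted / pv->total * sp->gain > sp->refaulted / sp->total * pv->gain
	 *
	 * With the old 1:2 gain that meant ~2x the refault ratio of the first
	 * tier; with the new 2:3 gain, ~1.5x is enough.
	 */
	static bool tier_is_hot(struct ctrl_pos *sp, struct ctrl_pos *pv)
	{
		return pv->refaulted * sp->total * sp->gain >
		       sp->refaulted * pv->total * pv->gain;
	}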
As an intermediate step to the final optimization, this change by itself should not have userspace-visible effects beyond performance. Link: https://lkml.kernel.org/r/20241231043538.4075764-5-yuzhao@google.com Signed-off-by: Yu Zhao Reported-by: David Stevens Tested-by: Kalesh Singh Cc: Barry Song Cc: Bharata B Rao Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/vmscan.c | 82 +++++++++++++++++------------------------------------ 1 file changed, 26 insertions(+), 56 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index 51dc162a1493..7eaa975d8546 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3094,15 +3094,20 @@ struct ctrl_pos { static void read_ctrl_pos(struct lruvec *lruvec, int type, int tier, int gain, struct ctrl_pos *pos) { + int i; struct lru_gen_folio *lrugen = &lruvec->lrugen; int hist = lru_hist_from_seq(lrugen->min_seq[type]); - pos->refaulted = lrugen->avg_refaulted[type][tier] + - atomic_long_read(&lrugen->refaulted[hist][type][tier]); - pos->total = lrugen->avg_total[type][tier] + - lrugen->protected[hist][type][tier] + - atomic_long_read(&lrugen->evicted[hist][type][tier]); pos->gain = gain; + pos->refaulted = pos->total = 0; + + for (i = tier % MAX_NR_TIERS; i <= min(tier, MAX_NR_TIERS - 1); i++) { + pos->refaulted += lrugen->avg_refaulted[type][i] + + atomic_long_read(&lrugen->refaulted[hist][type][i]); + pos->total += lrugen->avg_total[type][i] + + lrugen->protected[hist][type][i] + + atomic_long_read(&lrugen->evicted[hist][type][i]); + } } static void reset_ctrl_pos(struct lruvec *lruvec, int type, bool carryover) @@ -4502,13 +4507,13 @@ static int get_tier_idx(struct lruvec *lruvec, int type) struct ctrl_pos sp, pv; /* - * To leave a margin for fluctuations, use a larger gain factor (1:2). + * To leave a margin for fluctuations, use a larger gain factor (2:3). * This value is chosen because any other tier would have at least twice * as many refaults as the first tier. */ - read_ctrl_pos(lruvec, type, 0, 1, &sp); + read_ctrl_pos(lruvec, type, 0, 2, &sp); for (tier = 1; tier < MAX_NR_TIERS; tier++) { - read_ctrl_pos(lruvec, type, tier, 2, &pv); + read_ctrl_pos(lruvec, type, tier, 3, &pv); if (!positive_ctrl_err(&sp, &pv)) break; } @@ -4516,68 +4521,34 @@ static int get_tier_idx(struct lruvec *lruvec, int type) return tier - 1; } -static int get_type_to_scan(struct lruvec *lruvec, int swappiness, int *tier_idx) +static int get_type_to_scan(struct lruvec *lruvec, int swappiness) { - int type, tier; struct ctrl_pos sp, pv; - int gain[ANON_AND_FILE] = { swappiness, MAX_SWAPPINESS - swappiness }; + if (!swappiness) + return LRU_GEN_FILE; + + if (swappiness == MAX_SWAPPINESS) + return LRU_GEN_ANON; /* - * Compare the first tier of anon with that of file to determine which - * type to scan. Also need to compare other tiers of the selected type - * with the first tier of the other type to determine the last tier (of - * the selected type) to evict. + * Compare the sum of all tiers of anon with that of file to determine + * which type to scan.
*/ - read_ctrl_pos(lruvec, LRU_GEN_ANON, 0, gain[LRU_GEN_ANON], &sp); - read_ctrl_pos(lruvec, LRU_GEN_FILE, 0, gain[LRU_GEN_FILE], &pv); - type = positive_ctrl_err(&sp, &pv); + read_ctrl_pos(lruvec, LRU_GEN_ANON, MAX_NR_TIERS, swappiness, &sp); + read_ctrl_pos(lruvec, LRU_GEN_FILE, MAX_NR_TIERS, MAX_SWAPPINESS - swappiness, &pv); - read_ctrl_pos(lruvec, !type, 0, gain[!type], &sp); - for (tier = 1; tier < MAX_NR_TIERS; tier++) { - read_ctrl_pos(lruvec, type, tier, gain[type], &pv); - if (!positive_ctrl_err(&sp, &pv)) - break; - } - - *tier_idx = tier - 1; - - return type; + return positive_ctrl_err(&sp, &pv); } static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int swappiness, int *type_scanned, struct list_head *list) { int i; - int type; - int tier = -1; - DEFINE_MIN_SEQ(lruvec); - - /* - * Try to make the obvious choice first, and if anon and file are both - * available from the same generation, - * 1. Interpret swappiness 1 as file first and MAX_SWAPPINESS as anon - * first. - * 2. If !__GFP_IO, file first since clean pagecache is more likely to - * exist than clean swapcache. - */ - if (!swappiness) - type = LRU_GEN_FILE; - else if (min_seq[LRU_GEN_ANON] < min_seq[LRU_GEN_FILE]) - type = LRU_GEN_ANON; - else if (swappiness == 1) - type = LRU_GEN_FILE; - else if (swappiness == MAX_SWAPPINESS) - type = LRU_GEN_ANON; - else if (!(sc->gfp_mask & __GFP_IO)) - type = LRU_GEN_FILE; - else - type = get_type_to_scan(lruvec, swappiness, &tier); + int type = get_type_to_scan(lruvec, swappiness); for_each_evictable_type(i, swappiness) { int scanned; - - if (tier < 0) - tier = get_tier_idx(lruvec, type); + int tier = get_tier_idx(lruvec, type); *type_scanned = type; @@ -4586,7 +4557,6 @@ static int isolate_folios(struct lruvec *lruvec, struct scan_control *sc, int sw return scanned; type = !type; - tier = -1; } return 0; From 6853b71aca3ece1ca76573111f5c696c102ac684 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Mon, 30 Dec 2024 21:35:36 -0700 Subject: [PATCH 248/504] mm/mglru: rework refault detection With anon and file min_seq being able to move independently, rework workingset protection as well so that the comparison of refaults between anon and file is always on an equal footing. Specifically, make lru_gen_test_recent() return true for refaults happening within the distance of MAX_NR_GENS. For example, if min_seq of a type is max_seq-MIN_NR_GENS, refaults from min_seq-1, i.e., max_seq-MIN_NR_GENS-1, are also considered recent, since the distance max_seq-(max_seq-MIN_NR_GENS-1), i.e., MIN_NR_GENS+1, is less than MAX_NR_GENS. As an intermediate step to the final optimization, this change by itself should not have userspace-visible effects beyond performance. Link: https://lkml.kernel.org/r/20241231043538.4075764-6-yuzhao@google.com Signed-off-by: Yu Zhao Reported-by: Kairui Song Closes: https://lore.kernel.org/CAOUHufahuWcKf5f1Sg3emnqX+cODuR=2TQo7T4Gr-QYLujn4RA@mail.gmail.com/ Tested-by: Kalesh Singh Cc: Barry Song Cc: Bharata B Rao Cc: David Stevens Signed-off-by: Andrew Morton --- mm/workingset.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/mm/workingset.c b/mm/workingset.c index ad181d1b8cf1..2c310c29f51e 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -260,11 +260,11 @@ static void *lru_gen_eviction(struct folio *folio) * Tests if the shadow entry is for a folio that was recently evicted. * Fills in @lruvec, @token, @workingset with the values unpacked from shadow.
*/ -static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec, +static bool lru_gen_test_recent(void *shadow, struct lruvec **lruvec, unsigned long *token, bool *workingset) { int memcg_id; - unsigned long min_seq; + unsigned long max_seq; struct mem_cgroup *memcg; struct pglist_data *pgdat; @@ -273,8 +273,10 @@ static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec, memcg = mem_cgroup_from_id(memcg_id); *lruvec = mem_cgroup_lruvec(memcg, pgdat); - min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]); - return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH)); + max_seq = READ_ONCE((*lruvec)->lrugen.max_seq); + max_seq &= EVICTION_MASK >> LRU_REFS_WIDTH; + + return abs_diff(max_seq, *token >> LRU_REFS_WIDTH) < MAX_NR_GENS; } static void lru_gen_refault(struct folio *folio, void *shadow) @@ -290,7 +292,7 @@ static void lru_gen_refault(struct folio *folio, void *shadow) rcu_read_lock(); - recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset); + recent = lru_gen_test_recent(shadow, &lruvec, &token, &workingset); if (lruvec != folio_lruvec(folio)) goto unlock; @@ -331,7 +333,7 @@ static void *lru_gen_eviction(struct folio *folio) return NULL; } -static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec, +static bool lru_gen_test_recent(void *shadow, struct lruvec **lruvec, unsigned long *token, bool *workingset) { return false; @@ -432,8 +434,7 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset, bool recent; rcu_read_lock(); - recent = lru_gen_test_recent(shadow, file, &eviction_lruvec, - &eviction, workingset); + recent = lru_gen_test_recent(shadow, &eviction_lruvec, &eviction, workingset); rcu_read_unlock(); return recent; } From cadffd8aab913c9214ad5426bd4d02a39427d334 Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Mon, 30 Dec 2024 21:35:37 -0700 Subject: [PATCH 249/504] mm/mglru: rework workingset protection With the aging feedback no longer considering the distribution of folios in each generation, rework workingset protection to better distribute folios across MAX_NR_GENS. This is achieved by reusing PG_workingset and PG_referenced/LRU_REFS_FLAGS in a slightly different way. For folios accessed multiple times through file descriptors, make lru_gen_inc_refs() set additional bits of LRU_REFS_WIDTH in folio->flags after PG_referenced, then PG_workingset after LRU_REFS_WIDTH. After all its bits are set, i.e., LRU_REFS_FLAGS|BIT(PG_workingset), a folio is lazily promoted into the second oldest generation in the eviction path. And when folio_inc_gen() does that, it clears LRU_REFS_FLAGS so that lru_gen_inc_refs() can start over. For this case, LRU_REFS_MASK is only valid when PG_referenced is set. For folios accessed multiple times through page tables, folio_update_gen() from a page table walk or lru_gen_set_refs() from a rmap walk sets PG_referenced after the accessed bit is cleared for the first time. Thereafter, those two paths set PG_workingset and promote folios to the youngest generation. Like folio_inc_gen(), when folio_update_gen() does that, it also clears PG_referenced. For this case, LRU_REFS_MASK is not used. For both of the cases, after PG_workingset is set on a folio, it remains until this folio is either reclaimed, or "deactivated" by lru_gen_clear_refs(). It can be set again if lru_gen_test_recent() returns true upon a refault. 
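A minimal sketch of the lazy-promotion rule described above (hypothetical helper name; in the tree this logic sits in the eviction path around folio_inc_gen()):

	/*
	 * A folio accessed through file descriptors is promoted into the second
	 * oldest generation only after every reference bit has been earned;
	 * folio_inc_gen() then clears LRU_REFS_FLAGS so the cycle can restart.
	 */
	static bool should_lazily_promote(struct folio *folio)	/* hypothetical */
	{
		unsigned long flags = READ_ONCE(folio->flags);

		return (flags & (LRU_REFS_FLAGS | BIT(PG_workingset))) ==
		       (LRU_REFS_FLAGS | BIT(PG_workingset));
	}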
When adding folios to the LRU lists, lru_gen_folio_seq() distributes them as follows: +---------------------------------+---------------------------------+ | Accessed thru page tables | Accessed thru file descriptors | +---------------------------------+---------------------------------+ | PG_active (set while isolated) | | +----------------+----------------+----------------+----------------+ | PG_workingset | PG_referenced | PG_workingset | LRU_REFS_FLAGS | +---------------------------------+---------------------------------+ |<--------- MIN_NR_GENS --------->| | |<-------------------------- MAX_NR_GENS -------------------------->| After this patch, some typical client and server workloads showed improvements under heavy memory pressure. For example, Python TPC-C, which was used to benchmark a different approach [1] to better detect refault distances, showed a significant decrease in total refaults: Before After Change Time (seconds) 10801 10801 0% Executed (transactions) 41472 43663 +5% workingset_nodes 109070 120244 +10% workingset_refault_anon 5019627 7281831 +45% workingset_refault_file 1294678786 554855564 -57% workingset_refault_total 1299698413 562137395 -57% [1] https://lore.kernel.org/20230920190244.16839-1-ryncsn@gmail.com/ Link: https://lkml.kernel.org/r/20241231043538.4075764-7-yuzhao@google.com Signed-off-by: Yu Zhao Reported-by: Kairui Song Closes: https://lore.kernel.org/CAOUHufahuWcKf5f1Sg3emnqX+cODuR=2TQo7T4Gr-QYLujn4RA@mail.gmail.com/ Tested-by: Kalesh Singh Cc: Barry Song Cc: Bharata B Rao Cc: David Stevens Signed-off-by: Andrew Morton --- include/linux/mm_inline.h | 88 +++++++++++------------ include/linux/mmzone.h | 82 +++++++++++++-------- mm/swap.c | 24 +++---- mm/vmscan.c | 147 ++++++++++++++++++++++---------------- mm/workingset.c | 29 ++++---- 5 files changed, 204 insertions(+), 166 deletions(-) diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 34e5097182a0..f9157a0c42a5 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -133,31 +133,25 @@ static inline int lru_hist_from_seq(unsigned long seq) return seq % NR_HIST_GENS; } -static inline int lru_tier_from_refs(int refs) +static inline int lru_tier_from_refs(int refs, bool workingset) { VM_WARN_ON_ONCE(refs > BIT(LRU_REFS_WIDTH)); - /* see the comment in folio_lru_refs() */ - return order_base_2(refs + 1); + /* see the comment on MAX_NR_TIERS */ + return workingset ? MAX_NR_TIERS - 1 : order_base_2(refs); } static inline int folio_lru_refs(struct folio *folio) { unsigned long flags = READ_ONCE(folio->flags); - bool workingset = flags & BIT(PG_workingset); + if (!(flags & BIT(PG_referenced))) + return 0; /* - * Return the number of accesses beyond PG_referenced, i.e., N-1 if the - * total number of accesses is N>1, since N=0,1 both map to the first - * tier. lru_tier_from_refs() will account for this off-by-one. Also see - * the comment on MAX_NR_TIERS. + * Return the total number of accesses including PG_referenced. Also see + * the comment on LRU_REFS_FLAGS. 
*/ - return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + workingset; -} - -static inline void folio_clear_lru_refs(struct folio *folio) -{ - set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); + return ((flags & LRU_REFS_MASK) >> LRU_REFS_PGOFF) + 1; } static inline int folio_lru_gen(struct folio *folio) @@ -223,11 +217,43 @@ static inline void lru_gen_update_size(struct lruvec *lruvec, struct folio *foli VM_WARN_ON_ONCE(lru_gen_is_active(lruvec, old_gen) && !lru_gen_is_active(lruvec, new_gen)); } +static inline unsigned long lru_gen_folio_seq(struct lruvec *lruvec, struct folio *folio, + bool reclaiming) +{ + int gen; + int type = folio_is_file_lru(folio); + struct lru_gen_folio *lrugen = &lruvec->lrugen; + + /* + * +-----------------------------------+-----------------------------------+ + * | Accessed through page tables and | Accessed through file descriptors | + * | promoted by folio_update_gen() | and protected by folio_inc_gen() | + * +-----------------------------------+-----------------------------------+ + * | PG_active (set while isolated) | | + * +-----------------+-----------------+-----------------+-----------------+ + * | PG_workingset | PG_referenced | PG_workingset | LRU_REFS_FLAGS | + * +-----------------------------------+-----------------------------------+ + * |<---------- MIN_NR_GENS ---------->| | + * |<---------------------------- MAX_NR_GENS ---------------------------->| + */ + if (folio_test_active(folio)) + gen = MIN_NR_GENS - folio_test_workingset(folio); + else if (reclaiming) + gen = MAX_NR_GENS; + else if ((!folio_is_file_lru(folio) && !folio_test_swapcache(folio)) || + (folio_test_reclaim(folio) && + (folio_test_dirty(folio) || folio_test_writeback(folio)))) + gen = MIN_NR_GENS; + else + gen = MAX_NR_GENS - folio_test_workingset(folio); + + return max(READ_ONCE(lrugen->max_seq) - gen + 1, READ_ONCE(lrugen->min_seq[type])); +} + static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, bool reclaiming) { unsigned long seq; unsigned long flags; - unsigned long mask; int gen = folio_lru_gen(folio); int type = folio_is_file_lru(folio); int zone = folio_zonenum(folio); @@ -237,40 +263,12 @@ static inline bool lru_gen_add_folio(struct lruvec *lruvec, struct folio *folio, if (folio_test_unevictable(folio) || !lrugen->enabled) return false; - /* - * There are four common cases for this page: - * 1. If it's hot, i.e., freshly faulted in, add it to the youngest - * generation, and it's protected over the rest below. - * 2. If it can't be evicted immediately, i.e., a dirty page pending - * writeback, add it to the second youngest generation. - * 3. If it should be evicted first, e.g., cold and clean from - * folio_rotate_reclaimable(), add it to the oldest generation. - * 4. Everything else falls between 2 & 3 above and is added to the - * second oldest generation if it's considered inactive, or the - * oldest generation otherwise. See lru_gen_is_active(). 
- */ - if (folio_test_active(folio)) - seq = lrugen->max_seq; - else if ((type == LRU_GEN_ANON && !folio_test_swapcache(folio)) || - (folio_test_reclaim(folio) && - (folio_test_dirty(folio) || folio_test_writeback(folio)))) - seq = lrugen->max_seq - 1; - else if (reclaiming || lrugen->min_seq[type] + MIN_NR_GENS >= lrugen->max_seq) - seq = lrugen->min_seq[type]; - else - seq = lrugen->min_seq[type] + 1; + seq = lru_gen_folio_seq(lruvec, folio, reclaiming); gen = lru_gen_from_seq(seq); flags = (gen + 1UL) << LRU_GEN_PGOFF; /* see the comment on MIN_NR_GENS about PG_active */ - mask = LRU_GEN_MASK; - /* - * Don't clear PG_workingset here because it can affect PSI accounting - * if the activation is due to workingset refault. - */ - if (folio_test_active(folio)) - mask |= LRU_REFS_MASK | BIT(PG_referenced) | BIT(PG_active); - set_mask_bits(&folio->flags, mask, flags); + set_mask_bits(&folio->flags, LRU_GEN_MASK | BIT(PG_active), flags); lru_gen_update_size(lruvec, folio, -1, gen); /* for folio_rotate_reclaimable() */ diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 8245ecb0400b..9540b41894da 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -332,66 +332,88 @@ enum lruvec_flags { #endif /* !__GENERATING_BOUNDS_H */ /* - * Evictable pages are divided into multiple generations. The youngest and the + * Evictable folios are divided into multiple generations. The youngest and the * oldest generation numbers, max_seq and min_seq, are monotonically increasing. * They form a sliding window of a variable size [MIN_NR_GENS, MAX_NR_GENS]. An * offset within MAX_NR_GENS, i.e., gen, indexes the LRU list of the * corresponding generation. The gen counter in folio->flags stores gen+1 while - * a page is on one of lrugen->folios[]. Otherwise it stores 0. + * a folio is on one of lrugen->folios[]. Otherwise it stores 0. * - * A page is added to the youngest generation on faulting. The aging needs to - * check the accessed bit at least twice before handing this page over to the - * eviction. The first check takes care of the accessed bit set on the initial - * fault; the second check makes sure this page hasn't been used since then. - * This process, AKA second chance, requires a minimum of two generations, - * hence MIN_NR_GENS. And to maintain ABI compatibility with the active/inactive - * LRU, e.g., /proc/vmstat, these two generations are considered active; the - * rest of generations, if they exist, are considered inactive. See - * lru_gen_is_active(). + * After a folio is faulted in, the aging needs to check the accessed bit at + * least twice before handing this folio over to the eviction. The first check + * clears the accessed bit from the initial fault; the second check makes sure + * this folio hasn't been used since then. This process, AKA second chance, + * requires a minimum of two generations, hence MIN_NR_GENS. And to maintain ABI + * compatibility with the active/inactive LRU, e.g., /proc/vmstat, these two + * generations are considered active; the rest of generations, if they exist, + * are considered inactive. See lru_gen_is_active(). * - * PG_active is always cleared while a page is on one of lrugen->folios[] so - * that the aging needs not to worry about it. And it's set again when a page - * considered active is isolated for non-reclaiming purposes, e.g., migration. - * See lru_gen_add_folio() and lru_gen_del_folio(). + * PG_active is always cleared while a folio is on one of lrugen->folios[] so + * that the sliding window needs not to worry about it. 
And it's set again when + * a folio considered active is isolated for non-reclaiming purposes, e.g., + * migration. See lru_gen_add_folio() and lru_gen_del_folio(). * * MAX_NR_GENS is set to 4 so that the multi-gen LRU can support twice the * number of categories of the active/inactive LRU when keeping track of * accesses through page tables. This requires order_base_2(MAX_NR_GENS+1) bits - * in folio->flags. + * in folio->flags, masked by LRU_GEN_MASK. */ #define MIN_NR_GENS 2U #define MAX_NR_GENS 4U /* - * Each generation is divided into multiple tiers. A page accessed N times - * through file descriptors is in tier order_base_2(N). A page in the first tier - * (N=0,1) is marked by PG_referenced unless it was faulted in through page - * tables or read ahead. A page in any other tier (N>1) is marked by - * PG_referenced and PG_workingset. This implies a minimum of two tiers is - * supported without using additional bits in folio->flags. + * Each generation is divided into multiple tiers. A folio accessed N times + * through file descriptors is in tier order_base_2(N). A folio in the first + * tier (N=0,1) is marked by PG_referenced unless it was faulted in through page + * tables or read ahead. A folio in the last tier (MAX_NR_TIERS-1) is marked by + * PG_workingset. A folio in any other tier (1<N<5) is marked by additional bits + * of LRU_REFS_WIDTH in folio->flags. * * In contrast to moving across generations which requires the LRU lock, moving * across tiers only involves atomic operations on folio->flags and therefore * has a negligible cost in the buffered access path. In the eviction path, - * comparisons of refaulted/(evicted+protected) from the first tier and the - * rest infer whether pages accessed multiple times through file descriptors - * are statistically hot and thus worth protecting. + * comparisons of refaulted/(evicted+protected) from the first tier and the rest + * infer whether folios accessed multiple times through file descriptors are + * statistically hot and thus worth protecting. * * MAX_NR_TIERS is set to 4 so that the multi-gen LRU can support twice the * number of categories of the active/inactive LRU when keeping track of * accesses through file descriptors. This uses MAX_NR_TIERS-2 spare bits in - * folio->flags. + * folio->flags, masked by LRU_REFS_MASK. */ #define MAX_NR_TIERS 4U #ifndef __GENERATING_BOUNDS_H -struct lruvec; -struct page_vma_mapped_walk; - #define LRU_GEN_MASK ((BIT(LRU_GEN_WIDTH) - 1) << LRU_GEN_PGOFF) #define LRU_REFS_MASK ((BIT(LRU_REFS_WIDTH) - 1) << LRU_REFS_PGOFF) +/* + * For folios accessed multiple times through file descriptors, + * lru_gen_inc_refs() sets additional bits of LRU_REFS_WIDTH in folio->flags + * after PG_referenced, then PG_workingset after LRU_REFS_WIDTH. After all its + * bits are set, i.e., LRU_REFS_FLAGS|BIT(PG_workingset), a folio is lazily + * promoted into the second oldest generation in the eviction path. And when + * folio_inc_gen() does that, it clears LRU_REFS_FLAGS so that + * lru_gen_inc_refs() can start over. Note that for this case, LRU_REFS_MASK is + * only valid when PG_referenced is set. + * + * For folios accessed multiple times through page tables, folio_update_gen() + * from a page table walk or lru_gen_set_refs() from a rmap walk sets + * PG_referenced after the accessed bit is cleared for the first time. + * Thereafter, those two paths set PG_workingset and promote folios to the + * youngest generation. Like folio_inc_gen(), folio_update_gen() also clears + * PG_referenced. Note that for this case, LRU_REFS_MASK is not used.
+ * + * For both cases above, after PG_workingset is set on a folio, it remains until + * this folio is either reclaimed, or "deactivated" by lru_gen_clear_refs(). It + * can be set again if lru_gen_test_recent() returns true upon a refault. + */ +#define LRU_REFS_FLAGS (LRU_REFS_MASK | BIT(PG_referenced)) + +struct lruvec; +struct page_vma_mapped_walk; + #ifdef CONFIG_LRU_GEN enum { @@ -406,8 +428,6 @@ enum { NR_LRU_GEN_CAPS }; -#define LRU_REFS_FLAGS (BIT(PG_referenced) | BIT(PG_workingset)) - #define MIN_LRU_BATCH BITS_PER_LONG #define MAX_LRU_BATCH (MIN_LRU_BATCH * 64) diff --git a/mm/swap.c b/mm/swap.c index 649ef7f2b74b..746a5ceba42c 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -387,24 +387,20 @@ static void lru_gen_inc_refs(struct folio *folio) if (folio_test_unevictable(folio)) return; + /* see the comment on LRU_REFS_FLAGS */ if (!folio_test_referenced(folio)) { - folio_set_referenced(folio); + set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced)); return; } - if (!folio_test_workingset(folio)) { - folio_set_workingset(folio); - return; - } - - /* see the comment on MAX_NR_TIERS */ do { - new_flags = old_flags & LRU_REFS_MASK; - if (new_flags == LRU_REFS_MASK) - break; + if ((old_flags & LRU_REFS_MASK) == LRU_REFS_MASK) { + if (!folio_test_workingset(folio)) + folio_set_workingset(folio); + return; + } - new_flags += BIT(LRU_REFS_PGOFF); - new_flags |= old_flags & ~LRU_REFS_MASK; + new_flags = old_flags + BIT(LRU_REFS_PGOFF); } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); } @@ -417,7 +413,7 @@ static bool lru_gen_clear_refs(struct folio *folio) if (gen < 0) return true; - set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); + set_mask_bits(&folio->flags, LRU_REFS_FLAGS | BIT(PG_workingset), 0); lrugen = &folio_lruvec(folio)->lrugen; /* whether can do without shuffling under the LRU lock */ @@ -499,7 +495,7 @@ void folio_add_lru(struct folio *folio) folio_test_unevictable(folio), folio); VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); - /* see the comment in lru_gen_add_folio() */ + /* see the comment in lru_gen_folio_seq() */ if (lru_gen_enabled() && !folio_test_unevictable(folio) && lru_gen_in_fault() && !(current->flags & PF_MEMALLOC)) folio_set_active(folio); diff --git a/mm/vmscan.c b/mm/vmscan.c index 7eaa975d8546..ca9f9f9e4f10 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -862,6 +862,31 @@ enum folio_references { FOLIOREF_ACTIVATE, }; +#ifdef CONFIG_LRU_GEN +/* + * Only used on a mapped folio in the eviction (rmap walk) path, where promotion + * needs to be done by taking the folio off the LRU list and then adding it back + * with PG_active set. In contrast, the aging (page table walk) path uses + * folio_update_gen(). 
+ */ +static bool lru_gen_set_refs(struct folio *folio) +{ + /* see the comment on LRU_REFS_FLAGS */ + if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) { + set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced)); + return false; + } + + set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_workingset)); + return true; +} +#else +static bool lru_gen_set_refs(struct folio *folio) +{ + return false; +} +#endif /* CONFIG_LRU_GEN */ + static enum folio_references folio_check_references(struct folio *folio, struct scan_control *sc) { @@ -870,7 +895,6 @@ static enum folio_references folio_check_references(struct folio *folio, referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, &vm_flags); - referenced_folio = folio_test_clear_referenced(folio); /* * The supposedly reclaimable folio was found to be in a VM_LOCKED vma. @@ -888,6 +912,15 @@ static enum folio_references folio_check_references(struct folio *folio, if (referenced_ptes == -1) return FOLIOREF_KEEP; + if (lru_gen_enabled()) { + if (!referenced_ptes) + return FOLIOREF_RECLAIM; + + return lru_gen_set_refs(folio) ? FOLIOREF_ACTIVATE : FOLIOREF_KEEP; + } + + referenced_folio = folio_test_clear_referenced(folio); + if (referenced_ptes) { /* * All mapped folios start out with page table @@ -1092,11 +1125,6 @@ retry: if (!sc->may_unmap && folio_mapped(folio)) goto keep_locked; - /* folio_update_gen() tried to promote this page? */ - if (lru_gen_enabled() && !ignore_references && - folio_mapped(folio) && folio_test_referenced(folio)) - goto keep_locked; - /* * The number of dirty pages determines if a node is marked * reclaim_congested. kswapd will stall and start writing @@ -3168,16 +3196,19 @@ static int folio_update_gen(struct folio *folio, int gen) VM_WARN_ON_ONCE(gen >= MAX_NR_GENS); + /* see the comment on LRU_REFS_FLAGS */ + if (!folio_test_referenced(folio) && !folio_test_workingset(folio)) { + set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced)); + return -1; + } + do { /* lru_gen_del_folio() has isolated this page? 
*/ - if (!(old_flags & LRU_GEN_MASK)) { - /* for shrink_folio_list() */ - new_flags = old_flags | BIT(PG_referenced); - continue; - } + if (!(old_flags & LRU_GEN_MASK)) + return -1; - new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); - new_flags |= (gen + 1UL) << LRU_GEN_PGOFF; + new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS); + new_flags |= ((gen + 1UL) << LRU_GEN_PGOFF) | BIT(PG_workingset); } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; @@ -3201,7 +3232,7 @@ static int folio_inc_gen(struct lruvec *lruvec, struct folio *folio, bool reclai new_gen = (old_gen + 1) % MAX_NR_GENS; - new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_MASK | LRU_REFS_FLAGS); + new_flags = old_flags & ~(LRU_GEN_MASK | LRU_REFS_FLAGS); new_flags |= (new_gen + 1UL) << LRU_GEN_PGOFF; /* for folio_end_writeback() */ if (reclaiming) @@ -3379,9 +3410,11 @@ static unsigned long get_pmd_pfn(pmd_t pmd, struct vm_area_struct *vma, unsigned static struct folio *get_pfn_folio(unsigned long pfn, struct mem_cgroup *memcg, struct pglist_data *pgdat) { - struct folio *folio; + struct folio *folio = pfn_folio(pfn); + + if (folio_lru_gen(folio) < 0) + return NULL; - folio = pfn_folio(pfn); if (folio_nid(folio) != pgdat->node_id) return NULL; @@ -3758,8 +3791,7 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, int swappiness) while (!list_empty(head)) { struct folio *folio = lru_to_folio(head); int refs = folio_lru_refs(folio); - int tier = lru_tier_from_refs(refs); - int delta = folio_nr_pages(folio); + bool workingset = folio_test_workingset(folio); VM_WARN_ON_ONCE_FOLIO(folio_test_unevictable(folio), folio); VM_WARN_ON_ONCE_FOLIO(folio_test_active(folio), folio); @@ -3769,8 +3801,14 @@ static bool inc_min_seq(struct lruvec *lruvec, int type, int swappiness) new_gen = folio_inc_gen(lruvec, folio, false); list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); - WRITE_ONCE(lrugen->protected[hist][type][tier], - lrugen->protected[hist][type][tier] + delta); + /* don't count the workingset being lazily promoted */ + if (refs + workingset != BIT(LRU_REFS_WIDTH) + 1) { + int tier = lru_tier_from_refs(refs, workingset); + int delta = folio_nr_pages(folio); + + WRITE_ONCE(lrugen->protected[hist][type][tier], + lrugen->protected[hist][type][tier] + delta); + } if (!--remaining) return false; @@ -4156,16 +4194,10 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw) old_gen = folio_update_gen(folio, new_gen); if (old_gen >= 0 && old_gen != new_gen) update_batch_size(walk, folio, old_gen, new_gen); - - continue; - } - - old_gen = folio_lru_gen(folio); - if (old_gen < 0) - folio_set_referenced(folio); - else if (old_gen != new_gen) { - folio_clear_lru_refs(folio); - folio_activate(folio); + } else if (lru_gen_set_refs(folio)) { + old_gen = folio_lru_gen(folio); + if (old_gen >= 0 && old_gen != new_gen) + folio_activate(folio); } } @@ -4326,7 +4358,8 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c int zone = folio_zonenum(folio); int delta = folio_nr_pages(folio); int refs = folio_lru_refs(folio); - int tier = lru_tier_from_refs(refs); + bool workingset = folio_test_workingset(folio); + int tier = lru_tier_from_refs(refs, workingset); struct lru_gen_folio *lrugen = &lruvec->lrugen; VM_WARN_ON_ONCE_FOLIO(gen >= MAX_NR_GENS, folio); @@ -4348,14 +4381,17 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c } /* protected */ - if (tier > 
tier_idx || refs == BIT(LRU_REFS_WIDTH)) { - int hist = lru_hist_from_seq(lrugen->min_seq[type]); - + if (tier > tier_idx || refs + workingset == BIT(LRU_REFS_WIDTH) + 1) { gen = folio_inc_gen(lruvec, folio, false); - list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); + list_move(&folio->lru, &lrugen->folios[gen][type][zone]); - WRITE_ONCE(lrugen->protected[hist][type][tier], - lrugen->protected[hist][type][tier] + delta); + /* don't count the workingset being lazily promoted */ + if (refs + workingset != BIT(LRU_REFS_WIDTH) + 1) { + int hist = lru_hist_from_seq(lrugen->min_seq[type]); + + WRITE_ONCE(lrugen->protected[hist][type][tier], + lrugen->protected[hist][type][tier] + delta); + } return true; } @@ -4375,8 +4411,7 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, struct scan_c } /* waiting for writeback */ - if (folio_test_locked(folio) || writeback || - (type == LRU_GEN_FILE && dirty)) { + if (writeback || (type == LRU_GEN_FILE && dirty)) { gen = folio_inc_gen(lruvec, folio, true); list_move(&folio->lru, &lrugen->folios[gen][type][zone]); return true; @@ -4405,13 +4440,12 @@ static bool isolate_folio(struct lruvec *lruvec, struct folio *folio, struct sca return false; } - /* see the comment on MAX_NR_TIERS */ + /* see the comment on LRU_REFS_FLAGS */ if (!folio_test_referenced(folio)) - folio_clear_lru_refs(folio); + set_mask_bits(&folio->flags, LRU_REFS_MASK, 0); /* for shrink_folio_list() */ folio_clear_reclaim(folio); - folio_clear_referenced(folio); success = lru_gen_del_folio(lruvec, folio, true); VM_WARN_ON_ONCE_FOLIO(!success, folio); @@ -4601,31 +4635,24 @@ retry: type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON); list_for_each_entry_safe_reverse(folio, next, &list, lru) { + DEFINE_MIN_SEQ(lruvec); + if (!folio_evictable(folio)) { list_del(&folio->lru); folio_putback_lru(folio); continue; } - if (folio_test_reclaim(folio) && - (folio_test_dirty(folio) || folio_test_writeback(folio))) { - /* restore LRU_REFS_FLAGS cleared by isolate_folio() */ - if (folio_test_workingset(folio)) - folio_set_referenced(folio); - continue; - } - - if (skip_retry || folio_test_active(folio) || folio_test_referenced(folio) || - folio_mapped(folio) || folio_test_locked(folio) || - folio_test_dirty(folio) || folio_test_writeback(folio)) { - /* don't add rejected folios to the oldest generation */ - set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, - BIT(PG_active)); - continue; - } - /* retry folios that may have missed folio_rotate_reclaimable() */ - list_move(&folio->lru, &clean); + if (!skip_retry && !folio_test_active(folio) && !folio_mapped(folio) && + !folio_test_dirty(folio) && !folio_test_writeback(folio)) { + list_move(&folio->lru, &clean); + continue; + } + + /* don't add rejected folios to the oldest generation */ + if (lru_gen_folio_seq(lruvec, folio, false) == min_seq[type]) + set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_active)); } spin_lock_irq(&lruvec->lru_lock); diff --git a/mm/workingset.c b/mm/workingset.c index 2c310c29f51e..4841ae8af411 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -239,7 +239,8 @@ static void *lru_gen_eviction(struct folio *folio) int type = folio_is_file_lru(folio); int delta = folio_nr_pages(folio); int refs = folio_lru_refs(folio); - int tier = lru_tier_from_refs(refs); + bool workingset = folio_test_workingset(folio); + int tier = lru_tier_from_refs(refs, workingset); struct mem_cgroup *memcg = folio_memcg(folio); struct pglist_data *pgdat = folio_pgdat(folio); @@ -253,7 +254,7 @@ static void 
*lru_gen_eviction(struct folio *folio) hist = lru_hist_from_seq(min_seq); atomic_long_add(delta, &lrugen->evicted[hist][type][tier]); - return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs); + return pack_shadow(mem_cgroup_id(memcg), pgdat, token, workingset); } /* @@ -304,24 +305,20 @@ static void lru_gen_refault(struct folio *folio, void *shadow) lrugen = &lruvec->lrugen; hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type])); - /* see the comment in folio_lru_refs() */ - refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset; - tier = lru_tier_from_refs(refs); + refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + 1; + tier = lru_tier_from_refs(refs, workingset); atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]); - mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta); - /* - * Count the following two cases as stalls: - * 1. For pages accessed through page tables, hotter pages pushed out - * hot pages which refaulted immediately. - * 2. For pages accessed multiple times through file descriptors, - * they would have been protected by sort_folio(). - */ - if (lru_gen_in_fault() || refs >= BIT(LRU_REFS_WIDTH) - 1) { - set_mask_bits(&folio->flags, 0, LRU_REFS_MASK | BIT(PG_workingset)); + /* see folio_add_lru() where folio_set_active() will be called */ + if (lru_gen_in_fault()) + mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta); + + if (workingset) { + folio_set_workingset(folio); mod_lruvec_state(lruvec, WORKINGSET_RESTORE_BASE + type, delta); - } + } else + set_mask_bits(&folio->flags, LRU_REFS_MASK, (refs - 1UL) << LRU_REFS_PGOFF); unlock: rcu_read_unlock(); } From fed6bd41a108a9b5445e84d5a712d6eb9b7d275c Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Mon, 30 Dec 2024 21:35:38 -0700 Subject: [PATCH 250/504] mm/mglru: fix PTE-mapped large folios Count the accessed bits from PTEs mapping the same large folio as one access rather than multiple accesses. The last patch changed how folios accessed through page tables are promoted: rather than getting promoted after the accessed bit is cleared for the first time, a folio only gets promoted thereafter. Counting the accessed bits from the same large folio as multiple accesses can cause that folio to be promoted prematurely, which in turn can cause overprotection of single-use large folios. This patch reduced the sys time of the kernel compilation by 95% CI [2, 5]% on Altra M128-30 with 3GB DRAM, 12GB zram, 16KB THPs and -j32. 
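To make the counting change concrete, here is a stand-alone sketch of the batching pattern the patch adds (plain C with made-up types and a hypothetical update_folio() helper; the kernel's version is walk_update_folio() in the diff below): accessed and dirty bits are accumulated per folio and flushed once when the walk moves on to a different folio, so a large folio mapped by many PTEs contributes a single access.

#include <stdbool.h>
#include <stdio.h>

struct pte { int folio; bool young; bool dirty; };   /* simulated PTEs */

/* Flush the state batched for the previous folio (hypothetical helper). */
static void update_folio(int folio, bool dirty)
{
    if (folio < 0)
        return;
    printf("folio %d: one access%s\n", folio, dirty ? " (dirty)" : "");
}

int main(void)
{
    struct pte ptes[] = {
        { 0, true, false }, { 0, true, true }, { 0, true, false },  /* one large folio */
        { 1, true, false },                                         /* a small folio */
    };
    int last = -1;        /* last folio seen with the accessed bit set */
    bool dirty = false;   /* any of its PTEs dirty? */

    for (size_t i = 0; i < sizeof(ptes) / sizeof(ptes[0]); i++) {
        if (!ptes[i].young)
            continue;
        if (ptes[i].folio != last) {    /* new folio: flush the previous one */
            update_folio(last, dirty);
            last = ptes[i].folio;
            dirty = false;
        }
        dirty |= ptes[i].dirty;
    }
    update_folio(last, dirty);          /* flush the final folio */
    return 0;
}

The three young PTEs of folio 0 produce exactly one update_folio() call, which is the behaviour the changelog describes for large folios.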
Link: https://lkml.kernel.org/r/20241231043538.4075764-8-yuzhao@google.com Signed-off-by: Yu Zhao Reported-by: Barry Song Tested-by: Kalesh Singh Cc: Bharata B Rao Cc: David Stevens Cc: Kairui Song Signed-off-by: Andrew Morton --- mm/vmscan.c | 110 ++++++++++++++++++++++++++++++++++------------------ 1 file changed, 72 insertions(+), 38 deletions(-) diff --git a/mm/vmscan.c b/mm/vmscan.c index ca9f9f9e4f10..01dce6f26ed3 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3432,29 +3432,55 @@ static bool suitable_to_scan(int total, int young) return young * n >= total; } +static void walk_update_folio(struct lru_gen_mm_walk *walk, struct folio *folio, + int new_gen, bool dirty) +{ + int old_gen; + + if (!folio) + return; + + if (dirty && !folio_test_dirty(folio) && + !(folio_test_anon(folio) && folio_test_swapbacked(folio) && + !folio_test_swapcache(folio))) + folio_mark_dirty(folio); + + if (walk) { + old_gen = folio_update_gen(folio, new_gen); + if (old_gen >= 0 && old_gen != new_gen) + update_batch_size(walk, folio, old_gen, new_gen); + } else if (lru_gen_set_refs(folio)) { + old_gen = folio_lru_gen(folio); + if (old_gen >= 0 && old_gen != new_gen) + folio_activate(folio); + } +} + static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end, struct mm_walk *args) { int i; + bool dirty; pte_t *pte; spinlock_t *ptl; unsigned long addr; int total = 0; int young = 0; + struct folio *last = NULL; struct lru_gen_mm_walk *walk = args->private; struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); DEFINE_MAX_SEQ(walk->lruvec); - int old_gen, new_gen = lru_gen_from_seq(max_seq); + int gen = lru_gen_from_seq(max_seq); pmd_t pmdval; - pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, - &ptl); + pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, &ptl); if (!pte) return false; + if (!spin_trylock(ptl)) { pte_unmap(pte); - return false; + return true; } if (unlikely(!pmd_same(pmdval, pmdp_get_lockless(pmd)))) { @@ -3483,19 +3509,23 @@ restart: if (!ptep_clear_young_notify(args->vma, addr, pte + i)) continue; + if (last != folio) { + walk_update_folio(walk, last, gen, dirty); + + last = folio; + dirty = false; + } + + if (pte_dirty(ptent)) + dirty = true; + young++; walk->mm_stats[MM_LEAF_YOUNG]++; - - if (pte_dirty(ptent) && !folio_test_dirty(folio) && - !(folio_test_anon(folio) && folio_test_swapbacked(folio) && - !folio_test_swapcache(folio))) - folio_mark_dirty(folio); - - old_gen = folio_update_gen(folio, new_gen); - if (old_gen >= 0 && old_gen != new_gen) - update_batch_size(walk, folio, old_gen, new_gen); } + walk_update_folio(walk, last, gen, dirty); + last = NULL; + if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end)) goto restart; @@ -3509,13 +3539,15 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area struct mm_walk *args, unsigned long *bitmap, unsigned long *first) { int i; + bool dirty; pmd_t *pmd; spinlock_t *ptl; + struct folio *last = NULL; struct lru_gen_mm_walk *walk = args->private; struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); DEFINE_MAX_SEQ(walk->lruvec); - int old_gen, new_gen = lru_gen_from_seq(max_seq); + int gen = lru_gen_from_seq(max_seq); VM_WARN_ON_ONCE(pud_leaf(*pud)); @@ -3568,20 +3600,23 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area if (!pmdp_clear_young_notify(vma, addr, pmd + i)) goto next; + if 
(last != folio) { + walk_update_folio(walk, last, gen, dirty); + + last = folio; + dirty = false; + } + + if (pmd_dirty(pmd[i])) + dirty = true; + walk->mm_stats[MM_LEAF_YOUNG]++; - - if (pmd_dirty(pmd[i]) && !folio_test_dirty(folio) && - !(folio_test_anon(folio) && folio_test_swapbacked(folio) && - !folio_test_swapcache(folio))) - folio_mark_dirty(folio); - - old_gen = folio_update_gen(folio, new_gen); - if (old_gen >= 0 && old_gen != new_gen) - update_batch_size(walk, folio, old_gen, new_gen); next: i = i > MIN_LRU_BATCH ? 0 : find_next_bit(bitmap, MIN_LRU_BATCH, i) + 1; } while (i <= MIN_LRU_BATCH); + walk_update_folio(walk, last, gen, dirty); + arch_leave_lazy_mmu_mode(); spin_unlock(ptl); done: @@ -4116,9 +4151,11 @@ static void lru_gen_age_node(struct pglist_data *pgdat, struct scan_control *sc) bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw) { int i; + bool dirty; unsigned long start; unsigned long end; struct lru_gen_mm_walk *walk; + struct folio *last = NULL; int young = 1; pte_t *pte = pvmw->pte; unsigned long addr = pvmw->address; @@ -4129,7 +4166,7 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw) struct lruvec *lruvec = mem_cgroup_lruvec(memcg, pgdat); struct lru_gen_mm_state *mm_state = get_mm_state(lruvec); DEFINE_MAX_SEQ(lruvec); - int old_gen, new_gen = lru_gen_from_seq(max_seq); + int gen = lru_gen_from_seq(max_seq); lockdep_assert_held(pvmw->ptl); VM_WARN_ON_ONCE_FOLIO(folio_test_lru(folio), folio); @@ -4183,24 +4220,21 @@ bool lru_gen_look_around(struct page_vma_mapped_walk *pvmw) if (!ptep_clear_young_notify(vma, addr, pte + i)) continue; - young++; + if (last != folio) { + walk_update_folio(walk, last, gen, dirty); - if (pte_dirty(ptent) && !folio_test_dirty(folio) && - !(folio_test_anon(folio) && folio_test_swapbacked(folio) && - !folio_test_swapcache(folio))) - folio_mark_dirty(folio); - - if (walk) { - old_gen = folio_update_gen(folio, new_gen); - if (old_gen >= 0 && old_gen != new_gen) - update_batch_size(walk, folio, old_gen, new_gen); - } else if (lru_gen_set_refs(folio)) { - old_gen = folio_lru_gen(folio); - if (old_gen >= 0 && old_gen != new_gen) - folio_activate(folio); + last = folio; + dirty = false; } + + if (pte_dirty(ptent)) + dirty = true; + + young++; } + walk_update_folio(walk, last, gen, dirty); + arch_leave_lazy_mmu_mode(); /* feedback from rmap walkers to page table walkers */ From cd3a25c5191c33ac221adfc041f05e850e9404b1 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 3 Jan 2025 19:35:35 +0000 Subject: [PATCH 251/504] mm/debug: introduce VM_WARN_ON_VMG() to dump VMA merge state Patch series "mm/debug: introduce and use VM_WARN_ON_VMG()". We use a number of asserts, enabled only when CONFIG_DEBUG_VM is set, during VMA merge operations to ensure state is as expected. However, when syzkaller or the like encounters these asserts, often the information provided by the report is insufficient to narrow down what the problem is. We noticed this recently in [0], where a non-repro issue resisted debugging due to simply not having sufficient information to go on. This series improves the situation by providing VM_WARN_ON_VMG() which acts like VM_WARN_ON() (i.e. only actually being invoked if CONFIG_DEBUG_VM is set), while dumping significant information about the VMA merge state, the mm_struct describing the virtual address space, all associated VMAs and, if CONFIG_DEBUG_VM_MAPLE_TREE is set, the associated maple tree. 
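To make the pattern concrete before the implementation below, here is a stand-alone illustration in plain C (struct merge_ctx, dump_ctx() and WARN_ON_CTX() are invented for this sketch; the real macro is VM_WARN_ON_VMG() in the diff): aggregate the operation's state in one object and let the assertion dump that object before warning, so even a one-off, non-reproducible failure report carries its context.

#include <stdio.h>

struct merge_ctx {              /* stand-in for the aggregated merge state */
    unsigned long start, end;
    int state;
};

static void dump_ctx(const struct merge_ctx *ctx, const char *reason)
{
    fprintf(stderr, "ctx %p dumped because: %s\n", (const void *)ctx, reason);
    fprintf(stderr, "  start=%#lx end=%#lx state=%d\n", ctx->start, ctx->end, ctx->state);
}

/* Like a plain warning, but dumps the context first when the condition holds. */
#define WARN_ON_CTX(cond, ctx) do {                                     \
    if (cond) {                                                         \
        dump_ctx(ctx, "WARN_ON_CTX(" #cond ")");                        \
        fprintf(stderr, "WARNING at %s:%d\n", __FILE__, __LINE__);      \
    }                                                                   \
} while (0)

int main(void)
{
    struct merge_ctx ctx = { .start = 0x2000, .end = 0x1000, .state = 0 };

    WARN_ON_CTX(ctx.start >= ctx.end, &ctx);    /* fires and dumps the state */
    return 0;
}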
[0]:https://lore.kernel.org/all/6774c98f.050a0220.25abdd.0991.GAE@google.com/ This patch (of 2): We use a number of asserts, enabled only when CONFIG_DEBUG_VM is set, during VMA merge operations to ensure state is as expected. However, when syzkaller or the like encounters these asserts, often the information provided by the report is insufficient to narrow down what the problem is. This might not be so much of an issue if the reported problem is reproducible, but if it is a rarely encountered race or some other case which precludes a repro, it is a very big problem (see [0] for the motivating case). It is therefore sensible to provide a means by which we can easily and conveniently dump a lot more information in these circumstances. The aggregation of merge state into a single struct threaded through the operation makes this trivial - we can simply introduce a variant on VM_WARN_ON() which takes the VMA merge state object (vmg) and use that to dump information. This patch therefore introduces VM_WARN_ON_VMG() which provides this functionality. It additionally dumps full mm state, VMA state for each of the three VMAs the vmg contains (prev, next, vma) and if CONFIG_DEBUG_VM_MAPLE_TREE is enabled, dumps the maple tree from the provided VMA iterator if non-NULL. This patch has no functional impact if CONFIG_DEBUG_VM is not set. [0]:https://lore.kernel.org/all/6774c98f.050a0220.25abdd.0991.GAE@google.com/ Link: https://lkml.kernel.org/r/cover.1735932169.git.lorenzo.stoakes@oracle.com Link: https://lkml.kernel.org/r/13b09b52d4d103ee86acaf0ae612539648ae29e0.1735932169.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Cc: David Hildenbrand Cc: Jann Horn Cc: Liam R. Howlett Cc: Matthew Wilcox (Oracle) Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- include/linux/mmdebug.h | 14 +++++++- mm/debug.c | 71 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 84 insertions(+), 1 deletion(-) diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index d7cb1e5ecbda..a0a3894900ed 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -9,10 +9,12 @@ struct page; struct vm_area_struct; struct mm_struct; struct vma_iterator; +struct vma_merge_struct; void dump_page(const struct page *page, const char *reason); void dump_vma(const struct vm_area_struct *vma); void dump_mm(const struct mm_struct *mm); +void dump_vmg(const struct vma_merge_struct *vmg, const char *reason); void vma_iter_dump_tree(const struct vma_iterator *vmi); #ifdef CONFIG_DEBUG_VM @@ -87,6 +89,15 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi); } \ unlikely(__ret_warn_once); \ }) +#define VM_WARN_ON_VMG(cond, vmg) ({ \ + int __ret_warn = !!(cond); \ + \ + if (unlikely(__ret_warn)) { \ + dump_vmg(vmg, "VM_WARN_ON_VMG(" __stringify(cond)")"); \ + WARN_ON(1); \ + } \ + unlikely(__ret_warn); \ +}) #define VM_WARN_ON(cond) (void)WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) @@ -104,9 +115,10 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi); #define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN_ON_VMG(cond, vmg) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) #define VM_WARN(cond, format...) 
BUILD_BUG_ON_INVALID(cond) -#endif +#endif /* CONFIG_DEBUG_VM */ #ifdef CONFIG_DEBUG_VM_IRQSOFF #define VM_WARN_ON_IRQS_ENABLED() WARN_ON_ONCE(!irqs_disabled()) diff --git a/mm/debug.c b/mm/debug.c index 9c4cbc3733b0..325d7bf22038 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -261,6 +261,77 @@ void dump_mm(const struct mm_struct *mm) } EXPORT_SYMBOL(dump_mm); +void dump_vmg(const struct vma_merge_struct *vmg, const char *reason) +{ + if (reason) + pr_warn("vmg %px dumped because: %s\n", vmg, reason); + + if (!vmg) { + pr_warn("vmg %px state: (NULL)\n", vmg); + return; + } + + pr_warn("vmg %px state: mm %px pgoff %lx\n" + "vmi %px [%lx,%lx)\n" + "prev %px next %px vma %px\n" + "start %lx end %lx flags %lx\n" + "file %px anon_vma %px policy %px\n" + "uffd_ctx %px\n" + "anon_name %px\n" + "merge_flags %x state %x\n", + vmg, vmg->mm, vmg->pgoff, + vmg->vmi, vmg->vmi ? vma_iter_addr(vmg->vmi) : 0, + vmg->vmi ? vma_iter_end(vmg->vmi) : 0, + vmg->prev, vmg->next, vmg->vma, + vmg->start, vmg->end, vmg->flags, + vmg->file, vmg->anon_vma, vmg->policy, +#ifdef CONFIG_USERFAULTFD + vmg->uffd_ctx.ctx, +#else + (void *)0, +#endif + vmg->anon_name, + (int)vmg->merge_flags, (int)vmg->state); + + if (vmg->mm) { + pr_warn("vmg %px mm:\n", vmg); + dump_mm(vmg->mm); + } else { + pr_warn("vmg %px mm: (NULL)\n", vmg); + } + + if (vmg->vma) { + pr_warn("vmg %px vma:\n", vmg); + dump_vma(vmg->vma); + } else { + pr_warn("vmg %px vma: (NULL)\n", vmg); + } + + if (vmg->prev) { + pr_warn("vmg %px prev:\n", vmg); + dump_vma(vmg->prev); + } else { + pr_warn("vmg %px prev: (NULL)\n", vmg); + } + + if (vmg->next) { + pr_warn("vmg %px next:\n", vmg); + dump_vma(vmg->next); + } else { + pr_warn("vmg %px next: (NULL)\n", vmg); + } + +#ifdef CONFIG_DEBUG_VM_MAPLE_TREE + if (vmg->vmi) { + pr_warn("vmg %px vmi:\n", vmg); + vma_iter_dump_tree(vmg->vmi); + } else { + pr_warn("vmg %px vmi: (NULL)\n", vmg); + } +#endif +} +EXPORT_SYMBOL(dump_vmg); + static bool page_init_poisoning __read_mostly = true; static int __init setup_vm_debug(char *str) From bad4b408af69f281be06592f77ca13f27afef885 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Fri, 3 Jan 2025 19:35:36 +0000 Subject: [PATCH 252/504] mm/debug: prefer VM_WARN_ON_VMG() to report VMG debug warnings Now we have VM_WARN_ON_VMG() to provide us with considerably more debug output when a debug assert fails, utilise it everywhere we can. This allows us to have considerably more information to go on when things go wrong, especially when a non-repro issue occurs as reported by syzkaller or the like. Link: https://lkml.kernel.org/r/986e45e9549e71284ac7a7fa878688568a94d58b.1735932169.git.lorenzo.stoakes@oracle.com Signed-off-by: Lorenzo Stoakes Cc: David Hildenbrand Cc: Jann Horn Cc: Liam R. Howlett Cc: Matthew Wilcox (Oracle) Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/vma.c | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/mm/vma.c b/mm/vma.c index 2ed118296164..0a5158d611e3 100644 --- a/mm/vma.c +++ b/mm/vma.c @@ -726,19 +726,20 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( bool expanded; mmap_assert_write_locked(vmg->mm); - VM_WARN_ON(!vma); /* We are modifying a VMA, so caller must specify. */ - VM_WARN_ON(vmg->next); /* We set this. */ - VM_WARN_ON(prev && start <= prev->vm_start); - VM_WARN_ON(start >= end); + VM_WARN_ON_VMG(!vma, vmg); /* We are modifying a VMA, so caller must specify. */ + VM_WARN_ON_VMG(vmg->next, vmg); /* We set this. 
*/ + VM_WARN_ON_VMG(prev && start <= prev->vm_start, vmg); + VM_WARN_ON_VMG(start >= end, vmg); + /* * If vma == prev, then we are offset into a VMA. Otherwise, if we are * not, we must span a portion of the VMA. */ - VM_WARN_ON(vma && ((vma != prev && vmg->start != vma->vm_start) || - vmg->end > vma->vm_end)); + VM_WARN_ON_VMG(vma && ((vma != prev && vmg->start != vma->vm_start) || + vmg->end > vma->vm_end), vmg); /* The vmi must be positioned within vmg->vma. */ - VM_WARN_ON(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start && - vma_iter_addr(vmg->vmi) < vma->vm_end)); + VM_WARN_ON_VMG(vma && !(vma_iter_addr(vmg->vmi) >= vma->vm_start && + vma_iter_addr(vmg->vmi) < vma->vm_end), vmg); vmg->state = VMA_MERGE_NOMERGE; @@ -855,9 +856,9 @@ static __must_check struct vm_area_struct *vma_merge_existing_range( pgoff_t pglen = PHYS_PFN(vmg->end - vmg->start); - VM_WARN_ON(!merge_right); + VM_WARN_ON_VMG(!merge_right, vmg); /* If we are offset into a VMA, then prev must be vma. */ - VM_WARN_ON(vmg->start > vma->vm_start && prev && vma != prev); + VM_WARN_ON_VMG(vmg->start > vma->vm_start && prev && vma != prev, vmg); if (merge_will_delete_vma) { vmg->vma = next; @@ -969,9 +970,9 @@ struct vm_area_struct *vma_merge_new_range(struct vma_merge_struct *vmg) bool just_expand = vmg->merge_flags & VMG_FLAG_JUST_EXPAND; mmap_assert_write_locked(vmg->mm); - VM_WARN_ON(vmg->vma); + VM_WARN_ON_VMG(vmg->vma, vmg); /* vmi must point at or before the gap. */ - VM_WARN_ON(vma_iter_addr(vmg->vmi) > end); + VM_WARN_ON_VMG(vma_iter_addr(vmg->vmi) > end, vmg); vmg->state = VMA_MERGE_NOMERGE; @@ -1053,7 +1054,7 @@ int vma_expand(struct vma_merge_struct *vmg) remove_next = true; /* This should already have been checked by this point. */ - VM_WARN_ON(!can_merge_remove_vma(next)); + VM_WARN_ON_VMG(!can_merge_remove_vma(next), vmg); vma_start_write(next); ret = dup_anon_vma(vma, next, &anon_dup); if (ret) @@ -1061,10 +1062,10 @@ int vma_expand(struct vma_merge_struct *vmg) } /* Not merging but overwriting any part of next is not handled. */ - VM_WARN_ON(next && !remove_next && - next != vma && vmg->end > next->vm_start); + VM_WARN_ON_VMG(next && !remove_next && + next != vma && vmg->end > next->vm_start, vmg); /* Only handles expanding */ - VM_WARN_ON(vma->vm_start < vmg->start || vma->vm_end > vmg->end); + VM_WARN_ON_VMG(vma->vm_start < vmg->start || vma->vm_end > vmg->end, vmg); if (commit_merge(vmg, NULL, remove_next ? next : NULL, NULL, 0, true)) goto nomem; From c928eb2126713be0a25b42633c216be69c01eb18 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Fri, 3 Jan 2025 18:44:10 +0000 Subject: [PATCH 253/504] mm: move common part of pagetable_*_ctor to helper Patch series "Account page tables at all levels". This series should be considered in conjunction with Qi's series [1]. Together, they ensure that page table ctor/dtor are called at all levels (PTE to PGD) and all architectures, where page tables are regular pages. Besides the improvement in accounting and general cleanup, this also create a single place where construction/destruction hooks can be called for all page tables, namely the now-generic pagetable_dtor() introduced by Qi, and __pagetable_ctor() introduced in this series. [1] https://lore.kernel.org/linux-mm/cover.1735549103.git.zhengqi.arch@bytedance.com/ This patch (of 6): pagetable_*_ctor all have the same basic implementation. Move the common part to a helper to reduce duplication. 
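One practical way to watch the accounting this series tightens (illustrative only, not part of the patch): NR_PAGETABLE, which pagetable_*_ctor()/pagetable_dtor() adjust, is reported as the "PageTables:" line of /proc/meminfo, so a trivial reader shows the counter that gains coverage as more levels and architectures call the ctor/dtor.

#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[256];
    FILE *f = fopen("/proc/meminfo", "r");

    if (!f) {
        perror("/proc/meminfo");
        return 1;
    }
    while (fgets(line, sizeof(line), f))
        if (strstr(line, "PageTables"))   /* matches PageTables: (and SecPageTables:) */
            fputs(line, stdout);
    fclose(f);
    return 0;
}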
Link: https://lkml.kernel.org/r/20250103184415.2744423-1-kevin.brodsky@arm.com Link: https://lkml.kernel.org/r/20250103184415.2744423-2-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Acked-by: Dave Hansen Acked-by: Qi Zheng Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Linus Walleij Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Peter Zijlstra Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Will Deacon Cc: Ingo Molnar Signed-off-by: Andrew Morton --- include/linux/mm.h | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/include/linux/mm.h b/include/linux/mm.h index 78ec9cc909d2..d52576c82ea1 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3076,6 +3076,14 @@ static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; } static inline void ptlock_free(struct ptdesc *ptdesc) {} #endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */ +static inline void __pagetable_ctor(struct ptdesc *ptdesc) +{ + struct folio *folio = ptdesc_folio(ptdesc); + + __folio_set_pgtable(folio); + lruvec_stat_add_folio(folio, NR_PAGETABLE); +} + static inline void pagetable_dtor(struct ptdesc *ptdesc) { struct folio *folio = ptdesc_folio(ptdesc); @@ -3093,12 +3101,9 @@ static inline void pagetable_dtor_free(struct ptdesc *ptdesc) static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc) { - struct folio *folio = ptdesc_folio(ptdesc); - if (!ptlock_init(ptdesc)) return false; - __folio_set_pgtable(folio); - lruvec_stat_add_folio(folio, NR_PAGETABLE); + __pagetable_ctor(ptdesc); return true; } @@ -3202,13 +3207,10 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd) static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc) { - struct folio *folio = ptdesc_folio(ptdesc); - if (!pmd_ptlock_init(ptdesc)) return false; - __folio_set_pgtable(folio); ptdesc_pmd_pts_init(ptdesc); - lruvec_stat_add_folio(folio, NR_PAGETABLE); + __pagetable_ctor(ptdesc); return true; } @@ -3233,18 +3235,12 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud) static inline void pagetable_pud_ctor(struct ptdesc *ptdesc) { - struct folio *folio = ptdesc_folio(ptdesc); - - __folio_set_pgtable(folio); - lruvec_stat_add_folio(folio, NR_PAGETABLE); + __pagetable_ctor(ptdesc); } static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc) { - struct folio *folio = ptdesc_folio(ptdesc); - - __folio_set_pgtable(folio); - lruvec_stat_add_folio(folio, NR_PAGETABLE); + __pagetable_ctor(ptdesc); } extern void __init pagecache_init(void); From 37be7c550fbc28d6f3c540eef6e726e24c7c587d Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Fri, 3 Jan 2025 18:44:11 +0000 Subject: [PATCH 254/504] parisc: mm: ensure pagetable_pmd_[cd]tor are called The implementation of pmd_{alloc_one,free} on parisc requires a non-zero allocation order, but is completely standard aside from that. Let's reuse the generic implementation of pmd_alloc_one(). Explicit zeroing is not needed as GFP_PGTABLE_KERNEL includes __GFP_ZERO. The generic pmd_free() can handle higher allocation orders so we don't need to define our own. These changes ensure that pagetable_pmd_[cd]tor are called, improving the accounting of page table pages. 
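A quick aside on the "non-zero allocation order" wording, for readers less used to the buddy allocator (illustrative arithmetic, nothing here comes from the patch): an order-N allocation is 1 << N contiguous base pages, which is how a PMD table larger than a single page is expressed to pagetable_alloc().

#include <stdio.h>

int main(void)
{
    unsigned long page_size = 4096;   /* assumed base page size for the example */

    for (unsigned int order = 0; order <= 3; order++)   /* order N = 1 << N pages */
        printf("order %u: %2lu pages, %6lu bytes\n",
               order, 1UL << order, page_size << order);
    return 0;
}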
Link: https://lkml.kernel.org/r/20250103184415.2744423-3-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Acked-by: Dave Hansen Acked-by: Qi Zheng Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Ingo Molnar Cc: Linus Walleij Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Peter Zijlstra Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/parisc/include/asm/pgalloc.h | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index e3e142b1c5c5..3e8dbd79670b 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -11,7 +11,6 @@ #include #define __HAVE_ARCH_PMD_ALLOC_ONE -#define __HAVE_ARCH_PMD_FREE #define __HAVE_ARCH_PGD_FREE #include @@ -46,17 +45,19 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) { - pmd_t *pmd; + struct ptdesc *ptdesc; + gfp_t gfp = GFP_PGTABLE_USER; - pmd = (pmd_t *)__get_free_pages(GFP_PGTABLE_KERNEL, PMD_TABLE_ORDER); - if (likely(pmd)) - memset ((void *)pmd, 0, PAGE_SIZE << PMD_TABLE_ORDER); - return pmd; -} - -static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) -{ - free_pages((unsigned long)pmd, PMD_TABLE_ORDER); + if (mm == &init_mm) + gfp = GFP_PGTABLE_KERNEL; + ptdesc = pagetable_alloc(gfp, PMD_TABLE_ORDER); + if (!ptdesc) + return NULL; + if (!pagetable_pmd_ctor(ptdesc)) { + pagetable_free(ptdesc); + return NULL; + } + return ptdesc_address(ptdesc); } #endif From eea44bd92387b5dacd7dc5689bd047826d48ec7f Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Fri, 3 Jan 2025 18:44:12 +0000 Subject: [PATCH 255/504] m68k: mm: add calls to pagetable_pmd_[cd]tor get_pointer_table() and free_pointer_table() already special-case TABLE_PTE to call pagetable_pte_[cd]tor. Let's do the same at PMD level to improve accounting further. TABLE_PGD and TABLE_PMD are currently defined to the same value, so we first need to separate them. That also implies separating ptable_list for PMD/PGD levels. 
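A toy example of why the aliased enumerators get in the way once PGD and PMD need different treatment (the *_old/*_new names are invented for this sketch): equal values cannot be told apart at runtime, and a switch could not even list both as case labels, whereas distinct values allow the per-level dispatch the patch adds to get_pointer_table().

#include <stdio.h>

/* Before: PGD and PMD shared one value, so per-type dispatch could not tell
 * them apart (case OLD_PGD and case OLD_PMD would be duplicate case labels). */
enum table_types_old { OLD_PGD = 0, OLD_PMD = 0, OLD_PTE = 1 };

/* After: distinct values, so each level can get its own list and ctor handling. */
enum table_types_new { NEW_PGD, NEW_PMD, NEW_PTE };

static const char *ctor_for(enum table_types_new type)
{
    switch (type) {
    case NEW_PTE: return "pagetable_pte_ctor";
    case NEW_PMD: return "pagetable_pmd_ctor";
    default:      return "none";
    }
}

int main(void)
{
    printf("OLD_PGD == OLD_PMD: %d\n", OLD_PGD == OLD_PMD);   /* prints 1 */
    printf("ctor for a PMD table: %s\n", ctor_for(NEW_PMD));
    return 0;
}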
Link: https://lkml.kernel.org/r/20250103184415.2744423-4-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Acked-by: Dave Hansen Acked-by: Qi Zheng Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Ingo Molnar Cc: Linus Walleij Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Peter Zijlstra Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/m68k/include/asm/motorola_pgalloc.h | 6 +++--- arch/m68k/mm/motorola.c | 17 ++++++++++++----- 2 files changed, 15 insertions(+), 8 deletions(-) diff --git a/arch/m68k/include/asm/motorola_pgalloc.h b/arch/m68k/include/asm/motorola_pgalloc.h index 74a817d9387f..5abe7da8ac5a 100644 --- a/arch/m68k/include/asm/motorola_pgalloc.h +++ b/arch/m68k/include/asm/motorola_pgalloc.h @@ -9,9 +9,9 @@ extern void mmu_page_ctor(void *page); extern void mmu_page_dtor(void *page); enum m68k_table_types { - TABLE_PGD = 0, - TABLE_PMD = 0, /* same size as PGD */ - TABLE_PTE = 1, + TABLE_PGD, + TABLE_PMD, + TABLE_PTE, }; extern void init_pointer_table(void *table, int type); diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index eab50dda14ee..6c09ccb72e8b 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -97,17 +97,19 @@ void mmu_page_dtor(void *page) typedef struct list_head ptable_desc; -static struct list_head ptable_list[2] = { +static struct list_head ptable_list[3] = { LIST_HEAD_INIT(ptable_list[0]), LIST_HEAD_INIT(ptable_list[1]), + LIST_HEAD_INIT(ptable_list[2]), }; #define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru)) #define PD_PAGE(ptable) (list_entry(ptable, struct page, lru)) #define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index) -static const int ptable_shift[2] = { - 7+2, /* PGD, PMD */ +static const int ptable_shift[3] = { + 7+2, /* PGD */ + 7+2, /* PMD */ 6+2, /* PTE */ }; @@ -156,12 +158,17 @@ void *get_pointer_table(int type) if (!(page = (void *)get_zeroed_page(GFP_KERNEL))) return NULL; - if (type == TABLE_PTE) { + switch (type) { + case TABLE_PTE: /* * m68k doesn't have SPLIT_PTE_PTLOCKS for not having * SMP. */ pagetable_pte_ctor(virt_to_ptdesc(page)); + break; + case TABLE_PMD: + pagetable_pmd_ctor(virt_to_ptdesc(page)); + break; } mmu_page_ctor(page); @@ -200,7 +207,7 @@ int free_pointer_table(void *table, int type) /* all tables in page are free, free page */ list_del(dp); mmu_page_dtor((void *)page); - if (type == TABLE_PTE) + if (type == TABLE_PTE || type == TABLE_PMD) pagetable_dtor(virt_to_ptdesc((void *)page)); free_page (page); return 1; From 3d82870cb0ebb7840a54ccabac79695fe085c5fa Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Fri, 3 Jan 2025 18:44:13 +0000 Subject: [PATCH 256/504] ARM: mm: rename PGD helpers Generic implementations of __pgd_alloc and __pgd_free are about to be introduced. Rename the macros in arch/arm/mm/pgd.c to avoid clashes. While we're at it, also pass down the mm as argument to those helpers, as it will be needed to call the generic __pgd_{alloc,free}. 
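For context on the order-2 allocation kept by the rename (back-of-the-envelope arithmetic, not taken from the patch): the classic non-LPAE ARM first-level table has 4096 short-descriptor entries of 4 bytes, i.e. 16 KiB, which with 4 KiB pages is an order-2 allocation; the renamed _pgd_alloc(mm) keeps that order and gains the mm argument it will later forward to the generic __pgd_alloc(mm, order).

#include <stdio.h>

int main(void)
{
    unsigned long page_size = 4096;    /* assumed 4 KiB base pages */
    unsigned long entries = 4096;      /* ARM short-descriptor L1 entries */
    unsigned long entry_size = 4;      /* bytes per descriptor */
    unsigned long bytes = entries * entry_size;
    unsigned int order = 0;

    while ((page_size << order) < bytes)   /* smallest order that fits the table */
        order++;
    printf("PGD size %lu bytes -> allocation order %u\n", bytes, order);   /* 16384 -> 2 */
    return 0;
}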
Link: https://lkml.kernel.org/r/20250103184415.2744423-5-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Acked-by: Dave Hansen Acked-by: Qi Zheng Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Ingo Molnar Cc: Linus Walleij Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Peter Zijlstra Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/arm/mm/pgd.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index f8e9bc58a84f..2a1077747758 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -17,11 +17,11 @@ #include "mm.h" #ifdef CONFIG_ARM_LPAE -#define __pgd_alloc() kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL) -#define __pgd_free(pgd) kfree(pgd) +#define _pgd_alloc(mm) kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL) +#define _pgd_free(mm, pgd) kfree(pgd) #else -#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2) -#define __pgd_free(pgd) free_pages((unsigned long)pgd, 2) +#define _pgd_alloc(mm) (pgd_t *)__get_free_pages(GFP_KERNEL, 2) +#define _pgd_free(mm, pgd) free_pages((unsigned long)pgd, 2) #endif /* @@ -35,7 +35,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) pmd_t *new_pmd, *init_pmd; pte_t *new_pte, *init_pte; - new_pgd = __pgd_alloc(); + new_pgd = _pgd_alloc(mm); if (!new_pgd) goto no_pgd; @@ -134,7 +134,7 @@ no_pmd: no_pud: p4d_free(mm, new_p4d); no_p4d: - __pgd_free(new_pgd); + _pgd_free(mm, new_pgd); no_pgd: return NULL; } @@ -207,5 +207,5 @@ no_pgd: p4d_free(mm, p4d); } #endif - __pgd_free(pgd_base); + _pgd_free(mm, pgd_base); } From 96da344d9ef6d38f1d4ebf327fe5a8600b8f882e Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Fri, 3 Jan 2025 18:44:14 +0000 Subject: [PATCH 257/504] asm-generic: pgalloc: provide generic __pgd_{alloc,free} We already have a generic implementation of alloc/free up to P4D level, as well as pgd_free(). Let's finish the work and add a generic PGD-level alloc helper as well. Unlike at lower levels, almost all architectures need some specific magic at PGD level (typically initialising PGD entries), so introducing a generic pgd_alloc() isn't worth it. Instead we introduce two new helpers, __pgd_alloc() and __pgd_free(), and make use of them in the arch-specific pgd_alloc() and pgd_free() wherever possible. To accommodate as many arch as possible, __pgd_alloc() takes a page allocation order. Because pagetable_alloc() allocates zeroed pages, explicit zeroing in pgd_alloc() becomes redundant and we can get rid of it. Some trivial implementations of pgd_free() also become unnecessary once __pgd_alloc() is used; remove them. Another small improvement is consistent accounting of PGD pages by using GFP_PGTABLE_{USER,KERNEL} as appropriate. Not all PGD allocations can be handled by the generic helpers. In particular, multiple architectures allocate PGDs from a kmem_cache, and those PGDs may not be page-sized. 
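As a rough, hedged sketch of the helpers this adds (a kernel-style fragment, not stand-alone code; the *_sketch names are mine and the real bodies in include/asm-generic/pgalloc.h may differ): pick GFP_PGTABLE_KERNEL or GFP_PGTABLE_USER from the mm, allocate 2^order zeroed pages through pagetable_alloc(), and free through pagetable_free().

/* Sketch only -- not the actual kernel implementation. */
static inline pgd_t *__pgd_alloc_sketch(struct mm_struct *mm, unsigned int order)
{
    /* Kernel page tables are not charged to a memcg; user ones are. */
    gfp_t gfp = (mm == &init_mm) ? GFP_PGTABLE_KERNEL : GFP_PGTABLE_USER;
    struct ptdesc *ptdesc = pagetable_alloc(gfp, order);   /* zeroed pages */

    return ptdesc ? (pgd_t *)ptdesc_address(ptdesc) : NULL;
}

static inline void __pgd_free_sketch(struct mm_struct *mm, pgd_t *pgd)
{
    pagetable_free(virt_to_ptdesc(pgd));
}

With something of that shape in place, an arch-specific pgd_alloc() reduces to calling __pgd_alloc(mm, order) and copying the kernel entries, which is essentially what the alpha, arc, openrisc and um hunks below do.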
Link: https://lkml.kernel.org/r/20250103184415.2744423-6-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Acked-by: Dave Hansen Acked-by: Qi Zheng Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Ingo Molnar Cc: Linus Walleij Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Peter Zijlstra Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/alpha/mm/init.c | 2 +- arch/arc/include/asm/pgalloc.h | 9 ++------- arch/arm/mm/pgd.c | 8 +++----- arch/arm64/mm/pgd.c | 4 ++-- arch/csky/include/asm/pgalloc.h | 2 +- arch/hexagon/include/asm/pgalloc.h | 2 +- arch/loongarch/mm/pgtable.c | 7 +++---- arch/m68k/include/asm/sun3_pgalloc.h | 2 +- arch/microblaze/include/asm/pgalloc.h | 7 +------ arch/mips/include/asm/pgalloc.h | 6 ------ arch/mips/mm/pgtable.c | 8 +++----- arch/nios2/mm/pgtable.c | 3 ++- arch/openrisc/include/asm/pgalloc.h | 6 ++---- arch/parisc/include/asm/pgalloc.h | 16 +--------------- arch/riscv/include/asm/pgalloc.h | 3 +-- arch/um/kernel/mem.c | 7 +++---- arch/x86/mm/pgtable.c | 24 +++++++++++------------- arch/xtensa/include/asm/pgalloc.h | 2 +- include/asm-generic/pgalloc.h | 27 ++++++++++++++++++++++++++- 19 files changed, 65 insertions(+), 80 deletions(-) diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c index 4fe618446e4c..61c2198b1359 100644 --- a/arch/alpha/mm/init.c +++ b/arch/alpha/mm/init.c @@ -42,7 +42,7 @@ pgd_alloc(struct mm_struct *mm) { pgd_t *ret, *init; - ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + ret = __pgd_alloc(mm, 0); init = pgd_offset(&init_mm, 0UL); if (ret) { #ifdef CONFIG_ALPHA_LARGE_VMALLOC diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h index 096b8ef58edb..dfae070fe8d5 100644 --- a/arch/arc/include/asm/pgalloc.h +++ b/arch/arc/include/asm/pgalloc.h @@ -53,19 +53,14 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL); + pgd_t *ret = __pgd_alloc(mm, 0); if (ret) { int num, num2; - num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE; - memzero(ret, num * sizeof(pgd_t)); + num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE; num2 = VMALLOC_SIZE / PGDIR_SIZE; memcpy(ret + num, swapper_pg_dir + num, num2 * sizeof(pgd_t)); - - memzero(ret + num + num2, - (PTRS_PER_PGD - num - num2) * sizeof(pgd_t)); - } return ret; } diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index 2a1077747758..4eb81b7ed03a 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -17,11 +17,11 @@ #include "mm.h" #ifdef CONFIG_ARM_LPAE -#define _pgd_alloc(mm) kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL) +#define _pgd_alloc(mm) kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL | __GFP_ZERO) #define _pgd_free(mm, pgd) kfree(pgd) #else -#define _pgd_alloc(mm) (pgd_t *)__get_free_pages(GFP_KERNEL, 2) -#define _pgd_free(mm, pgd) free_pages((unsigned long)pgd, 2) +#define _pgd_alloc(mm) __pgd_alloc(mm, 2) +#define _pgd_free(mm, pgd) __pgd_free(mm, pgd) #endif /* @@ -39,8 +39,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm) if (!new_pgd) goto no_pgd; - memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); - /* * Copy over the kernel and IO PGD entries */ diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c index 0c501cabc238..8160cff35089 100644 --- a/arch/arm64/mm/pgd.c +++ b/arch/arm64/mm/pgd.c @@ -33,7 +33,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) gfp_t gfp = GFP_PGTABLE_USER; if (pgdir_is_page_size()) - return (pgd_t 
*)__get_free_page(gfp); + return __pgd_alloc(mm, 0); else return kmem_cache_alloc(pgd_cache, gfp); } @@ -41,7 +41,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) void pgd_free(struct mm_struct *mm, pgd_t *pgd) { if (pgdir_is_page_size()) - free_page((unsigned long)pgd); + __pgd_free(mm, pgd); else kmem_cache_free(pgd_cache, pgd); } diff --git a/arch/csky/include/asm/pgalloc.h b/arch/csky/include/asm/pgalloc.h index f1ce5b7b28f2..bf8400c28b5a 100644 --- a/arch/csky/include/asm/pgalloc.h +++ b/arch/csky/include/asm/pgalloc.h @@ -44,7 +44,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) pgd_t *ret; pgd_t *init; - ret = (pgd_t *) __get_free_page(GFP_KERNEL); + ret = __pgd_alloc(mm, 0); if (ret) { init = pgd_offset(&init_mm, 0UL); pgd_init((unsigned long *)ret); diff --git a/arch/hexagon/include/asm/pgalloc.h b/arch/hexagon/include/asm/pgalloc.h index 40e42a0e7167..1ee5f5f157ca 100644 --- a/arch/hexagon/include/asm/pgalloc.h +++ b/arch/hexagon/include/asm/pgalloc.h @@ -22,7 +22,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *pgd; - pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + pgd = __pgd_alloc(mm, 0); /* * There may be better ways to do this, but to ensure diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c index 3fa69b23ff84..22a94bb3e6e8 100644 --- a/arch/loongarch/mm/pgtable.c +++ b/arch/loongarch/mm/pgtable.c @@ -23,11 +23,10 @@ EXPORT_SYMBOL(tlb_virt_to_page); pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *init, *ret = NULL; - struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0); + pgd_t *init, *ret; - if (ptdesc) { - ret = (pgd_t *)ptdesc_address(ptdesc); + ret = __pgd_alloc(mm, 0); + if (ret) { init = pgd_offset(&init_mm, 0UL); pgd_init(ret); memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h index 2b626cb3ad0a..f1ae4ed890db 100644 --- a/arch/m68k/include/asm/sun3_pgalloc.h +++ b/arch/m68k/include/asm/sun3_pgalloc.h @@ -43,7 +43,7 @@ static inline pgd_t * pgd_alloc(struct mm_struct *mm) { pgd_t *new_pgd; - new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL); + new_pgd = __pgd_alloc(mm, 0); memcpy(new_pgd, swapper_pg_dir, PAGE_SIZE); memset(new_pgd, 0, (PAGE_OFFSET >> PGDIR_SHIFT)); return new_pgd; diff --git a/arch/microblaze/include/asm/pgalloc.h b/arch/microblaze/include/asm/pgalloc.h index 6c33b05f730f..084a8a0dc239 100644 --- a/arch/microblaze/include/asm/pgalloc.h +++ b/arch/microblaze/include/asm/pgalloc.h @@ -21,12 +21,7 @@ extern void __bad_pte(pmd_t *pmd); -static inline pgd_t *get_pgd(void) -{ - return (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, 0); -} - -#define pgd_alloc(mm) get_pgd() +#define pgd_alloc(mm) __pgd_alloc(mm, 0) extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm); diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h index 36d9805033c4..26c7a6ede983 100644 --- a/arch/mips/include/asm/pgalloc.h +++ b/arch/mips/include/asm/pgalloc.h @@ -15,7 +15,6 @@ #define __HAVE_ARCH_PMD_ALLOC_ONE #define __HAVE_ARCH_PUD_ALLOC_ONE -#define __HAVE_ARCH_PGD_FREE #include static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, @@ -49,11 +48,6 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) extern void pgd_init(void *addr); extern pgd_t *pgd_alloc(struct mm_struct *mm); -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - pagetable_free(virt_to_ptdesc(pgd)); -} - #define __pte_free_tlb(tlb, pte, address) \ do { \ 
pagetable_dtor(page_ptdesc(pte)); \ diff --git a/arch/mips/mm/pgtable.c b/arch/mips/mm/pgtable.c index 1506e458040d..10835414819f 100644 --- a/arch/mips/mm/pgtable.c +++ b/arch/mips/mm/pgtable.c @@ -10,12 +10,10 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *init, *ret = NULL; - struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, - PGD_TABLE_ORDER); + pgd_t *init, *ret; - if (ptdesc) { - ret = ptdesc_address(ptdesc); + ret = __pgd_alloc(mm, PGD_TABLE_ORDER); + if (ret) { init = pgd_offset(&init_mm, 0UL); pgd_init(ret); memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD, diff --git a/arch/nios2/mm/pgtable.c b/arch/nios2/mm/pgtable.c index 7c76e8a7447a..6470ed378782 100644 --- a/arch/nios2/mm/pgtable.c +++ b/arch/nios2/mm/pgtable.c @@ -11,6 +11,7 @@ #include #include +#include /* pteaddr: * ptbase | vpn* | zero @@ -54,7 +55,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *ret, *init; - ret = (pgd_t *) __get_free_page(GFP_KERNEL); + ret = __pgd_alloc(mm, 0); if (ret) { init = pgd_offset(&init_mm, 0UL); pgd_init(ret); diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h index 596e2355824e..3372f4e6ab4b 100644 --- a/arch/openrisc/include/asm/pgalloc.h +++ b/arch/openrisc/include/asm/pgalloc.h @@ -41,15 +41,13 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, */ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL); + pgd_t *ret = __pgd_alloc(mm, 0); - if (ret) { - memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); + if (ret) memcpy(ret + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); - } return ret; } diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index 3e8dbd79670b..2ca74a56415c 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h @@ -11,26 +11,12 @@ #include #define __HAVE_ARCH_PMD_ALLOC_ONE -#define __HAVE_ARCH_PGD_FREE #include /* Allocate the top level pgd (page directory) */ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *pgd; - - pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_TABLE_ORDER); - if (unlikely(pgd == NULL)) - return NULL; - - memset(pgd, 0, PAGE_SIZE << PGD_TABLE_ORDER); - - return pgd; -} - -static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) -{ - free_pages((unsigned long)pgd, PGD_TABLE_ORDER); + return __pgd_alloc(mm, PGD_TABLE_ORDER); } #if CONFIG_PGTABLE_LEVELS == 3 diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h index c8907b831711..3e2aebea6312 100644 --- a/arch/riscv/include/asm/pgalloc.h +++ b/arch/riscv/include/asm/pgalloc.h @@ -130,9 +130,8 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *pgd; - pgd = (pgd_t *)__get_free_page(GFP_KERNEL); + pgd = __pgd_alloc(mm, 0); if (likely(pgd != NULL)) { - memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); /* Copy kernel mappings */ sync_kernel_mappings(pgd); } diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 53248ed04771..d98812907493 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -214,14 +214,13 @@ void free_initmem(void) pgd_t *pgd_alloc(struct mm_struct *mm) { - pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL); + pgd_t *pgd = __pgd_alloc(mm, 0); - if (pgd) { - memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); + if (pgd) memcpy(pgd + USER_PTRS_PER_PGD, swapper_pg_dir + USER_PTRS_PER_PGD, (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); - } + return 
pgd; } diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index c02aa0427a6a..1fef5ad32d5a 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -397,15 +397,14 @@ void __init pgtable_cache_init(void) SLAB_PANIC, NULL); } -static inline pgd_t *_pgd_alloc(void) +static inline pgd_t *_pgd_alloc(struct mm_struct *mm) { /* * If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen domain. * We allocate one page for pgd. */ if (!SHARED_KERNEL_PMD) - return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER, - PGD_ALLOCATION_ORDER); + return __pgd_alloc(mm, PGD_ALLOCATION_ORDER); /* * Now PAE kernel is not running as a Xen domain. We can allocate @@ -414,24 +413,23 @@ static inline pgd_t *_pgd_alloc(void) return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_USER); } -static inline void _pgd_free(pgd_t *pgd) +static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd) { if (!SHARED_KERNEL_PMD) - free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER); + __pgd_free(mm, pgd); else kmem_cache_free(pgd_cache, pgd); } #else -static inline pgd_t *_pgd_alloc(void) +static inline pgd_t *_pgd_alloc(struct mm_struct *mm) { - return (pgd_t *)__get_free_pages(GFP_PGTABLE_USER, - PGD_ALLOCATION_ORDER); + return __pgd_alloc(mm, PGD_ALLOCATION_ORDER); } -static inline void _pgd_free(pgd_t *pgd) +static inline void _pgd_free(struct mm_struct *mm, pgd_t *pgd) { - free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER); + __pgd_free(mm, pgd); } #endif /* CONFIG_X86_PAE */ @@ -441,7 +439,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS]; pmd_t *pmds[MAX_PREALLOCATED_PMDS]; - pgd = _pgd_alloc(); + pgd = _pgd_alloc(mm); if (pgd == NULL) goto out; @@ -484,7 +482,7 @@ out_free_pmds: if (sizeof(pmds) != 0) free_pmds(mm, pmds, PREALLOCATED_PMDS); out_free_pgd: - _pgd_free(pgd); + _pgd_free(mm, pgd); out: return NULL; } @@ -494,7 +492,7 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd) pgd_mop_up_pmds(mm, pgd); pgd_dtor(pgd); paravirt_pgd_free(mm, pgd); - _pgd_free(pgd); + _pgd_free(mm, pgd); } /* diff --git a/arch/xtensa/include/asm/pgalloc.h b/arch/xtensa/include/asm/pgalloc.h index 7fc0f9126dd3..1919ee9c3dd6 100644 --- a/arch/xtensa/include/asm/pgalloc.h +++ b/arch/xtensa/include/asm/pgalloc.h @@ -29,7 +29,7 @@ static inline pgd_t* pgd_alloc(struct mm_struct *mm) { - return (pgd_t*) __get_free_page(GFP_KERNEL | __GFP_ZERO); + return __pgd_alloc(mm, 0); } static inline void ptes_clear(pte_t *ptep) diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index e3977ddca15e..de4df14158e6 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -258,10 +258,35 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) #endif /* CONFIG_PGTABLE_LEVELS > 4 */ +static inline pgd_t *__pgd_alloc_noprof(struct mm_struct *mm, unsigned int order) +{ + gfp_t gfp = GFP_PGTABLE_USER; + struct ptdesc *ptdesc; + + if (mm == &init_mm) + gfp = GFP_PGTABLE_KERNEL; + gfp &= ~__GFP_HIGHMEM; + + ptdesc = pagetable_alloc_noprof(gfp, order); + if (!ptdesc) + return NULL; + + return ptdesc_address(ptdesc); +} +#define __pgd_alloc(...) 
alloc_hooks(__pgd_alloc_noprof(__VA_ARGS__)) + +static inline void __pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + struct ptdesc *ptdesc = virt_to_ptdesc(pgd); + + BUG_ON((unsigned long)pgd & (PAGE_SIZE-1)); + pagetable_free(ptdesc); +} + #ifndef __HAVE_ARCH_PGD_FREE static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - pagetable_free(virt_to_ptdesc(pgd)); + __pgd_free(mm, pgd); } #endif From a174ff666df80344e17c238481fcda0f358cd719 Mon Sep 17 00:00:00 2001 From: Kevin Brodsky Date: Fri, 3 Jan 2025 18:44:15 +0000 Subject: [PATCH 258/504] mm: introduce ctor/dtor at PGD level Following on from the introduction of P4D-level ctor/dtor, let's finish the job and introduce ctor/dtor at PGD level. The incurred improvement in page accounting is minimal - the main motivation is to create a single, generic place where construction/destruction hooks can be added for all page table pages. This patch should cover all architectures and all configurations where PGDs are one or more regular pages. This excludes any configuration where PGDs are allocated from a kmem_cache object. Link: https://lkml.kernel.org/r/20250103184415.2744423-7-kevin.brodsky@arm.com Signed-off-by: Kevin Brodsky Acked-by: Dave Hansen Acked-by: Qi Zheng Cc: Andy Lutomirski Cc: Catalin Marinas Cc: Ingo Molnar Cc: Linus Walleij Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (Microsoft) Cc: Peter Zijlstra Cc: Ryan Roberts Cc: Thomas Gleixner Cc: Will Deacon Signed-off-by: Andrew Morton --- arch/m68k/include/asm/mcf_pgalloc.h | 3 ++- arch/m68k/mm/motorola.c | 6 ++++-- arch/s390/include/asm/pgalloc.h | 9 ++++++++- include/asm-generic/pgalloc.h | 3 ++- include/linux/mm.h | 5 +++++ 5 files changed, 21 insertions(+), 5 deletions(-) diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h index 22d6c1fcabfb..4c648b51e7fd 100644 --- a/arch/m68k/include/asm/mcf_pgalloc.h +++ b/arch/m68k/include/asm/mcf_pgalloc.h @@ -73,7 +73,7 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pgtable) static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { - pagetable_free(virt_to_ptdesc(pgd)); + pagetable_dtor_free(virt_to_ptdesc(pgd)); } static inline pgd_t *pgd_alloc(struct mm_struct *mm) @@ -84,6 +84,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) if (!ptdesc) return NULL; + pagetable_pgd_ctor(ptdesc); new_pgd = ptdesc_address(ptdesc); memcpy(new_pgd, swapper_pg_dir, PTRS_PER_PGD * sizeof(pgd_t)); diff --git a/arch/m68k/mm/motorola.c b/arch/m68k/mm/motorola.c index 6c09ccb72e8b..73651e093c4d 100644 --- a/arch/m68k/mm/motorola.c +++ b/arch/m68k/mm/motorola.c @@ -169,6 +169,9 @@ void *get_pointer_table(int type) case TABLE_PMD: pagetable_pmd_ctor(virt_to_ptdesc(page)); break; + case TABLE_PGD: + pagetable_pgd_ctor(virt_to_ptdesc(page)); + break; } mmu_page_ctor(page); @@ -207,8 +210,7 @@ int free_pointer_table(void *table, int type) /* all tables in page are free, free page */ list_del(dp); mmu_page_dtor((void *)page); - if (type == TABLE_PTE || type == TABLE_PMD) - pagetable_dtor(virt_to_ptdesc((void *)page)); + pagetable_dtor(virt_to_ptdesc((void *)page)); free_page (page); return 1; } else if (ptable_list[type].next != dp) { diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index 5fced6d3c36b..b19b6ed2ab53 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h @@ -130,11 +130,18 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) static inline pgd_t *pgd_alloc(struct mm_struct *mm) { - return (pgd_t 
*) crst_table_alloc(mm); + unsigned long *table = crst_table_alloc(mm); + + if (!table) + return NULL; + pagetable_pgd_ctor(virt_to_ptdesc(table)); + + return (pgd_t *) table; } static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) { + pagetable_dtor(virt_to_ptdesc(pgd)); crst_table_free(mm, (unsigned long *) pgd); } diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index de4df14158e6..892ece4558a2 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -271,6 +271,7 @@ static inline pgd_t *__pgd_alloc_noprof(struct mm_struct *mm, unsigned int order if (!ptdesc) return NULL; + pagetable_pgd_ctor(ptdesc); return ptdesc_address(ptdesc); } #define __pgd_alloc(...) alloc_hooks(__pgd_alloc_noprof(__VA_ARGS__)) @@ -280,7 +281,7 @@ static inline void __pgd_free(struct mm_struct *mm, pgd_t *pgd) struct ptdesc *ptdesc = virt_to_ptdesc(pgd); BUG_ON((unsigned long)pgd & (PAGE_SIZE-1)); - pagetable_free(ptdesc); + pagetable_dtor_free(ptdesc); } #ifndef __HAVE_ARCH_PGD_FREE diff --git a/include/linux/mm.h b/include/linux/mm.h index d52576c82ea1..ac78425e9838 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3243,6 +3243,11 @@ static inline void pagetable_p4d_ctor(struct ptdesc *ptdesc) __pagetable_ctor(ptdesc); } +static inline void pagetable_pgd_ctor(struct ptdesc *ptdesc) +{ + __pagetable_ctor(ptdesc); +} + extern void __init pagecache_init(void); extern void free_initmem(void); From 30404415cf173fa2d2d49ee252f363d8f393d852 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 3 Jan 2025 09:43:51 -0800 Subject: [PATCH 259/504] mm/damon/sysfs-schemes: remove unnecessary schemes existence check in damon_sysfs_schemes_clear_regions() Patch series "mm/damon: replace most damon_callback usages in sysfs with new core functions". DAMON provides damon_callback API that notifies monitoring events and allows safe access to damon_ctx internal data. The usage is simple. Users register and deregister callback functions for different monitoring events in damon_ctx. Then the DAMON worker thread (kdamond) of the damon_ctx calls back the registered functions on the events. It is designed in such simple way because it was sufficient for usages of DAMON at the early days. We also wanted to make it flexible so that API user code can implement any required additional features on top of damon_callback on their demands. As expected, more sophisticated usages have invented. Online updates of DAMON parameters and DAMOS auto-tuning inputs, and online retrieval of DAMOS statistics and tried regions information are such usages. Because damon_callback doesn't provide any explicit synchronization mechanism, the user ABIs for exposing such functionalities are implemented in asynchronous ways (DAMON_RECLAIM and DAMON_LRU_SORT}), or synchronous ways (DAMON_SYSFS) with additional synchronization mechanisms that built inside the ABI implementation, on top of damon_callback. So damon_callback is working as expected. However, the additional mechanisms built inside ABI on top of damon_callback is becoming somewhat too big and not easy to maintain. The additional mechanisms can be smaller and easier to maintain when implemented inside the core logic layer. Introduce two new DAMON core API, namely 'damon_call()' and 'damos_walk()'. The two functions support synchronous access to - damon_ctx internal data including DAMON parameters and monitoring results, and - DAMOS-specific data such as regions that each DAMOS action is applied, respectively. 
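To make the call style concrete, here is a minimal caller-side sketch of the first of the two functions; the damon_call_control fields and the damon_call() signature are taken from the damon_call() patch later in this series, while count_schemes() and the pr_info() reporting are illustrative only.

static int count_schemes(void *data)
{
	struct damon_ctx *ctx = data;
	struct damos *s;
	int nr = 0;

	/* Runs from the kdamond main loop, so ctx internals are stable here. */
	damon_for_each_scheme(s, ctx)
		nr++;
	return nr;
}

static int report_nr_schemes(struct damon_ctx *ctx)
{
	struct damon_call_control control = {
		.fn = count_schemes,
		.data = ctx,
	};
	int err = damon_call(ctx, &control);

	if (err)
		return err;
	pr_info("%d schemes installed\n", control.return_code);
	return 0;
}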
And replace most of damon_callback usages in DAMON sysfs interface with the new core API functions. damon_callback usage for online DAMON parameters tuning is not replaced in this series, since it has specific callback timing assumptions that require more works. Patch sequence ============== First two patches are fixups for simplifying the following changes. Those remove a unnecessary condition check and a synchronization, respectively. Third patch implements one of the new DAMON core APIs, namely damon_call(). Three patches replacing damon_callback usages in DAMON sysfs interface using damon_call() follow. Then, seventh and eighth patches introduces the other new DAMON API, damos_walk(), and document it on the design doc. Ninth patch replaces two damon_callback usages in DAMON sysfs interface using damos_walk(). The tenth patch finally cleans up code that no more being used. This patch (of 10): damon_sysfs_schemes_clear_regions() skips removing the scheme tried region directories only if the matching scheme is still ongoing. It is unnecessary check, since what users want is just removing the entire region directories. Remove the unnecessary check. Link: https://lkml.kernel.org/r/20250103174400.54890-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250103174400.54890-2-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs-common.h | 3 +-- mm/damon/sysfs-schemes.c | 16 +++++----------- mm/damon/sysfs.c | 2 +- 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/mm/damon/sysfs-common.h b/mm/damon/sysfs-common.h index 9a18f3c535d3..e79b4a65ff2d 100644 --- a/mm/damon/sysfs-common.h +++ b/mm/damon/sysfs-common.h @@ -56,8 +56,7 @@ bool damos_sysfs_regions_upd_done(void); int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx); int damon_sysfs_schemes_clear_regions( - struct damon_sysfs_schemes *sysfs_schemes, - struct damon_ctx *ctx); + struct damon_sysfs_schemes *sysfs_schemes); int damos_sysfs_set_quota_scores(struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx); diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index b095457380b5..2aa34778a472 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -2208,20 +2208,14 @@ void damos_sysfs_mark_finished_regions_updates(struct damon_ctx *ctx) /* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */ int damon_sysfs_schemes_clear_regions( - struct damon_sysfs_schemes *sysfs_schemes, - struct damon_ctx *ctx) + struct damon_sysfs_schemes *sysfs_schemes) { - struct damos *scheme; - int schemes_idx = 0; + int i; - damon_for_each_scheme(scheme, ctx) { + for (i = 0; i < sysfs_schemes->nr; i++) { struct damon_sysfs_scheme *sysfs_scheme; - /* user could have removed the scheme sysfs dir */ - if (schemes_idx >= sysfs_schemes->nr) - break; - - sysfs_scheme = sysfs_schemes->schemes_arr[schemes_idx++]; + sysfs_scheme = sysfs_schemes->schemes_arr[i]; damon_sysfs_scheme_regions_rm_dirs( sysfs_scheme->tried_regions); sysfs_scheme->tried_regions->total_bytes = 0; @@ -2271,7 +2265,7 @@ int damon_sysfs_schemes_update_regions_start( struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx, bool total_bytes_only) { - damon_sysfs_schemes_clear_regions(sysfs_schemes, ctx); + damon_sysfs_schemes_clear_regions(sysfs_schemes); damon_sysfs_schemes_for_damos_callback = sysfs_schemes; damos_tried_regions_init_upd_status(sysfs_schemes, ctx); damos_regions_upd_total_bytes_only = total_bytes_only; diff --git a/mm/damon/sysfs.c 
b/mm/damon/sysfs.c index 58145d59881d..789804986ab0 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1262,7 +1262,7 @@ static int damon_sysfs_clear_schemes_regions( if (!ctx) return -EINVAL; return damon_sysfs_schemes_clear_regions( - kdamond->contexts->contexts_arr[0]->schemes, ctx); + kdamond->contexts->contexts_arr[0]->schemes); } static inline bool damon_sysfs_kdamond_running( From d03fb550d75e4e55af67cf5c75654fe738d6afcb Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 3 Jan 2025 09:43:52 -0800 Subject: [PATCH 260/504] mm/damon/sysfs: handle clear_schemes_tried_regions from DAMON sysfs context DAMON sysfs interface handles clear_schemes_tried_regions request from the DAMON callback context (damon_sysfs_cmd_request_callback()), which is designed to be used for safe access to the related DAMON context internal data. But no DAMON context internal data is accessed for the work. Directly handle it from DAMON sysfs interface context, namely damon_sysfs_handle_cmd(). Link: https://lkml.kernel.org/r/20250103174400.54890-3-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs-schemes.c | 2 +- mm/damon/sysfs.c | 17 +++-------------- 2 files changed, 4 insertions(+), 15 deletions(-) diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 2aa34778a472..c57ab47686ff 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -2260,7 +2260,7 @@ static void damos_tried_regions_init_upd_status( } } -/* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */ +/* Called while damon_sysfs_lock is hold */ int damon_sysfs_schemes_update_regions_start( struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx, bool total_bytes_only) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index 789804986ab0..4f6e4720b7ce 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1254,17 +1254,6 @@ static int damon_sysfs_upd_schemes_regions_stop( return damon_sysfs_schemes_update_regions_stop(ctx); } -static int damon_sysfs_clear_schemes_regions( - struct damon_sysfs_kdamond *kdamond) -{ - struct damon_ctx *ctx = kdamond->damon_ctx; - - if (!ctx) - return -EINVAL; - return damon_sysfs_schemes_clear_regions( - kdamond->contexts->contexts_arr[0]->schemes); -} - static inline bool damon_sysfs_kdamond_running( struct damon_sysfs_kdamond *kdamond) { @@ -1417,9 +1406,6 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active, damon_sysfs_schemes_regions_updating = false; } break; - case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: - err = damon_sysfs_clear_schemes_regions(kdamond); - break; case DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS: err = damon_sysfs_upd_schemes_effective_quotas(kdamond); break; @@ -1549,6 +1535,9 @@ static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd, return damon_sysfs_turn_damon_on(kdamond); case DAMON_SYSFS_CMD_OFF: return damon_sysfs_turn_damon_off(kdamond); + case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: + return damon_sysfs_schemes_clear_regions( + kdamond->contexts->contexts_arr[0]->schemes); default: break; } From d5753dd24b4e2a5ec245b9d672719cc08c10df6d Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 3 Jan 2025 09:43:53 -0800 Subject: [PATCH 261/504] mm/damon/core: introduce damon_call() Introduce a new DAMON core API function, damon_call(). It aims to replace some damon_callback usages that access damon_ctx of ongoing kdamond with additional synchronizations. 
It receives a function pointer, let the parallel kdamond invokes the function, and returns after the invocation is finished, or canceled due to some races. kdamond invokes the function inside the main loop after sampling is done. If it is deactivated by DAMOS watermarks or already out of the main loop, mark the request as canceled so that damon_call() can wakeup and return. Link: https://lkml.kernel.org/r/20250103174400.54890-4-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 26 +++++++++++++ mm/damon/core.c | 86 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 112 insertions(+) diff --git a/include/linux/damon.h b/include/linux/damon.h index a67f2c4940e9..ac2d42a50751 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -552,6 +552,27 @@ struct damon_callback { void (*before_terminate)(struct damon_ctx *context); }; +/* + * struct damon_call_control - Control damon_call(). + * + * @fn: Function to be called back. + * @data: Data that will be passed to @fn. + * @return_code: Return code from @fn invocation. + * + * Control damon_call(), which requests specific kdamond to invoke a given + * function. Refer to damon_call() for more details. + */ +struct damon_call_control { + int (*fn)(void *data); + void *data; + int return_code; +/* private: internal use only */ + /* informs if the kdamond finished handling of the request */ + struct completion completion; + /* informs if the kdamond canceled @fn infocation */ + bool canceled; +}; + /** * struct damon_attrs - Monitoring attributes for accuracy/overhead control. * @@ -632,6 +653,9 @@ struct damon_ctx { /* for scheme quotas prioritization */ unsigned long *regions_score_histogram; + struct damon_call_control *call_control; + struct mutex call_control_lock; + /* public: */ struct task_struct *kdamond; struct mutex kdamond_lock; @@ -779,6 +803,8 @@ static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive); int damon_stop(struct damon_ctx **ctxs, int nr_ctxs); +int damon_call(struct damon_ctx *ctx, struct damon_call_control *control); + int damon_set_region_biggest_system_ram_default(struct damon_target *t, unsigned long *start, unsigned long *end); diff --git a/mm/damon/core.c b/mm/damon/core.c index 5192ee29f6cf..97f19ec4179c 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -504,6 +504,7 @@ struct damon_ctx *damon_new_ctx(void) ctx->next_ops_update_sis = 0; mutex_init(&ctx->kdamond_lock); + mutex_init(&ctx->call_control_lock); ctx->attrs.min_nr_regions = 10; ctx->attrs.max_nr_regions = 1000; @@ -1162,6 +1163,54 @@ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs) return err; } +static bool damon_is_running(struct damon_ctx *ctx) +{ + bool running; + + mutex_lock(&ctx->kdamond_lock); + running = ctx->kdamond != NULL; + mutex_unlock(&ctx->kdamond_lock); + return running; +} + +/** + * damon_call() - Invoke a given function on DAMON worker thread (kdamond). + * @ctx: DAMON context to call the function for. + * @control: Control variable of the call request. + * + * Ask DAMON worker thread (kdamond) of @ctx to call a function with an + * argument data that respectively passed via &damon_call_control->fn and + * &damon_call_control->data of @control, and wait until the kdamond finishes + * handling of the request. + * + * The kdamond executes the function with the argument in the main loop, just + * after a sampling of the iteration is finished. 
The function can hence + * safely access the internal data of the &struct damon_ctx without additional + * synchronization. The return value of the function will be saved in + * &damon_call_control->return_code. + * + * Return: 0 on success, negative error code otherwise. + */ +int damon_call(struct damon_ctx *ctx, struct damon_call_control *control) +{ + init_completion(&control->completion); + control->canceled = false; + + mutex_lock(&ctx->call_control_lock); + if (ctx->call_control) { + mutex_unlock(&ctx->call_control_lock); + return -EBUSY; + } + ctx->call_control = control; + mutex_unlock(&ctx->call_control_lock); + if (!damon_is_running(ctx)) + return -EINVAL; + wait_for_completion(&control->completion); + if (control->canceled) + return -ECANCELED; + return 0; +} + /* * Reset the aggregated monitoring results ('nr_accesses' of each region). */ @@ -1917,6 +1966,39 @@ static void kdamond_usleep(unsigned long usecs) usleep_range_idle(usecs, usecs + 1); } +/* + * kdamond_call() - handle damon_call_control. + * @ctx: The &struct damon_ctx of the kdamond. + * @cancel: Whether to cancel the invocation of the function. + * + * If there is a &struct damon_call_control request that registered via + * &damon_call() on @ctx, do or cancel the invocation of the function depending + * on @cancel. @cancel is set when the kdamond is deactivated by DAMOS + * watermarks, or the kdamond is already out of the main loop and therefore + * will be terminated. + */ +static void kdamond_call(struct damon_ctx *ctx, bool cancel) +{ + struct damon_call_control *control; + int ret = 0; + + mutex_lock(&ctx->call_control_lock); + control = ctx->call_control; + mutex_unlock(&ctx->call_control_lock); + if (!control) + return; + if (cancel) { + control->canceled = true; + } else { + ret = control->fn(control->data); + control->return_code = ret; + } + complete(&control->completion); + mutex_lock(&ctx->call_control_lock); + ctx->call_control = NULL; + mutex_unlock(&ctx->call_control_lock); +} + /* Returns negative error code if it's not activated but should return */ static int kdamond_wait_activation(struct damon_ctx *ctx) { @@ -1941,6 +2023,7 @@ static int kdamond_wait_activation(struct damon_ctx *ctx) if (ctx->callback.after_wmarks_check && ctx->callback.after_wmarks_check(ctx)) break; + kdamond_call(ctx, true); } return -EBUSY; } @@ -2011,6 +2094,7 @@ static int kdamond_fn(void *data) if (ctx->callback.after_sampling && ctx->callback.after_sampling(ctx)) break; + kdamond_call(ctx, false); kdamond_usleep(sample_interval); ctx->passed_sample_intervals++; @@ -2072,6 +2156,8 @@ done: ctx->kdamond = NULL; mutex_unlock(&ctx->kdamond_lock); + kdamond_call(ctx, true); + mutex_lock(&damon_lock); nr_running_ctxs--; if (!nr_running_ctxs && running_exclusive_ctxs) From f74a8053c13c93cb63c6002e36a9979b30ed1d8c Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 3 Jan 2025 09:43:54 -0800 Subject: [PATCH 262/504] mm/damon/sysfs: use damon_call() for update_schemes_stats DAMON sysfs interface uses damon_callback with its own synchronization facility to handle update_schemes_stats kdamond command. But damon_call() can support the use case without the additional synchronizations. Convert the code to use damon_call() instead. 
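For reference, a sketch of the error handling a damon_call() caller needs, derived from the damon_call() and kdamond_call() implementation introduced in the previous patch; the wrapper name sync_update() is hypothetical.

static int sync_update(struct damon_ctx *ctx, struct damon_call_control *control)
{
	int err = damon_call(ctx, control);

	if (err == -EBUSY)	/* another damon_call() request is pending */
		return err;
	if (err == -EINVAL)	/* the kdamond is not running */
		return err;
	if (err == -ECANCELED)	/* kdamond deactivated or terminating */
		return err;
	return control->return_code;	/* value returned by control->fn() */
}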
Link: https://lkml.kernel.org/r/20250103174400.54890-5-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs.c | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index 4f6e4720b7ce..708c2ffdd620 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1214,19 +1214,19 @@ static void damon_sysfs_before_terminate(struct damon_ctx *ctx) /* * damon_sysfs_upd_schemes_stats() - Update schemes stats sysfs files. - * @kdamond: The kobject wrapper that associated to the kdamond thread. + * @data: The kobject wrapper that associated to the kdamond thread. * * This function reads the schemes stats of specific kdamond and update the * related values for sysfs files. This function should be called from DAMON - * callbacks while holding ``damon_syfs_lock``, to safely access the DAMON - * contexts-internal data and DAMON sysfs variables. + * worker thread,to safely access the DAMON contexts-internal data. Caller + * should also ensure holding ``damon_syfs_lock``, and ->damon_ctx of @data is + * not NULL but a valid pointer, to safely access DAMON sysfs variables. */ -static int damon_sysfs_upd_schemes_stats(struct damon_sysfs_kdamond *kdamond) +static int damon_sysfs_upd_schemes_stats(void *data) { + struct damon_sysfs_kdamond *kdamond = data; struct damon_ctx *ctx = kdamond->damon_ctx; - if (!ctx) - return -EINVAL; damon_sysfs_schemes_update_stats( kdamond->contexts->contexts_arr[0]->schemes, ctx); return 0; @@ -1371,9 +1371,6 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active, if (!kdamond || kdamond->damon_ctx != c) goto out; switch (damon_sysfs_cmd_request.cmd) { - case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: - err = damon_sysfs_upd_schemes_stats(kdamond); - break; case DAMON_SYSFS_CMD_COMMIT: if (!after_aggregation) goto out; @@ -1511,6 +1508,18 @@ static int damon_sysfs_turn_damon_off(struct damon_sysfs_kdamond *kdamond) */ } +static int damon_sysfs_damon_call(int (*fn)(void *data), + struct damon_sysfs_kdamond *kdamond) +{ + struct damon_call_control call_control = {}; + + if (!kdamond->damon_ctx) + return -EINVAL; + call_control.fn = fn; + call_control.data = kdamond; + return damon_call(kdamond->damon_ctx, &call_control); +} + /* * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond. * @cmd: The command to handle. @@ -1529,12 +1538,14 @@ static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd, { bool need_wait = true; - /* Handle commands that doesn't access DAMON context-internal data */ switch (cmd) { case DAMON_SYSFS_CMD_ON: return damon_sysfs_turn_damon_on(kdamond); case DAMON_SYSFS_CMD_OFF: return damon_sysfs_turn_damon_off(kdamond); + case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: + return damon_sysfs_damon_call( + damon_sysfs_upd_schemes_stats, kdamond); case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: return damon_sysfs_schemes_clear_regions( kdamond->contexts->contexts_arr[0]->schemes); From e5f3ff7d581ed1862bdcd2480e884e109a758a9f Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 3 Jan 2025 09:43:55 -0800 Subject: [PATCH 263/504] mm/damon/sysfs: use damon_call() for commit_schemes_quota_goals DAMON sysfs interface uses damon_callback with its own synchronization facility to handle commit_schemes_quota_goals command. But damon_call() can support the use case without the additional synchronizations. Convert the code to use damon_call() instead. 
Link: https://lkml.kernel.org/r/20250103174400.54890-6-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index 708c2ffdd620..7c2aa9830edc 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1307,9 +1307,9 @@ static int damon_sysfs_commit_input(struct damon_sysfs_kdamond *kdamond) return err; } -static int damon_sysfs_commit_schemes_quota_goals( - struct damon_sysfs_kdamond *sysfs_kdamond) +static int damon_sysfs_commit_schemes_quota_goals(void *data) { + struct damon_sysfs_kdamond *sysfs_kdamond = data; struct damon_ctx *ctx; struct damon_sysfs_context *sysfs_ctx; @@ -1376,9 +1376,6 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active, goto out; err = damon_sysfs_commit_input(kdamond); break; - case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS: - err = damon_sysfs_commit_schemes_quota_goals(kdamond); - break; case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES: total_bytes_only = true; fallthrough; @@ -1543,6 +1540,10 @@ static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd, return damon_sysfs_turn_damon_on(kdamond); case DAMON_SYSFS_CMD_OFF: return damon_sysfs_turn_damon_off(kdamond); + case DAMON_SYSFS_CMD_COMMIT_SCHEMES_QUOTA_GOALS: + return damon_sysfs_damon_call( + damon_sysfs_commit_schemes_quota_goals, + kdamond); case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: return damon_sysfs_damon_call( damon_sysfs_upd_schemes_stats, kdamond); From 2bda233e0ca4315d6c4be0d29ffd827f179de8f4 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 3 Jan 2025 09:43:56 -0800 Subject: [PATCH 264/504] mm/damon/sysfs: use damon_call() for update_schemes_effective_quotas DAMON sysfs interface uses damon_callback with its own synchronization facility to handle update_schemes_effective_quotas command. But damon_call() can support the use case without the additional synchronizations. Convert the code to use damon_call() instead. Link: https://lkml.kernel.org/r/20250103174400.54890-7-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index 7c2aa9830edc..917e6aca3f58 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1327,20 +1327,18 @@ static int damon_sysfs_commit_schemes_quota_goals(void *data) /* * damon_sysfs_upd_schemes_effective_quotas() - Update schemes effective quotas * sysfs files. - * @kdamond: The kobject wrapper that associated to the kdamond thread. + * @data: The kobject wrapper that associated to the kdamond thread. * * This function reads the schemes' effective quotas of specific kdamond and * update the related values for sysfs files. This function should be called * from DAMON callbacks while holding ``damon_syfs_lock``, to safely access the * DAMON contexts-internal data and DAMON sysfs variables. 
*/ -static int damon_sysfs_upd_schemes_effective_quotas( - struct damon_sysfs_kdamond *kdamond) +static int damon_sysfs_upd_schemes_effective_quotas(void *data) { + struct damon_sysfs_kdamond *kdamond = data; struct damon_ctx *ctx = kdamond->damon_ctx; - if (!ctx) - return -EINVAL; damos_sysfs_update_effective_quotas( kdamond->contexts->contexts_arr[0]->schemes, ctx); return 0; @@ -1400,9 +1398,6 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active, damon_sysfs_schemes_regions_updating = false; } break; - case DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS: - err = damon_sysfs_upd_schemes_effective_quotas(kdamond); - break; default: break; } @@ -1550,6 +1545,10 @@ static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd, case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: return damon_sysfs_schemes_clear_regions( kdamond->contexts->contexts_arr[0]->schemes); + case DAMON_SYSFS_CMD_UPDATE_SCHEMES_EFFECTIVE_QUOTAS: + return damon_sysfs_damon_call( + damon_sysfs_upd_schemes_effective_quotas, + kdamond); default: break; } From 5a8a19914d2a82b5834c4d9b3e078a88214cd136 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 3 Jan 2025 09:43:57 -0800 Subject: [PATCH 265/504] mm/damon/core: implement damos_walk() Introduce a new core layer interface, damos_walk(). It aims to replace some damon_callback usages that access DAMOS schemes applied regions of ongoing kdamond with additional synchronizations. It receives a function pointer and asks kdamond to invoke it for any region that it tried to apply any DAMOS action within one scheme apply interval for every scheme of it. The function further waits until the kdamond finishes the invocations for every scheme, or cancels the request, and returns. The kdamond invokes the function as requested within the main loop. If it is deactivated by DAMOS watermarks or going out of the main loop, it marks the request as canceled, so that damos_walk() can wakeup and return. Link: https://lkml.kernel.org/r/20250103174400.54890-8-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 33 ++++++++++- mm/damon/core.c | 132 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 163 insertions(+), 2 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index ac2d42a50751..2889de3526c3 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -352,6 +352,31 @@ struct damos_filter { struct list_head list; }; +struct damon_ctx; +struct damos; + +/** + * struct damos_walk_control - Control damos_walk(). + * + * @walk_fn: Function to be called back for each region. + * @data: Data that will be passed to walk functions. + * + * Control damos_walk(), which requests specific kdamond to invoke the given + * function to each region that eligible to apply actions of the kdamond's + * schemes. Refer to damos_walk() for more details. + */ +struct damos_walk_control { + void (*walk_fn)(void *data, struct damon_ctx *ctx, + struct damon_target *t, struct damon_region *r, + struct damos *s); + void *data; +/* private: internal use only */ + /* informs if the kdamond finished handling of the walk request */ + struct completion completion; + /* informs if the walk is canceled. */ + bool canceled; +}; + /** * struct damos_access_pattern - Target access pattern of the given scheme. * @min_sz_region: Minimum size of target regions. 
@@ -415,6 +440,8 @@ struct damos { * @action */ unsigned long next_apply_sis; + /* informs if ongoing DAMOS walk for this scheme is finished */ + bool walk_completed; /* public: */ struct damos_quota quota; struct damos_watermarks wmarks; @@ -442,8 +469,6 @@ enum damon_ops_id { NR_DAMON_OPS, }; -struct damon_ctx; - /** * struct damon_operations - Monitoring operations for given use cases. * @@ -656,6 +681,9 @@ struct damon_ctx { struct damon_call_control *call_control; struct mutex call_control_lock; + struct damos_walk_control *walk_control; + struct mutex walk_control_lock; + /* public: */ struct task_struct *kdamond; struct mutex kdamond_lock; @@ -804,6 +832,7 @@ int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive); int damon_stop(struct damon_ctx **ctxs, int nr_ctxs); int damon_call(struct damon_ctx *ctx, struct damon_call_control *control); +int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control); int damon_set_region_biggest_system_ram_default(struct damon_target *t, unsigned long *start, unsigned long *end); diff --git a/mm/damon/core.c b/mm/damon/core.c index 97f19ec4179c..d02a7d6da855 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -505,6 +505,7 @@ struct damon_ctx *damon_new_ctx(void) mutex_init(&ctx->kdamond_lock); mutex_init(&ctx->call_control_lock); + mutex_init(&ctx->walk_control_lock); ctx->attrs.min_nr_regions = 10; ctx->attrs.max_nr_regions = 1000; @@ -1211,6 +1212,46 @@ int damon_call(struct damon_ctx *ctx, struct damon_call_control *control) return 0; } +/** + * damos_walk() - Invoke a given functions while DAMOS walk regions. + * @ctx: DAMON context to call the functions for. + * @control: Control variable of the walk request. + * + * Ask DAMON worker thread (kdamond) of @ctx to call a function for each region + * that the kdamond will apply DAMOS action to, and wait until the kdamond + * finishes handling of the request. + * + * The kdamond executes the given function in the main loop, for each region + * just after it applied any DAMOS actions of @ctx to it. The invocation is + * made only within one &damos->apply_interval_us since damos_walk() + * invocation, for each scheme. The given callback function can hence safely + * access the internal data of &struct damon_ctx and &struct damon_region that + * each of the scheme will apply the action for next interval, without + * additional synchronizations against the kdamond. If every scheme of @ctx + * passed at least one &damos->apply_interval_us, kdamond marks the request as + * completed so that damos_walk() can wakeup and return. + * + * Return: 0 on success, negative error code otherwise. + */ +int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control) +{ + init_completion(&control->completion); + control->canceled = false; + mutex_lock(&ctx->walk_control_lock); + if (ctx->walk_control) { + mutex_unlock(&ctx->walk_control_lock); + return -EBUSY; + } + ctx->walk_control = control; + mutex_unlock(&ctx->walk_control_lock); + if (!damon_is_running(ctx)) + return -EINVAL; + wait_for_completion(&control->completion); + if (control->canceled) + return -ECANCELED; + return 0; +} + /* * Reset the aggregated monitoring results ('nr_accesses' of each region). */ @@ -1390,6 +1431,91 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, return false; } +/* + * damos_walk_call_walk() - Call &damos_walk_control->walk_fn. + * @ctx: The context of &damon_ctx->walk_control. + * @t: The monitoring target of @r that @s will be applied. 
+ * @r: The region of @t that @s will be applied. + * @s: The scheme of @ctx that will be applied to @r. + * + * This function is called from kdamond whenever it asked the operation set to + * apply a DAMOS scheme action to a region. If a DAMOS walk request is + * installed by damos_walk() and not yet uninstalled, invoke it. + */ +static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t, + struct damon_region *r, struct damos *s) +{ + struct damos_walk_control *control; + + mutex_lock(&ctx->walk_control_lock); + control = ctx->walk_control; + mutex_unlock(&ctx->walk_control_lock); + if (!control) + return; + control->walk_fn(control->data, ctx, t, r, s); +} + +/* + * damos_walk_complete() - Complete DAMOS walk request if all walks are done. + * @ctx: The context of &damon_ctx->walk_control. + * @s: A scheme of @ctx that all walks are now done. + * + * This function is called when kdamond finished applying the action of a DAMOS + * scheme to all regions that eligible for the given &damos->apply_interval_us. + * If every scheme of @ctx including @s now finished walking for at least one + * &damos->apply_interval_us, this function makrs the handling of the given + * DAMOS walk request is done, so that damos_walk() can wake up and return. + */ +static void damos_walk_complete(struct damon_ctx *ctx, struct damos *s) +{ + struct damos *siter; + struct damos_walk_control *control; + + mutex_lock(&ctx->walk_control_lock); + control = ctx->walk_control; + mutex_unlock(&ctx->walk_control_lock); + if (!control) + return; + + s->walk_completed = true; + /* if all schemes completed, signal completion to walker */ + damon_for_each_scheme(siter, ctx) { + if (!siter->walk_completed) + return; + } + complete(&control->completion); + mutex_lock(&ctx->walk_control_lock); + ctx->walk_control = NULL; + mutex_unlock(&ctx->walk_control_lock); +} + +/* + * damos_walk_cancel() - Cancel the current DAMOS walk request. + * @ctx: The context of &damon_ctx->walk_control. + * + * This function is called when @ctx is deactivated by DAMOS watermarks, DAMOS + * walk is requested but there is no DAMOS scheme to walk for, or the kdamond + * is already out of the main loop and therefore gonna be terminated, and hence + * cannot continue the walks. This function therefore marks the walk request + * as canceled, so that damos_walk() can wake up and return. + */ +static void damos_walk_cancel(struct damon_ctx *ctx) +{ + struct damos_walk_control *control; + + mutex_lock(&ctx->walk_control_lock); + control = ctx->walk_control; + mutex_unlock(&ctx->walk_control_lock); + + if (!control) + return; + control->canceled = true; + complete(&control->completion); + mutex_lock(&ctx->walk_control_lock); + ctx->walk_control = NULL; + mutex_unlock(&ctx->walk_control_lock); +} + static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, struct damon_region *r, struct damos *s) { @@ -1444,6 +1570,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, damon_nr_regions(t), do_trace); sz_applied = c->ops.apply_scheme(c, t, r, s); } + damos_walk_call_walk(c, t, r, s); ktime_get_coarse_ts64(&end); quota->total_charged_ns += timespec64_to_ns(&end) - timespec64_to_ns(&begin); @@ -1712,6 +1839,7 @@ static void kdamond_apply_schemes(struct damon_ctx *c) damon_for_each_scheme(s, c) { if (c->passed_sample_intervals < s->next_apply_sis) continue; + damos_walk_complete(c, s); s->next_apply_sis = c->passed_sample_intervals + (s->apply_interval_us ? 
s->apply_interval_us : c->attrs.aggr_interval) / sample_interval; @@ -2024,6 +2152,7 @@ static int kdamond_wait_activation(struct damon_ctx *ctx) ctx->callback.after_wmarks_check(ctx)) break; kdamond_call(ctx, true); + damos_walk_cancel(ctx); } return -EBUSY; } @@ -2117,6 +2246,8 @@ static int kdamond_fn(void *data) */ if (!list_empty(&ctx->schemes)) kdamond_apply_schemes(ctx); + else + damos_walk_cancel(ctx); sample_interval = ctx->attrs.sample_interval ? ctx->attrs.sample_interval : 1; @@ -2157,6 +2288,7 @@ done: mutex_unlock(&ctx->kdamond_lock); kdamond_call(ctx, true); + damos_walk_cancel(ctx); mutex_lock(&damon_lock); nr_running_ctxs--; From 3b58db6d53aabc61054022cb3b7267c4ffa2056f Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 3 Jan 2025 09:43:58 -0800 Subject: [PATCH 266/504] Docs/mm/damon/design: document DAMOS regions walking DAMOS' regions walking is a feature for efficiently retrieving monitoring results or DAMOS-internal behavior. It can be useful for multiple purposes including investigations and tuning. Add a section for it on the design document. Link: https://lkml.kernel.org/r/20250103174400.54890-9-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index f9c50525bdbf..a577ae40e71c 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -540,6 +540,17 @@ To know how user-space can set the watermarks via :ref:`DAMON sysfs interface documentation. +Regions Walking +~~~~~~~~~~~~~~~ + +DAMOS feature allowing users access each region that a DAMOS action has just +applied. Using this feature, DAMON :ref:`API ` allows users +access full properties of the regions including the access monitoring results. +:ref:`DAMON sysfs interface ` also allows users read the data +via special :ref:`files `. + +.. _damon_design_api: + Application Programming Interface --------------------------------- From 1739881f7349565e599df887bce71f976b547486 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 3 Jan 2025 09:43:59 -0800 Subject: [PATCH 267/504] mm/damon/sysfs: use damos_walk() for update_schemes_tried_{bytes,regions} DAMON sysfs interface uses damon_callback with its own complicated synchronization facility to handle update_schemes_tried_bytes and update_schemes_tried_regions commands. But damos_walk() can support the use case without the additional synchronizations. Convert the code to use damos_walk() instead. 
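As a sketch of the pattern this conversion follows (the walk_fn signature and the damos_walk_control fields come from the damos_walk() patch above; sum_applied_bytes() and the byte counter are illustrative only):

static void sum_applied_bytes(void *data, struct damon_ctx *ctx,
		struct damon_target *t, struct damon_region *r,
		struct damos *s)
{
	unsigned long *total = data;

	/* Called from the kdamond just after @s was applied to @r. */
	*total += r->ar.end - r->ar.start;
}

static int report_applied_bytes(struct damon_ctx *ctx)
{
	unsigned long total = 0;
	struct damos_walk_control control = {
		.walk_fn = sum_applied_bytes,
		.data = &total,
	};
	int err = damos_walk(ctx, &control);

	if (err)
		return err;
	pr_info("DAMOS applied actions to %lu bytes\n", total);
	return 0;
}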
Link: https://lkml.kernel.org/r/20250103174400.54890-10-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs-common.h | 5 +++++ mm/damon/sysfs-schemes.c | 48 ++++++++++++++++++++++++++++++++++++++++ mm/damon/sysfs.c | 43 +++++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+) diff --git a/mm/damon/sysfs-common.h b/mm/damon/sysfs-common.h index e79b4a65ff2d..81f1c845118f 100644 --- a/mm/damon/sysfs-common.h +++ b/mm/damon/sysfs-common.h @@ -55,6 +55,11 @@ bool damos_sysfs_regions_upd_done(void); int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx); +void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes, + struct damon_ctx *ctx, struct damon_target *t, + struct damon_region *r, struct damos *s, + bool total_bytes_only); + int damon_sysfs_schemes_clear_regions( struct damon_sysfs_schemes *sysfs_schemes); diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index c57ab47686ff..1d8ad637051a 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -2183,6 +2183,54 @@ static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx, return 0; } +/** + * damos_sysfs_populate_region_dir() - Populate a schemes tried region dir. + * @sysfs_schemes: Schemes directory to populate regions directory. + * @ctx: Corresponding DAMON context. + * @t: DAMON target of @r. + * @r: DAMON region to populate the directory for. + * @s: Corresponding scheme. + * @total_bytes_only: Whether the request is for bytes update only. + * + * Called from DAMOS walk callback while holding damon_sysfs_lock. + */ +void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes, + struct damon_ctx *ctx, struct damon_target *t, + struct damon_region *r, struct damos *s, bool total_bytes_only) +{ + struct damos *scheme; + struct damon_sysfs_scheme_regions *sysfs_regions; + struct damon_sysfs_scheme_region *region; + int schemes_idx = 0; + + damon_for_each_scheme(scheme, ctx) { + if (scheme == s) + break; + schemes_idx++; + } + + /* user could have removed the scheme sysfs dir */ + if (schemes_idx >= sysfs_schemes->nr) + return; + + sysfs_regions = sysfs_schemes->schemes_arr[schemes_idx]->tried_regions; + sysfs_regions->total_bytes += r->ar.end - r->ar.start; + if (total_bytes_only) + return; + + region = damon_sysfs_scheme_region_alloc(r); + if (!region) + return; + list_add_tail(®ion->list, &sysfs_regions->regions_list); + sysfs_regions->nr_regions++; + if (kobject_init_and_add(®ion->kobj, + &damon_sysfs_scheme_region_ktype, + &sysfs_regions->kobj, "%d", + sysfs_regions->nr_regions++)) { + kobject_put(®ion->kobj); + } +} + /* * DAMON callback that called after each accesses sampling. 
While this * callback is registered, damon_sysfs_lock should be held to ensure the diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index 917e6aca3f58..4ba44a314f2e 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1512,6 +1512,45 @@ static int damon_sysfs_damon_call(int (*fn)(void *data), return damon_call(kdamond->damon_ctx, &call_control); } +struct damon_sysfs_schemes_walk_data { + struct damon_sysfs_kdamond *sysfs_kdamond; + bool total_bytes_only; +}; + +/* populate the region directory */ +static void damon_sysfs_schemes_tried_regions_upd_one(void *data, struct damon_ctx *ctx, + struct damon_target *t, struct damon_region *r, + struct damos *s) +{ + struct damon_sysfs_schemes_walk_data *walk_data = data; + struct damon_sysfs_kdamond *sysfs_kdamond = walk_data->sysfs_kdamond; + + damos_sysfs_populate_region_dir( + sysfs_kdamond->contexts->contexts_arr[0]->schemes, + ctx, t, r, s, walk_data->total_bytes_only); +} + +static int damon_sysfs_update_schemes_tried_regions( + struct damon_sysfs_kdamond *sysfs_kdamond, bool total_bytes_only) +{ + struct damon_sysfs_schemes_walk_data walk_data = { + .sysfs_kdamond = sysfs_kdamond, + .total_bytes_only = total_bytes_only, + }; + struct damos_walk_control control = { + .walk_fn = damon_sysfs_schemes_tried_regions_upd_one, + .data = &walk_data, + }; + struct damon_ctx *ctx = sysfs_kdamond->damon_ctx; + + if (!ctx) + return -EINVAL; + + damon_sysfs_schemes_clear_regions( + sysfs_kdamond->contexts->contexts_arr[0]->schemes); + return damos_walk(ctx, &control); +} + /* * damon_sysfs_handle_cmd() - Handle a command for a specific kdamond. * @cmd: The command to handle. @@ -1542,6 +1581,10 @@ static int damon_sysfs_handle_cmd(enum damon_sysfs_cmd cmd, case DAMON_SYSFS_CMD_UPDATE_SCHEMES_STATS: return damon_sysfs_damon_call( damon_sysfs_upd_schemes_stats, kdamond); + case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES: + return damon_sysfs_update_schemes_tried_regions(kdamond, true); + case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS: + return damon_sysfs_update_schemes_tried_regions(kdamond, false); case DAMON_SYSFS_CMD_CLEAR_SCHEMES_TRIED_REGIONS: return damon_sysfs_schemes_clear_regions( kdamond->contexts->contexts_arr[0]->schemes); From ffe81319d4ea3c59d0b5699eb7e1cf46f00edd6f Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 3 Jan 2025 09:44:00 -0800 Subject: [PATCH 268/504] mm/damon/sysfs: remove unused code for schemes tried regions update DAMON sysfs interface was using damon_callback with its own complicated synchronization logics to update DAMOS scheme applied regions directories and files. But it is replaced to use damos_walk(), and the additional synchronization logics are no more being used. Remove those. 
Link: https://lkml.kernel.org/r/20250103174400.54890-11-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs-common.h | 10 -- mm/damon/sysfs-schemes.c | 204 --------------------------------------- mm/damon/sysfs.c | 70 +------------- 3 files changed, 2 insertions(+), 282 deletions(-) diff --git a/mm/damon/sysfs-common.h b/mm/damon/sysfs-common.h index 81f1c845118f..b3f63bc658b7 100644 --- a/mm/damon/sysfs-common.h +++ b/mm/damon/sysfs-common.h @@ -45,16 +45,6 @@ void damon_sysfs_schemes_update_stats( struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx); -int damon_sysfs_schemes_update_regions_start( - struct damon_sysfs_schemes *sysfs_schemes, - struct damon_ctx *ctx, bool total_bytes_only); - -void damos_sysfs_mark_finished_regions_updates(struct damon_ctx *ctx); - -bool damos_sysfs_regions_upd_done(void); - -int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx); - void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, struct damos *s, diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 1d8ad637051a..5c4490b97258 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -114,55 +114,11 @@ static const struct kobj_type damon_sysfs_scheme_region_ktype = { * scheme regions directory */ -/* - * enum damos_sysfs_regions_upd_status - Represent DAMOS tried regions update - * status - * @DAMOS_TRIED_REGIONS_UPD_IDLE: Waiting for next request. - * @DAMOS_TRIED_REGIONS_UPD_STARTED: Update started. - * @DAMOS_TRIED_REGIONS_UPD_FINISHED: Update finished. - * - * Each DAMON-based operation scheme (&struct damos) has its own apply - * interval, and we need to expose the scheme tried regions based on only - * single snapshot. For this, we keep the tried regions update status for each - * scheme. The status becomes 'idle' at the beginning. - * - * Once the tried regions update request is received, the request handling - * start function (damon_sysfs_scheme_update_regions_start()) sets the status - * of all schemes as 'idle' again, and register ->before_damos_apply() - * callback. - * - * Then, the first followup ->before_damos_apply() callback - * (damon_sysfs_before_damos_apply()) sets the status 'started'. The first - * ->after_sampling() or ->after_aggregation() callback - * (damon_sysfs_cmd_request_callback()) after the call is called only after - * the scheme is completely applied to the given snapshot. Hence the callback - * knows the situation by showing 'started' status, and sets the status as - * 'finished'. Then, damon_sysfs_before_damos_apply() understands the - * situation by showing the 'finished' status and do nothing. - * - * If DAMOS is not applied to any region due to any reasons including the - * access pattern, the watermarks, the quotas, and the filters, - * ->before_damos_apply() will not be called back. Until the situation is - * changed, the update will not be finished. To avoid this, - * damon_sysfs_after_sampling() set the status as 'finished' if more than two - * apply intervals of the scheme is passed while the state is 'idle'. - * - * Finally, the tried regions request handling finisher function - * (damon_sysfs_schemes_update_regions_stop()) unregisters the callbacks. 
- */ -enum damos_sysfs_regions_upd_status { - DAMOS_TRIED_REGIONS_UPD_IDLE, - DAMOS_TRIED_REGIONS_UPD_STARTED, - DAMOS_TRIED_REGIONS_UPD_FINISHED, -}; - struct damon_sysfs_scheme_regions { struct kobject kobj; struct list_head regions_list; int nr_regions; unsigned long total_bytes; - enum damos_sysfs_regions_upd_status upd_status; - unsigned long upd_timeout_jiffies; }; static struct damon_sysfs_scheme_regions * @@ -178,7 +134,6 @@ damon_sysfs_scheme_regions_alloc(void) INIT_LIST_HEAD(®ions->regions_list); regions->nr_regions = 0; regions->total_bytes = 0; - regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE; return regions; } @@ -2126,63 +2081,6 @@ void damon_sysfs_schemes_update_stats( } } -/* - * damon_sysfs_schemes that need to update its schemes regions dir. Protected - * by damon_sysfs_lock - */ -static struct damon_sysfs_schemes *damon_sysfs_schemes_for_damos_callback; -static int damon_sysfs_schemes_region_idx; -static bool damos_regions_upd_total_bytes_only; - -/* - * DAMON callback that called before damos apply. While this callback is - * registered, damon_sysfs_lock should be held to ensure the regions - * directories exist. - */ -static int damon_sysfs_before_damos_apply(struct damon_ctx *ctx, - struct damon_target *t, struct damon_region *r, - struct damos *s) -{ - struct damos *scheme; - struct damon_sysfs_scheme_regions *sysfs_regions; - struct damon_sysfs_scheme_region *region; - struct damon_sysfs_schemes *sysfs_schemes = - damon_sysfs_schemes_for_damos_callback; - int schemes_idx = 0; - - damon_for_each_scheme(scheme, ctx) { - if (scheme == s) - break; - schemes_idx++; - } - - /* user could have removed the scheme sysfs dir */ - if (schemes_idx >= sysfs_schemes->nr) - return 0; - - sysfs_regions = sysfs_schemes->schemes_arr[schemes_idx]->tried_regions; - if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_FINISHED) - return 0; - if (sysfs_regions->upd_status == DAMOS_TRIED_REGIONS_UPD_IDLE) - sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_STARTED; - sysfs_regions->total_bytes += r->ar.end - r->ar.start; - if (damos_regions_upd_total_bytes_only) - return 0; - - region = damon_sysfs_scheme_region_alloc(r); - if (!region) - return 0; - list_add_tail(®ion->list, &sysfs_regions->regions_list); - sysfs_regions->nr_regions++; - if (kobject_init_and_add(®ion->kobj, - &damon_sysfs_scheme_region_ktype, - &sysfs_regions->kobj, "%d", - damon_sysfs_schemes_region_idx++)) { - kobject_put(®ion->kobj); - } - return 0; -} - /** * damos_sysfs_populate_region_dir() - Populate a schemes tried region dir. * @sysfs_schemes: Schemes directory to populate regions directory. @@ -2231,29 +2129,6 @@ void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes, } } -/* - * DAMON callback that called after each accesses sampling. While this - * callback is registered, damon_sysfs_lock should be held to ensure the - * regions directories exist. 
- */ -void damos_sysfs_mark_finished_regions_updates(struct damon_ctx *ctx) -{ - struct damon_sysfs_schemes *sysfs_schemes = - damon_sysfs_schemes_for_damos_callback; - struct damon_sysfs_scheme_regions *sysfs_regions; - int i; - - for (i = 0; i < sysfs_schemes->nr; i++) { - sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions; - if (sysfs_regions->upd_status == - DAMOS_TRIED_REGIONS_UPD_STARTED || - time_after(jiffies, - sysfs_regions->upd_timeout_jiffies)) - sysfs_regions->upd_status = - DAMOS_TRIED_REGIONS_UPD_FINISHED; - } -} - /* Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock */ int damon_sysfs_schemes_clear_regions( struct damon_sysfs_schemes *sysfs_schemes) @@ -2270,82 +2145,3 @@ int damon_sysfs_schemes_clear_regions( } return 0; } - -static struct damos *damos_sysfs_nth_scheme(int n, struct damon_ctx *ctx) -{ - struct damos *scheme; - int i = 0; - - damon_for_each_scheme(scheme, ctx) { - if (i == n) - return scheme; - i++; - } - return NULL; -} - -static void damos_tried_regions_init_upd_status( - struct damon_sysfs_schemes *sysfs_schemes, - struct damon_ctx *ctx) -{ - int i; - struct damos *scheme; - struct damon_sysfs_scheme_regions *sysfs_regions; - - for (i = 0; i < sysfs_schemes->nr; i++) { - sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions; - scheme = damos_sysfs_nth_scheme(i, ctx); - if (!scheme) { - sysfs_regions->upd_status = - DAMOS_TRIED_REGIONS_UPD_FINISHED; - continue; - } - sysfs_regions->upd_status = DAMOS_TRIED_REGIONS_UPD_IDLE; - sysfs_regions->upd_timeout_jiffies = jiffies + - 2 * usecs_to_jiffies(scheme->apply_interval_us ? - scheme->apply_interval_us : - ctx->attrs.aggr_interval); - } -} - -/* Called while damon_sysfs_lock is hold */ -int damon_sysfs_schemes_update_regions_start( - struct damon_sysfs_schemes *sysfs_schemes, - struct damon_ctx *ctx, bool total_bytes_only) -{ - damon_sysfs_schemes_clear_regions(sysfs_schemes); - damon_sysfs_schemes_for_damos_callback = sysfs_schemes; - damos_tried_regions_init_upd_status(sysfs_schemes, ctx); - damos_regions_upd_total_bytes_only = total_bytes_only; - ctx->callback.before_damos_apply = damon_sysfs_before_damos_apply; - return 0; -} - -bool damos_sysfs_regions_upd_done(void) -{ - struct damon_sysfs_schemes *sysfs_schemes = - damon_sysfs_schemes_for_damos_callback; - struct damon_sysfs_scheme_regions *sysfs_regions; - int i; - - for (i = 0; i < sysfs_schemes->nr; i++) { - sysfs_regions = sysfs_schemes->schemes_arr[i]->tried_regions; - if (sysfs_regions->upd_status != - DAMOS_TRIED_REGIONS_UPD_FINISHED) - return false; - } - return true; -} - -/* - * Called from damon_sysfs_cmd_request_callback under damon_sysfs_lock. 
Caller - * should unlock damon_sysfs_lock which held before - * damon_sysfs_schemes_update_regions_start() - */ -int damon_sysfs_schemes_update_regions_stop(struct damon_ctx *ctx) -{ - damon_sysfs_schemes_for_damos_callback = NULL; - ctx->callback.before_damos_apply = NULL; - damon_sysfs_schemes_region_idx = 0; - return 0; -} diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index 4ba44a314f2e..cf8fb5a963d6 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1181,25 +1181,9 @@ static int damon_sysfs_add_targets(struct damon_ctx *ctx, return 0; } -static bool damon_sysfs_schemes_regions_updating; - static void damon_sysfs_before_terminate(struct damon_ctx *ctx) { struct damon_target *t, *next; - struct damon_sysfs_kdamond *kdamond; - enum damon_sysfs_cmd cmd; - - /* damon_sysfs_schemes_update_regions_stop() might not yet called */ - kdamond = damon_sysfs_cmd_request.kdamond; - cmd = damon_sysfs_cmd_request.cmd; - if (kdamond && ctx == kdamond->damon_ctx && - (cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS || - cmd == DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES) && - damon_sysfs_schemes_regions_updating) { - damon_sysfs_schemes_update_regions_stop(ctx); - damon_sysfs_schemes_regions_updating = false; - mutex_unlock(&damon_sysfs_lock); - } if (!damon_target_has_pid(ctx)) return; @@ -1232,28 +1216,6 @@ static int damon_sysfs_upd_schemes_stats(void *data) return 0; } -static int damon_sysfs_upd_schemes_regions_start( - struct damon_sysfs_kdamond *kdamond, bool total_bytes_only) -{ - struct damon_ctx *ctx = kdamond->damon_ctx; - - if (!ctx) - return -EINVAL; - return damon_sysfs_schemes_update_regions_start( - kdamond->contexts->contexts_arr[0]->schemes, ctx, - total_bytes_only); -} - -static int damon_sysfs_upd_schemes_regions_stop( - struct damon_sysfs_kdamond *kdamond) -{ - struct damon_ctx *ctx = kdamond->damon_ctx; - - if (!ctx) - return -EINVAL; - return damon_sysfs_schemes_update_regions_stop(ctx); -} - static inline bool damon_sysfs_kdamond_running( struct damon_sysfs_kdamond *kdamond) { @@ -1358,12 +1320,10 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active, bool after_aggregation) { struct damon_sysfs_kdamond *kdamond; - bool total_bytes_only = false; int err = 0; /* avoid deadlock due to concurrent state_store('off') */ - if (!damon_sysfs_schemes_regions_updating && - !mutex_trylock(&damon_sysfs_lock)) + if (!mutex_trylock(&damon_sysfs_lock)) return 0; kdamond = damon_sysfs_cmd_request.kdamond; if (!kdamond || kdamond->damon_ctx != c) @@ -1374,39 +1334,13 @@ static int damon_sysfs_cmd_request_callback(struct damon_ctx *c, bool active, goto out; err = damon_sysfs_commit_input(kdamond); break; - case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_BYTES: - total_bytes_only = true; - fallthrough; - case DAMON_SYSFS_CMD_UPDATE_SCHEMES_TRIED_REGIONS: - if (!damon_sysfs_schemes_regions_updating) { - err = damon_sysfs_upd_schemes_regions_start(kdamond, - total_bytes_only); - if (!err) { - damon_sysfs_schemes_regions_updating = true; - goto keep_lock_out; - } - } else { - damos_sysfs_mark_finished_regions_updates(c); - /* - * Continue regions updating if DAMON is till - * active and the update for all schemes is not - * finished. - */ - if (active && !damos_sysfs_regions_upd_done()) - goto keep_lock_out; - err = damon_sysfs_upd_schemes_regions_stop(kdamond); - damon_sysfs_schemes_regions_updating = false; - } - break; default: break; } /* Mark the request as invalid now. 
*/ damon_sysfs_cmd_request.kdamond = NULL; out: - if (!damon_sysfs_schemes_regions_updating) - mutex_unlock(&damon_sysfs_lock); -keep_lock_out: + mutex_unlock(&damon_sysfs_lock); return err; } From 6aa2a446f695601c1b0eaca457afb04f0c80d583 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:33:46 -0800 Subject: [PATCH 269/504] mm/damon: clarify trying vs applying on damos_stat kernel-doc comment Patch series "mm/damon: enable page level properties based monitoring". TL; DR ====== This patch series enables access monitoring based on page level properties including their anonymousness, belonging cgroups and young-ness, by extending DAMOS stats and regions walk features with region-internal DAMOS filters. Background ========== DAMOS has initially developed for only access-aware system operations. But, efficient acces monitoring results querying is yet another major usage of today's DAMOS. DAMOS stats and regions walk, which exposes accumulated counts and per-region monitoring results that filtered by DAMOS parameters including target access pattern, quotas and DAMOS filters, are the key features for that usage. For tunings and investigations, it can be more useful if only the information can be exposed without making real system operational change. Special DAMOS action, DAMOS_STAT, was introduced for the purpose. DAMOS fundametally works with only access pattern information in region granularity. For some use cases, fixed and fine granularity information based on non access pattern properties can be useful, though. For example, on systems having swap devices that much faster than storage devices for files, DAMOS-based proactive reclaim need to be applied differently for anonymous pages and file-backed pages. DAMOS filters is a feature that makes it possible. It supports non access pattern information including page level properties such as anonymousness, belonging cgroups, and young-ness (whether the page has accessed since the last access check of it). The information can be useful for tuning and investigations. DAMOS stat exposes some of it via {nr,sz}_applied, but it is mixed with operation failures. Also, exposing the information without making system operation change is impossible, since DAMOS_STAT simply ignores the page level properties based DAMOS filters. Design ====== Expose the exact information for every DAMOS action including DAMOS_STAT by implementing below changes. Extend the interface for DAMON operations set layer, which contains the implementation of the page level filters, to report back the amount of memory that passed the region-internal DAMOS filters to the core layer. On the core layer, account the operations set layer reported stat with DAMOS stat for per-scheme monitoring. Also, pass the information to regions walk for per-region monitoring. In this way, DAMON API users can efficiently get the fine-grained information. For the user-space, make DAMON sysfs interface collects the information using the updated DAMON core API, and expose those to new per-scheme stats file and per-DAMOS-tried region properties file. Practical Usages ================ With this patch series, DAMON users can query how many bytes of regions of specific access temperature is backed by pages of specific type. The type can be any of DAMOS filter-supporting one, including anonymousness, belonging cgroups, and young-ness. For example, users can visualize access hotness-based page granulairty histogram for different cgroups, backing content type, or youngness. 
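For a rough feel of how such a query could look from user space, below is a minimal sketch that sums the per-region filter-passed bytes (exposed by a later patch of this series) into coarse access-frequency buckets. The kdamond/context/scheme indices (all 0) are only examples, and the tried_regions directories are assumed to have been populated beforehand via the corresponding state file command; in practice a tool such as damo would likely be used instead of open-coding this.

    #include <stdio.h>

    /*
     * Illustrative sketch only: sum per-region DAMOS filter-passed bytes
     * into coarse access-frequency buckets.  The kdamond/context/scheme
     * indices (all 0) are examples, and the tried_regions/ directories
     * are assumed to be already populated.
     */
    #define TRIED_REGIONS \
        "/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/tried_regions"

    static int read_val(int region, const char *file, unsigned long *val)
    {
        char path[512];
        FILE *f;
        int ok;

        snprintf(path, sizeof(path), TRIED_REGIONS "/%d/%s", region, file);
        f = fopen(path, "r");
        if (!f)
            return -1;
        ok = fscanf(f, "%lu", val) == 1 ? 0 : -1;
        fclose(f);
        return ok;
    }

    int main(void)
    {
        unsigned long hist[8] = { 0 };  /* filter-passed bytes per bucket */
        unsigned long nr_accesses, passed;
        int region, bucket;

        for (region = 0; ; region++) {
            if (read_val(region, "nr_accesses", &nr_accesses) ||
                read_val(region, "sz_filter_passed", &passed))
                break;  /* no more region directories */
            bucket = nr_accesses < 8 ? (int)nr_accesses : 7;
            hist[bucket] += passed;
        }
        for (bucket = 0; bucket < 8; bucket++)
            printf("nr_accesses bucket %d: %lu bytes\n", bucket,
                   hist[bucket]);
        return 0;
    }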
In future, it could be extended to more types such as whether it is THP, position on LRU lists, etc. This can be useful for estimating benefits of a new or an existing access-aware system optimizations without really committing the changes. Patches Sequence ================ The patches are constructed in four sub-sequences. First three patches (patches 1-3) update documents to have missing background knowledges and better structures for easily introducing followup changes. Following three patches (patches 4-6) change the operations set layer interface to report back the region-internal filter passed memory size, and make the operations set implementations support the changed symantic. Following five patches (patches 7-11) implement per-scheme accumulated stat for region-internal filter-passed memory size on core API (damos_stat) and DAMON sysfs interface. First two patches of those are for code change, and following three patches are for documentation. Finally, five patches (patches 12-16) implementing per-region region-internal filter-passed memory size follows. Similar to that for per-scheme stat, first two patches implement core-API and sysfs interface change. Then three patches for documentation update follow. This patch (of 16): DAMOS stat kernel-doc documentation is using terms that bit ambiguous. Without reading the code, understanding it correctly is not that easy. Add the clarification on the kernel-doc comment. Link: https://lkml.kernel.org/r/20250106193401.109161-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250106193401.109161-2-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/include/linux/damon.h b/include/linux/damon.h index 2889de3526c3..b85eae388f5b 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -287,6 +287,23 @@ struct damos_watermarks { * @nr_applied: Total number of regions that the scheme is applied. * @sz_applied: Total size of regions that the scheme is applied. * @qt_exceeds: Total number of times the quota of the scheme has exceeded. + * + * "Tried an action to a region" in this context means the DAMOS core logic + * determined the region as eligible to apply the action. The access pattern + * (&struct damos_access_pattern), quotas (&struct damos_quota), watermarks + * (&struct damos_watermarks) and filters (&struct damos_filter) that handled + * on core logic can affect this. The core logic asks the operation set + * (&struct damon_operations) to apply the action to the region. + * + * "Applied an action to a region" in this context means the operation set + * (&struct damon_operations) successfully applied the action to the region, at + * least to a part of the region. The filters (&struct damos_filter) that + * handled on operation set layer and type of the action and pages of the + * region can affect this. For example, if a filter is set to exclude + * anonymous pages and the region has only anonymous pages, the region will be + * failed at applying the action. If the action is &DAMOS_PAGEOUT and all + * pages of the region are already paged out, the region will be failed at + * applying the action. 
*/ struct damos_stat { unsigned long nr_tried; From 152058517ecf1e465b43dc648e5138cb6022821c Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:33:47 -0800 Subject: [PATCH 270/504] Docs/mm/damon/design: add 'statistics' section DAMOS stats are important feature for tuning of DAMOS-based access-aware system operation, and efficient access pattern monitoring. But not well documented on the design document. Add a section on the document. Link: https://lkml.kernel.org/r/20250106193401.109161-3-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 38 +++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index a577ae40e71c..d0f3bd92ed13 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -539,6 +539,44 @@ To know how user-space can set the watermarks via :ref:`DAMON sysfs interface `, refer to :ref:`filters ` part of the documentation. +Statistics +~~~~~~~~~~ + +The statistics of DAMOS behaviors that designed to help monitoring, tuning and +debugging of DAMOS. + +DAMOS accounts below statistics for each scheme, from the beginning of the +scheme's execution. + +- ``nr_tried``: Total number of regions that the scheme is tried to be applied. +- ``sz_trtied``: Total size of regions that the scheme is tried to be applied. +- ``nr_applied``: Total number of regions that the scheme is applied. +- ``sz_applied``: Total size of regions that the scheme is applied. +- ``qt_exceeds``: Total number of times the quota of the scheme has exceeded. + +"A scheme is tried to be applied to a region" means DAMOS core logic determined +the region is eligible to apply the scheme's :ref:`action +`. The :ref:`access pattern +`, :ref:`quotas +`, :ref:`watermarks +`, and :ref:`filters +` that handled on core logic could affect this. +The core logic will only ask the underlying :ref:`operation set +` to do apply the action to the region, so whether the +action is really applied or not is unclear. That's why it is called "tried". + +"A scheme is applied to a region" means the :ref:`operation set +` has applied the action to at least a part of the +region. The :ref:`filters ` that handled by the +operation set, and the types of the :ref:`action ` +and the pages of the region can affect this. For example, if a filter is set +to exclude anonymous pages and the region has only anonymous pages, or if the +action is ``pageout`` while all pages of the region are unreclaimable, applying +the action to the region will fail. + +To know how user-space can read the stats via :ref:`DAMON sysfs interface +`, refer to :ref:s`stats ` part of the +documentation. Regions Walking ~~~~~~~~~~~~~~~ From 5a1ec609456bd7c201f336d5ed75909fd7bd26c6 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:33:48 -0800 Subject: [PATCH 271/504] Docs/admin-guide/mm/damon/usage: link damos stat design doc DAMON sysfs usage document focuses on usage, rather than the detail of the stat metric itself. Add a link to the design document on DAMOS stat usage section. 
Link: https://lkml.kernel.org/r/20250106193401.109161-4-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/damon/usage.rst | 3 ++- Documentation/mm/damon/design.rst | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index d9be9f7caa7d..af70f7244700 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -457,7 +457,8 @@ schemes//stats/ DAMON counts the total number and bytes of regions that each scheme is tried to be applied, the two numbers for the regions that each scheme is successfully applied, and the total number of the quota limit exceeds. This statistics can -be used for online analysis or tuning of the schemes. +be used for online analysis or tuning of the schemes. Refer to :ref:`design +doc ` for more details about the stats. The statistics can be retrieved by reading the files under ``stats`` directory (``nr_tried``, ``sz_tried``, ``nr_applied``, ``sz_applied``, and diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index d0f3bd92ed13..8b8be42c64b9 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -539,6 +539,8 @@ To know how user-space can set the watermarks via :ref:`DAMON sysfs interface `, refer to :ref:`filters ` part of the documentation. +.. _damon_design_damos_stat: + Statistics ~~~~~~~~~~ From d8acf8a8761484cc281c646766309b384dffbd91 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:33:49 -0800 Subject: [PATCH 272/504] mm/damon: ask apply_scheme() to report filter-passed region-internal bytes Some DAMOS filter types including those for young page, anon page, and belonging memcg are handled by underlying DAMON operations set implementation, via damon_operations->apply_scheme() interface. How many bytes of the region have passed the filter can be useful for DAMOS scheme tuning and access pattern monitoring. Modify the interface to let the callback implementation reports back the number if possible. Link: https://lkml.kernel.org/r/20250106193401.109161-5-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 5 +++-- mm/damon/core.c | 4 +++- mm/damon/paddr.c | 2 +- mm/damon/vaddr.c | 2 +- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index b85eae388f5b..da003173210f 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -529,7 +529,8 @@ enum damon_ops_id { * @apply_scheme is called from @kdamond when a region for user provided * DAMON-based operation scheme is found. It should apply the scheme's action * to the region and return bytes of the region that the action is successfully - * applied. + * applied. It should also report how many bytes of the region has passed + * filters (&struct damos_filter) that handled by itself. * @target_valid should check whether the target is still valid for the * monitoring. * @cleanup is called from @kdamond just before its termination. 
@@ -546,7 +547,7 @@ struct damon_operations { struct damos *scheme); unsigned long (*apply_scheme)(struct damon_ctx *context, struct damon_target *t, struct damon_region *r, - struct damos *scheme); + struct damos *scheme, unsigned long *sz_filter_passed); bool (*target_valid)(struct damon_target *t); void (*cleanup)(struct damon_ctx *context); }; diff --git a/mm/damon/core.c b/mm/damon/core.c index d02a7d6da855..c6ccb4825c57 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1523,6 +1523,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, unsigned long sz = damon_sz_region(r); struct timespec64 begin, end; unsigned long sz_applied = 0; + unsigned long sz_ops_filter_passed = 0; int err = 0; /* * We plan to support multiple context per kdamond, as DAMON sysfs @@ -1568,7 +1569,8 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, if (!err) { trace_damos_before_apply(cidx, sidx, tidx, r, damon_nr_regions(t), do_trace); - sz_applied = c->ops.apply_scheme(c, t, r, s); + sz_applied = c->ops.apply_scheme(c, t, r, s, + &sz_ops_filter_passed); } damos_walk_call_walk(c, t, r, s); ktime_get_coarse_ts64(&end); diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index a9ff35341d65..3530ef9c80bd 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -477,7 +477,7 @@ put_folio: static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, - struct damos *scheme) + struct damos *scheme, unsigned long *sz_filter_passed) { switch (scheme->action) { case DAMOS_PAGEOUT: diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index b9eaa20b73b9..a6174f725bd7 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -655,7 +655,7 @@ static unsigned long damos_madvise(struct damon_target *target, static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, - struct damos *scheme) + struct damos *scheme, unsigned long *sz_filter_passed) { int madv_action; From 107ba69d3caf4526a1545c4e38c142d8ddd55448 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:33:50 -0800 Subject: [PATCH 273/504] mm/damon/paddr: report filter-passed bytes back for normal actions damon_operations->apply_scheme() implementations are requested to report back how many bytes of the given region has passed DAMOS filter. 'paddr' operations set implementation supports some of region-internal DAMOS filter handling for normal DAMOS actions except DAMOS_STAT action. But, those are not respecting the request. Report the region-internal DAMOS filter-passed bytes back for the actions. 
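For reference, the shape of the convention the 'paddr' actions follow is roughly the below. This is a condensed, illustrative sketch that would only build in the context of mm/damon/paddr.c (the function name is hypothetical and the action body is elided); a folio is counted as filter-passed right after it clears the region-internal filters, before the action is attempted on it.

    static unsigned long damon_pa_example_action(struct damon_region *r,
                    struct damos *s, unsigned long *sz_filter_passed)
    {
            unsigned long addr, applied = 0;

            for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
                    struct folio *folio = damon_get_folio(PHYS_PFN(addr));

                    if (!folio)
                            continue;
                    if (damos_pa_filter_out(s, folio))
                            goto put_folio;
                    /* the folio passed every ops layer-handled filter */
                    *sz_filter_passed += folio_size(folio);
                    /* ... try to apply the action to the folio here ... */
                    applied += folio_nr_pages(folio);
    put_folio:
                    folio_put(folio);
            }
            return applied * PAGE_SIZE;
    }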
Link: https://lkml.kernel.org/r/20250106193401.109161-6-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/paddr.c | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 3530ef9c80bd..5944316a0b4c 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -243,7 +243,8 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio) return false; } -static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s) +static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, + unsigned long *sz_filter_passed) { unsigned long addr, applied; LIST_HEAD(folio_list); @@ -272,6 +273,8 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s) if (damos_pa_filter_out(s, folio)) goto put_folio; + else + *sz_filter_passed += folio_size(folio); folio_clear_referenced(folio); folio_test_clear_young(folio); @@ -292,7 +295,8 @@ put_folio: } static inline unsigned long damon_pa_mark_accessed_or_deactivate( - struct damon_region *r, struct damos *s, bool mark_accessed) + struct damon_region *r, struct damos *s, bool mark_accessed, + unsigned long *sz_filter_passed) { unsigned long addr, applied = 0; @@ -304,6 +308,8 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate( if (damos_pa_filter_out(s, folio)) goto put_folio; + else + *sz_filter_passed += folio_size(folio); if (mark_accessed) folio_mark_accessed(folio); @@ -317,15 +323,17 @@ put_folio: } static unsigned long damon_pa_mark_accessed(struct damon_region *r, - struct damos *s) + struct damos *s, unsigned long *sz_filter_passed) { - return damon_pa_mark_accessed_or_deactivate(r, s, true); + return damon_pa_mark_accessed_or_deactivate(r, s, true, + sz_filter_passed); } static unsigned long damon_pa_deactivate_pages(struct damon_region *r, - struct damos *s) + struct damos *s, unsigned long *sz_filter_passed) { - return damon_pa_mark_accessed_or_deactivate(r, s, false); + return damon_pa_mark_accessed_or_deactivate(r, s, false, + sz_filter_passed); } static unsigned int __damon_pa_migrate_folio_list( @@ -449,7 +457,8 @@ static unsigned long damon_pa_migrate_pages(struct list_head *folio_list, return nr_migrated; } -static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s) +static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s, + unsigned long *sz_filter_passed) { unsigned long addr, applied; LIST_HEAD(folio_list); @@ -462,6 +471,8 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s) if (damos_pa_filter_out(s, folio)) goto put_folio; + else + *sz_filter_passed += folio_size(folio); if (!folio_isolate_lru(folio)) goto put_folio; @@ -481,14 +492,14 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx, { switch (scheme->action) { case DAMOS_PAGEOUT: - return damon_pa_pageout(r, scheme); + return damon_pa_pageout(r, scheme, sz_filter_passed); case DAMOS_LRU_PRIO: - return damon_pa_mark_accessed(r, scheme); + return damon_pa_mark_accessed(r, scheme, sz_filter_passed); case DAMOS_LRU_DEPRIO: - return damon_pa_deactivate_pages(r, scheme); + return damon_pa_deactivate_pages(r, scheme, sz_filter_passed); case DAMOS_MIGRATE_HOT: case DAMOS_MIGRATE_COLD: - return damon_pa_migrate(r, scheme); + return damon_pa_migrate(r, scheme, sz_filter_passed); case DAMOS_STAT: break; default: From c6210f1c3157f37d410213dfd6ba288c5e24eba6 Mon Sep 17 00:00:00 2001 From: 
SeongJae Park Date: Mon, 6 Jan 2025 11:33:51 -0800 Subject: [PATCH 274/504] mm/damon/paddr: report filter-passed bytes back for DAMOS_STAT action DAMOS_STAT action handling of paddr DAMON operations set implementation is simply ignoring the region-internal DAMOS filters, and therefore not reporting back the filter-passed bytes. Apply the filters and report back the information. Before this change, DAMOS_STAT was doing nothing for DAMOS filters. Hence users might see some performance regressions. Such regression for use cases where no region-internal DAMOS filter is added to the scheme will be negligible, since this change avoids unnecessary filtering works if no such filter is installed. For old users who are using DAMOS_STAT with the types of filters, the regression could be visible depending on the size of the region and the overhead of the installed DAMOS filters. But, because the filters were completely ignored before in the use case, no real users would really depend on such use case that makes no point. Link: https://lkml.kernel.org/r/20250106193401.109161-7-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/paddr.c | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 5944316a0b4c..b0c283808ba6 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -485,6 +485,39 @@ put_folio: return applied * PAGE_SIZE; } +static bool damon_pa_scheme_has_filter(struct damos *s) +{ + struct damos_filter *f; + + damos_for_each_filter(f, s) + return true; + return false; +} + +static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s, + unsigned long *sz_filter_passed) +{ + unsigned long addr; + LIST_HEAD(folio_list); + + if (!damon_pa_scheme_has_filter(s)) + return 0; + + for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) { + struct folio *folio = damon_get_folio(PHYS_PFN(addr)); + + if (!folio) + continue; + + if (damos_pa_filter_out(s, folio)) + goto put_folio; + else + *sz_filter_passed += folio_size(folio); +put_folio: + folio_put(folio); + } + return 0; +} static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, @@ -501,7 +534,7 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx, case DAMOS_MIGRATE_COLD: return damon_pa_migrate(r, scheme, sz_filter_passed); case DAMOS_STAT: - break; + return damon_pa_stat(r, scheme, sz_filter_passed); default: /* DAMOS actions that not yet supported by 'paddr'. */ break; From d82f385f8a5cdbfdfe0b970390f9f553d09bbe5c Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:33:52 -0800 Subject: [PATCH 275/504] mm/damon/core: implement per-scheme ops-handled filter-passed bytes stat Implement a new per-DAMOS scheme statistic field, namely sz_ops_filter_passed, using the changed damon_operations->apply_scheme() interface. It counts total bytes of memory that given DAMOS action tried to be applied, and passed the operations layer handled region-internal filters of the scheme. DAMON API users can access it using DAMON-internal safe access features such as damon_call() and/or damos_walk(). 
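For example, a kernel-space DAMON API user could read the accumulated value as below. This is only an illustrative sketch assuming <linux/damon.h> and <linux/printk.h>; it is expected to be invoked from a context that is safe against the kdamond, such as a damon_call() or damos_walk() callback.

    /* Illustrative sketch: dump each scheme's ops-layer filter-passed bytes. */
    static void example_dump_sz_ops_filter_passed(struct damon_ctx *ctx)
    {
            struct damos *s;
            int idx = 0;

            damon_for_each_scheme(s, ctx)
                    pr_info("scheme %d: %lu bytes passed ops-set filters\n",
                                    idx++, s->stat.sz_ops_filter_passed);
    }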
Link: https://lkml.kernel.org/r/20250106193401.109161-8-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 3 +++ mm/damon/core.c | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index da003173210f..2a93dbe06ecc 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -286,6 +286,8 @@ struct damos_watermarks { * @sz_tried: Total size of regions that the scheme is tried to be applied. * @nr_applied: Total number of regions that the scheme is applied. * @sz_applied: Total size of regions that the scheme is applied. + * @sz_ops_filter_passed: + * Total bytes that passed ops layer-handled DAMOS filters. * @qt_exceeds: Total number of times the quota of the scheme has exceeded. * * "Tried an action to a region" in this context means the DAMOS core logic @@ -310,6 +312,7 @@ struct damos_stat { unsigned long sz_tried; unsigned long nr_applied; unsigned long sz_applied; + unsigned long sz_ops_filter_passed; unsigned long qt_exceeds; }; diff --git a/mm/damon/core.c b/mm/damon/core.c index c6ccb4825c57..c4ce72a86d81 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1362,13 +1362,15 @@ static bool damos_skip_charged_region(struct damon_target *t, } static void damos_update_stat(struct damos *s, - unsigned long sz_tried, unsigned long sz_applied) + unsigned long sz_tried, unsigned long sz_applied, + unsigned long sz_ops_filter_passed) { s->stat.nr_tried++; s->stat.sz_tried += sz_tried; if (sz_applied) s->stat.nr_applied++; s->stat.sz_applied += sz_applied; + s->stat.sz_ops_filter_passed += sz_ops_filter_passed; } static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, @@ -1586,7 +1588,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, r->age = 0; update_stat: - damos_update_stat(s, sz, sz_applied); + damos_update_stat(s, sz, sz_applied, sz_ops_filter_passed); } static void damon_do_apply_schemes(struct damon_ctx *c, From 54b0cc5cb8d59d73546080975e0ab83fcee96039 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:33:53 -0800 Subject: [PATCH 276/504] mm/damon/syfs-schemes: implement per-scheme filter-passed bytes stat Add a new DAMON sysfs interface file under scheme stat directory, namely 'sz_ops_filter_passed'. It represents total bytes that passed region-internal DAMOS filters of the scheme that handled by the DAMON operations set layer. 
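From user space, the new file can be read like the other stats files, after asking DAMON to refresh the stats via the 'update_schemes_stats' keyword of the state file; like the other stats files, the value is a snapshot taken at the last such request. A minimal sketch follows (the kdamond/context/scheme indices are illustrative).

    #include <stdio.h>

    /*
     * Illustrative sketch only: trigger a stats update for kdamond 0 and
     * read the sz_ops_filter_passed file of scheme 0 of context 0.  The
     * kdamond, context and scheme indices are examples.
     */
    int main(void)
    {
        const char *state =
            "/sys/kernel/mm/damon/admin/kdamonds/0/state";
        const char *stat =
            "/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/"
            "schemes/0/stats/sz_ops_filter_passed";
        unsigned long bytes;
        FILE *f;

        f = fopen(state, "w");
        if (!f)
            return 1;
        fputs("update_schemes_stats\n", f);  /* ask kdamond to refresh stats */
        fclose(f);

        f = fopen(stat, "r");
        if (!f)
            return 1;
        if (fscanf(f, "%lu", &bytes) == 1)
            printf("ops-layer filter-passed: %lu bytes\n", bytes);
        fclose(f);
        return 0;
    }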
Link: https://lkml.kernel.org/r/20250106193401.109161-9-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs-schemes.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 5c4490b97258..b447c412b02c 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -188,6 +188,7 @@ struct damon_sysfs_stats { unsigned long sz_tried; unsigned long nr_applied; unsigned long sz_applied; + unsigned long sz_ops_filter_passed; unsigned long qt_exceeds; }; @@ -232,6 +233,15 @@ static ssize_t sz_applied_show(struct kobject *kobj, return sysfs_emit(buf, "%lu\n", stats->sz_applied); } +static ssize_t sz_ops_filter_passed_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_stats *stats = container_of(kobj, + struct damon_sysfs_stats, kobj); + + return sysfs_emit(buf, "%lu\n", stats->sz_ops_filter_passed); +} + static ssize_t qt_exceeds_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -258,6 +268,9 @@ static struct kobj_attribute damon_sysfs_stats_nr_applied_attr = static struct kobj_attribute damon_sysfs_stats_sz_applied_attr = __ATTR_RO_MODE(sz_applied, 0400); +static struct kobj_attribute damon_sysfs_stats_sz_ops_filter_passed_attr = + __ATTR_RO_MODE(sz_ops_filter_passed, 0400); + static struct kobj_attribute damon_sysfs_stats_qt_exceeds_attr = __ATTR_RO_MODE(qt_exceeds, 0400); @@ -266,6 +279,7 @@ static struct attribute *damon_sysfs_stats_attrs[] = { &damon_sysfs_stats_sz_tried_attr.attr, &damon_sysfs_stats_nr_applied_attr.attr, &damon_sysfs_stats_sz_applied_attr.attr, + &damon_sysfs_stats_sz_ops_filter_passed_attr.attr, &damon_sysfs_stats_qt_exceeds_attr.attr, NULL, }; @@ -2077,6 +2091,8 @@ void damon_sysfs_schemes_update_stats( sysfs_stats->sz_tried = scheme->stat.sz_tried; sysfs_stats->nr_applied = scheme->stat.nr_applied; sysfs_stats->sz_applied = scheme->stat.sz_applied; + sysfs_stats->sz_ops_filter_passed = + scheme->stat.sz_ops_filter_passed; sysfs_stats->qt_exceeds = scheme->stat.qt_exceeds; } } From 1397508af51db642756f9b50cd6e1f78db196656 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:33:54 -0800 Subject: [PATCH 277/504] Docs/mm/damon/design: document sz_ops_filter_passed Document the new per-scheme accumulated stat for total bytes that passed the operations set layer-handled DAMOS filters on the design document. Link: https://lkml.kernel.org/r/20250106193401.109161-10-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index 8b8be42c64b9..5707e2e0e4a4 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -552,6 +552,8 @@ scheme's execution. - ``nr_tried``: Total number of regions that the scheme is tried to be applied. - ``sz_trtied``: Total size of regions that the scheme is tried to be applied. +- ``sz_ops_filter_passed``: Total bytes that passed operations set + layer-handled DAMOS filters. - ``nr_applied``: Total number of regions that the scheme is applied. - ``sz_applied``: Total size of regions that the scheme is applied. - ``qt_exceeds``: Total number of times the quota of the scheme has exceeded. 
From 6e5b3c53c72e0618ce6161a48be958cd924742f8 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:33:55 -0800 Subject: [PATCH 278/504] Docs/admin-guide/mm/damon/usage: document sz_ops_filter_passed Document the new per-scheme operations set layer-handled DAMOS filters passed bytes statistic file on DAMON sysfs interface usage document. Link: https://lkml.kernel.org/r/20250106193401.109161-11-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/damon/usage.rst | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index af70f7244700..179a9060a32e 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -90,7 +90,7 @@ comma (","). │ │ │ │ │ │ │ :ref:`watermarks `/metric,interval_us,high,mid,low │ │ │ │ │ │ │ :ref:`filters `/nr_filters │ │ │ │ │ │ │ │ 0/type,matching,memcg_id - │ │ │ │ │ │ │ :ref:`stats `/nr_tried,sz_tried,nr_applied,sz_applied,qt_exceeds + │ │ │ │ │ │ │ :ref:`stats `/nr_tried,sz_tried,nr_applied,sz_applied,sz_ops_filter_passed,qt_exceeds │ │ │ │ │ │ │ :ref:`tried_regions `/total_bytes │ │ │ │ │ │ │ │ 0/start,end,nr_accesses,age │ │ │ │ │ │ │ │ ... @@ -454,18 +454,16 @@ difference is applied to :ref:`stats ` and schemes//stats/ ------------------ -DAMON counts the total number and bytes of regions that each scheme is tried to -be applied, the two numbers for the regions that each scheme is successfully -applied, and the total number of the quota limit exceeds. This statistics can -be used for online analysis or tuning of the schemes. Refer to :ref:`design -doc ` for more details about the stats. +DAMON counts statistics for each scheme. This statistics can be used for +online analysis or tuning of the schemes. Refer to :ref:`design doc +` for more details about the stats. The statistics can be retrieved by reading the files under ``stats`` directory -(``nr_tried``, ``sz_tried``, ``nr_applied``, ``sz_applied``, and -``qt_exceeds``), respectively. The files are not updated in real time, so you -should ask DAMON sysfs interface to update the content of the files for the -stats by writing a special keyword, ``update_schemes_stats`` to the relevant -``kdamonds//state`` file. +(``nr_tried``, ``sz_tried``, ``nr_applied``, ``sz_applied``, +``sz_ops_filter_passed``, and ``qt_exceeds``), respectively. The files are not +updated in real time, so you should ask DAMON sysfs interface to update the +content of the files for the stats by writing a special keyword, +``update_schemes_stats`` to the relevant ``kdamonds//state`` file. .. _sysfs_schemes_tried_regions: From 0d360338e353b5c3e26fbf15f6f77ec98539c802 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:33:56 -0800 Subject: [PATCH 279/504] Docs/ABI/damon: document per-scheme filter-passed bytes stat file Document the new ABI for per-scheme operations set layer-handled DAMOS filters passed bytes statistic on the ABI document. 
Link: https://lkml.kernel.org/r/20250106193401.109161-12-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/ABI/testing/sysfs-kernel-mm-damon | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon index f1b90cf1249b..19cde386fd15 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-damon +++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon @@ -384,6 +384,12 @@ Contact: SeongJae Park Description: Reading this file returns the total size of regions that the action of the scheme has successfully applied in bytes. +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//stats/sz_ops_filter_passed +Date: Dec 2024 +Contact: SeongJae Park +Description: Reading this file returns the total size of memory that passed + DAMON operations layer-handled filters of the scheme in bytes. + What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//stats/qt_exceeds Date: Mar 2022 Contact: SeongJae Park From c08d8a75c4627ca770f38eb47270eaeb2c0764b8 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:33:57 -0800 Subject: [PATCH 280/504] mm/damon/core: pass per-region filter-passed bytes to damos_walk_control->walk_fn() Total size of memory that passed DAMON operations set layer-handled DAMOS filters per scheme is provided to DAMON core API and ABI (sysfs interface) users. Having it per-region in non-accumulated way can provide it in finer granularity. Provide it to damos_walk() core API users, by passing the data to damos_walk_control->walk_fn(). Link: https://lkml.kernel.org/r/20250106193401.109161-13-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 2 +- mm/damon/core.c | 7 ++++--- mm/damon/sysfs.c | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index 2a93dbe06ecc..298b1a831e62 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -388,7 +388,7 @@ struct damos; struct damos_walk_control { void (*walk_fn)(void *data, struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, - struct damos *s); + struct damos *s, unsigned long sz_filter_passed); void *data; /* private: internal use only */ /* informs if the kdamond finished handling of the walk request */ diff --git a/mm/damon/core.c b/mm/damon/core.c index c4ce72a86d81..52e50f183ffe 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1445,7 +1445,8 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, * installed by damos_walk() and not yet uninstalled, invoke it. 
*/ static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t, - struct damon_region *r, struct damos *s) + struct damon_region *r, struct damos *s, + unsigned long sz_filter_passed) { struct damos_walk_control *control; @@ -1454,7 +1455,7 @@ static void damos_walk_call_walk(struct damon_ctx *ctx, struct damon_target *t, mutex_unlock(&ctx->walk_control_lock); if (!control) return; - control->walk_fn(control->data, ctx, t, r, s); + control->walk_fn(control->data, ctx, t, r, s, sz_filter_passed); } /* @@ -1574,7 +1575,7 @@ static void damos_apply_scheme(struct damon_ctx *c, struct damon_target *t, sz_applied = c->ops.apply_scheme(c, t, r, s, &sz_ops_filter_passed); } - damos_walk_call_walk(c, t, r, s); + damos_walk_call_walk(c, t, r, s, sz_ops_filter_passed); ktime_get_coarse_ts64(&end); quota->total_charged_ns += timespec64_to_ns(&end) - timespec64_to_ns(&begin); diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index cf8fb5a963d6..224873ca8aa6 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1454,7 +1454,7 @@ struct damon_sysfs_schemes_walk_data { /* populate the region directory */ static void damon_sysfs_schemes_tried_regions_upd_one(void *data, struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, - struct damos *s) + struct damos *s, unsigned long sz_filter_passed) { struct damon_sysfs_schemes_walk_data *walk_data = data; struct damon_sysfs_kdamond *sysfs_kdamond = walk_data->sysfs_kdamond; From 8fb0d8f40ce5e97602601732bb50a1e9922b8249 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:33:58 -0800 Subject: [PATCH 281/504] mm/damon/sysfs-schemes: expose per-region filter-passed bytes Per-region operations set-handled DAMOS filters passed memory size information is provided to only DAMON core API users. Further expose it to the user space by adding a new DAMON sysfs interface file under each scheme tried region directory. 
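The same per-region value is also available to kernel-side users via damos_walk(), which the previous patch wired up; the sysfs file added here is the user-space counterpart. Below is a minimal sketch of such a kernel-side consumer; only the walk_fn signature comes from the previous patch, while damos_walk() and the public walk_fn/data fields of struct damos_walk_control are assumed to be as introduced earlier in DAMON core and not shown in this series.

    /*
     * Illustrative sketch: sum the ops layer filter-passed bytes of every
     * region that the schemes have just been applied to.  Assumes
     * damos_walk() handles the private fields of the control internally.
     */
    static void example_walk_fn(void *data, struct damon_ctx *ctx,
                    struct damon_target *t, struct damon_region *r,
                    struct damos *s, unsigned long sz_filter_passed)
    {
            unsigned long *total = data;

            *total += sz_filter_passed;
    }

    static int example_sum_filter_passed(struct damon_ctx *ctx,
                    unsigned long *total)
    {
            struct damos_walk_control control = {
                    .walk_fn = example_walk_fn,
                    .data = total,
            };

            *total = 0;
            return damos_walk(ctx, &control);
    }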
Link: https://lkml.kernel.org/r/20250106193401.109161-14-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs-common.h | 2 +- mm/damon/sysfs-schemes.c | 19 ++++++++++++++++++- mm/damon/sysfs.c | 3 ++- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/mm/damon/sysfs-common.h b/mm/damon/sysfs-common.h index b3f63bc658b7..70d84bdc9f5f 100644 --- a/mm/damon/sysfs-common.h +++ b/mm/damon/sysfs-common.h @@ -48,7 +48,7 @@ void damon_sysfs_schemes_update_stats( void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, struct damos *s, - bool total_bytes_only); + bool total_bytes_only, unsigned long sz_filter_passed); int damon_sysfs_schemes_clear_regions( struct damon_sysfs_schemes *sysfs_schemes); diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index b447c412b02c..deeaf23c1fcf 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -19,6 +19,7 @@ struct damon_sysfs_scheme_region { struct damon_addr_range ar; unsigned int nr_accesses; unsigned int age; + unsigned long sz_filter_passed; struct list_head list; }; @@ -74,6 +75,15 @@ static ssize_t age_show(struct kobject *kobj, struct kobj_attribute *attr, return sysfs_emit(buf, "%u\n", region->age); } +static ssize_t sz_filter_passed_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_scheme_region *region = container_of(kobj, + struct damon_sysfs_scheme_region, kobj); + + return sysfs_emit(buf, "%lu\n", region->sz_filter_passed); +} + static void damon_sysfs_scheme_region_release(struct kobject *kobj) { struct damon_sysfs_scheme_region *region = container_of(kobj, @@ -95,11 +105,15 @@ static struct kobj_attribute damon_sysfs_scheme_region_nr_accesses_attr = static struct kobj_attribute damon_sysfs_scheme_region_age_attr = __ATTR_RO_MODE(age, 0400); +static struct kobj_attribute damon_sysfs_scheme_region_sz_filter_passed_attr = + __ATTR_RO_MODE(sz_filter_passed, 0400); + static struct attribute *damon_sysfs_scheme_region_attrs[] = { &damon_sysfs_scheme_region_start_attr.attr, &damon_sysfs_scheme_region_end_attr.attr, &damon_sysfs_scheme_region_nr_accesses_attr.attr, &damon_sysfs_scheme_region_age_attr.attr, + &damon_sysfs_scheme_region_sz_filter_passed_attr.attr, NULL, }; ATTRIBUTE_GROUPS(damon_sysfs_scheme_region); @@ -2105,12 +2119,14 @@ void damon_sysfs_schemes_update_stats( * @r: DAMON region to populate the directory for. * @s: Corresponding scheme. * @total_bytes_only: Whether the request is for bytes update only. + * @sz_filter_passed: Bytes of @r that passed filters of @s. * * Called from DAMOS walk callback while holding damon_sysfs_lock. 
*/ void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes, struct damon_ctx *ctx, struct damon_target *t, - struct damon_region *r, struct damos *s, bool total_bytes_only) + struct damon_region *r, struct damos *s, bool total_bytes_only, + unsigned long sz_filter_passed) { struct damos *scheme; struct damon_sysfs_scheme_regions *sysfs_regions; @@ -2135,6 +2151,7 @@ void damos_sysfs_populate_region_dir(struct damon_sysfs_schemes *sysfs_schemes, region = damon_sysfs_scheme_region_alloc(r); if (!region) return; + region->sz_filter_passed = sz_filter_passed; list_add_tail(®ion->list, &sysfs_regions->regions_list); sysfs_regions->nr_regions++; if (kobject_init_and_add(®ion->kobj, diff --git a/mm/damon/sysfs.c b/mm/damon/sysfs.c index 224873ca8aa6..deeab04d3b46 100644 --- a/mm/damon/sysfs.c +++ b/mm/damon/sysfs.c @@ -1461,7 +1461,8 @@ static void damon_sysfs_schemes_tried_regions_upd_one(void *data, struct damon_c damos_sysfs_populate_region_dir( sysfs_kdamond->contexts->contexts_arr[0]->schemes, - ctx, t, r, s, walk_data->total_bytes_only); + ctx, t, r, s, walk_data->total_bytes_only, + sz_filter_passed); } static int damon_sysfs_update_schemes_tried_regions( From 7f1dd4c103c3340fa0cf44303a25ddacf721b6d9 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:33:59 -0800 Subject: [PATCH 282/504] Docs/mm/damon/design: document per-region sz_filter_passed stat Update 'Regions Walking' section of design document for the newly added per-region operations set handling DAMOS filters-passed bytes. Link: https://lkml.kernel.org/r/20250106193401.109161-15-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index 5707e2e0e4a4..5ebb572d0999 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -587,7 +587,8 @@ Regions Walking DAMOS feature allowing users access each region that a DAMOS action has just applied. Using this feature, DAMON :ref:`API ` allows users -access full properties of the regions including the access monitoring results. +access full properties of the regions including the access monitoring results +and amount of the region's internal memory that passed the DAMOS filters. :ref:`DAMON sysfs interface ` also allows users read the data via special :ref:`files `. From 59096b6c8d54461214c5bc915ab541894201441b Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:34:00 -0800 Subject: [PATCH 283/504] Docs/admin-guide/mm/damon/usage: document sz_filtered_out of scheme tried region directories Document the newly added DAMON sysfs interface file for per-scheme-tried region's bytes that passed the operations set handling DAMOS filters. Link: https://lkml.kernel.org/r/20250106193401.109161-16-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/damon/usage.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index 179a9060a32e..a891e830c2a1 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -92,7 +92,7 @@ comma (","). 
│ │ │ │ │ │ │ │ 0/type,matching,memcg_id │ │ │ │ │ │ │ :ref:`stats `/nr_tried,sz_tried,nr_applied,sz_applied,sz_ops_filter_passed,qt_exceeds │ │ │ │ │ │ │ :ref:`tried_regions `/total_bytes - │ │ │ │ │ │ │ │ 0/start,end,nr_accesses,age + │ │ │ │ │ │ │ │ 0/start,end,nr_accesses,age,sz_filter_passed │ │ │ │ │ │ │ │ ... │ │ │ │ │ │ ... │ │ │ │ ... @@ -500,10 +500,10 @@ set the ``access pattern`` as their interested pattern that they want to query. tried_regions// ------------------ -In each region directory, you will find four files (``start``, ``end``, -``nr_accesses``, and ``age``). Reading the files will show the start and end -addresses, ``nr_accesses``, and ``age`` of the region that corresponding -DAMON-based operation scheme ``action`` has tried to be applied. +In each region directory, you will find five files (``start``, ``end``, +``nr_accesses``, ``age``, and ``sz_filter_passed``). Reading the files will +show the properties of the region that corresponding DAMON-based operation +scheme ``action`` has tried to be applied. Example ~~~~~~~ From 5170da54f920bf8b1543f62ffa45b5087f85e0ad Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:34:01 -0800 Subject: [PATCH 284/504] Docs/ABI/damon: document per-region DAMOS filter-passed bytes stat file Document the new ABI for per-region operations set layer-handled DAMOS filters passed bytes statistic. Link: https://lkml.kernel.org/r/20250106193401.109161-17-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/ABI/testing/sysfs-kernel-mm-damon | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon index 19cde386fd15..8c0acb31638b 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-damon +++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon @@ -430,3 +430,10 @@ Contact: SeongJae Park Description: Reading this file returns the 'age' of a memory region that corresponding DAMON-based Operation Scheme's action has tried to be applied. + +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//tried_regions//sz_filter_passed +Date: Dec 2024 +Contact: SeongJae Park +Description: Reading this file returns the size of the memory in the region + that passed DAMON operations layer-handled filters of the + scheme in bytes. From e16f831c0efb8caef8f1800ea603fde2d511ca83 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:19:34 -0800 Subject: [PATCH 285/504] Docs/translations/*/admin-guide/mm/damon/usage: remove DAMON debugfs interface documentation Patch series "mm/damon: remove DAMON debugfs interface". DAMON debugfs interface was the only user interface of DAMON at the beginning[1]. However, it turned out the interface would be not good enough for long-term flexibility and stability. In Feb 2022[2], we therefore introduced DAMON sysfs interface as an alternative user interface that aims long-term flexibility and stability. With its introduction, DAMON debugfs interface has announced to be deprecated in near future. In Feb 2023[3], we announced the official deprecation of DAMON debugfs interface. In Jan 2024[4], we further made the deprecation difficult to be ignored. In Oct 2024[5], we posted an RFC version of this patch series as the last notice. And as of this writing, no problem or concerns about the removal plan have reported. Apparently users are already moved to the alternative, or made good plans for the change. 
Remove the DAMON debugfs interface code from the tree. Given the past timeline and the absence of reported problems or concerns, it is safe enough to be done. [1] https://lore.kernel.org/20210716081449.22187-1-sj38.park@gmail.com [2] https://lore.kernel.org/20220228081314.5770-1-sj@kernel.org [3] https://lore.kernel.org/20230209192009.7885-1-sj@kernel.org [4] https://lore.kernel.org/20240130013549.89538-1-sj@kernel.org [5] https://lore.kernel.org/20241015175412.60563-1-sj@kernel.org This patch (of 8): It's time to remove DAMON debugfs interface, which has deprecated long before in February 2023. Read the cover letter of this patch sereis for more details. Remove DAMON debugfs interface usage documentation and references to it from translations, to avoid confusing users with documents for already removed things. Link: https://lkml.kernel.org/r/20250106191941.107070-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250106191941.107070-2-sj@kernel.org Signed-off-by: SeongJae Park Cc: Alex Shi Cc: Brendan Higgins Cc: David Gow Cc: Hu Haowen <2023002089@link.tyut.edu.cn> Cc: Jonathan Corbet Cc: Rae Moar Cc: Shuah Khan Cc: Yanteng Si Signed-off-by: Andrew Morton --- .../zh_CN/admin-guide/mm/damon/usage.rst | 248 +----------------- .../zh_TW/admin-guide/mm/damon/usage.rst | 248 +----------------- 2 files changed, 2 insertions(+), 494 deletions(-) diff --git a/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst b/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst index 50f6f0b6bf11..9d7cb51be493 100644 --- a/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst +++ b/Documentation/translations/zh_CN/admin-guide/mm/damon/usage.rst @@ -26,12 +26,7 @@ DAMON 为不同的用户提供了下面这些接口。 使用它,用户可以通过读取和写入特殊的sysfs文件来使用DAMON的主要功能。因此,你可以编写和使 用你个性化的DAMON sysfs包装程序,代替你读/写sysfs文件。 `DAMON用户空间工具 `_ 就是这种程序的一个例子 它同时支持虚拟和物理地址 - 空间的监测。注意,这个界面只提供简单的监测结果 :ref:`统计 `。对于详细的监测 - 结果,DAMON提供了一个:ref:`跟踪点 `。 -- *debugfs interface.* - :ref:`这 ` 几乎与:ref:`sysfs interface ` 接 - 口相同。这将在下一个LTS内核发布后被移除,所以用户应该转移到 - :ref:`sysfs interface `。 + 空间的监测。 - *内核空间编程接口。* :doc:`这 ` 这是为内核空间程序员准备的。使用它,用户可以通过为你编写内 核空间的DAMON应用程序,最灵活有效地利用DAMON的每一个功能。你甚至可以为各种地址空间扩展DAMON。 @@ -335,247 +330,6 @@ tried_regions// 请注意,我们强烈建议使用用户空间的工具,如 `damo `_ , 而不是像上面那样手动读写文件。以上只是一个例子。 -debugfs接口 -=========== - -.. note:: - - DAMON debugfs接口将在下一个LTS内核发布后被移除,所以用户应该转移到 - :ref:`sysfs接口`。 - -DAMON导出了八个文件, ``attrs``, ``target_ids``, ``init_regions``, -``schemes``, ``monitor_on_DEPRECATED``, ``kdamond_pid``, ``mk_contexts`` 和 -``rm_contexts`` under its debugfs directory, ``/damon/``. 
- - -属性 ----- - -用户可以通过读取和写入 ``attrs`` 文件获得和设置 ``采样间隔`` 、 ``聚集间隔`` 、 ``更新间隔`` -以及监测目标区域的最小/最大数量。要详细了解监测属性,请参考 `:doc:/mm/damon/design` 。例如, -下面的命令将这些值设置为5ms、100ms、1000ms、10和1000,然后再次检查:: - - # cd /damon - # echo 5000 100000 1000000 10 1000 > attrs - # cat attrs - 5000 100000 1000000 10 1000 - - -目标ID ------- - -一些类型的地址空间支持多个监测目标。例如,虚拟内存地址空间的监测可以有多个进程作为监测目标。用户 -可以通过写入目标的相关id值来设置目标,并通过读取 ``target_ids`` 文件来获得当前目标的id。在监 -测虚拟地址空间的情况下,这些值应该是监测目标进程的pid。例如,下面的命令将pid为42和4242的进程设 -为监测目标,并再次检查:: - - # cd /damon - # echo 42 4242 > target_ids - # cat target_ids - 42 4242 - -用户还可以通过在文件中写入一个特殊的关键字 "paddr\n" 来监测系统的物理内存地址空间。因为物理地 -址空间监测不支持多个目标,读取文件会显示一个假值,即 ``42`` ,如下图所示:: - - # cd /damon - # echo paddr > target_ids - # cat target_ids - 42 - -请注意,设置目标ID并不启动监测。 - - -初始监测目标区域 ----------------- - -在虚拟地址空间监测的情况下,DAMON自动设置和更新监测的目标区域,这样就可以覆盖目标进程的整个 -内存映射。然而,用户可能希望将监测区域限制在特定的地址范围内,如堆、栈或特定的文件映射区域。 -或者,一些用户可以知道他们工作负载的初始访问模式,因此希望为“自适应区域调整”设置最佳初始区域。 - -相比之下,DAMON在物理内存监测的情况下不会自动设置和更新监测目标区域。因此,用户应该自己设置 -监测目标区域。 - -在这种情况下,用户可以通过在 ``init_regions`` 文件中写入适当的值,明确地设置他们想要的初 -始监测目标区域。输入应该是一个由三个整数组成的队列,用空格隔开,代表一个区域的形式如下:: - - - -目标idx应该是 ``target_ids`` 文件中目标的索引,从 ``0`` 开始,区域应该按照地址顺序传递。 -例如,下面的命令将设置几个地址范围, ``1-100`` 和 ``100-200`` 作为pid 42的初始监测目标 -区域,这是 ``target_ids`` 中的第一个(索引 ``0`` ),另外几个地址范围, ``20-40`` 和 -``50-100`` 作为pid 4242的地址,这是 ``target_ids`` 中的第二个(索引 ``1`` ):: - - # cd /damon - # cat target_ids - 42 4242 - # echo "0 1 100 \ - 0 100 200 \ - 1 20 40 \ - 1 50 100" > init_regions - -请注意,这只是设置了初始的监测目标区域。在虚拟内存监测的情况下,DAMON会在一个 ``更新间隔`` -后自动更新区域的边界。因此,在这种情况下,如果用户不希望更新的话,应该把 ``更新间隔`` 设 -置得足够大。 - - -方案 ----- - -对于通常的基于DAMON的数据访问感知的内存管理优化,用户只是希望系统对特定访问模式的内存区域应用内 -存管理操作。DAMON从用户那里接收这种形式化的操作方案,并将这些方案应用到目标进程中。 - -用户可以通过读取和写入 ``scheme`` debugfs文件来获得和设置这些方案。读取该文件还可以显示每个 -方案的统计数据。在文件中,每一个方案都应该在每一行中以下列形式表示出来:: - - - -你可以通过简单地在文件中写入一个空字符串来禁用方案。 - -目标访问模式 -~~~~~~~~~~~~ - -``<目标访问模式>`` 是由三个范围构成的,形式如下:: - - min-size max-size min-acc max-acc min-age max-age - -具体来说,区域大小的字节数( `min-size` 和 `max-size` ),访问频率的每聚合区间的监测访问次 -数( `min-acc` 和 `max-acc` ),区域年龄的聚合区间数( `min-age` 和 `max-age` )都被指定。 -请注意,这些范围是封闭区间。 - -动作 -~~~~ - -```` 是一个预定义的内存管理动作的整数,DAMON将应用于具有目标访问模式的区域。支持 -的数字和它们的含义如下:: - - - 0: Call ``madvise()`` for the region with ``MADV_WILLNEED`` - - 1: Call ``madvise()`` for the region with ``MADV_COLD`` - - 2: Call ``madvise()`` for the region with ``MADV_PAGEOUT`` - - 3: Call ``madvise()`` for the region with ``MADV_HUGEPAGE`` - - 4: Call ``madvise()`` for the region with ``MADV_NOHUGEPAGE`` - - 5: Do nothing but count the statistics - -配额 -~~~~ - -每个 ``动作`` 的最佳 ``目标访问模式`` 取决于工作负载,所以不容易找到。更糟糕的是,将某个 -动作的方案设置得过于激进会导致严重的开销。为了避免这种开销,用户可以通过下面表格中的 ```` -来限制方案的时间和大小配额:: - - - -这使得DAMON在 ```` 毫秒内,尽量只用 ```` 毫秒的时间对 ``目标访 -问模式`` 的内存区域应用动作,并在 ```` 内只对最多字节的内存区域应 -用动作。将 ```` 和 ```` 都设置为零,可以禁用配额限制。 - -当预计超过配额限制时,DAMON会根据 ``目标访问模式`` 的大小、访问频率和年龄,对发现的内存 -区域进行优先排序。为了实现个性化的优先级,用户可以在 ``<优先级权重>`` 中设置这三个属性的 -权重,具体形式如下:: - - - -水位 -~~~~ - -有些方案需要根据系统特定指标的当前值来运行,如自由内存比率。对于这种情况,用户可以为该条 -件指定水位。:: - - - -```` 是一个预定义的整数,用于要检查的度量。支持的数字和它们的含义如下。 - - - 0: 忽视水位 - - 1: 系统空闲内存率 (千分比) - -每隔 ``<检查间隔>`` 微秒检查一次公制的值。 - -如果该值高于 ``<高标>`` 或低于 ``<低标>`` ,该方案被停用。如果该值低于 ``<中标>`` , -该方案将被激活。 - -统计数据 -~~~~~~~~ - -它还统计每个方案被尝试应用的区域的总数量和字节数,每个方案被成功应用的区域的两个数量,以 -及超过配额限制的总数量。这些统计数据可用于在线分析或调整方案。 - -统计数据可以通过读取方案文件来显示。读取该文件将显示你在每一行中输入的每个 ``方案`` , -统计的五个数字将被加在每一行的末尾。 - -例子 -~~~~ - -下面的命令应用了一个方案:”如果一个大小为[4KiB, 8KiB]的内存区域在[10, 20]的聚合时间 -间隔内显示出每一个聚合时间间隔[0, 5]的访问量,请分页出该区域。对于分页,每秒最多只能使 -用10ms,而且每秒分页不能超过1GiB。在这一限制下,首先分页出具有较长年龄的内存区域。另外, -每5秒钟检查一次系统的可用内存率,当可用内存率低于50%时开始监测和分页,但如果可用内存率 -大于60%,或低于30%,则停止监测“:: - - # 
cd /damon - # scheme="4096 8192 0 5 10 20 2" # target access pattern and action - # scheme+=" 10 $((1024*1024*1024)) 1000" # quotas - # scheme+=" 0 0 100" # prioritization weights - # scheme+=" 1 5000000 600 500 300" # watermarks - # echo "$scheme" > schemes - - -开关 ----- - -除非你明确地启动监测,否则如上所述的文件设置不会产生效果。你可以通过写入和读取 ``monitor_on_DEPRECATED`` -文件来启动、停止和检查监测的当前状态。写入 ``on`` 该文件可以启动对有属性的目标的监测。写入 -``off`` 该文件则停止这些目标。如果每个目标进程被终止,DAMON也会停止。下面的示例命令开启、关 -闭和检查DAMON的状态:: - - # cd /damon - # echo on > monitor_on_DEPRECATED - # echo off > monitor_on_DEPRECATED - # cat monitor_on_DEPRECATED - off - -请注意,当监测开启时,你不能写到上述的debugfs文件。如果你在DAMON运行时写到这些文件,将会返 -回一个错误代码,如 ``-EBUSY`` 。 - - -监测线程PID ------------ - -DAMON通过一个叫做kdamond的内核线程来进行请求监测。你可以通过读取 ``kdamond_pid`` 文件获 -得该线程的 ``pid`` 。当监测被 ``关闭`` 时,读取该文件不会返回任何信息:: - - # cd /damon - # cat monitor_on_DEPRECATED - off - # cat kdamond_pid - none - # echo on > monitor_on_DEPRECATED - # cat kdamond_pid - 18594 - - -使用多个监测线程 ----------------- - -每个监测上下文都会创建一个 ``kdamond`` 线程。你可以使用 ``mk_contexts`` 和 ``rm_contexts`` -文件为多个 ``kdamond`` 需要的用例创建和删除监测上下文。 - -将新上下文的名称写入 ``mk_contexts`` 文件,在 ``DAMON debugfs`` 目录上创建一个该名称的目录。 -该目录将有该上下文的 ``DAMON debugfs`` 文件:: - - # cd /damon - # ls foo - # ls: cannot access 'foo': No such file or directory - # echo foo > mk_contexts - # ls foo - # attrs init_regions kdamond_pid schemes target_ids - -如果不再需要上下文,你可以通过把上下文的名字放到 ``rm_contexts`` 文件中来删除它和相应的目录:: - - # echo foo > rm_contexts - # ls foo - # ls: cannot access 'foo': No such file or directory - -注意, ``mk_contexts`` 、 ``rm_contexts`` 和 ``monitor_on_DEPRECATED`` 文件只在根目录下。 - 监测结果的监测点 ================ diff --git a/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst b/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst index fbbbbad59ee4..d3fd4f850793 100644 --- a/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst +++ b/Documentation/translations/zh_TW/admin-guide/mm/damon/usage.rst @@ -26,12 +26,7 @@ DAMON 爲不同的用戶提供了下面這些接口。 使用它,用戶可以通過讀取和寫入特殊的sysfs文件來使用DAMON的主要功能。因此,你可以編寫和使 用你個性化的DAMON sysfs包裝程序,代替你讀/寫sysfs文件。 `DAMON用戶空間工具 `_ 就是這種程序的一個例子 它同時支持虛擬和物理地址 - 空間的監測。注意,這個界面只提供簡單的監測結果 :ref:`統計 `。對於詳細的監測 - 結果,DAMON提供了一個:ref:`跟蹤點 `。 -- *debugfs interface.* - :ref:`這 ` 幾乎與:ref:`sysfs interface ` 接 - 口相同。這將在下一個LTS內核發佈後被移除,所以用戶應該轉移到 - :ref:`sysfs interface `。 + 空間的監測。 - *內核空間編程接口。* :doc:`這 ` 這是爲內核空間程序員準備的。使用它,用戶可以通過爲你編寫內 核空間的DAMON應用程序,最靈活有效地利用DAMON的每一個功能。你甚至可以爲各種地址空間擴展DAMON。 @@ -335,247 +330,6 @@ tried_regions// 請注意,我們強烈建議使用用戶空間的工具,如 `damo `_ , 而不是像上面那樣手動讀寫文件。以上只是一個例子。 -debugfs接口 -=========== - -.. note:: - - DAMON debugfs接口將在下一個LTS內核發佈後被移除,所以用戶應該轉移到 - :ref:`sysfs接口`。 - -DAMON導出了八個文件, ``attrs``, ``target_ids``, ``init_regions``, -``schemes``, ``monitor_on_DEPRECATED``, ``kdamond_pid``, ``mk_contexts`` 和 -``rm_contexts`` under its debugfs directory, ``/damon/``. 
- - -屬性 ----- - -用戶可以通過讀取和寫入 ``attrs`` 文件獲得和設置 ``採樣間隔`` 、 ``聚集間隔`` 、 ``更新間隔`` -以及監測目標區域的最小/最大數量。要詳細瞭解監測屬性,請參考 `:doc:/mm/damon/design` 。例如, -下面的命令將這些值設置爲5ms、100ms、1000ms、10和1000,然後再次檢查:: - - # cd /damon - # echo 5000 100000 1000000 10 1000 > attrs - # cat attrs - 5000 100000 1000000 10 1000 - - -目標ID ------- - -一些類型的地址空間支持多個監測目標。例如,虛擬內存地址空間的監測可以有多個進程作爲監測目標。用戶 -可以通過寫入目標的相關id值來設置目標,並通過讀取 ``target_ids`` 文件來獲得當前目標的id。在監 -測虛擬地址空間的情況下,這些值應該是監測目標進程的pid。例如,下面的命令將pid爲42和4242的進程設 -爲監測目標,並再次檢查:: - - # cd /damon - # echo 42 4242 > target_ids - # cat target_ids - 42 4242 - -用戶還可以通過在文件中寫入一個特殊的關鍵字 "paddr\n" 來監測系統的物理內存地址空間。因爲物理地 -址空間監測不支持多個目標,讀取文件會顯示一個假值,即 ``42`` ,如下圖所示:: - - # cd /damon - # echo paddr > target_ids - # cat target_ids - 42 - -請注意,設置目標ID並不啓動監測。 - - -初始監測目標區域 ----------------- - -在虛擬地址空間監測的情況下,DAMON自動設置和更新監測的目標區域,這樣就可以覆蓋目標進程的整個 -內存映射。然而,用戶可能希望將監測區域限制在特定的地址範圍內,如堆、棧或特定的文件映射區域。 -或者,一些用戶可以知道他們工作負載的初始訪問模式,因此希望爲“自適應區域調整”設置最佳初始區域。 - -相比之下,DAMON在物理內存監測的情況下不會自動設置和更新監測目標區域。因此,用戶應該自己設置 -監測目標區域。 - -在這種情況下,用戶可以通過在 ``init_regions`` 文件中寫入適當的值,明確地設置他們想要的初 -始監測目標區域。輸入應該是一個由三個整數組成的隊列,用空格隔開,代表一個區域的形式如下:: - - - -目標idx應該是 ``target_ids`` 文件中目標的索引,從 ``0`` 開始,區域應該按照地址順序傳遞。 -例如,下面的命令將設置幾個地址範圍, ``1-100`` 和 ``100-200`` 作爲pid 42的初始監測目標 -區域,這是 ``target_ids`` 中的第一個(索引 ``0`` ),另外幾個地址範圍, ``20-40`` 和 -``50-100`` 作爲pid 4242的地址,這是 ``target_ids`` 中的第二個(索引 ``1`` ):: - - # cd /damon - # cat target_ids - 42 4242 - # echo "0 1 100 \ - 0 100 200 \ - 1 20 40 \ - 1 50 100" > init_regions - -請注意,這只是設置了初始的監測目標區域。在虛擬內存監測的情況下,DAMON會在一個 ``更新間隔`` -後自動更新區域的邊界。因此,在這種情況下,如果用戶不希望更新的話,應該把 ``更新間隔`` 設 -置得足夠大。 - - -方案 ----- - -對於通常的基於DAMON的數據訪問感知的內存管理優化,用戶只是希望系統對特定訪問模式的內存區域應用內 -存管理操作。DAMON從用戶那裏接收這種形式化的操作方案,並將這些方案應用到目標進程中。 - -用戶可以通過讀取和寫入 ``scheme`` debugfs文件來獲得和設置這些方案。讀取該文件還可以顯示每個 -方案的統計數據。在文件中,每一個方案都應該在每一行中以下列形式表示出來:: - - - -你可以通過簡單地在文件中寫入一個空字符串來禁用方案。 - -目標訪問模式 -~~~~~~~~~~~~ - -``<目標訪問模式>`` 是由三個範圍構成的,形式如下:: - - min-size max-size min-acc max-acc min-age max-age - -具體來說,區域大小的字節數( `min-size` 和 `max-size` ),訪問頻率的每聚合區間的監測訪問次 -數( `min-acc` 和 `max-acc` ),區域年齡的聚合區間數( `min-age` 和 `max-age` )都被指定。 -請注意,這些範圍是封閉區間。 - -動作 -~~~~ - -```` 是一個預定義的內存管理動作的整數,DAMON將應用於具有目標訪問模式的區域。支持 -的數字和它們的含義如下:: - - - 0: Call ``madvise()`` for the region with ``MADV_WILLNEED`` - - 1: Call ``madvise()`` for the region with ``MADV_COLD`` - - 2: Call ``madvise()`` for the region with ``MADV_PAGEOUT`` - - 3: Call ``madvise()`` for the region with ``MADV_HUGEPAGE`` - - 4: Call ``madvise()`` for the region with ``MADV_NOHUGEPAGE`` - - 5: Do nothing but count the statistics - -配額 -~~~~ - -每個 ``動作`` 的最佳 ``目標訪問模式`` 取決於工作負載,所以不容易找到。更糟糕的是,將某個 -動作的方案設置得過於激進會導致嚴重的開銷。爲了避免這種開銷,用戶可以通過下面表格中的 ```` -來限制方案的時間和大小配額:: - - - -這使得DAMON在 ```` 毫秒內,儘量只用 ```` 毫秒的時間對 ``目標訪 -問模式`` 的內存區域應用動作,並在 ```` 內只對最多字節的內存區域應 -用動作。將 ```` 和 ```` 都設置爲零,可以禁用配額限制。 - -當預計超過配額限制時,DAMON會根據 ``目標訪問模式`` 的大小、訪問頻率和年齡,對發現的內存 -區域進行優先排序。爲了實現個性化的優先級,用戶可以在 ``<優先級權重>`` 中設置這三個屬性的 -權重,具體形式如下:: - - - -水位 -~~~~ - -有些方案需要根據系統特定指標的當前值來運行,如自由內存比率。對於這種情況,用戶可以爲該條 -件指定水位。:: - - - -```` 是一個預定義的整數,用於要檢查的度量。支持的數字和它們的含義如下。 - - - 0: 忽視水位 - - 1: 系統空閒內存率 (千分比) - -每隔 ``<檢查間隔>`` 微秒檢查一次公制的值。 - -如果該值高於 ``<高標>`` 或低於 ``<低標>`` ,該方案被停用。如果該值低於 ``<中標>`` , -該方案將被激活。 - -統計數據 -~~~~~~~~ - -它還統計每個方案被嘗試應用的區域的總數量和字節數,每個方案被成功應用的區域的兩個數量,以 -及超過配額限制的總數量。這些統計數據可用於在線分析或調整方案。 - -統計數據可以通過讀取方案文件來顯示。讀取該文件將顯示你在每一行中輸入的每個 ``方案`` , -統計的五個數字將被加在每一行的末尾。 - -例子 -~~~~ - -下面的命令應用了一個方案:”如果一個大小爲[4KiB, 8KiB]的內存區域在[10, 20]的聚合時間 -間隔內顯示出每一個聚合時間間隔[0, 5]的訪問量,請分頁出該區域。對於分頁,每秒最多隻能使 -用10ms,而且每秒分頁不能超過1GiB。在這一限制下,首先分頁出具有較長年齡的內存區域。另外, -每5秒鐘檢查一次系統的可用內存率,當可用內存率低於50%時開始監測和分頁,但如果可用內存率 -大於60%,或低於30%,則停止監測“:: - - # 
cd /damon - # scheme="4096 8192 0 5 10 20 2" # target access pattern and action - # scheme+=" 10 $((1024*1024*1024)) 1000" # quotas - # scheme+=" 0 0 100" # prioritization weights - # scheme+=" 1 5000000 600 500 300" # watermarks - # echo "$scheme" > schemes - - -開關 ----- - -除非你明確地啓動監測,否則如上所述的文件設置不會產生效果。你可以通過寫入和讀取 ``monitor_on_DEPRECATED`` -文件來啓動、停止和檢查監測的當前狀態。寫入 ``on`` 該文件可以啓動對有屬性的目標的監測。寫入 -``off`` 該文件則停止這些目標。如果每個目標進程被終止,DAMON也會停止。下面的示例命令開啓、關 -閉和檢查DAMON的狀態:: - - # cd /damon - # echo on > monitor_on_DEPRECATED - # echo off > monitor_on_DEPRECATED - # cat monitor_on_DEPRECATED - off - -請注意,當監測開啓時,你不能寫到上述的debugfs文件。如果你在DAMON運行時寫到這些文件,將會返 -回一個錯誤代碼,如 ``-EBUSY`` 。 - - -監測線程PID ------------ - -DAMON通過一個叫做kdamond的內核線程來進行請求監測。你可以通過讀取 ``kdamond_pid`` 文件獲 -得該線程的 ``pid`` 。當監測被 ``關閉`` 時,讀取該文件不會返回任何信息:: - - # cd /damon - # cat monitor_on_DEPRECATED - off - # cat kdamond_pid - none - # echo on > monitor_on_DEPRECATED - # cat kdamond_pid - 18594 - - -使用多個監測線程 ----------------- - -每個監測上下文都會創建一個 ``kdamond`` 線程。你可以使用 ``mk_contexts`` 和 ``rm_contexts`` -文件爲多個 ``kdamond`` 需要的用例創建和刪除監測上下文。 - -將新上下文的名稱寫入 ``mk_contexts`` 文件,在 ``DAMON debugfs`` 目錄上創建一個該名稱的目錄。 -該目錄將有該上下文的 ``DAMON debugfs`` 文件:: - - # cd /damon - # ls foo - # ls: cannot access 'foo': No such file or directory - # echo foo > mk_contexts - # ls foo - # attrs init_regions kdamond_pid schemes target_ids - -如果不再需要上下文,你可以通過把上下文的名字放到 ``rm_contexts`` 文件中來刪除它和相應的目錄:: - - # echo foo > rm_contexts - # ls foo - # ls: cannot access 'foo': No such file or directory - -注意, ``mk_contexts`` 、 ``rm_contexts`` 和 ``monitor_on_DEPRECATED`` 文件只在根目錄下。 - 監測結果的監測點 ================ From 12f248b1db254aa61286a4fced6b322b93f2a52b Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:19:35 -0800 Subject: [PATCH 286/504] Docs/admin-guide/mm/damon/usage: remove DAMON debugfs interface documentation It's time to remove DAMON debugfs interface, which has deprecated long before in February 2023. Read the cover letter of this patch series for more details. Remove DAMON debugfs interface usage documentation, to avoid confusing users with documents for an already removed thing. Link: https://lkml.kernel.org/r/20250106191941.107070-3-sj@kernel.org Signed-off-by: SeongJae Park Cc: Alex Shi Cc: Brendan Higgins Cc: David Gow Cc: Hu Haowen <2023002089@link.tyut.edu.cn> Cc: Jonathan Corbet Cc: Rae Moar Cc: Shuah Khan Cc: Yanteng Si Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/damon/usage.rst | 309 ------------------- 1 file changed, 309 deletions(-) diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index a891e830c2a1..71cf29ae8502 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -26,12 +26,6 @@ DAMON provides below interfaces for different users. writing kernel space DAMON application programs for you. You can even extend DAMON for various address spaces. For detail, please refer to the interface :doc:`document `. -- *debugfs interface. (DEPRECATED!)* - :ref:`This ` is almost identical to :ref:`sysfs interface - `. This is deprecated, so users should move to the - :ref:`sysfs interface `. If you depend on this and cannot - move, please report your usecase to damon@lists.linux.dev and - linux-mm@kvack.org. .. _sysfs_interface: @@ -599,306 +593,3 @@ fields are as usual. 
It shows the index of the DAMON context (``ctx_idx=X``) of the scheme in the list of the contexts of the context's kdamond, the index of the scheme (``scheme_idx=X``) in the list of the schemes of the context, in addition to the output of ``damon_aggregated`` tracepoint. - - -.. _debugfs_interface: - -debugfs Interface (DEPRECATED!) -=============================== - -.. note:: - - THIS IS DEPRECATED! - - DAMON debugfs interface is deprecated, so users should move to the - :ref:`sysfs interface `. If you depend on this and cannot - move, please report your usecase to damon@lists.linux.dev and - linux-mm@kvack.org. - -DAMON exports nine files, ``DEPRECATED``, ``attrs``, ``target_ids``, -``init_regions``, ``schemes``, ``monitor_on_DEPRECATED``, ``kdamond_pid``, -``mk_contexts`` and ``rm_contexts`` under its debugfs directory, -``/damon/``. - - -``DEPRECATED`` is a read-only file for the DAMON debugfs interface deprecation -notice. Reading it returns the deprecation notice, as below:: - - # cat DEPRECATED - DAMON debugfs interface is deprecated, so users should move to DAMON_SYSFS. If you cannot, please report your usecase to damon@lists.linux.dev and linux-mm@kvack.org. - - -Attributes ----------- - -Users can get and set the ``sampling interval``, ``aggregation interval``, -``update interval``, and min/max number of monitoring target regions by -reading from and writing to the ``attrs`` file. To know about the monitoring -attributes in detail, please refer to the :doc:`/mm/damon/design`. For -example, below commands set those values to 5 ms, 100 ms, 1,000 ms, 10 and -1000, and then check it again:: - - # cd /damon - # echo 5000 100000 1000000 10 1000 > attrs - # cat attrs - 5000 100000 1000000 10 1000 - - -Target IDs ----------- - -Some types of address spaces supports multiple monitoring target. For example, -the virtual memory address spaces monitoring can have multiple processes as the -monitoring targets. Users can set the targets by writing relevant id values of -the targets to, and get the ids of the current targets by reading from the -``target_ids`` file. In case of the virtual address spaces monitoring, the -values should be pids of the monitoring target processes. For example, below -commands set processes having pids 42 and 4242 as the monitoring targets and -check it again:: - - # cd /damon - # echo 42 4242 > target_ids - # cat target_ids - 42 4242 - -Users can also monitor the physical memory address space of the system by -writing a special keyword, "``paddr\n``" to the file. Because physical address -space monitoring doesn't support multiple targets, reading the file will show a -fake value, ``42``, as below:: - - # cd /damon - # echo paddr > target_ids - # cat target_ids - 42 - -Note that setting the target ids doesn't start the monitoring. - - -Initial Monitoring Target Regions ---------------------------------- - -In case of the virtual address space monitoring, DAMON automatically sets and -updates the monitoring target regions so that entire memory mappings of target -processes can be covered. However, users can want to limit the monitoring -region to specific address ranges, such as the heap, the stack, or specific -file-mapped area. Or, some users can know the initial access pattern of their -workloads and therefore want to set optimal initial regions for the 'adaptive -regions adjustment'. - -In contrast, DAMON do not automatically sets and updates the monitoring target -regions in case of physical memory monitoring. 
Therefore, users should set the -monitoring target regions by themselves. - -In such cases, users can explicitly set the initial monitoring target regions -as they want, by writing proper values to the ``init_regions`` file. The input -should be a sequence of three integers separated by white spaces that represent -one region in below form.:: - - - -The ``target idx`` should be the index of the target in ``target_ids`` file, -starting from ``0``, and the regions should be passed in address order. For -example, below commands will set a couple of address ranges, ``1-100`` and -``100-200`` as the initial monitoring target region of pid 42, which is the -first one (index ``0``) in ``target_ids``, and another couple of address -ranges, ``20-40`` and ``50-100`` as that of pid 4242, which is the second one -(index ``1``) in ``target_ids``.:: - - # cd /damon - # cat target_ids - 42 4242 - # echo "0 1 100 \ - 0 100 200 \ - 1 20 40 \ - 1 50 100" > init_regions - -Note that this sets the initial monitoring target regions only. In case of -virtual memory monitoring, DAMON will automatically updates the boundary of the -regions after one ``update interval``. Therefore, users should set the -``update interval`` large enough in this case, if they don't want the -update. - - -Schemes -------- - -Users can get and set the DAMON-based operation :ref:`schemes -` by reading from and writing to ``schemes`` debugfs file. -Reading the file also shows the statistics of each scheme. To the file, each -of the schemes should be represented in each line in below form:: - - - -You can disable schemes by simply writing an empty string to the file. - -Target Access Pattern -~~~~~~~~~~~~~~~~~~~~~ - -The target access :ref:`pattern ` of the -scheme. The ```` is constructed with three ranges in -below form:: - - min-size max-size min-acc max-acc min-age max-age - -Specifically, bytes for the size of regions (``min-size`` and ``max-size``), -number of monitored accesses per aggregate interval for access frequency -(``min-acc`` and ``max-acc``), number of aggregate intervals for the age of -regions (``min-age`` and ``max-age``) are specified. Note that the ranges are -closed interval. - -Action -~~~~~~ - -The ```` is a predefined integer for memory management :ref:`actions -`. The mapping between the ```` values and -the memory management actions is as below. For the detailed meaning of the -action and DAMON operations set supporting each action, please refer to the -list on :ref:`design doc `. - - - 0: ``willneed`` - - 1: ``cold`` - - 2: ``pageout`` - - 3: ``hugepage`` - - 4: ``nohugepage`` - - 5: ``stat`` - -Quota -~~~~~ - -Users can set the :ref:`quotas ` of the given scheme -via the ```` in below form:: - - - -This makes DAMON to try to use only up to ```` milliseconds for applying -the action to memory regions of the ``target access pattern`` within the -```` milliseconds, and to apply the action to only up to -```` bytes of memory regions within the ````. Setting both -```` and ```` zero disables the quota limits. - -For the :ref:`prioritization `, users -can set the weights for the three properties in ```` in below -form:: - - - -Watermarks -~~~~~~~~~~ - -Users can specify :ref:`watermarks ` of the -given scheme via ```` in below form:: - - - -```` is a predefined integer for the metric to be checked. The -supported numbers and their meanings are as below. - - - 0: Ignore the watermarks - - 1: System's free memory rate (per thousand) - -The value of the metric is checked every ```` microseconds. 
- -If the value is higher than ```` or lower than ````, the -scheme is deactivated. If the value is lower than ````, the scheme -is activated. - -.. _damos_stats: - -Statistics -~~~~~~~~~~ - -It also counts the total number and bytes of regions that each scheme is tried -to be applied, the two numbers for the regions that each scheme is successfully -applied, and the total number of the quota limit exceeds. This statistics can -be used for online analysis or tuning of the schemes. - -The statistics can be shown by reading the ``schemes`` file. Reading the file -will show each scheme you entered in each line, and the five numbers for the -statistics will be added at the end of each line. - -Example -~~~~~~~ - -Below commands applies a scheme saying "If a memory region of size in [4KiB, -8KiB] is showing accesses per aggregate interval in [0, 5] for aggregate -interval in [10, 20], page out the region. For the paging out, use only up to -10ms per second, and also don't page out more than 1GiB per second. Under the -limitation, page out memory regions having longer age first. Also, check the -free memory rate of the system every 5 seconds, start the monitoring and paging -out when the free memory rate becomes lower than 50%, but stop it if the free -memory rate becomes larger than 60%, or lower than 30%".:: - - # cd /damon - # scheme="4096 8192 0 5 10 20 2" # target access pattern and action - # scheme+=" 10 $((1024*1024*1024)) 1000" # quotas - # scheme+=" 0 0 100" # prioritization weights - # scheme+=" 1 5000000 600 500 300" # watermarks - # echo "$scheme" > schemes - - -Turning On/Off --------------- - -Setting the files as described above doesn't incur effect unless you explicitly -start the monitoring. You can start, stop, and check the current status of the -monitoring by writing to and reading from the ``monitor_on_DEPRECATED`` file. -Writing ``on`` to the file starts the monitoring of the targets with the -attributes. Writing ``off`` to the file stops those. DAMON also stops if -every target process is terminated. Below example commands turn on, off, and -check the status of DAMON:: - - # cd /damon - # echo on > monitor_on_DEPRECATED - # echo off > monitor_on_DEPRECATED - # cat monitor_on_DEPRECATED - off - -Please note that you cannot write to the above-mentioned debugfs files while -the monitoring is turned on. If you write to the files while DAMON is running, -an error code such as ``-EBUSY`` will be returned. - - -Monitoring Thread PID ---------------------- - -DAMON does requested monitoring with a kernel thread called ``kdamond``. You -can get the pid of the thread by reading the ``kdamond_pid`` file. When the -monitoring is turned off, reading the file returns ``none``. :: - - # cd /damon - # cat monitor_on_DEPRECATED - off - # cat kdamond_pid - none - # echo on > monitor_on_DEPRECATED - # cat kdamond_pid - 18594 - - -Using Multiple Monitoring Threads ---------------------------------- - -One ``kdamond`` thread is created for each monitoring context. You can create -and remove monitoring contexts for multiple ``kdamond`` required use case using -the ``mk_contexts`` and ``rm_contexts`` files. - -Writing the name of the new context to the ``mk_contexts`` file creates a -directory of the name on the DAMON debugfs directory. The directory will have -DAMON debugfs files for the context. 
:: - - # cd /damon - # ls foo - # ls: cannot access 'foo': No such file or directory - # echo foo > mk_contexts - # ls foo - # attrs init_regions kdamond_pid schemes target_ids - -If the context is not needed anymore, you can remove it and the corresponding -directory by putting the name of the context to the ``rm_contexts`` file. :: - - # echo foo > rm_contexts - # ls foo - # ls: cannot access 'foo': No such file or directory - -Note that ``mk_contexts``, ``rm_contexts``, and ``monitor_on_DEPRECATED`` files -are in the root directory only. From 56696f294d342aa18f95732403d8b07211c834cc Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:19:36 -0800 Subject: [PATCH 287/504] Docs/mm/damon/design: update for removal of DAMON debugfs interface It's time to remove DAMON debugfs interface, which has deprecated long before in February 2023. Read the cover letter of this patch series for more details. Update DAMON design documentation to stop mentioning about the interface, to avoid unnecessary confuses. Link: https://lkml.kernel.org/r/20250106191941.107070-4-sj@kernel.org Signed-off-by: SeongJae Park Cc: Alex Shi Cc: Brendan Higgins Cc: David Gow Cc: Hu Haowen <2023002089@link.tyut.edu.cn> Cc: Jonathan Corbet Cc: Rae Moar Cc: Shuah Khan Cc: Yanteng Si Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 23 ++++++++++------------- 1 file changed, 10 insertions(+), 13 deletions(-) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index 5ebb572d0999..449eb33688c2 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -627,15 +627,11 @@ General Purpose User Interface Modules DAMON modules that provide user space ABIs for general purpose DAMON usage in runtime. -DAMON user interface modules, namely 'DAMON sysfs interface' and 'DAMON debugfs -interface' are DAMON API user kernel modules that provide ABIs to the -user-space. Please note that DAMON debugfs interface is currently deprecated. - -Like many other ABIs, the modules create files on sysfs and debugfs, allow -users to specify their requests to and get the answers from DAMON by writing to -and reading from the files. As a response to such I/O, DAMON user interface -modules control DAMON and retrieve the results as user requested via the DAMON -API, and return the results to the user-space. +Like many other ABIs, the modules create files on pseudo file systems like +'sysfs', allow users to specify their requests to and get the answers from +DAMON by writing to and reading from the files. As a response to such I/O, +DAMON user interface modules control DAMON and retrieve the results as user +requested via the DAMON API, and return the results to the user-space. The ABIs are designed to be used for user space applications development, rather than human beings' fingers. Human users are recommended to use such @@ -644,8 +640,9 @@ Github (https://github.com/damonitor/damo), Pypi (https://pypistats.org/packages/damo), and Fedora (https://packages.fedoraproject.org/pkgs/python-damo/damo/). -Please refer to the ABI :doc:`document ` for -details of the interfaces. +Currently, one module for this type, namely 'DAMON sysfs interface' is +available. Please refer to the ABI :ref:`doc ` for details of +the interfaces. Special-Purpose Access-aware Kernel Modules @@ -653,8 +650,8 @@ Special-Purpose Access-aware Kernel Modules DAMON modules that provide user space ABI for specific purpose DAMON usage. 
-DAMON sysfs/debugfs user interfaces are for full control of all DAMON features -in runtime. For each special-purpose system-wide data access-aware system +DAMON user interface modules are for full control of all DAMON features in +runtime. For each special-purpose system-wide data access-aware system operations such as proactive reclamation or LRU lists balancing, the interfaces could be simplified by removing unnecessary knobs for the specific purpose, and extended for boot-time and even compile time control. Default values of DAMON From 6500ea66e634e55c2492de8c4c11f646379b80f0 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:19:37 -0800 Subject: [PATCH 288/504] selftests/damon/config: remove configs for DAMON debugfs interface selftests It's time to remove DAMON debugfs interface, which has deprecated long before in February 2023. Read the cover letter of this patch series for more details. Remove configs for selftests of it from DAMON selftests config file, to prevent unnecessary noises from the tests. [1] https://lore.kernel.org/20230209192009.7885-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250106191941.107070-5-sj@kernel.org Signed-off-by: SeongJae Park Cc: Alex Shi Cc: Brendan Higgins Cc: David Gow Cc: Hu Haowen <2023002089@link.tyut.edu.cn> Cc: Jonathan Corbet Cc: Rae Moar Cc: Shuah Khan Cc: Yanteng Si Signed-off-by: Andrew Morton --- tools/testing/selftests/damon/config | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/testing/selftests/damon/config b/tools/testing/selftests/damon/config index 0daf38974eb0..a68a9fead5dc 100644 --- a/tools/testing/selftests/damon/config +++ b/tools/testing/selftests/damon/config @@ -1,6 +1,5 @@ CONFIG_DAMON=y CONFIG_DAMON_SYSFS=y -CONFIG_DAMON_DBGFS=y CONFIG_DAMON_PADDR=y CONFIG_DAMON_VADDR=y CONFIG_DAMON_RECLAIM=y From c97b36d41b66d751c06083addcf66cab7e4b4c4c Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:19:38 -0800 Subject: [PATCH 289/504] selftests/damon: remove tests for DAMON debugfs interface It's time to remove DAMON debugfs interface, which has deprecated long before in February 2023. Read the cover letter of this patch series for more details. Remove selftests for the interface, to prevent causing unnecessary test failures. 
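For anyone who relied on the removed debugfs shell scripts as a quick manual
smoke test, roughly the same check can be done against the sysfs interface
that the remaining selftests exercise.  The sketch below is only an
illustration and is not part of this series; the sysfs file names follow the
sysfs usage document and should be double-checked on the running kernel::

    # rough sysfs equivalent of the old "write target_ids, toggle monitor_on"
    # smoke test (paths are assumptions taken from the sysfs usage document)
    sysfs=/sys/kernel/mm/damon/admin/kdamonds
    echo 1 > "$sysfs/nr_kdamonds"
    echo 1 > "$sysfs/0/contexts/nr_contexts"
    echo vaddr > "$sysfs/0/contexts/0/operations"
    echo 1 > "$sysfs/0/contexts/0/targets/nr_targets"
    echo $$ > "$sysfs/0/contexts/0/targets/0/pid_target"  # monitor this shell
    echo on > "$sysfs/0/state"
    cat "$sysfs/0/pid"     # kdamond thread pid, like the old kdamond_pid file
    echo off > "$sysfs/0/state"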
Link: https://lkml.kernel.org/r/20250106191941.107070-6-sj@kernel.org Signed-off-by: SeongJae Park Cc: Alex Shi Cc: Brendan Higgins Cc: David Gow Cc: Hu Haowen <2023002089@link.tyut.edu.cn> Cc: Jonathan Corbet Cc: Rae Moar Cc: Shuah Khan Cc: Yanteng Si Signed-off-by: Andrew Morton --- tools/testing/selftests/damon/.gitignore | 3 - tools/testing/selftests/damon/Makefile | 11 +-- .../testing/selftests/damon/debugfs_attrs.sh | 17 ---- .../debugfs_duplicate_context_creation.sh | 27 ------- .../selftests/damon/debugfs_empty_targets.sh | 21 ----- .../damon/debugfs_huge_count_read_write.sh | 22 ----- .../damon/debugfs_rm_non_contexts.sh | 19 ----- .../selftests/damon/debugfs_schemes.sh | 19 ----- .../selftests/damon/debugfs_target_ids.sh | 19 ----- .../damon/debugfs_target_ids_pid_leak.c | 68 ---------------- .../damon/debugfs_target_ids_pid_leak.sh | 22 ----- ...fs_target_ids_read_before_terminate_race.c | 80 ------------------- ...s_target_ids_read_before_terminate_race.sh | 14 ---- .../selftests/damon/huge_count_read_write.c | 46 ----------- 14 files changed, 1 insertion(+), 387 deletions(-) delete mode 100755 tools/testing/selftests/damon/debugfs_attrs.sh delete mode 100755 tools/testing/selftests/damon/debugfs_duplicate_context_creation.sh delete mode 100755 tools/testing/selftests/damon/debugfs_empty_targets.sh delete mode 100755 tools/testing/selftests/damon/debugfs_huge_count_read_write.sh delete mode 100755 tools/testing/selftests/damon/debugfs_rm_non_contexts.sh delete mode 100755 tools/testing/selftests/damon/debugfs_schemes.sh delete mode 100755 tools/testing/selftests/damon/debugfs_target_ids.sh delete mode 100644 tools/testing/selftests/damon/debugfs_target_ids_pid_leak.c delete mode 100755 tools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh delete mode 100644 tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.c delete mode 100755 tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh delete mode 100644 tools/testing/selftests/damon/huge_count_read_write.c diff --git a/tools/testing/selftests/damon/.gitignore b/tools/testing/selftests/damon/.gitignore index 2ab675fecb6b..2f0297657c81 100644 --- a/tools/testing/selftests/damon/.gitignore +++ b/tools/testing/selftests/damon/.gitignore @@ -1,6 +1,3 @@ # SPDX-License-Identifier: GPL-2.0-only -huge_count_read_write -debugfs_target_ids_read_before_terminate_race -debugfs_target_ids_pid_leak access_memory access_memory_even diff --git a/tools/testing/selftests/damon/Makefile b/tools/testing/selftests/damon/Makefile index 812f656260fb..ecbf07afc6dd 100644 --- a/tools/testing/selftests/damon/Makefile +++ b/tools/testing/selftests/damon/Makefile @@ -1,15 +1,11 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for damon selftests -TEST_GEN_FILES += huge_count_read_write -TEST_GEN_FILES += debugfs_target_ids_read_before_terminate_race -TEST_GEN_FILES += debugfs_target_ids_pid_leak TEST_GEN_FILES += access_memory access_memory_even -TEST_FILES = _chk_dependency.sh _debugfs_common.sh _damon_sysfs.py +TEST_FILES = _chk_dependency.sh _damon_sysfs.py # functionality tests -TEST_PROGS = debugfs_attrs.sh debugfs_schemes.sh debugfs_target_ids.sh TEST_PROGS += sysfs.sh TEST_PROGS += sysfs_update_schemes_tried_regions_wss_estimation.py TEST_PROGS += damos_quota.py damos_quota_goal.py damos_apply_interval.py @@ -17,11 +13,6 @@ TEST_PROGS += damos_tried_regions.py damon_nr_regions.py TEST_PROGS += reclaim.sh lru_sort.sh # regression tests (reproducers of previously found bugs) -TEST_PROGS += 
debugfs_empty_targets.sh debugfs_huge_count_read_write.sh -TEST_PROGS += debugfs_duplicate_context_creation.sh -TEST_PROGS += debugfs_rm_non_contexts.sh -TEST_PROGS += debugfs_target_ids_read_before_terminate_race.sh -TEST_PROGS += debugfs_target_ids_pid_leak.sh TEST_PROGS += sysfs_update_removed_scheme_dir.sh TEST_PROGS += sysfs_update_schemes_tried_regions_hang.py diff --git a/tools/testing/selftests/damon/debugfs_attrs.sh b/tools/testing/selftests/damon/debugfs_attrs.sh deleted file mode 100755 index 902e312bca89..000000000000 --- a/tools/testing/selftests/damon/debugfs_attrs.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -source _debugfs_common.sh - -# Test attrs file -# =============== - -file="$DBGFS/attrs" -orig_content=$(cat "$file") - -test_write_succ "$file" "1 2 3 4 5" "$orig_content" "valid input" -test_write_fail "$file" "1 2 3 4" "$orig_content" "no enough fields" -test_write_fail "$file" "1 2 3 5 4" "$orig_content" \ - "min_nr_regions > max_nr_regions" -test_content "$file" "$orig_content" "1 2 3 4 5" "successfully written" -echo "$orig_content" > "$file" diff --git a/tools/testing/selftests/damon/debugfs_duplicate_context_creation.sh b/tools/testing/selftests/damon/debugfs_duplicate_context_creation.sh deleted file mode 100755 index bd6c22d96ead..000000000000 --- a/tools/testing/selftests/damon/debugfs_duplicate_context_creation.sh +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -source _debugfs_common.sh - -# Test duplicated context creation -# ================================ - -if ! echo foo > "$DBGFS/mk_contexts" -then - echo "context creation failed" - exit 1 -fi - -if echo foo > "$DBGFS/mk_contexts" 2> /dev/null -then - echo "duplicate context creation success" - exit 1 -fi - -if ! 
echo foo > "$DBGFS/rm_contexts" -then - echo "context deletion failed" - exit 1 -fi - -exit 0 diff --git a/tools/testing/selftests/damon/debugfs_empty_targets.sh b/tools/testing/selftests/damon/debugfs_empty_targets.sh deleted file mode 100755 index effbea33dc16..000000000000 --- a/tools/testing/selftests/damon/debugfs_empty_targets.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -source _debugfs_common.sh - -# Test empty targets case -# ======================= - -orig_target_ids=$(cat "$DBGFS/target_ids") -echo "" > "$DBGFS/target_ids" - -if [ -f "$DBGFS/monitor_on_DEPRECATED" ] -then - monitor_on_file="$DBGFS/monitor_on_DEPRECATED" -else - monitor_on_file="$DBGFS/monitor_on" -fi - -orig_monitor_on=$(cat "$monitor_on_file") -test_write_fail "$monitor_on_file" "on" "orig_monitor_on" "empty target ids" -echo "$orig_target_ids" > "$DBGFS/target_ids" diff --git a/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh b/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh deleted file mode 100755 index 922cadac2950..000000000000 --- a/tools/testing/selftests/damon/debugfs_huge_count_read_write.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -source _debugfs_common.sh - -# Test huge count read write -# ========================== - -dmesg -C - -for file in "$DBGFS/"* -do - ./huge_count_read_write "$file" -done - -if dmesg | grep -q WARNING -then - dmesg - exit 1 -else - exit 0 -fi diff --git a/tools/testing/selftests/damon/debugfs_rm_non_contexts.sh b/tools/testing/selftests/damon/debugfs_rm_non_contexts.sh deleted file mode 100755 index f3ffeb1343cf..000000000000 --- a/tools/testing/selftests/damon/debugfs_rm_non_contexts.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -source _debugfs_common.sh - -# Test putting non-ctx files/dirs to rm_contexts file -# =================================================== - -dmesg -C - -for file in "$DBGFS/"* -do - (echo "$(basename "$f")" > "$DBGFS/rm_contexts") &> /dev/null - if dmesg | grep -q BUG - then - dmesg - exit 1 - fi -done diff --git a/tools/testing/selftests/damon/debugfs_schemes.sh b/tools/testing/selftests/damon/debugfs_schemes.sh deleted file mode 100755 index 5b39ab44731c..000000000000 --- a/tools/testing/selftests/damon/debugfs_schemes.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -source _debugfs_common.sh - -# Test schemes file -# ================= - -file="$DBGFS/schemes" -orig_content=$(cat "$file") - -test_write_succ "$file" "1 2 3 4 5 6 4 0 0 0 1 2 3 1 100 3 2 1" \ - "$orig_content" "valid input" -test_write_fail "$file" "1 2 -3 4 5 6 3 0 0 0 1 2 3 1 100 3 2 1" "$orig_content" "multi lines" -test_write_succ "$file" "" "$orig_content" "disabling" -test_write_fail "$file" "2 1 2 1 10 1 3 10 1 1 1 1 1 1 1 1 2 3" \ - "$orig_content" "wrong condition ranges" -echo "$orig_content" > "$file" diff --git a/tools/testing/selftests/damon/debugfs_target_ids.sh b/tools/testing/selftests/damon/debugfs_target_ids.sh deleted file mode 100755 index 49aeabdb0aae..000000000000 --- a/tools/testing/selftests/damon/debugfs_target_ids.sh +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -source _debugfs_common.sh - -# Test target_ids file -# ==================== - -file="$DBGFS/target_ids" -orig_content=$(cat "$file") - -test_write_succ "$file" "1 2 3 4" "$orig_content" "valid input" -test_write_succ "$file" "1 2 abc 4" "$orig_content" "still valid input" 
-test_content "$file" "$orig_content" "1 2" "non-integer was there" -test_write_succ "$file" "abc 2 3" "$orig_content" "the file allows wrong input" -test_content "$file" "$orig_content" "" "wrong input written" -test_write_succ "$file" "" "$orig_content" "empty input" -test_content "$file" "$orig_content" "" "empty input written" -echo "$orig_content" > "$file" diff --git a/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.c b/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.c deleted file mode 100644 index 0cc2eef7d142..000000000000 --- a/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.c +++ /dev/null @@ -1,68 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Author: SeongJae Park - */ - -#define _GNU_SOURCE - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DBGFS_TARGET_IDS "/sys/kernel/debug/damon/target_ids" - -static void write_targetid_exit(void) -{ - int target_ids_fd = open(DBGFS_TARGET_IDS, O_RDWR); - char pid_str[128]; - - snprintf(pid_str, sizeof(pid_str), "%d", getpid()); - write(target_ids_fd, pid_str, sizeof(pid_str)); - close(target_ids_fd); - exit(0); -} - -unsigned long msec_timestamp(void) -{ - struct timeval tv; - - gettimeofday(&tv, NULL); - return tv.tv_sec * 1000UL + tv.tv_usec / 1000; -} - -int main(int argc, char *argv[]) -{ - unsigned long start_ms; - int time_to_run, nr_forks = 0; - - if (argc != 2) { - fprintf(stderr, "Usage: %s \n", argv[0]); - exit(1); - } - time_to_run = atoi(argv[1]); - - start_ms = msec_timestamp(); - while (true) { - int pid = fork(); - - if (pid < 0) { - fprintf(stderr, "fork() failed\n"); - exit(1); - } - if (pid == 0) - write_targetid_exit(); - wait(NULL); - nr_forks++; - - if (msec_timestamp() - start_ms > time_to_run) - break; - } - printf("%d\n", nr_forks); - return 0; -} diff --git a/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh b/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh deleted file mode 100755 index 31fe33c2b032..000000000000 --- a/tools/testing/selftests/damon/debugfs_target_ids_pid_leak.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -before=$(grep "^pid " /proc/slabinfo | awk '{print $2}') - -nr_leaks=$(./debugfs_target_ids_pid_leak 1000) -expected_after_max=$((before + nr_leaks / 2)) - -after=$(grep "^pid " /proc/slabinfo | awk '{print $2}') - -echo > /sys/kernel/debug/damon/target_ids - -echo "tried $nr_leaks pid leak" -echo "number of active pid slabs: $before -> $after" -echo "(up to $expected_after_max expected)" -if [ $after -gt $expected_after_max ] -then - echo "maybe pids are leaking" - exit 1 -else - exit 0 -fi diff --git a/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.c b/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.c deleted file mode 100644 index b06f52a8ce2d..000000000000 --- a/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.c +++ /dev/null @@ -1,80 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Author: SeongJae Park - */ -#define _GNU_SOURCE - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define DBGFS_MONITOR_ON "/sys/kernel/debug/damon/monitor_on_DEPRECATED" -#define DBGFS_TARGET_IDS "/sys/kernel/debug/damon/target_ids" - -static void turn_damon_on_exit(void) -{ - int target_ids_fd = open(DBGFS_TARGET_IDS, O_RDWR); - int monitor_on_fd = open(DBGFS_MONITOR_ON, O_RDWR); - char pid_str[128]; - - snprintf(pid_str, 
sizeof(pid_str), "%d", getpid()); - write(target_ids_fd, pid_str, sizeof(pid_str)); - write(monitor_on_fd, "on\n", 3); - close(target_ids_fd); - close(monitor_on_fd); - usleep(1000); - exit(0); -} - -static void try_race(void) -{ - int target_ids_fd = open(DBGFS_TARGET_IDS, O_RDWR); - int pid = fork(); - int buf[256]; - - if (pid < 0) { - fprintf(stderr, "fork() failed\n"); - exit(1); - } - if (pid == 0) - turn_damon_on_exit(); - while (true) { - int status; - - read(target_ids_fd, buf, sizeof(buf)); - if (waitpid(-1, &status, WNOHANG) == pid) - break; - } - close(target_ids_fd); -} - -static inline uint64_t ts_to_ms(struct timespec *ts) -{ - return (uint64_t)ts->tv_sec * 1000 + (uint64_t)ts->tv_nsec / 1000000; -} - -int main(int argc, char *argv[]) -{ - struct timespec start_time, now; - int runtime_ms; - - if (argc != 2) { - fprintf(stderr, "Usage: %s \n", argv[0]); - exit(1); - } - runtime_ms = atoi(argv[1]); - clock_gettime(CLOCK_MONOTONIC, &start_time); - while (true) { - try_race(); - clock_gettime(CLOCK_MONOTONIC, &now); - if (ts_to_ms(&now) - ts_to_ms(&start_time) > runtime_ms) - break; - } - return 0; -} diff --git a/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh b/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh deleted file mode 100755 index fc793c4c9aea..000000000000 --- a/tools/testing/selftests/damon/debugfs_target_ids_read_before_terminate_race.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -# SPDX-License-Identifier: GPL-2.0 - -dmesg -C - -./debugfs_target_ids_read_before_terminate_race 5000 - -if dmesg | grep -q dbgfs_target_ids_read -then - dmesg - exit 1 -else - exit 0 -fi diff --git a/tools/testing/selftests/damon/huge_count_read_write.c b/tools/testing/selftests/damon/huge_count_read_write.c deleted file mode 100644 index 53e69a669668..000000000000 --- a/tools/testing/selftests/damon/huge_count_read_write.c +++ /dev/null @@ -1,46 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Author: SeongJae Park - */ - -#include -#include -#include -#include - -#pragma GCC diagnostic push -#if __GNUC__ >= 11 && __GNUC_MINOR__ >= 1 -/* Ignore read(2) overflow and write(2) overread compile warnings */ -#pragma GCC diagnostic ignored "-Wstringop-overread" -#pragma GCC diagnostic ignored "-Wstringop-overflow" -#endif - -void write_read_with_huge_count(char *file) -{ - int filedesc = open(file, O_RDWR); - char buf[256]; - int ret; - - printf("%s %s\n", __func__, file); - if (filedesc < 0) { - fprintf(stderr, "failed opening %s\n", file); - exit(1); - } - - write(filedesc, "", 0xfffffffful); - ret = read(filedesc, buf, 0xfffffffful); - close(filedesc); -} - -#pragma GCC diagnostic pop - -int main(int argc, char *argv[]) -{ - if (argc != 2) { - fprintf(stderr, "Usage: %s \n", argv[0]); - exit(1); - } - write_read_with_huge_count(argv[1]); - - return 0; -} From 7f7b398575e80bfd73fbe105626d3599c7709473 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:19:39 -0800 Subject: [PATCH 290/504] kunit: configs: remove configs for DAMON debugfs interface tests It's time to remove DAMON debugfs interface, which has deprecated long before in February 2023. Read the cover letter of this patch series for more details. Remove kernel configs for running DAMON debugfs interface kunit tests from the kunit all_tests configuration, to prevent unnecessary noises from tests. 
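For checking the result of this removal locally, the remaining DAMON KUnit
suites can still be run with the DAMON-specific config that stays under
mm/damon/tests/.  The command below is only an example of the usual
kunit_tool invocation, not part of this change; double-check the option
against Documentation/dev-tools/kunit if needed::

    # run only the DAMON KUnit suites using the in-tree .kunitconfig
    ./tools/testing/kunit/kunit.py run --kunitconfig=mm/damon/tests/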
Link: https://lkml.kernel.org/r/20250106191941.107070-7-sj@kernel.org Signed-off-by: SeongJae Park Cc: Alex Shi Cc: Brendan Higgins Cc: David Gow Cc: Hu Haowen <2023002089@link.tyut.edu.cn> Cc: Jonathan Corbet Cc: Rae Moar Cc: Shuah Khan Cc: Yanteng Si Signed-off-by: Andrew Morton --- tools/testing/kunit/configs/all_tests.config | 3 --- 1 file changed, 3 deletions(-) diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config index b3b00269a52a..b0049be00c70 100644 --- a/tools/testing/kunit/configs/all_tests.config +++ b/tools/testing/kunit/configs/all_tests.config @@ -38,9 +38,6 @@ CONFIG_IWLWIFI=y CONFIG_DAMON=y CONFIG_DAMON_VADDR=y CONFIG_DAMON_PADDR=y -CONFIG_DEBUG_FS=y -CONFIG_DAMON_DBGFS=y -CONFIG_DAMON_DBGFS_DEPRECATED=y CONFIG_REGMAP_BUILD=y From deaa335e776d53b12000705e325c1f31c89736d7 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:19:40 -0800 Subject: [PATCH 291/504] mm/damon: remove DAMON debugfs interface kunit tests It's time to remove DAMON debugfs interface, which has deprecated long before in February 2023. Read the cover letter of this patch series for more details. Remove kunit tests for the interface, to prevent unnecessary test failures. Link: https://lkml.kernel.org/r/20250106191941.107070-8-sj@kernel.org Signed-off-by: SeongJae Park Cc: Alex Shi Cc: Brendan Higgins Cc: David Gow Cc: Hu Haowen <2023002089@link.tyut.edu.cn> Cc: Jonathan Corbet Cc: Rae Moar Cc: Shuah Khan Cc: Yanteng Si Signed-off-by: Andrew Morton --- mm/damon/Kconfig | 12 --- mm/damon/dbgfs.c | 2 - mm/damon/tests/.kunitconfig | 7 -- mm/damon/tests/dbgfs-kunit.h | 173 ----------------------------------- 4 files changed, 194 deletions(-) delete mode 100644 mm/damon/tests/dbgfs-kunit.h diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig index d0357f3e9372..db0d92624e8b 100644 --- a/mm/damon/Kconfig +++ b/mm/damon/Kconfig @@ -89,18 +89,6 @@ config DAMON_DBGFS default y depends on DAMON_DBGFS_DEPRECATED -config DAMON_DBGFS_KUNIT_TEST - bool "Test for damon debugfs interface" if !KUNIT_ALL_TESTS - depends on DAMON_DBGFS && KUNIT=y - default KUNIT_ALL_TESTS - help - This builds the DAMON debugfs interface Kunit test suite. - - For more information on KUnit and unit tests in general, please refer - to the KUnit documentation. - - If unsure, say N. 
- config DAMON_RECLAIM bool "Build DAMON-based reclaim (DAMON_RECLAIM)" depends on DAMON_PADDR diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c index b4213bc47e44..5664c2cb0a5e 100644 --- a/mm/damon/dbgfs.c +++ b/mm/damon/dbgfs.c @@ -1144,5 +1144,3 @@ out: } module_init(damon_dbgfs_init); - -#include "tests/dbgfs-kunit.h" diff --git a/mm/damon/tests/.kunitconfig b/mm/damon/tests/.kunitconfig index a73be044fc9b..36a450f57b58 100644 --- a/mm/damon/tests/.kunitconfig +++ b/mm/damon/tests/.kunitconfig @@ -13,10 +13,3 @@ CONFIG_DAMON_VADDR_KUNIT_TEST=y CONFIG_SYSFS=y CONFIG_DAMON_SYSFS=y CONFIG_DAMON_SYSFS_KUNIT_TEST=y - -# for DAMON debugfs interface -CONFIG_DEBUG_FS=y -CONFIG_DAMON_PADDR=y -CONFIG_DAMON_DBGFS_DEPRECATED=y -CONFIG_DAMON_DBGFS=y -CONFIG_DAMON_DBGFS_KUNIT_TEST=y diff --git a/mm/damon/tests/dbgfs-kunit.h b/mm/damon/tests/dbgfs-kunit.h deleted file mode 100644 index 087e53f641a8..000000000000 --- a/mm/damon/tests/dbgfs-kunit.h +++ /dev/null @@ -1,173 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * DAMON Debugfs Interface Unit Tests - * - * Author: SeongJae Park - */ - -#ifdef CONFIG_DAMON_DBGFS_KUNIT_TEST - -#ifndef _DAMON_DBGFS_TEST_H -#define _DAMON_DBGFS_TEST_H - -#include - -static void damon_dbgfs_test_str_to_ints(struct kunit *test) -{ - char *question; - int *answers; - int expected[] = {12, 35, 46}; - ssize_t nr_integers = 0, i; - - question = "123"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers); - KUNIT_EXPECT_EQ(test, 123, answers[0]); - kfree(answers); - - question = "123abc"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers); - KUNIT_EXPECT_EQ(test, 123, answers[0]); - kfree(answers); - - question = "a123"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); - kfree(answers); - - question = "12 35"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers); - for (i = 0; i < nr_integers; i++) - KUNIT_EXPECT_EQ(test, expected[i], answers[i]); - kfree(answers); - - question = "12 35 46"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers); - for (i = 0; i < nr_integers; i++) - KUNIT_EXPECT_EQ(test, expected[i], answers[i]); - kfree(answers); - - question = "12 35 abc 46"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers); - for (i = 0; i < 2; i++) - KUNIT_EXPECT_EQ(test, expected[i], answers[i]); - kfree(answers); - - question = ""; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); - kfree(answers); - - question = "\n"; - answers = str_to_ints(question, strlen(question), &nr_integers); - KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers); - kfree(answers); -} - -static void damon_dbgfs_test_set_targets(struct kunit *test) -{ - struct damon_ctx *ctx = dbgfs_new_ctx(); - char buf[64]; - - if (!damon_is_registered_ops(DAMON_OPS_PADDR)) { - dbgfs_destroy_ctx(ctx); - kunit_skip(test, "PADDR not registered"); - } - - /* Make DAMON consider target has no pid */ - damon_select_ops(ctx, DAMON_OPS_PADDR); - - dbgfs_set_targets(ctx, 0, NULL); - sprint_target_ids(ctx, buf, 64); - KUNIT_EXPECT_STREQ(test, (char *)buf, "\n"); - - dbgfs_set_targets(ctx, 1, NULL); - sprint_target_ids(ctx, buf, 64); - KUNIT_EXPECT_STREQ(test, (char 
*)buf, "42\n"); - - dbgfs_set_targets(ctx, 0, NULL); - sprint_target_ids(ctx, buf, 64); - KUNIT_EXPECT_STREQ(test, (char *)buf, "\n"); - - dbgfs_destroy_ctx(ctx); -} - -static void damon_dbgfs_test_set_init_regions(struct kunit *test) -{ - struct damon_ctx *ctx = damon_new_ctx(); - /* Each line represents one region in `` `` */ - char * const valid_inputs[] = {"1 10 20\n 1 20 30\n1 35 45", - "1 10 20\n", - "1 10 20\n0 39 59\n0 70 134\n 1 20 25\n", - ""}; - /* Reading the file again will show sorted, clean output */ - char * const valid_expects[] = {"1 10 20\n1 20 30\n1 35 45\n", - "1 10 20\n", - "0 39 59\n0 70 134\n1 10 20\n1 20 25\n", - ""}; - char * const invalid_inputs[] = {"3 10 20\n", /* target not exists */ - "1 10 20\n 1 14 26\n", /* regions overlap */ - "0 10 20\n1 30 40\n 0 5 8"}; /* not sorted by address */ - char *input, *expect; - int i, rc; - char buf[256]; - - if (!damon_is_registered_ops(DAMON_OPS_PADDR)) { - damon_destroy_ctx(ctx); - kunit_skip(test, "PADDR not registered"); - } - - damon_select_ops(ctx, DAMON_OPS_PADDR); - - dbgfs_set_targets(ctx, 3, NULL); - - /* Put valid inputs and check the results */ - for (i = 0; i < ARRAY_SIZE(valid_inputs); i++) { - input = valid_inputs[i]; - expect = valid_expects[i]; - - rc = set_init_regions(ctx, input, strnlen(input, 256)); - KUNIT_EXPECT_EQ(test, rc, 0); - - memset(buf, 0, 256); - sprint_init_regions(ctx, buf, 256); - - KUNIT_EXPECT_STREQ(test, (char *)buf, expect); - } - /* Put invalid inputs and check the return error code */ - for (i = 0; i < ARRAY_SIZE(invalid_inputs); i++) { - input = invalid_inputs[i]; - pr_info("input: %s\n", input); - rc = set_init_regions(ctx, input, strnlen(input, 256)); - KUNIT_EXPECT_EQ(test, rc, -EINVAL); - - memset(buf, 0, 256); - sprint_init_regions(ctx, buf, 256); - - KUNIT_EXPECT_STREQ(test, (char *)buf, ""); - } - - dbgfs_set_targets(ctx, 0, NULL); - damon_destroy_ctx(ctx); -} - -static struct kunit_case damon_test_cases[] = { - KUNIT_CASE(damon_dbgfs_test_str_to_ints), - KUNIT_CASE(damon_dbgfs_test_set_targets), - KUNIT_CASE(damon_dbgfs_test_set_init_regions), - {}, -}; - -static struct kunit_suite damon_test_suite = { - .name = "damon-dbgfs", - .test_cases = damon_test_cases, -}; -kunit_test_suite(damon_test_suite); - -#endif /* _DAMON_DBGFS_TEST_H */ - -#endif /* CONFIG_DAMON_KUNIT_TEST */ From 9aa622ee6c269712127f69024d83835405765cc2 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Mon, 6 Jan 2025 11:19:41 -0800 Subject: [PATCH 292/504] mm/damon: remove DAMON debugfs interface It's time to remove DAMON debugfs interface, which has deprecated long before in February 2023. Read the cover letter of this patch series for more details. All documents and related tests are also removed. Finally remove the interface. Link: https://lkml.kernel.org/r/20250106191941.107070-9-sj@kernel.org Signed-off-by: SeongJae Park Cc: Alex Shi Cc: Brendan Higgins Cc: David Gow Cc: Hu Haowen <2023002089@link.tyut.edu.cn> Cc: Jonathan Corbet Cc: Rae Moar Cc: Shuah Khan Cc: Yanteng Si Signed-off-by: Andrew Morton --- mm/damon/Kconfig | 18 - mm/damon/Makefile | 1 - mm/damon/dbgfs.c | 1146 --------------------------------------------- 3 files changed, 1165 deletions(-) delete mode 100644 mm/damon/dbgfs.c diff --git a/mm/damon/Kconfig b/mm/damon/Kconfig index db0d92624e8b..c213cf8b5638 100644 --- a/mm/damon/Kconfig +++ b/mm/damon/Kconfig @@ -71,24 +71,6 @@ config DAMON_SYSFS_KUNIT_TEST If unsure, say N. 
-config DAMON_DBGFS_DEPRECATED - bool "DAMON debugfs interface (DEPRECATED!)" - depends on DAMON_VADDR && DAMON_PADDR && DEBUG_FS - help - This builds the debugfs interface for DAMON. The user space admins - can use the interface for arbitrary data access monitoring. - - If unsure, say N. - - This is deprecated, so users should move to the sysfs interface - (DAMON_SYSFS). If you depend on this and cannot move, please report - your usecase to damon@lists.linux.dev and linux-mm@kvack.org. - -config DAMON_DBGFS - bool - default y - depends on DAMON_DBGFS_DEPRECATED - config DAMON_RECLAIM bool "Build DAMON-based reclaim (DAMON_RECLAIM)" depends on DAMON_PADDR diff --git a/mm/damon/Makefile b/mm/damon/Makefile index f7add3f4aa79..8b49012ba8c3 100644 --- a/mm/damon/Makefile +++ b/mm/damon/Makefile @@ -4,6 +4,5 @@ obj-y := core.o obj-$(CONFIG_DAMON_VADDR) += ops-common.o vaddr.o obj-$(CONFIG_DAMON_PADDR) += ops-common.o paddr.o obj-$(CONFIG_DAMON_SYSFS) += sysfs-common.o sysfs-schemes.o sysfs.o -obj-$(CONFIG_DAMON_DBGFS) += dbgfs.o obj-$(CONFIG_DAMON_RECLAIM) += modules-common.o reclaim.o obj-$(CONFIG_DAMON_LRU_SORT) += modules-common.o lru_sort.o diff --git a/mm/damon/dbgfs.c b/mm/damon/dbgfs.c deleted file mode 100644 index 5664c2cb0a5e..000000000000 --- a/mm/damon/dbgfs.c +++ /dev/null @@ -1,1146 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * DAMON Debugfs Interface - * - * Author: SeongJae Park - */ - -#define pr_fmt(fmt) "damon-dbgfs: " fmt - -#include -#include -#include -#include -#include -#include -#include - -#define DAMON_DBGFS_DEPRECATION_NOTICE \ - "DAMON debugfs interface is deprecated, so users should move " \ - "to DAMON_SYSFS. If you cannot, please report your usecase to " \ - "damon@lists.linux.dev and linux-mm@kvack.org.\n" - -static struct damon_ctx **dbgfs_ctxs; -static int dbgfs_nr_ctxs; -static struct dentry **dbgfs_dirs; -static DEFINE_MUTEX(damon_dbgfs_lock); - -static void damon_dbgfs_warn_deprecation(void) -{ - pr_warn_once(DAMON_DBGFS_DEPRECATION_NOTICE); -} - -/* - * Returns non-empty string on success, negative error code otherwise. 
- */ -static char *user_input_str(const char __user *buf, size_t count, loff_t *ppos) -{ - char *kbuf; - ssize_t ret; - - /* We do not accept continuous write */ - if (*ppos) - return ERR_PTR(-EINVAL); - - kbuf = kmalloc(count + 1, GFP_KERNEL | __GFP_NOWARN); - if (!kbuf) - return ERR_PTR(-ENOMEM); - - ret = simple_write_to_buffer(kbuf, count + 1, ppos, buf, count); - if (ret != count) { - kfree(kbuf); - return ERR_PTR(-EIO); - } - kbuf[ret] = '\0'; - - return kbuf; -} - -static ssize_t dbgfs_attrs_read(struct file *file, - char __user *buf, size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - char kbuf[128]; - int ret; - - mutex_lock(&ctx->kdamond_lock); - ret = scnprintf(kbuf, ARRAY_SIZE(kbuf), "%lu %lu %lu %lu %lu\n", - ctx->attrs.sample_interval, ctx->attrs.aggr_interval, - ctx->attrs.ops_update_interval, - ctx->attrs.min_nr_regions, ctx->attrs.max_nr_regions); - mutex_unlock(&ctx->kdamond_lock); - - return simple_read_from_buffer(buf, count, ppos, kbuf, ret); -} - -static ssize_t dbgfs_attrs_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - struct damon_attrs attrs; - char *kbuf; - ssize_t ret; - - kbuf = user_input_str(buf, count, ppos); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - - if (sscanf(kbuf, "%lu %lu %lu %lu %lu", - &attrs.sample_interval, &attrs.aggr_interval, - &attrs.ops_update_interval, - &attrs.min_nr_regions, - &attrs.max_nr_regions) != 5) { - ret = -EINVAL; - goto out; - } - - mutex_lock(&ctx->kdamond_lock); - if (ctx->kdamond) { - ret = -EBUSY; - goto unlock_out; - } - - ret = damon_set_attrs(ctx, &attrs); - if (!ret) - ret = count; -unlock_out: - mutex_unlock(&ctx->kdamond_lock); -out: - kfree(kbuf); - return ret; -} - -/* - * Return corresponding dbgfs' scheme action value (int) for the given - * damos_action if the given damos_action value is valid and supported by - * dbgfs, negative error code otherwise. 
- */ -static int damos_action_to_dbgfs_scheme_action(enum damos_action action) -{ - switch (action) { - case DAMOS_WILLNEED: - return 0; - case DAMOS_COLD: - return 1; - case DAMOS_PAGEOUT: - return 2; - case DAMOS_HUGEPAGE: - return 3; - case DAMOS_NOHUGEPAGE: - return 4; - case DAMOS_STAT: - return 5; - default: - return -EINVAL; - } -} - -static ssize_t sprint_schemes(struct damon_ctx *c, char *buf, ssize_t len) -{ - struct damos *s; - int written = 0; - int rc; - - damon_for_each_scheme(s, c) { - rc = scnprintf(&buf[written], len - written, - "%lu %lu %u %u %u %u %d %lu %lu %lu %u %u %u %d %lu %lu %lu %lu %lu %lu %lu %lu %lu\n", - s->pattern.min_sz_region, - s->pattern.max_sz_region, - s->pattern.min_nr_accesses, - s->pattern.max_nr_accesses, - s->pattern.min_age_region, - s->pattern.max_age_region, - damos_action_to_dbgfs_scheme_action(s->action), - s->quota.ms, s->quota.sz, - s->quota.reset_interval, - s->quota.weight_sz, - s->quota.weight_nr_accesses, - s->quota.weight_age, - s->wmarks.metric, s->wmarks.interval, - s->wmarks.high, s->wmarks.mid, s->wmarks.low, - s->stat.nr_tried, s->stat.sz_tried, - s->stat.nr_applied, s->stat.sz_applied, - s->stat.qt_exceeds); - if (!rc) - return -ENOMEM; - - written += rc; - } - return written; -} - -static ssize_t dbgfs_schemes_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - char *kbuf; - ssize_t len; - - kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); - if (!kbuf) - return -ENOMEM; - - mutex_lock(&ctx->kdamond_lock); - len = sprint_schemes(ctx, kbuf, count); - mutex_unlock(&ctx->kdamond_lock); - if (len < 0) - goto out; - len = simple_read_from_buffer(buf, count, ppos, kbuf, len); - -out: - kfree(kbuf); - return len; -} - -static void free_schemes_arr(struct damos **schemes, ssize_t nr_schemes) -{ - ssize_t i; - - for (i = 0; i < nr_schemes; i++) - kfree(schemes[i]); - kfree(schemes); -} - -/* - * Return corresponding damos_action for the given dbgfs input for a scheme - * action if the input is valid, negative error code otherwise. - */ -static enum damos_action dbgfs_scheme_action_to_damos_action(int dbgfs_action) -{ - switch (dbgfs_action) { - case 0: - return DAMOS_WILLNEED; - case 1: - return DAMOS_COLD; - case 2: - return DAMOS_PAGEOUT; - case 3: - return DAMOS_HUGEPAGE; - case 4: - return DAMOS_NOHUGEPAGE; - case 5: - return DAMOS_STAT; - default: - return -EINVAL; - } -} - -/* - * Converts a string into an array of struct damos pointers - * - * Returns an array of struct damos pointers that converted if the conversion - * success, or NULL otherwise. 
- */ -static struct damos **str_to_schemes(const char *str, ssize_t len, - ssize_t *nr_schemes) -{ - struct damos *scheme, **schemes; - const int max_nr_schemes = 256; - int pos = 0, parsed, ret; - unsigned int action_input; - enum damos_action action; - - schemes = kmalloc_array(max_nr_schemes, sizeof(scheme), - GFP_KERNEL); - if (!schemes) - return NULL; - - *nr_schemes = 0; - while (pos < len && *nr_schemes < max_nr_schemes) { - struct damos_access_pattern pattern = {}; - struct damos_quota quota = {}; - struct damos_watermarks wmarks; - - ret = sscanf(&str[pos], - "%lu %lu %u %u %u %u %u %lu %lu %lu %u %u %u %u %lu %lu %lu %lu%n", - &pattern.min_sz_region, &pattern.max_sz_region, - &pattern.min_nr_accesses, - &pattern.max_nr_accesses, - &pattern.min_age_region, - &pattern.max_age_region, - &action_input, "a.ms, - "a.sz, "a.reset_interval, - "a.weight_sz, "a.weight_nr_accesses, - "a.weight_age, &wmarks.metric, - &wmarks.interval, &wmarks.high, &wmarks.mid, - &wmarks.low, &parsed); - if (ret != 18) - break; - action = dbgfs_scheme_action_to_damos_action(action_input); - if ((int)action < 0) - goto fail; - - if (pattern.min_sz_region > pattern.max_sz_region || - pattern.min_nr_accesses > pattern.max_nr_accesses || - pattern.min_age_region > pattern.max_age_region) - goto fail; - - if (wmarks.high < wmarks.mid || wmarks.high < wmarks.low || - wmarks.mid < wmarks.low) - goto fail; - - pos += parsed; - scheme = damon_new_scheme(&pattern, action, 0, "a, - &wmarks, NUMA_NO_NODE); - if (!scheme) - goto fail; - - schemes[*nr_schemes] = scheme; - *nr_schemes += 1; - } - return schemes; -fail: - free_schemes_arr(schemes, *nr_schemes); - return NULL; -} - -static ssize_t dbgfs_schemes_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - char *kbuf; - struct damos **schemes; - ssize_t nr_schemes = 0, ret; - - kbuf = user_input_str(buf, count, ppos); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - - schemes = str_to_schemes(kbuf, count, &nr_schemes); - if (!schemes) { - ret = -EINVAL; - goto out; - } - - mutex_lock(&ctx->kdamond_lock); - if (ctx->kdamond) { - ret = -EBUSY; - goto unlock_out; - } - - damon_set_schemes(ctx, schemes, nr_schemes); - ret = count; - nr_schemes = 0; - -unlock_out: - mutex_unlock(&ctx->kdamond_lock); - free_schemes_arr(schemes, nr_schemes); -out: - kfree(kbuf); - return ret; -} - -static ssize_t sprint_target_ids(struct damon_ctx *ctx, char *buf, ssize_t len) -{ - struct damon_target *t; - int id; - int written = 0; - int rc; - - damon_for_each_target(t, ctx) { - if (damon_target_has_pid(ctx)) - /* Show pid numbers to debugfs users */ - id = pid_vnr(t->pid); - else - /* Show 42 for physical address space, just for fun */ - id = 42; - - rc = scnprintf(&buf[written], len - written, "%d ", id); - if (!rc) - return -ENOMEM; - written += rc; - } - if (written) - written -= 1; - written += scnprintf(&buf[written], len - written, "\n"); - return written; -} - -static ssize_t dbgfs_target_ids_read(struct file *file, - char __user *buf, size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - ssize_t len; - char ids_buf[320]; - - mutex_lock(&ctx->kdamond_lock); - len = sprint_target_ids(ctx, ids_buf, 320); - mutex_unlock(&ctx->kdamond_lock); - if (len < 0) - return len; - - return simple_read_from_buffer(buf, count, ppos, ids_buf, len); -} - -/* - * Converts a string into an integers array - * - * Returns an array of integers array if the conversion success, or NULL - * otherwise. 
- */ -static int *str_to_ints(const char *str, ssize_t len, ssize_t *nr_ints) -{ - int *array; - const int max_nr_ints = 32; - int nr; - int pos = 0, parsed, ret; - - *nr_ints = 0; - array = kmalloc_array(max_nr_ints, sizeof(*array), GFP_KERNEL); - if (!array) - return NULL; - while (*nr_ints < max_nr_ints && pos < len) { - ret = sscanf(&str[pos], "%d%n", &nr, &parsed); - pos += parsed; - if (ret != 1) - break; - array[*nr_ints] = nr; - *nr_ints += 1; - } - - return array; -} - -static void dbgfs_put_pids(struct pid **pids, int nr_pids) -{ - int i; - - for (i = 0; i < nr_pids; i++) - put_pid(pids[i]); -} - -/* - * Converts a string into an struct pid pointers array - * - * Returns an array of struct pid pointers if the conversion success, or NULL - * otherwise. - */ -static struct pid **str_to_pids(const char *str, ssize_t len, ssize_t *nr_pids) -{ - int *ints; - ssize_t nr_ints; - struct pid **pids; - - *nr_pids = 0; - - ints = str_to_ints(str, len, &nr_ints); - if (!ints) - return NULL; - - pids = kmalloc_array(nr_ints, sizeof(*pids), GFP_KERNEL); - if (!pids) - goto out; - - for (; *nr_pids < nr_ints; (*nr_pids)++) { - pids[*nr_pids] = find_get_pid(ints[*nr_pids]); - if (!pids[*nr_pids]) { - dbgfs_put_pids(pids, *nr_pids); - kfree(ints); - kfree(pids); - return NULL; - } - } - -out: - kfree(ints); - return pids; -} - -/* - * dbgfs_set_targets() - Set monitoring targets. - * @ctx: monitoring context - * @nr_targets: number of targets - * @pids: array of target pids (size is same to @nr_targets) - * - * This function should not be called while the kdamond is running. @pids is - * ignored if the context is not configured to have pid in each target. On - * failure, reference counts of all pids in @pids are decremented. - * - * Return: 0 on success, negative error code otherwise. 
- */ -static int dbgfs_set_targets(struct damon_ctx *ctx, ssize_t nr_targets, - struct pid **pids) -{ - ssize_t i; - struct damon_target *t, *next; - - damon_for_each_target_safe(t, next, ctx) { - if (damon_target_has_pid(ctx)) - put_pid(t->pid); - damon_destroy_target(t); - } - - for (i = 0; i < nr_targets; i++) { - t = damon_new_target(); - if (!t) { - damon_for_each_target_safe(t, next, ctx) - damon_destroy_target(t); - if (damon_target_has_pid(ctx)) - dbgfs_put_pids(pids, nr_targets); - return -ENOMEM; - } - if (damon_target_has_pid(ctx)) - t->pid = pids[i]; - damon_add_target(ctx, t); - } - - return 0; -} - -static ssize_t dbgfs_target_ids_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - bool id_is_pid = true; - char *kbuf; - struct pid **target_pids = NULL; - ssize_t nr_targets; - ssize_t ret; - - kbuf = user_input_str(buf, count, ppos); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - - if (!strncmp(kbuf, "paddr\n", count)) { - id_is_pid = false; - nr_targets = 1; - } - - if (id_is_pid) { - target_pids = str_to_pids(kbuf, count, &nr_targets); - if (!target_pids) { - ret = -ENOMEM; - goto out; - } - } - - mutex_lock(&ctx->kdamond_lock); - if (ctx->kdamond) { - if (id_is_pid) - dbgfs_put_pids(target_pids, nr_targets); - ret = -EBUSY; - goto unlock_out; - } - - /* remove previously set targets */ - dbgfs_set_targets(ctx, 0, NULL); - if (!nr_targets) { - ret = count; - goto unlock_out; - } - - /* Configure the context for the address space type */ - if (id_is_pid) - ret = damon_select_ops(ctx, DAMON_OPS_VADDR); - else - ret = damon_select_ops(ctx, DAMON_OPS_PADDR); - if (ret) - goto unlock_out; - - ret = dbgfs_set_targets(ctx, nr_targets, target_pids); - if (!ret) - ret = count; - -unlock_out: - mutex_unlock(&ctx->kdamond_lock); - kfree(target_pids); -out: - kfree(kbuf); - return ret; -} - -static ssize_t sprint_init_regions(struct damon_ctx *c, char *buf, ssize_t len) -{ - struct damon_target *t; - struct damon_region *r; - int target_idx = 0; - int written = 0; - int rc; - - damon_for_each_target(t, c) { - damon_for_each_region(r, t) { - rc = scnprintf(&buf[written], len - written, - "%d %lu %lu\n", - target_idx, r->ar.start, r->ar.end); - if (!rc) - return -ENOMEM; - written += rc; - } - target_idx++; - } - return written; -} - -static ssize_t dbgfs_init_regions_read(struct file *file, char __user *buf, - size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - char *kbuf; - ssize_t len; - - kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); - if (!kbuf) - return -ENOMEM; - - mutex_lock(&ctx->kdamond_lock); - if (ctx->kdamond) { - mutex_unlock(&ctx->kdamond_lock); - len = -EBUSY; - goto out; - } - - len = sprint_init_regions(ctx, kbuf, count); - mutex_unlock(&ctx->kdamond_lock); - if (len < 0) - goto out; - len = simple_read_from_buffer(buf, count, ppos, kbuf, len); - -out: - kfree(kbuf); - return len; -} - -static int add_init_region(struct damon_ctx *c, int target_idx, - struct damon_addr_range *ar) -{ - struct damon_target *t; - struct damon_region *r, *prev; - unsigned long idx = 0; - int rc = -EINVAL; - - if (ar->start >= ar->end) - return -EINVAL; - - damon_for_each_target(t, c) { - if (idx++ == target_idx) { - r = damon_new_region(ar->start, ar->end); - if (!r) - return -ENOMEM; - damon_add_region(r, t); - if (damon_nr_regions(t) > 1) { - prev = damon_prev_region(r); - if (prev->ar.end > r->ar.start) { - damon_destroy_region(r, t); - return -EINVAL; - } - } - rc = 0; - } - } - 
return rc; -} - -static int set_init_regions(struct damon_ctx *c, const char *str, ssize_t len) -{ - struct damon_target *t; - struct damon_region *r, *next; - int pos = 0, parsed, ret; - int target_idx; - struct damon_addr_range ar; - int err; - - damon_for_each_target(t, c) { - damon_for_each_region_safe(r, next, t) - damon_destroy_region(r, t); - } - - while (pos < len) { - ret = sscanf(&str[pos], "%d %lu %lu%n", - &target_idx, &ar.start, &ar.end, &parsed); - if (ret != 3) - break; - err = add_init_region(c, target_idx, &ar); - if (err) - goto fail; - pos += parsed; - } - - return 0; - -fail: - damon_for_each_target(t, c) { - damon_for_each_region_safe(r, next, t) - damon_destroy_region(r, t); - } - return err; -} - -static ssize_t dbgfs_init_regions_write(struct file *file, - const char __user *buf, size_t count, - loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - char *kbuf; - ssize_t ret = count; - int err; - - kbuf = user_input_str(buf, count, ppos); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - - mutex_lock(&ctx->kdamond_lock); - if (ctx->kdamond) { - ret = -EBUSY; - goto unlock_out; - } - - err = set_init_regions(ctx, kbuf, ret); - if (err) - ret = err; - -unlock_out: - mutex_unlock(&ctx->kdamond_lock); - kfree(kbuf); - return ret; -} - -static ssize_t dbgfs_kdamond_pid_read(struct file *file, - char __user *buf, size_t count, loff_t *ppos) -{ - struct damon_ctx *ctx = file->private_data; - char *kbuf; - ssize_t len; - - kbuf = kmalloc(count, GFP_KERNEL | __GFP_NOWARN); - if (!kbuf) - return -ENOMEM; - - mutex_lock(&ctx->kdamond_lock); - if (ctx->kdamond) - len = scnprintf(kbuf, count, "%d\n", ctx->kdamond->pid); - else - len = scnprintf(kbuf, count, "none\n"); - mutex_unlock(&ctx->kdamond_lock); - if (!len) - goto out; - len = simple_read_from_buffer(buf, count, ppos, kbuf, len); - -out: - kfree(kbuf); - return len; -} - -static int damon_dbgfs_open(struct inode *inode, struct file *file) -{ - damon_dbgfs_warn_deprecation(); - - file->private_data = inode->i_private; - - return nonseekable_open(inode, file); -} - -static const struct file_operations attrs_fops = { - .open = damon_dbgfs_open, - .read = dbgfs_attrs_read, - .write = dbgfs_attrs_write, -}; - -static const struct file_operations schemes_fops = { - .open = damon_dbgfs_open, - .read = dbgfs_schemes_read, - .write = dbgfs_schemes_write, -}; - -static const struct file_operations target_ids_fops = { - .open = damon_dbgfs_open, - .read = dbgfs_target_ids_read, - .write = dbgfs_target_ids_write, -}; - -static const struct file_operations init_regions_fops = { - .open = damon_dbgfs_open, - .read = dbgfs_init_regions_read, - .write = dbgfs_init_regions_write, -}; - -static const struct file_operations kdamond_pid_fops = { - .open = damon_dbgfs_open, - .read = dbgfs_kdamond_pid_read, -}; - -static void dbgfs_fill_ctx_dir(struct dentry *dir, struct damon_ctx *ctx) -{ - const char * const file_names[] = {"attrs", "schemes", "target_ids", - "init_regions", "kdamond_pid"}; - const struct file_operations *fops[] = {&attrs_fops, &schemes_fops, - &target_ids_fops, &init_regions_fops, &kdamond_pid_fops}; - int i; - - for (i = 0; i < ARRAY_SIZE(file_names); i++) - debugfs_create_file(file_names[i], 0600, dir, ctx, fops[i]); -} - -static void dbgfs_before_terminate(struct damon_ctx *ctx) -{ - struct damon_target *t, *next; - - if (!damon_target_has_pid(ctx)) - return; - - mutex_lock(&ctx->kdamond_lock); - damon_for_each_target_safe(t, next, ctx) { - put_pid(t->pid); - damon_destroy_target(t); - } - 
mutex_unlock(&ctx->kdamond_lock); -} - -static struct damon_ctx *dbgfs_new_ctx(void) -{ - struct damon_ctx *ctx; - - ctx = damon_new_ctx(); - if (!ctx) - return NULL; - - if (damon_select_ops(ctx, DAMON_OPS_VADDR) && - damon_select_ops(ctx, DAMON_OPS_PADDR)) { - damon_destroy_ctx(ctx); - return NULL; - } - ctx->callback.before_terminate = dbgfs_before_terminate; - return ctx; -} - -static void dbgfs_destroy_ctx(struct damon_ctx *ctx) -{ - damon_destroy_ctx(ctx); -} - -static ssize_t damon_dbgfs_deprecated_read(struct file *file, - char __user *buf, size_t count, loff_t *ppos) -{ - static const char kbuf[512] = DAMON_DBGFS_DEPRECATION_NOTICE; - - return simple_read_from_buffer(buf, count, ppos, kbuf, strlen(kbuf)); -} - -/* - * Make a context of @name and create a debugfs directory for it. - * - * This function should be called while holding damon_dbgfs_lock. - * - * Returns 0 on success, negative error code otherwise. - */ -static int dbgfs_mk_context(char *name) -{ - struct dentry *root, **new_dirs, *new_dir; - struct damon_ctx **new_ctxs, *new_ctx; - - if (damon_nr_running_ctxs()) - return -EBUSY; - - new_ctxs = krealloc(dbgfs_ctxs, sizeof(*dbgfs_ctxs) * - (dbgfs_nr_ctxs + 1), GFP_KERNEL); - if (!new_ctxs) - return -ENOMEM; - dbgfs_ctxs = new_ctxs; - - new_dirs = krealloc(dbgfs_dirs, sizeof(*dbgfs_dirs) * - (dbgfs_nr_ctxs + 1), GFP_KERNEL); - if (!new_dirs) - return -ENOMEM; - dbgfs_dirs = new_dirs; - - root = dbgfs_dirs[0]; - if (!root) - return -ENOENT; - - new_dir = debugfs_create_dir(name, root); - /* Below check is required for a potential duplicated name case */ - if (IS_ERR(new_dir)) - return PTR_ERR(new_dir); - dbgfs_dirs[dbgfs_nr_ctxs] = new_dir; - - new_ctx = dbgfs_new_ctx(); - if (!new_ctx) { - debugfs_remove(new_dir); - dbgfs_dirs[dbgfs_nr_ctxs] = NULL; - return -ENOMEM; - } - - dbgfs_ctxs[dbgfs_nr_ctxs] = new_ctx; - dbgfs_fill_ctx_dir(dbgfs_dirs[dbgfs_nr_ctxs], - dbgfs_ctxs[dbgfs_nr_ctxs]); - dbgfs_nr_ctxs++; - - return 0; -} - -static ssize_t dbgfs_mk_context_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) -{ - char *kbuf; - char *ctx_name; - ssize_t ret; - - kbuf = user_input_str(buf, count, ppos); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - ctx_name = kmalloc(count + 1, GFP_KERNEL); - if (!ctx_name) { - kfree(kbuf); - return -ENOMEM; - } - - /* Trim white space */ - if (sscanf(kbuf, "%s", ctx_name) != 1) { - ret = -EINVAL; - goto out; - } - - mutex_lock(&damon_dbgfs_lock); - ret = dbgfs_mk_context(ctx_name); - if (!ret) - ret = count; - mutex_unlock(&damon_dbgfs_lock); - -out: - kfree(kbuf); - kfree(ctx_name); - return ret; -} - -/* - * Remove a context of @name and its debugfs directory. - * - * This function should be called while holding damon_dbgfs_lock. - * - * Return 0 on success, negative error code otherwise. 
- */ -static int dbgfs_rm_context(char *name) -{ - struct dentry *root, *dir, **new_dirs; - struct inode *inode; - struct damon_ctx **new_ctxs; - int i, j; - int ret = 0; - - if (damon_nr_running_ctxs()) - return -EBUSY; - - root = dbgfs_dirs[0]; - if (!root) - return -ENOENT; - - dir = debugfs_lookup(name, root); - if (!dir) - return -ENOENT; - - inode = d_inode(dir); - if (!S_ISDIR(inode->i_mode)) { - ret = -EINVAL; - goto out_dput; - } - - new_dirs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_dirs), - GFP_KERNEL); - if (!new_dirs) { - ret = -ENOMEM; - goto out_dput; - } - - new_ctxs = kmalloc_array(dbgfs_nr_ctxs - 1, sizeof(*dbgfs_ctxs), - GFP_KERNEL); - if (!new_ctxs) { - ret = -ENOMEM; - goto out_new_dirs; - } - - for (i = 0, j = 0; i < dbgfs_nr_ctxs; i++) { - if (dbgfs_dirs[i] == dir) { - debugfs_remove(dbgfs_dirs[i]); - dbgfs_destroy_ctx(dbgfs_ctxs[i]); - continue; - } - new_dirs[j] = dbgfs_dirs[i]; - new_ctxs[j++] = dbgfs_ctxs[i]; - } - - kfree(dbgfs_dirs); - kfree(dbgfs_ctxs); - - dbgfs_dirs = new_dirs; - dbgfs_ctxs = new_ctxs; - dbgfs_nr_ctxs--; - - goto out_dput; - -out_new_dirs: - kfree(new_dirs); -out_dput: - dput(dir); - return ret; -} - -static ssize_t dbgfs_rm_context_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) -{ - char *kbuf; - ssize_t ret; - char *ctx_name; - - kbuf = user_input_str(buf, count, ppos); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - ctx_name = kmalloc(count + 1, GFP_KERNEL); - if (!ctx_name) { - kfree(kbuf); - return -ENOMEM; - } - - /* Trim white space */ - if (sscanf(kbuf, "%s", ctx_name) != 1) { - ret = -EINVAL; - goto out; - } - - mutex_lock(&damon_dbgfs_lock); - ret = dbgfs_rm_context(ctx_name); - if (!ret) - ret = count; - mutex_unlock(&damon_dbgfs_lock); - -out: - kfree(kbuf); - kfree(ctx_name); - return ret; -} - -static ssize_t dbgfs_monitor_on_read(struct file *file, - char __user *buf, size_t count, loff_t *ppos) -{ - char monitor_on_buf[5]; - bool monitor_on = damon_nr_running_ctxs() != 0; - int len; - - len = scnprintf(monitor_on_buf, 5, monitor_on ? 
"on\n" : "off\n"); - - return simple_read_from_buffer(buf, count, ppos, monitor_on_buf, len); -} - -static ssize_t dbgfs_monitor_on_write(struct file *file, - const char __user *buf, size_t count, loff_t *ppos) -{ - ssize_t ret; - char *kbuf; - - kbuf = user_input_str(buf, count, ppos); - if (IS_ERR(kbuf)) - return PTR_ERR(kbuf); - - /* Remove white space */ - if (sscanf(kbuf, "%s", kbuf) != 1) { - kfree(kbuf); - return -EINVAL; - } - - mutex_lock(&damon_dbgfs_lock); - if (!strncmp(kbuf, "on", count)) { - int i; - - for (i = 0; i < dbgfs_nr_ctxs; i++) { - if (damon_targets_empty(dbgfs_ctxs[i])) { - kfree(kbuf); - mutex_unlock(&damon_dbgfs_lock); - return -EINVAL; - } - } - ret = damon_start(dbgfs_ctxs, dbgfs_nr_ctxs, true); - } else if (!strncmp(kbuf, "off", count)) { - ret = damon_stop(dbgfs_ctxs, dbgfs_nr_ctxs); - } else { - ret = -EINVAL; - } - mutex_unlock(&damon_dbgfs_lock); - - if (!ret) - ret = count; - kfree(kbuf); - return ret; -} - -static int damon_dbgfs_static_file_open(struct inode *inode, struct file *file) -{ - damon_dbgfs_warn_deprecation(); - return nonseekable_open(inode, file); -} - -static const struct file_operations deprecated_fops = { - .read = damon_dbgfs_deprecated_read, -}; - -static const struct file_operations mk_contexts_fops = { - .open = damon_dbgfs_static_file_open, - .write = dbgfs_mk_context_write, -}; - -static const struct file_operations rm_contexts_fops = { - .open = damon_dbgfs_static_file_open, - .write = dbgfs_rm_context_write, -}; - -static const struct file_operations monitor_on_fops = { - .open = damon_dbgfs_static_file_open, - .read = dbgfs_monitor_on_read, - .write = dbgfs_monitor_on_write, -}; - -static int __init __damon_dbgfs_init(void) -{ - struct dentry *dbgfs_root; - const char * const file_names[] = {"mk_contexts", "rm_contexts", - "monitor_on_DEPRECATED", "DEPRECATED"}; - const struct file_operations *fops[] = {&mk_contexts_fops, - &rm_contexts_fops, &monitor_on_fops, &deprecated_fops}; - int i; - - dbgfs_root = debugfs_create_dir("damon", NULL); - - for (i = 0; i < ARRAY_SIZE(file_names); i++) - debugfs_create_file(file_names[i], 0600, dbgfs_root, NULL, - fops[i]); - dbgfs_fill_ctx_dir(dbgfs_root, dbgfs_ctxs[0]); - - dbgfs_dirs = kmalloc(sizeof(dbgfs_root), GFP_KERNEL); - if (!dbgfs_dirs) { - debugfs_remove(dbgfs_root); - return -ENOMEM; - } - dbgfs_dirs[0] = dbgfs_root; - - return 0; -} - -/* - * Functions for the initialization - */ - -static int __init damon_dbgfs_init(void) -{ - int rc = -ENOMEM; - - mutex_lock(&damon_dbgfs_lock); - dbgfs_ctxs = kmalloc(sizeof(*dbgfs_ctxs), GFP_KERNEL); - if (!dbgfs_ctxs) - goto out; - dbgfs_ctxs[0] = dbgfs_new_ctx(); - if (!dbgfs_ctxs[0]) { - kfree(dbgfs_ctxs); - goto out; - } - dbgfs_nr_ctxs = 1; - - rc = __damon_dbgfs_init(); - if (rc) { - kfree(dbgfs_ctxs[0]); - kfree(dbgfs_ctxs); - pr_err("%s: dbgfs init failed\n", __func__); - } - -out: - mutex_unlock(&damon_dbgfs_lock); - return rc; -} - -module_init(damon_dbgfs_init); From ce8e0193e7d9ff87304ea48434c537c419da9a7a Mon Sep 17 00:00:00 2001 From: Bruno Faccini Date: Mon, 6 Jan 2025 04:06:59 -0800 Subject: [PATCH 293/504] mm/fake-numa: allow later numa node hotplug MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Current fake-numa implementation prevents new Numa nodes to be later hot-plugged by drivers. A common symptom of this limitation is the "node was absent from the node_possible_map" message by associated warning in mm/memory_hotplug.c: add_memory_resource(). 
This comes from the lack of remapping in both pxm_to_node_map[] and node_to_pxm_map[] tables to take fake-numa nodes into account and thus triggers collisions with original and physical nodes only-mapping that had been determined from BIOS tables. This patch fixes this by doing the necessary node-ids translation in both pxm_to_node_map[]/node_to_pxm_map[] tables. node_distance[] table has also been fixed accordingly. Details: When trying to use fake-numa feature on our system where new Numa nodes are being "hot-plugged" upon driver load, this fails with the following type of message and warning with stack : node 8 was absent from the node_possible_map WARNING: CPU: 61 PID: 4259 at mm/memory_hotplug.c:1506 add_memory_resource+0x3dc/0x418 This issue prevents the use of the fake-NUMA debug feature with the system's full configuration, when it has proven to be sometimes extremely useful for performance testing of multi-tasked, memory-bound applications, as it enables better isolation of processes/ranks compared to fat NUMA nodes. Usual numactl output after driver has “hot-plugged”/unveiled some new Numa nodes with and without memory : $ numactl --hardware available: 9 nodes (0-8) node 0 cpus: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 node 0 size: 490037 MB node 0 free: 484432 MB node 1 cpus: node 1 size: 97280 MB node 1 free: 97279 MB node 2 cpus: node 2 size: 0 MB node 2 free: 0 MB node 3 cpus: node 3 size: 0 MB node 3 free: 0 MB node 4 cpus: node 4 size: 0 MB node 4 free: 0 MB node 5 cpus: node 5 size: 0 MB node 5 free: 0 MB node 6 cpus: node 6 size: 0 MB node 6 free: 0 MB node 7 cpus: node 7 size: 0 MB node 7 free: 0 MB node 8 cpus: node 8 size: 0 MB node 8 free: 0 MB node distances: node 0 1 2 3 4 5 6 7 8 0: 10 80 80 80 80 80 80 80 80 1: 80 10 255 255 255 255 255 255 255 2: 80 255 10 255 255 255 255 255 255 3: 80 255 255 10 255 255 255 255 255 4: 80 255 255 255 10 255 255 255 255 5: 80 255 255 255 255 10 255 255 255 6: 80 255 255 255 255 255 10 255 255 7: 80 255 255 255 255 255 255 10 255 8: 80 255 255 255 255 255 255 255 10 With recent M.Rapoport set of fake-numa patches in mm-everything and using numa=fake=4 boot parameter : $ numactl --hardware available: 4 nodes (0-3) node 0 cpus: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 node 0 size: 122518 MB node 0 free: 117141 MB node 1 cpus: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 node 1 size: 219911 MB node 1 free: 219751 MB node 2 cpus: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 node 2 size: 122599 MB node 2 free: 122541 MB node 3 cpus: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 node 3 size: 122479 MB node 3 free: 122408 MB node distances: node 0 1 2 3 0: 10 10 10 10 1: 10 10 10 10 2: 10 10 10 10 3: 10 10 10 10 With recent M.Rapoport set of 
fake-numa patches in mm-everything, this patch on top, using numa=fake=4 boot parameter : # numactl —hardware available: 12 nodes (0-11) node 0 cpus: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 node 0 size: 122518 MB node 0 free: 116429 MB node 1 cpus: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 node 1 size: 122631 MB node 1 free: 122576 MB node 2 cpus: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 node 2 size: 122599 MB node 2 free: 122544 MB node 3 cpus: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 node 3 size: 122479 MB node 3 free: 122419 MB node 4 cpus: node 4 size: 97280 MB node 4 free: 97279 MB node 5 cpus: node 5 size: 0 MB node 5 free: 0 MB node 6 cpus: node 6 size: 0 MB node 6 free: 0 MB node 7 cpus: node 7 size: 0 MB node 7 free: 0 MB node 8 cpus: node 8 size: 0 MB node 8 free: 0 MB node 9 cpus: node 9 size: 0 MB node 9 free: 0 MB node 10 cpus: node 10 size: 0 MB node 10 free: 0 MB node 11 cpus: node 11 size: 0 MB node 11 free: 0 MB node distances: node 0 1 2 3 4 5 6 7 8 9 10 11 0: 10 10 10 10 80 80 80 80 80 80 80 80 1: 10 10 10 10 80 80 80 80 80 80 80 80 2: 10 10 10 10 80 80 80 80 80 80 80 80 3: 10 10 10 10 80 80 80 80 80 80 80 80 4: 80 80 80 80 10 255 255 255 255 255 255 255 5: 80 80 80 80 255 10 255 255 255 255 255 255 6: 80 80 80 80 255 255 10 255 255 255 255 255 7: 80 80 80 80 255 255 255 10 255 255 255 255 8: 80 80 80 80 255 255 255 255 10 255 255 255 9: 80 80 80 80 255 255 255 255 255 10 255 255 10: 80 80 80 80 255 255 255 255 255 255 10 255 11: 80 80 80 80 255 255 255 255 255 255 255 10 Link: https://lkml.kernel.org/r/20250106120659.359610-2-bfaccini@nvidia.com Signed-off-by: Bruno Faccini Cc: David Hildenbrand Cc: John Hubbard Cc: Mike Rapoport (Microsoft) Cc: Zi Yan Signed-off-by: Andrew Morton --- drivers/acpi/numa/srat.c | 86 ++++++++++++++++++++++++++++++++++++ include/acpi/acpi_numa.h | 5 +++ include/linux/numa_memblks.h | 3 ++ mm/numa_emulation.c | 45 ++++++++++++++++--- mm/numa_memblks.c | 2 +- 5 files changed, 133 insertions(+), 8 deletions(-) diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index bec0dcd1f9c3..59fffe34c9d0 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -81,6 +81,92 @@ int acpi_map_pxm_to_node(int pxm) } EXPORT_SYMBOL(acpi_map_pxm_to_node); +#ifdef CONFIG_NUMA_EMU +/* + * Take max_nid - 1 fake-numa nodes into account in both + * pxm_to_node_map()/node_to_pxm_map[] tables. + */ +int __init fix_pxm_node_maps(int max_nid) +{ + static int pxm_to_node_map_copy[MAX_PXM_DOMAINS] __initdata + = { [0 ... MAX_PXM_DOMAINS - 1] = NUMA_NO_NODE }; + static int node_to_pxm_map_copy[MAX_NUMNODES] __initdata + = { [0 ... 
MAX_NUMNODES - 1] = PXM_INVAL }; + int i, j, index = -1, count = 0; + nodemask_t nodes_to_enable; + + if (numa_off || srat_disabled()) + return -1; + + /* find fake nodes PXM mapping */ + for (i = 0; i < MAX_NUMNODES; i++) { + if (node_to_pxm_map[i] != PXM_INVAL) { + for (j = 0; j <= max_nid; j++) { + if ((emu_nid_to_phys[j] == i) && + WARN(node_to_pxm_map_copy[j] != PXM_INVAL, + "Node %d is already binded to PXM %d\n", + j, node_to_pxm_map_copy[j])) + return -1; + if (emu_nid_to_phys[j] == i) { + node_to_pxm_map_copy[j] = + node_to_pxm_map[i]; + if (j > index) + index = j; + count++; + } + } + } + } + if (WARN(index != max_nid, "%d max nid when expected %d\n", + index, max_nid)) + return -1; + + nodes_clear(nodes_to_enable); + + /* map phys nodes not used for fake nodes */ + for (i = 0; i < MAX_NUMNODES; i++) { + if (node_to_pxm_map[i] != PXM_INVAL) { + for (j = 0; j <= max_nid; j++) + if (emu_nid_to_phys[j] == i) + break; + /* fake nodes PXM mapping has been done */ + if (j <= max_nid) + continue; + /* find first hole */ + for (j = 0; + j < MAX_NUMNODES && + node_to_pxm_map_copy[j] != PXM_INVAL; + j++) + ; + if (WARN(j == MAX_NUMNODES, + "Number of nodes exceeds MAX_NUMNODES\n")) + return -1; + node_to_pxm_map_copy[j] = node_to_pxm_map[i]; + node_set(j, nodes_to_enable); + count++; + } + } + + /* creating reverse mapping in pxm_to_node_map[] */ + for (i = 0; i < MAX_NUMNODES; i++) + if (node_to_pxm_map_copy[i] != PXM_INVAL && + pxm_to_node_map_copy[node_to_pxm_map_copy[i]] == NUMA_NO_NODE) + pxm_to_node_map_copy[node_to_pxm_map_copy[i]] = i; + + /* overwrite with new mapping */ + for (i = 0; i < MAX_NUMNODES; i++) { + node_to_pxm_map[i] = node_to_pxm_map_copy[i]; + pxm_to_node_map[i] = pxm_to_node_map_copy[i]; + } + + /* enable other nodes found in PXM for hotplug */ + nodes_or(numa_nodes_parsed, nodes_to_enable, numa_nodes_parsed); + + pr_debug("found %d total number of nodes\n", count); + return 0; +} +#endif + static void __init acpi_table_print_srat_entry(struct acpi_subtable_header *header) { diff --git a/include/acpi/acpi_numa.h b/include/acpi/acpi_numa.h index b5f594754a9e..99b960bd473c 100644 --- a/include/acpi/acpi_numa.h +++ b/include/acpi/acpi_numa.h @@ -17,11 +17,16 @@ extern int node_to_pxm(int); extern int acpi_map_pxm_to_node(int); extern unsigned char acpi_srat_revision; extern void disable_srat(void); +extern int fix_pxm_node_maps(int max_nid); extern void bad_srat(void); extern int srat_disabled(void); #else /* CONFIG_ACPI_NUMA */ +static inline int fix_pxm_node_maps(int max_nid) +{ + return 0; +} static inline void disable_srat(void) { } diff --git a/include/linux/numa_memblks.h b/include/linux/numa_memblks.h index cfad6ce7e1bd..dd85613cdd86 100644 --- a/include/linux/numa_memblks.h +++ b/include/linux/numa_memblks.h @@ -29,7 +29,10 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi); int __init numa_memblks_init(int (*init_func)(void), bool memblock_force_top_down); +extern int numa_distance_cnt; + #ifdef CONFIG_NUMA_EMU +extern int emu_nid_to_phys[MAX_NUMNODES]; int numa_emu_cmdline(char *str); void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys, unsigned int nr_emu_nids); diff --git a/mm/numa_emulation.c b/mm/numa_emulation.c index 031fb9961bf7..9d55679d99ce 100644 --- a/mm/numa_emulation.c +++ b/mm/numa_emulation.c @@ -8,11 +8,12 @@ #include #include #include +#include #define FAKE_NODE_MIN_SIZE ((u64)32 << 20) #define FAKE_NODE_MIN_HASH_MASK (~(FAKE_NODE_MIN_SIZE - 1UL)) -static int emu_nid_to_phys[MAX_NUMNODES]; +int 
emu_nid_to_phys[MAX_NUMNODES]; static char *emu_cmdline __initdata; int __init numa_emu_cmdline(char *str) @@ -379,6 +380,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) size_t phys_size = numa_dist_cnt * numa_dist_cnt * sizeof(phys_dist[0]); int max_emu_nid, dfl_phys_nid; int i, j, ret; + nodemask_t physnode_mask = numa_nodes_parsed; if (!emu_cmdline) goto no_emu; @@ -395,7 +397,6 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) * split the system RAM into N fake nodes. */ if (strchr(emu_cmdline, 'U')) { - nodemask_t physnode_mask = numa_nodes_parsed; unsigned long n; int nid = 0; @@ -465,9 +466,6 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) */ max_emu_nid = setup_emu2phys_nid(&dfl_phys_nid); - /* commit */ - *numa_meminfo = ei; - /* Make sure numa_nodes_parsed only contains emulated nodes */ nodes_clear(numa_nodes_parsed); for (i = 0; i < ARRAY_SIZE(ei.blk); i++) @@ -475,10 +473,21 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) ei.blk[i].nid != NUMA_NO_NODE) node_set(ei.blk[i].nid, numa_nodes_parsed); - numa_emu_update_cpu_to_node(emu_nid_to_phys, ARRAY_SIZE(emu_nid_to_phys)); + /* fix pxm_to_node_map[] and node_to_pxm_map[] to avoid collision + * with faked numa nodes, particularly during later memory hotplug + * handling, and also update numa_nodes_parsed accordingly. + */ + ret = fix_pxm_node_maps(max_emu_nid); + if (ret < 0) + goto no_emu; + + /* commit */ + *numa_meminfo = ei; + + numa_emu_update_cpu_to_node(emu_nid_to_phys, max_emu_nid + 1); /* make sure all emulated nodes are mapped to a physical node */ - for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) + for (i = 0; i < max_emu_nid + 1; i++) if (emu_nid_to_phys[i] == NUMA_NO_NODE) emu_nid_to_phys[i] = dfl_phys_nid; @@ -501,12 +510,34 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt) numa_set_distance(i, j, dist); } } + for (i = 0; i < numa_distance_cnt; i++) { + for (j = 0; j < numa_distance_cnt; j++) { + int physi, physj; + u8 dist; + + /* distance between fake nodes is already ok */ + if (emu_nid_to_phys[i] != NUMA_NO_NODE && + emu_nid_to_phys[j] != NUMA_NO_NODE) + continue; + if (emu_nid_to_phys[i] != NUMA_NO_NODE) + physi = emu_nid_to_phys[i]; + else + physi = i - max_emu_nid; + if (emu_nid_to_phys[j] != NUMA_NO_NODE) + physj = emu_nid_to_phys[j]; + else + physj = j - max_emu_nid; + dist = phys_dist[physi * numa_dist_cnt + physj]; + numa_set_distance(i, j, dist); + } + } /* free the copied physical distance table */ memblock_free(phys_dist, phys_size); return; no_emu: + numa_nodes_parsed = physnode_mask; /* No emulation. Build identity emu_nid_to_phys[] for numa_add_cpu() */ for (i = 0; i < ARRAY_SIZE(emu_nid_to_phys); i++) emu_nid_to_phys[i] = i; diff --git a/mm/numa_memblks.c b/mm/numa_memblks.c index a3877e9bc878..ff4054f4334d 100644 --- a/mm/numa_memblks.c +++ b/mm/numa_memblks.c @@ -7,7 +7,7 @@ #include #include -static int numa_distance_cnt; +int numa_distance_cnt; static u8 *numa_distance; nodemask_t numa_nodes_parsed __initdata; From 74eb038f38fef0a094134b454588707444c890c9 Mon Sep 17 00:00:00 2001 From: Guo Weikang Date: Mon, 6 Jan 2025 10:11:25 +0800 Subject: [PATCH 294/504] mm/memmap: prevent double scanning of memmap by kmemleak kmemleak explicitly scans the mem_map through the valid struct page objects. However, memmap_alloc() was also adding this memory to the gray object list, causing it to be scanned twice. 
Remove memmap_alloc() from the scan list and add a comment to clarify the behavior. Link: https://lore.kernel.org/lkml/CAOm6qn=FVeTpH54wGDFMHuCOeYtvoTx30ktnv9-w3Nh8RMofEA@mail.gmail.com/ Link: https://lkml.kernel.org/r/20250106021126.1678334-1-guoweikang.kernel@gmail.com Signed-off-by: Guo Weikang Reviewed-by: Catalin Marinas Cc: Mike Rapoport (Microsoft) Signed-off-by: Andrew Morton --- include/linux/memblock.h | 4 ++++ mm/mm_init.c | 8 ++++++-- mm/sparse-vmemmap.c | 5 +++-- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/include/linux/memblock.h b/include/linux/memblock.h index dee628350cd1..e79eb6ac516f 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -378,6 +378,10 @@ static inline int memblock_get_region_node(const struct memblock_region *r) /* Flags for memblock allocation APIs */ #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) #define MEMBLOCK_ALLOC_ACCESSIBLE 0 +/* + * MEMBLOCK_ALLOC_NOLEAKTRACE avoids kmemleak tracing. It implies + * MEMBLOCK_ALLOC_ACCESSIBLE + */ #define MEMBLOCK_ALLOC_NOLEAKTRACE 1 /* We are using top down, so it is safe to use 0 here */ diff --git a/mm/mm_init.c b/mm/mm_init.c index 24b68b425afb..2630cc30147e 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1585,13 +1585,17 @@ void __init *memmap_alloc(phys_addr_t size, phys_addr_t align, { void *ptr; + /* + * Kmemleak will explicitly scan mem_map by traversing all valid + * `struct *page`,so memblock does not need to be added to the scan list. + */ if (exact_nid) ptr = memblock_alloc_exact_nid_raw(size, align, min_addr, - MEMBLOCK_ALLOC_ACCESSIBLE, + MEMBLOCK_ALLOC_NOLEAKTRACE, nid); else ptr = memblock_alloc_try_nid_raw(size, align, min_addr, - MEMBLOCK_ALLOC_ACCESSIBLE, + MEMBLOCK_ALLOC_NOLEAKTRACE, nid); if (ptr && size > 0) diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index cec67c5f37d8..3287ebadd167 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -31,6 +31,8 @@ #include #include +#include "internal.h" + /* * Allocate a block of memory to be used to back the virtual memory map * or to back the page tables that are used to create the mapping. @@ -42,8 +44,7 @@ static void * __ref __earlyonly_bootmem_alloc(int node, unsigned long align, unsigned long goal) { - return memblock_alloc_try_nid_raw(size, align, goal, - MEMBLOCK_ALLOC_ACCESSIBLE, node); + return memmap_alloc(size, align, goal, node, false); } void * __meminit vmemmap_alloc_block(unsigned long size, int node) From e37099587494103107f78b4a824a874a9dcdcd15 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 8 Jan 2025 10:16:49 +0800 Subject: [PATCH 295/504] mm: shmem: skip swapcache for swapin of synchronous swap device With fast swap devices (such as zram), swapin latency is crucial to applications. For shmem swapin, similar to anonymous memory swapin, we can skip the swapcache operation to improve swapin latency. Testing 1G shmem sequential swapin without THP enabled, I observed approximately a 6% performance improvement: (Note: I repeated 5 times and took the mean data for each test) w/o patch w/ patch changes 534.8ms 501ms +6.3% In addition, currently, we always split the large swap entry stored in the shmem mapping during shmem large folio swapin, which is not perfect, especially with a fast swap device. We should swap in the whole large folio instead of splitting the precious large folios to take advantage of the large folios and improve the swapin latency if the swap device is synchronous device, which is similar to anonymous memory mTHP swapin. 
Testing 1G shmem sequential swapin with 64K mTHP and 2M mTHP, I observed obvious performance improvement: mTHP=64K w/o patch w/ patch changes 550.4ms 169.6ms +69% mTHP=2M w/o patch w/ patch changes 542.8ms 126.8ms +77% Note that skipping swapcache requires attention to concurrent swapin scenarios. Fortunately the swapcache_prepare() and shmem_add_to_page_cache() can help identify concurrent swapin and large swap entry split scenarios, and return -EEXIST for retry. Link: https://lkml.kernel.org/r/3d9f3bd3bc6ec953054baff5134f66feeaae7c1e.1736301701.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kairui Song Cc: Kefeng Wang Cc: Matthew Wilcox (Oracle) Cc: Ryan Roberts Signed-off-by: Andrew Morton --- mm/shmem.c | 112 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 107 insertions(+), 5 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index a687ed3404ff..ac5070b3769b 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1967,6 +1967,67 @@ unlock: return ERR_PTR(error); } +static struct folio *shmem_swap_alloc_folio(struct inode *inode, + struct vm_area_struct *vma, pgoff_t index, + swp_entry_t entry, int order, gfp_t gfp) +{ + struct shmem_inode_info *info = SHMEM_I(inode); + struct folio *new; + void *shadow; + int nr_pages; + + /* + * We have arrived here because our zones are constrained, so don't + * limit chance of success by further cpuset and node constraints. + */ + gfp &= ~GFP_CONSTRAINT_MASK; +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + if (order > 0) { + gfp_t huge_gfp = vma_thp_gfp_mask(vma); + + gfp = limit_gfp_mask(huge_gfp, gfp); + } +#endif + + new = shmem_alloc_folio(gfp, order, info, index); + if (!new) + return ERR_PTR(-ENOMEM); + + nr_pages = folio_nr_pages(new); + if (mem_cgroup_swapin_charge_folio(new, vma ? vma->vm_mm : NULL, + gfp, entry)) { + folio_put(new); + return ERR_PTR(-ENOMEM); + } + + /* + * Prevent parallel swapin from proceeding with the swap cache flag. + * + * Of course there is another possible concurrent scenario as well, + * that is to say, the swap cache flag of a large folio has already + * been set by swapcache_prepare(), while another thread may have + * already split the large swap entry stored in the shmem mapping. + * In this case, shmem_add_to_page_cache() will help identify the + * concurrent swapin and return -EEXIST. 
+ */ + if (swapcache_prepare(entry, nr_pages)) { + folio_put(new); + return ERR_PTR(-EEXIST); + } + + __folio_set_locked(new); + __folio_set_swapbacked(new); + new->swap = entry; + + mem_cgroup_swapin_uncharge_swap(entry, nr_pages); + shadow = get_shadow_from_swap_cache(entry); + if (shadow) + workingset_refault(new, shadow); + folio_add_lru(new); + swap_read_folio(new, NULL); + return new; +} + /* * When a page is moved from swapcache to shmem filecache (either by the * usual swapin of shmem_get_folio_gfp(), or by the less common swapoff of @@ -2070,7 +2131,8 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp, } static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, - struct folio *folio, swp_entry_t swap) + struct folio *folio, swp_entry_t swap, + bool skip_swapcache) { struct address_space *mapping = inode->i_mapping; swp_entry_t swapin_error; @@ -2086,7 +2148,8 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index, nr_pages = folio_nr_pages(folio); folio_wait_writeback(folio); - delete_from_swap_cache(folio); + if (!skip_swapcache) + delete_from_swap_cache(folio); /* * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks * won't be 0 when inode is released and thus trigger WARN_ON(i_blocks) @@ -2190,6 +2253,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, struct shmem_inode_info *info = SHMEM_I(inode); struct swap_info_struct *si; struct folio *folio = NULL; + bool skip_swapcache = false; swp_entry_t swap; int error, nr_pages; @@ -2211,6 +2275,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, /* Look it up and read it in.. */ folio = swap_cache_get_folio(swap, NULL, 0); if (!folio) { + int order = xa_get_order(&mapping->i_pages, index); + bool fallback_order0 = false; int split_order; /* Or update major stats only when swapin succeeds?? */ @@ -2220,6 +2286,33 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, count_memcg_event_mm(fault_mm, PGMAJFAULT); } + /* + * If uffd is active for the vma, we need per-page fault + * fidelity to maintain the uffd semantics, then fallback + * to swapin order-0 folio, as well as for zswap case. + */ + if (order > 0 && ((vma && unlikely(userfaultfd_armed(vma))) || + !zswap_never_enabled())) + fallback_order0 = true; + + /* Skip swapcache for synchronous device. */ + if (!fallback_order0 && data_race(si->flags & SWP_SYNCHRONOUS_IO)) { + folio = shmem_swap_alloc_folio(inode, vma, index, swap, order, gfp); + if (!IS_ERR(folio)) { + skip_swapcache = true; + goto alloced; + } + + /* + * Fallback to swapin order-0 folio unless the swap entry + * already exists. 
+ */ + error = PTR_ERR(folio); + folio = NULL; + if (error == -EEXIST) + goto failed; + } + /* * Now swap device can only swap in order 0 folio, then we * should split the large swap entry stored in the pagecache @@ -2250,9 +2343,10 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, } } +alloced: /* We have to do this with folio locked to prevent races */ folio_lock(folio); - if (!folio_test_swapcache(folio) || + if ((!skip_swapcache && !folio_test_swapcache(folio)) || folio->swap.val != swap.val || !shmem_confirm_swap(mapping, index, swap)) { error = -EEXIST; @@ -2288,7 +2382,12 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index, if (sgp == SGP_WRITE) folio_mark_accessed(folio); - delete_from_swap_cache(folio); + if (skip_swapcache) { + folio->swap.val = 0; + swapcache_clear(si, swap, nr_pages); + } else { + delete_from_swap_cache(folio); + } folio_mark_dirty(folio); swap_free_nr(swap, nr_pages); put_swap_device(si); @@ -2299,8 +2398,11 @@ failed: if (!shmem_confirm_swap(mapping, index, swap)) error = -EEXIST; if (error == -EIO) - shmem_set_folio_swapin_error(inode, index, folio, swap); + shmem_set_folio_swapin_error(inode, index, folio, swap, + skip_swapcache); unlock: + if (skip_swapcache) + swapcache_clear(si, swap, folio_nr_pages(folio)); if (folio) { folio_unlock(folio); folio_put(folio); From 317733f330ce36b2a2fe70a66a604d09f7a81134 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Tue, 7 Jan 2025 18:25:19 -0800 Subject: [PATCH 296/504] mm-shmem-skip-swapcache-for-swapin-of-synchronous-swap-device-fix use IS_ENABLED(), tweak comment grammar Cc: Baolin Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Hugh Dickins Cc: Kairui Song Cc: Kefeng Wang Cc: Matthew Wilcox (Oracle) Cc: Ryan Roberts Signed-off-by: Andrew Morton --- mm/shmem.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/mm/shmem.c b/mm/shmem.c index ac5070b3769b..44379bee5b96 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1978,16 +1978,14 @@ static struct folio *shmem_swap_alloc_folio(struct inode *inode, /* * We have arrived here because our zones are constrained, so don't - * limit chance of success by further cpuset and node constraints. + * limit chance of success with further cpuset and node constraints. */ gfp &= ~GFP_CONSTRAINT_MASK; -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - if (order > 0) { + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && order > 0) { gfp_t huge_gfp = vma_thp_gfp_mask(vma); gfp = limit_gfp_mask(huge_gfp, gfp); } -#endif new = shmem_alloc_folio(gfp, order, info, index); if (!new) From 8ec6067276704129409bc12ec14a8522b3552fcf Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 20 Dec 2024 08:47:39 -0700 Subject: [PATCH 297/504] mm/filemap: change filemap_create_folio() to take a struct kiocb Patch series "Uncached buffered IO", v8. 5 years ago I posted patches adding support for RWF_UNCACHED, as a way to do buffered IO that isn't page cache persistent. The approach back then was to have private pages for IO, and then get rid of them once IO was done. But that then runs into all the issues that O_DIRECT has, in terms of synchronizing with the page cache. So here's a new approach to the same concent, but using the page cache as synchronization. Due to excessive bike shedding on the naming, this is now named RWF_DONTCACHE, and is less special in that it's just page cache IO, except it prunes the ranges once IO is completed. Why do this, you may ask? The tldr is that device speeds are only getting faster, while reclaim is not. 
Doing normal buffered IO can be very unpredictable, and it can suck up a lot of resources on the reclaim side. This leads people to use O_DIRECT as a work-around, which has its own set of restrictions in terms of size, offset, and length of IO. It's also inherently synchronous, and now you need async IO as well. While the latter isn't necessarily a big problem as we have good options available there, it also should not be a requirement when all you want to do is read or write some data without caching. Even on desktop-type systems, a normal NVMe device can fill the entire page cache in seconds. On the big system I used for testing, there's a lot more RAM, but also a lot more devices. As can be seen in some of the results in the following patches, you can still fill RAM in seconds even when there's 1TB of it. Hence this problem isn't solely a "big hyperscaler system" issue; it's common across the board.

Common for both reads and writes with RWF_DONTCACHE is that they use the page cache for IO. Reads work just like a normal buffered read would, with the only exception being that the touched ranges will get pruned after data has been copied. For writes, the ranges will get writeback kicked off before the syscall returns, and then writeback completion will prune the range. Hence writes aren't synchronous, and it's easy to pipeline writes using RWF_DONTCACHE. Folios that aren't instantiated by RWF_DONTCACHE IO are left untouched. This means that uncached IO will take advantage of the page cache for uptodate data, but not leave anything it instantiated/created in cache.

File systems need to support this. This patchset adds support for the generic read path, which covers file systems like ext4. Patches exist to add support for iomap/XFS and btrfs as well, which sit on top of this series. If RWF_DONTCACHE IO is attempted on a file system that doesn't support it, -EOPNOTSUPP is returned. Hence the user can rely on it either working as designed, or flagging an error if that's not the case. The intent here is to give the application a sensible fallback path - e.g., it may fall back to O_DIRECT if appropriate, or just live with the fact that uncached IO isn't available and do normal buffered IO.

Adding "support" to other file systems should be trivial, most of the time just a one-liner adding FOP_DONTCACHE to the fop_flags in the file_operations struct, if the file system is using either iomap or the generic filemap helpers for reading and writing.

Performance results are in patch 8 for reads, and you can find the write side results in the XFS patch adding support for DONTCACHE writes for XFS: https://git.kernel.dk/cgit/linux/commit/?h=buffered-uncached-fs.10&id=257e92de795fdff7d7e256501e024fac6da6a7f4 with the tldr being that I see about a 65% improvement in performance for both, with fully predictable IO times. CPU reduction is substantial as well, with no kswapd activity at all for reclaim when using uncached IO.

Using it from applications is trivial - just set RWF_DONTCACHE for the read or write, using pwritev2(2) or preadv2(2). For io_uring, same thing, just set RWF_DONTCACHE in sqe->rw_flags for a buffered read/write operation (a minimal userspace sketch is included below). And that's it.

Patches 1..7 are just prep patches, and should have no functional changes at all. Patch 8 adds support for the filemap path for RWF_DONTCACHE reads, and patches 9..12 are just prep patches for supporting the write side of uncached writes. In the branch mentioned below, there are then patches to adopt uncached reads and writes for xfs, btrfs, and ext4.
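For illustration, a minimal userspace sketch of the usage described above (not part of the series; the scratch file name is arbitrary, and the RWF_DONTCACHE fallback value is an assumption that should be checked against <linux/fs.h> from a kernel carrying these patches):

/*
 * Minimal sketch of an uncached buffered write, not part of this series.
 * The file name is a placeholder and the RWF_DONTCACHE fallback value
 * below is an assumption for older headers; verify it against
 * <linux/fs.h> on a kernel that carries these patches.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef RWF_DONTCACHE
#define RWF_DONTCACHE	0x00000080	/* assumed value, check <linux/fs.h> */
#endif

int main(void)
{
	char buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int fd = open("dontcache-test.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(buf, 'x', sizeof(buf));

	/* Buffered write through the page cache; the range is pruned once IO completes. */
	if (pwritev2(fd, &iov, 1, 0, RWF_DONTCACHE) < 0)
		perror("pwritev2");	/* EOPNOTSUPP if the file system lacks support */

	close(fd);
	return 0;
}

Reads work the same way via preadv2(2), and an io_uring user sets the same flag in sqe->rw_flags for a buffered read/write; an EOPNOTSUPP failure is the cue to fall back to O_DIRECT or plain buffered IO as described above.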
The latter currently relies on bit of a hack for passing whether this is an uncached write or not through ->write_begin(), which can hopefully go away once ext4 adopts iomap for buffered writes. I say this is a hack as it's not the prettiest way to do it, however it is fully solid and will work just fine. Passes full xfstests and fsx overnight runs, no issues observed. That includes the vm running the testing also using RWF_DONTCACHE on the host. I'll post fsstress and fsx patches for RWF_DONTCACHE separately. As far as I'm concerned, no further work needs doing here. And git tree for the patches is here: https://git.kernel.dk/cgit/linux/log/?h=buffered-uncached.10 with the file system patches on top adding support for xfs/btrfs/ext4 here: https://git.kernel.dk/cgit/linux/log/?h=buffered-uncached-fs.10 This patch (of 12): Rather than pass in both the file and position directly from the kiocb, just take a struct kiocb instead. With the kiocb being passed in, skip passing in the address_space separately as well. While doing so, move the ki_flags checking into filemap_create_folio() as well. In preparation for actually needing the kiocb in the function. No functional changes in this patch. Link: https://lkml.kernel.org/r/20241220154831.1086649-1-axboe@kernel.dk Link: https://lkml.kernel.org/r/20241220154831.1086649-2-axboe@kernel.dk Signed-off-by: Jens Axboe Reviewed-by: Kirill A. Shutemov Reviewed-by: Matthew Wilcox (Oracle) Cc: Brian Foster Cc: Chris Mason Cc: Johannes Weiner Cc: Christoph Hellwig Signed-off-by: Andrew Morton --- mm/filemap.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index b6494d2d3bc2..904d8fa2bfc0 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2431,15 +2431,17 @@ unlock_mapping: return error; } -static int filemap_create_folio(struct file *file, - struct address_space *mapping, loff_t pos, - struct folio_batch *fbatch) +static int filemap_create_folio(struct kiocb *iocb, struct folio_batch *fbatch) { + struct address_space *mapping = iocb->ki_filp->f_mapping; struct folio *folio; int error; unsigned int min_order = mapping_min_folio_order(mapping); pgoff_t index; + if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ)) + return -EAGAIN; + folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order); if (!folio) return -ENOMEM; @@ -2458,7 +2460,7 @@ static int filemap_create_folio(struct file *file, * well to keep locking rules simple. 
*/ filemap_invalidate_lock_shared(mapping); - index = (pos >> (PAGE_SHIFT + min_order)) << min_order; + index = (iocb->ki_pos >> (PAGE_SHIFT + min_order)) << min_order; error = filemap_add_folio(mapping, folio, index, mapping_gfp_constraint(mapping, GFP_KERNEL)); if (error == -EEXIST) @@ -2466,7 +2468,8 @@ static int filemap_create_folio(struct file *file, if (error) goto error; - error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); + error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, + folio); if (error) goto error; @@ -2522,9 +2525,7 @@ retry: filemap_get_read_batch(mapping, index, last_index - 1, fbatch); } if (!folio_batch_count(fbatch)) { - if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ)) - return -EAGAIN; - err = filemap_create_folio(filp, mapping, iocb->ki_pos, fbatch); + err = filemap_create_folio(iocb, fbatch); if (err == AOP_TRUNCATED_PAGE) goto retry; return err; From 3dc3b284e6343770d5a4b92a9dcae0f10d500239 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 20 Dec 2024 08:47:40 -0700 Subject: [PATCH 298/504] mm/filemap: use page_cache_sync_ra() to kick off read-ahead Rather than use the page_cache_sync_readahead() helper, define our own ractl and use page_cache_sync_ra() directly. In preparation for needing to modify ractl inside filemap_get_pages(). No functional changes in this patch. Link: https://lkml.kernel.org/r/20241220154831.1086649-3-axboe@kernel.dk Signed-off-by: Jens Axboe Reviewed-by: Kirill A. Shutemov Reviewed-by: Christoph Hellwig Reviewed-by: Matthew Wilcox (Oracle) Cc: Brian Foster Cc: Chris Mason Cc: Johannes Weiner Signed-off-by: Andrew Morton --- mm/filemap.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index 904d8fa2bfc0..a1fda00aa6bc 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2499,7 +2499,6 @@ static int filemap_get_pages(struct kiocb *iocb, size_t count, { struct file *filp = iocb->ki_filp; struct address_space *mapping = filp->f_mapping; - struct file_ra_state *ra = &filp->f_ra; pgoff_t index = iocb->ki_pos >> PAGE_SHIFT; pgoff_t last_index; struct folio *folio; @@ -2514,12 +2513,13 @@ retry: filemap_get_read_batch(mapping, index, last_index - 1, fbatch); if (!folio_batch_count(fbatch)) { + DEFINE_READAHEAD(ractl, filp, &filp->f_ra, mapping, index); + if (iocb->ki_flags & IOCB_NOIO) return -EAGAIN; if (iocb->ki_flags & IOCB_NOWAIT) flags = memalloc_noio_save(); - page_cache_sync_readahead(mapping, ra, filp, index, - last_index - index); + page_cache_sync_ra(&ractl, last_index - index); if (iocb->ki_flags & IOCB_NOWAIT) memalloc_noio_restore(flags); filemap_get_read_batch(mapping, index, last_index - 1, fbatch); From e2cd197d7eff75113a82da1b50d1fb845afccf07 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 20 Dec 2024 08:47:41 -0700 Subject: [PATCH 299/504] mm/readahead: add folio allocation helper Just a wrapper around filemap_alloc_folio() for now, but add it in preparation for modifying the folio based on the 'ractl' being passed in. No functional changes in this patch. Link: https://lkml.kernel.org/r/20241220154831.1086649-4-axboe@kernel.dk Signed-off-by: Jens Axboe Reviewed-by: Kirill A. 
Shutemov Reviewed-by: Matthew Wilcox (Oracle) Cc: Brian Foster Cc: Chris Mason Cc: Christoph Hellwig Cc: Johannes Weiner Signed-off-by: Andrew Morton --- mm/readahead.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/mm/readahead.c b/mm/readahead.c index 2bc3abf07828..722b541c7137 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -178,6 +178,12 @@ static void read_pages(struct readahead_control *rac) BUG_ON(readahead_count(rac)); } +static struct folio *ractl_alloc_folio(struct readahead_control *ractl, + gfp_t gfp_mask, unsigned int order) +{ + return filemap_alloc_folio(gfp_mask, order); +} + /** * page_cache_ra_unbounded - Start unchecked readahead. * @ractl: Readahead control. @@ -255,8 +261,8 @@ void page_cache_ra_unbounded(struct readahead_control *ractl, continue; } - folio = filemap_alloc_folio(gfp_mask, - mapping_min_folio_order(mapping)); + folio = ractl_alloc_folio(ractl, gfp_mask, + mapping_min_folio_order(mapping)); if (!folio) break; @@ -426,7 +432,7 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index, pgoff_t mark, unsigned int order, gfp_t gfp) { int err; - struct folio *folio = filemap_alloc_folio(gfp, order); + struct folio *folio = ractl_alloc_folio(ractl, gfp, order); if (!folio) return -ENOMEM; @@ -751,7 +757,7 @@ void readahead_expand(struct readahead_control *ractl, if (folio && !xa_is_value(folio)) return; /* Folio apparently present */ - folio = filemap_alloc_folio(gfp_mask, min_order); + folio = ractl_alloc_folio(ractl, gfp_mask, min_order); if (!folio) return; @@ -780,7 +786,7 @@ void readahead_expand(struct readahead_control *ractl, if (folio && !xa_is_value(folio)) return; /* Folio apparently present */ - folio = filemap_alloc_folio(gfp_mask, min_order); + folio = ractl_alloc_folio(ractl, gfp_mask, min_order); if (!folio) return; From fb90bc967a163d053d32740f8dcaa58b1aaa72e6 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 20 Dec 2024 08:47:42 -0700 Subject: [PATCH 300/504] mm: add PG_dropbehind folio flag Add a folio flag that file IO can use to indicate that the cached IO being done should be dropped from the page cache upon completion. Link: https://lkml.kernel.org/r/20241220154831.1086649-5-axboe@kernel.dk Signed-off-by: Jens Axboe Reviewed-by: Kirill A. Shutemov Cc: Brian Foster Cc: Chris Mason Cc: Christoph Hellwig Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 5 +++++ include/trace/events/mmflags.h | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 16fa8f0cea02..2414e7921eea 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -110,6 +110,7 @@ enum pageflags { PG_reclaim, /* To be reclaimed asap */ PG_swapbacked, /* Page is backed by RAM/swap */ PG_unevictable, /* Page is "unevictable" */ + PG_dropbehind, /* drop pages on IO completion */ #ifdef CONFIG_MMU PG_mlocked, /* Page is vma mlocked */ #endif @@ -599,6 +600,10 @@ PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE) FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE) +FOLIO_FLAG(dropbehind, FOLIO_HEAD_PAGE) + FOLIO_TEST_CLEAR_FLAG(dropbehind, FOLIO_HEAD_PAGE) + __FOLIO_SET_FLAG(dropbehind, FOLIO_HEAD_PAGE) + #ifdef CONFIG_HIGHMEM /* * Must use a macro here due to header dependency issues. 
page_zone() is not diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index bb8a59c6caa2..3bc8656c8359 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -116,7 +116,8 @@ DEF_PAGEFLAG_NAME(head), \ DEF_PAGEFLAG_NAME(reclaim), \ DEF_PAGEFLAG_NAME(swapbacked), \ - DEF_PAGEFLAG_NAME(unevictable) \ + DEF_PAGEFLAG_NAME(unevictable), \ + DEF_PAGEFLAG_NAME(dropbehind) \ IF_HAVE_PG_MLOCK(mlocked) \ IF_HAVE_PG_HWPOISON(hwpoison) \ IF_HAVE_PG_IDLE(idle) \ From b5db12db32ca4843f4548b77ef76aac39383344f Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 20 Dec 2024 08:47:43 -0700 Subject: [PATCH 301/504] mm/readahead: add readahead_control->dropbehind member If ractl->dropbehind is set to true, then folios created are marked as dropbehind as well. Link: https://lkml.kernel.org/r/20241220154831.1086649-6-axboe@kernel.dk Signed-off-by: Jens Axboe Reviewed-by: Kirill A. Shutemov Cc: Brian Foster Cc: Chris Mason Cc: Christoph Hellwig Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 1 + mm/readahead.c | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 12136ed844ac..db816b7ab4fc 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1369,6 +1369,7 @@ struct readahead_control { pgoff_t _index; unsigned int _nr_pages; unsigned int _batch_count; + bool dropbehind; bool _workingset; unsigned long _pflags; }; diff --git a/mm/readahead.c b/mm/readahead.c index 722b541c7137..6a4e96b69702 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -181,7 +181,13 @@ static void read_pages(struct readahead_control *rac) static struct folio *ractl_alloc_folio(struct readahead_control *ractl, gfp_t gfp_mask, unsigned int order) { - return filemap_alloc_folio(gfp_mask, order); + struct folio *folio; + + folio = filemap_alloc_folio(gfp_mask, order); + if (folio && ractl->dropbehind) + __folio_set_dropbehind(folio); + + return folio; } /** From b0d1b5d3e83a831477e246cc9f1574bfdedd20e1 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 20 Dec 2024 08:47:44 -0700 Subject: [PATCH 302/504] mm/truncate: add folio_unmap_invalidate() helper Add a folio_unmap_invalidate() helper, which unmaps and invalidates a given folio. The caller must already have locked the folio. Embed the old invalidate_complete_folio2() helper in there as well, as nobody else calls it. Use this new helper in invalidate_inode_pages2_range(), rather than duplicate the code there. In preparation for using this elsewhere as well, have it take a gfp_t mask rather than assume GFP_KERNEL is the right choice. This bubbles back to invalidate_complete_folio2() as well. Link: https://lkml.kernel.org/r/20241220154831.1086649-7-axboe@kernel.dk Signed-off-by: Jens Axboe Cc: Brian Foster Cc: Chris Mason Cc: Christoph Hellwig Cc: Johannes Weiner Cc: Kirill A. 
Shutemov Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- mm/internal.h | 2 ++ mm/truncate.c | 53 +++++++++++++++++++++++++++------------------------ 2 files changed, 30 insertions(+), 25 deletions(-) diff --git a/mm/internal.h b/mm/internal.h index 4d4028d74e5d..109ef30fee11 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -392,6 +392,8 @@ void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma, unsigned long addr, unsigned long end, struct zap_details *details); +int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio, + gfp_t gfp); void page_cache_ra_order(struct readahead_control *, struct file_ra_state *, unsigned int order); diff --git a/mm/truncate.c b/mm/truncate.c index 7c304d2f0052..e2e115adfbc5 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -525,6 +525,15 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping, } EXPORT_SYMBOL(invalidate_mapping_pages); +static int folio_launder(struct address_space *mapping, struct folio *folio) +{ + if (!folio_test_dirty(folio)) + return 0; + if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL) + return 0; + return mapping->a_ops->launder_folio(folio); +} + /* * This is like mapping_evict_folio(), except it ignores the folio's * refcount. We do this because invalidate_inode_pages2() needs stronger @@ -532,14 +541,26 @@ EXPORT_SYMBOL(invalidate_mapping_pages); * shrink_folio_list() has a temp ref on them, or because they're transiently * sitting in the folio_add_lru() caches. */ -static int invalidate_complete_folio2(struct address_space *mapping, - struct folio *folio) +int folio_unmap_invalidate(struct address_space *mapping, struct folio *folio, + gfp_t gfp) { - if (folio->mapping != mapping) - return 0; + int ret; - if (!filemap_release_folio(folio, GFP_KERNEL)) + VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); + + if (folio_test_dirty(folio)) return 0; + if (folio_mapped(folio)) + unmap_mapping_folio(folio); + BUG_ON(folio_mapped(folio)); + + ret = folio_launder(mapping, folio); + if (ret) + return ret; + if (folio->mapping != mapping) + return -EBUSY; + if (!filemap_release_folio(folio, gfp)) + return -EBUSY; spin_lock(&mapping->host->i_lock); xa_lock_irq(&mapping->i_pages); @@ -558,16 +579,7 @@ static int invalidate_complete_folio2(struct address_space *mapping, failed: xa_unlock_irq(&mapping->i_pages); spin_unlock(&mapping->host->i_lock); - return 0; -} - -static int folio_launder(struct address_space *mapping, struct folio *folio) -{ - if (!folio_test_dirty(folio)) - return 0; - if (folio->mapping != mapping || mapping->a_ops->launder_folio == NULL) - return 0; - return mapping->a_ops->launder_folio(folio); + return -EBUSY; } /** @@ -631,16 +643,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping, } VM_BUG_ON_FOLIO(!folio_contains(folio, indices[i]), folio); folio_wait_writeback(folio); - - if (folio_mapped(folio)) - unmap_mapping_folio(folio); - BUG_ON(folio_mapped(folio)); - - ret2 = folio_launder(mapping, folio); - if (ret2 == 0) { - if (!invalidate_complete_folio2(mapping, folio)) - ret2 = -EBUSY; - } + ret2 = folio_unmap_invalidate(mapping, folio, GFP_KERNEL); if (ret2 < 0) ret = ret2; folio_unlock(folio); From f12f45a602e27fe0347c5e2091e745448ca33206 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 20 Dec 2024 08:47:45 -0700 Subject: [PATCH 303/504] fs: add RWF_DONTCACHE iocb and FOP_DONTCACHE file_operations flag If a file system supports uncached buffered IO, it may set FOP_DONTCACHE and enable support for 
RWF_DONTCACHE. If RWF_DONTCACHE is attempted without the file system supporting it, it'll get errored with -EOPNOTSUPP. Link: https://lkml.kernel.org/r/20241220154831.1086649-8-axboe@kernel.dk Signed-off-by: Jens Axboe Cc: Brian Foster Cc: Chris Mason Cc: Christoph Hellwig Cc: Johannes Weiner Cc: Kirill A. Shutemov Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/fs.h | 14 +++++++++++++- include/uapi/linux/fs.h | 6 +++++- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/include/linux/fs.h b/include/linux/fs.h index 7e29433c5ecc..6a838b5479a6 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -322,6 +322,7 @@ struct readahead_control; #define IOCB_NOWAIT (__force int) RWF_NOWAIT #define IOCB_APPEND (__force int) RWF_APPEND #define IOCB_ATOMIC (__force int) RWF_ATOMIC +#define IOCB_DONTCACHE (__force int) RWF_DONTCACHE /* non-RWF related bits - start at 16 */ #define IOCB_EVENTFD (1 << 16) @@ -356,7 +357,8 @@ struct readahead_control; { IOCB_SYNC, "SYNC" }, \ { IOCB_NOWAIT, "NOWAIT" }, \ { IOCB_APPEND, "APPEND" }, \ - { IOCB_ATOMIC, "ATOMIC"}, \ + { IOCB_ATOMIC, "ATOMIC" }, \ + { IOCB_DONTCACHE, "DONTCACHE" }, \ { IOCB_EVENTFD, "EVENTFD"}, \ { IOCB_DIRECT, "DIRECT" }, \ { IOCB_WRITE, "WRITE" }, \ @@ -2127,6 +2129,8 @@ struct file_operations { #define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5)) /* Supports asynchronous lock callbacks */ #define FOP_ASYNC_LOCK ((__force fop_flags_t)(1 << 6)) +/* File system supports uncached read/write buffered IO */ +#define FOP_DONTCACHE ((__force fop_flags_t)(1 << 7)) /* Wrap a directory iterator that needs exclusive inode access */ int wrap_directory_iterator(struct file *, struct dir_context *, @@ -3614,6 +3618,14 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags, if (!(ki->ki_filp->f_mode & FMODE_CAN_ATOMIC_WRITE)) return -EOPNOTSUPP; } + if (flags & RWF_DONTCACHE) { + /* file system must support it */ + if (!(ki->ki_filp->f_op->fop_flags & FOP_DONTCACHE)) + return -EOPNOTSUPP; + /* DAX mappings not supported */ + if (IS_DAX(ki->ki_filp->f_mapping->host)) + return -EOPNOTSUPP; + } kiocb_flags |= (__force int) (flags & RWF_SUPPORTED); if (flags & RWF_SYNC) kiocb_flags |= IOCB_DSYNC; diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h index 753971770733..56a4f93a08f4 100644 --- a/include/uapi/linux/fs.h +++ b/include/uapi/linux/fs.h @@ -332,9 +332,13 @@ typedef int __bitwise __kernel_rwf_t; /* Atomic Write */ #define RWF_ATOMIC ((__force __kernel_rwf_t)0x00000040) +/* buffered IO that drops the cache after reading or writing data */ +#define RWF_DONTCACHE ((__force __kernel_rwf_t)0x00000080) + /* mask of flags supported by the kernel */ #define RWF_SUPPORTED (RWF_HIPRI | RWF_DSYNC | RWF_SYNC | RWF_NOWAIT |\ - RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC) + RWF_APPEND | RWF_NOAPPEND | RWF_ATOMIC |\ + RWF_DONTCACHE) #define PROCFS_IOCTL_MAGIC 'f' From 89ae0f8a3355ae85ceee24692413b9a81c0a2f02 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 20 Dec 2024 08:47:46 -0700 Subject: [PATCH 304/504] mm/filemap: add read support for RWF_DONTCACHE Add RWF_DONTCACHE as a read operation flag, which means that any data read wil be removed from the page cache upon completion. Uses the page cache to synchronize, and simply prunes folios that were instantiated when the operation completes. 
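As a userspace illustration (not part of the diffs in this series): uncached buffered IO is requested per call through the RWF flags of preadv2()/pwritev2(). The sketch below assumes the uapi headers in the build environment already carry the RWF_DONTCACHE definition added above; if they do not, defining it locally to the same 0x00000080 value is enough for experimentation.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/uio.h>

#ifndef RWF_DONTCACHE
#define RWF_DONTCACHE	0x00000080	/* matches the uapi value added above */
#endif

int main(int argc, char *argv[])
{
	char buf[65536];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	ssize_t ret;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Buffered read that asks the kernel to drop the instantiated
	 * folios again once the data has been copied out.
	 */
	ret = preadv2(fd, &iov, 1, 0, RWF_DONTCACHE);
	if (ret < 0)
		perror("preadv2(RWF_DONTCACHE)");
	else
		printf("read %zd bytes uncached\n", ret);

	close(fd);
	return 0;
}

On kernels or file systems without FOP_DONTCACHE support the call fails with -EOPNOTSUPP rather than silently falling back to cached IO, as described above.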
While it would be possible to use private pages for this, using the page cache as synchronization is handy for a variety of reasons: 1) No special truncate magic is needed 2) Async buffered reads need some place to serialize, using the page cache is a lot easier than writing extra code for this 3) The pruning cost is pretty reasonable and the code to support this is much simpler as a result. You can think of uncached buffered IO as being the much more attractive cousin of O_DIRECT - it has none of the restrictions of O_DIRECT. Yes, it will copy the data, but unlike regular buffered IO, it doesn't run into the unpredictability of the page cache in terms of reclaim. As an example, on a test box with 32 drives, reading them with buffered IO looks as follows: Reading bs 65536, uncached 0 1s: 145945MB/sec 2s: 158067MB/sec 3s: 157007MB/sec 4s: 148622MB/sec 5s: 118824MB/sec 6s: 70494MB/sec 7s: 41754MB/sec 8s: 90811MB/sec 9s: 92204MB/sec 10s: 95178MB/sec 11s: 95488MB/sec 12s: 95552MB/sec 13s: 96275MB/sec where it's quite easy to see where the page cache filled up, and performance went from good to erratic, and finally settles at a much lower rate. Looking at top while this is ongoing, we see: PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 7535 root 20 0 267004 0 0 S 3199 0.0 8:40.65 uncached 3326 root 20 0 0 0 0 R 100.0 0.0 0:16.40 kswapd4 3327 root 20 0 0 0 0 R 100.0 0.0 0:17.22 kswapd5 3328 root 20 0 0 0 0 R 100.0 0.0 0:13.29 kswapd6 3332 root 20 0 0 0 0 R 100.0 0.0 0:11.11 kswapd10 3339 root 20 0 0 0 0 R 100.0 0.0 0:16.25 kswapd17 3348 root 20 0 0 0 0 R 100.0 0.0 0:16.40 kswapd26 3343 root 20 0 0 0 0 R 100.0 0.0 0:16.30 kswapd21 3344 root 20 0 0 0 0 R 100.0 0.0 0:11.92 kswapd22 3349 root 20 0 0 0 0 R 100.0 0.0 0:16.28 kswapd27 3352 root 20 0 0 0 0 R 99.7 0.0 0:11.89 kswapd30 3353 root 20 0 0 0 0 R 96.7 0.0 0:16.04 kswapd31 3329 root 20 0 0 0 0 R 96.4 0.0 0:11.41 kswapd7 3345 root 20 0 0 0 0 R 96.4 0.0 0:13.40 kswapd23 3330 root 20 0 0 0 0 S 91.1 0.0 0:08.28 kswapd8 3350 root 20 0 0 0 0 S 86.8 0.0 0:11.13 kswapd28 3325 root 20 0 0 0 0 S 76.3 0.0 0:07.43 kswapd3 3341 root 20 0 0 0 0 S 74.7 0.0 0:08.85 kswapd19 3334 root 20 0 0 0 0 S 71.7 0.0 0:10.04 kswapd12 3351 root 20 0 0 0 0 R 60.5 0.0 0:09.59 kswapd29 3323 root 20 0 0 0 0 R 57.6 0.0 0:11.50 kswapd1 [...] which is just showing a partial list of the 32 kswapd threads that are running mostly full tilt, burning ~28 full CPU cores. If the same test case is run with RWF_DONTCACHE set for the buffered read, the output looks as follows: Reading bs 65536, uncached 0 1s: 153144MB/sec 2s: 156760MB/sec 3s: 158110MB/sec 4s: 158009MB/sec 5s: 158043MB/sec 6s: 157638MB/sec 7s: 157999MB/sec 8s: 158024MB/sec 9s: 157764MB/sec 10s: 157477MB/sec 11s: 157417MB/sec 12s: 157455MB/sec 13s: 157233MB/sec 14s: 156692MB/sec which is just chugging along at ~155GB/sec of read performance. Looking at top, we see: PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND 7961 root 20 0 267004 0 0 S 3180 0.0 5:37.95 uncached 8024 axboe 20 0 14292 4096 0 R 1.0 0.0 0:00.13 top where just the test app is using CPU, no reclaim is taking place outside of the main thread. Not only is performance 65% better, it's also using half the CPU to do it. Link: https://lkml.kernel.org/r/20241220154831.1086649-9-axboe@kernel.dk Signed-off-by: Jens Axboe Cc: Brian Foster Cc: Chris Mason Cc: Christoph Hellwig Cc: Johannes Weiner Cc: Kirill A. 
Shutemov Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- mm/filemap.c | 28 ++++++++++++++++++++++++++-- mm/swap.c | 2 ++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index a1fda00aa6bc..9eade935a48c 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2445,6 +2445,8 @@ static int filemap_create_folio(struct kiocb *iocb, struct folio_batch *fbatch) folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order); if (!folio) return -ENOMEM; + if (iocb->ki_flags & IOCB_DONTCACHE) + __folio_set_dropbehind(folio); /* * Protect against truncate / hole punch. Grabbing invalidate_lock @@ -2490,6 +2492,8 @@ static int filemap_readahead(struct kiocb *iocb, struct file *file, if (iocb->ki_flags & IOCB_NOIO) return -EAGAIN; + if (iocb->ki_flags & IOCB_DONTCACHE) + ractl.dropbehind = 1; page_cache_async_ra(&ractl, folio, last_index - folio->index); return 0; } @@ -2519,6 +2523,8 @@ retry: return -EAGAIN; if (iocb->ki_flags & IOCB_NOWAIT) flags = memalloc_noio_save(); + if (iocb->ki_flags & IOCB_DONTCACHE) + ractl.dropbehind = 1; page_cache_sync_ra(&ractl, last_index - index); if (iocb->ki_flags & IOCB_NOWAIT) memalloc_noio_restore(flags); @@ -2566,6 +2572,20 @@ static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio) return (pos1 >> shift == pos2 >> shift); } +static void filemap_end_dropbehind_read(struct address_space *mapping, + struct folio *folio) +{ + if (!folio_test_dropbehind(folio)) + return; + if (folio_test_writeback(folio) || folio_test_dirty(folio)) + return; + if (folio_trylock(folio)) { + if (folio_test_clear_dropbehind(folio)) + folio_unmap_invalidate(mapping, folio, 0); + folio_unlock(folio); + } +} + /** * filemap_read - Read data from the page cache. * @iocb: The iocb to read. @@ -2679,8 +2699,12 @@ ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter, } } put_folios: - for (i = 0; i < folio_batch_count(&fbatch); i++) - folio_put(fbatch.folios[i]); + for (i = 0; i < folio_batch_count(&fbatch); i++) { + struct folio *folio = fbatch.folios[i]; + + filemap_end_dropbehind_read(mapping, folio); + folio_put(folio); + } folio_batch_init(&fbatch); } while (iov_iter_count(iter) && iocb->ki_pos < isize && !error); diff --git a/mm/swap.c b/mm/swap.c index 746a5ceba42c..fc8281ef4241 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -448,6 +448,8 @@ static bool lru_gen_clear_refs(struct folio *folio) */ void folio_mark_accessed(struct folio *folio) { + if (folio_test_dropbehind(folio)) + return; if (lru_gen_enabled()) { lru_gen_inc_refs(folio); return; From 342b379f33629719b7860d5a601ffba4e1c092c0 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 20 Dec 2024 08:47:47 -0700 Subject: [PATCH 305/504] mm/filemap: drop streaming/uncached pages when writeback completes If the folio is marked as streaming, drop pages when writeback completes. Intended to be used with RWF_DONTCACHE, to avoid needing sync writes for uncached IO. Link: https://lkml.kernel.org/r/20241220154831.1086649-10-axboe@kernel.dk Signed-off-by: Jens Axboe Cc: Brian Foster Cc: Chris Mason Cc: Christoph Hellwig Cc: Johannes Weiner Cc: Kirill A. 
Shutemov Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- mm/filemap.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/mm/filemap.c b/mm/filemap.c index 9eade935a48c..fb17b573ae51 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1571,6 +1571,27 @@ int folio_wait_private_2_killable(struct folio *folio) } EXPORT_SYMBOL(folio_wait_private_2_killable); +/* + * If folio was marked as dropbehind, then pages should be dropped when writeback + * completes. Do that now. If we fail, it's likely because of a big folio - + * just reset dropbehind for that case and latter completions should invalidate. + */ +static void folio_end_dropbehind_write(struct folio *folio) +{ + /* + * Hitting !in_task() should not happen off RWF_DONTCACHE writeback, + * but can happen if normal writeback just happens to find dirty folios + * that were created as part of uncached writeback, and that writeback + * would otherwise not need non-IRQ handling. Just skip the + * invalidation in that case. + */ + if (in_task() && folio_trylock(folio)) { + if (folio->mapping) + folio_unmap_invalidate(folio->mapping, folio, 0); + folio_unlock(folio); + } +} + /** * folio_end_writeback - End writeback against a folio. * @folio: The folio. @@ -1581,6 +1602,8 @@ EXPORT_SYMBOL(folio_wait_private_2_killable); */ void folio_end_writeback(struct folio *folio) { + bool folio_dropbehind = false; + VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio); /* @@ -1602,9 +1625,14 @@ void folio_end_writeback(struct folio *folio) * reused before the folio_wake_bit(). */ folio_get(folio); + if (!folio_test_dirty(folio)) + folio_dropbehind = folio_test_clear_dropbehind(folio); if (__folio_end_writeback(folio)) folio_wake_bit(folio, PG_writeback); acct_reclaim_writeback(folio); + + if (folio_dropbehind) + folio_end_dropbehind_write(folio); folio_put(folio); } EXPORT_SYMBOL(folio_end_writeback); From 5abc2bdbd4ea06359401b7085705189c22612b88 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 20 Dec 2024 08:47:48 -0700 Subject: [PATCH 306/504] mm/filemap: add filemap_fdatawrite_range_kick() helper Works like filemap_fdatawrite_range(), except it's a non-integrity data writeback and hence only starts writeback on the specified range. Will help facilitate generically starting uncached writeback from generic_write_sync(), as header dependencies preclude doing this inline from fs.h. Link: https://lkml.kernel.org/r/20241220154831.1086649-11-axboe@kernel.dk Signed-off-by: Jens Axboe Cc: Brian Foster Cc: Chris Mason Cc: Christoph Hellwig Cc: Johannes Weiner Cc: Kirill A. 
Shutemov Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/fs.h | 2 ++ mm/filemap.c | 18 ++++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/include/linux/fs.h b/include/linux/fs.h index 6a838b5479a6..653b5efa3d3f 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2878,6 +2878,8 @@ extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart, extern int __must_check file_check_and_advance_wb_err(struct file *file); extern int __must_check file_write_and_wait_range(struct file *file, loff_t start, loff_t end); +int filemap_fdatawrite_range_kick(struct address_space *mapping, loff_t start, + loff_t end); static inline int file_write_and_wait(struct file *file) { diff --git a/mm/filemap.c b/mm/filemap.c index fb17b573ae51..0aa3861aed45 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -440,6 +440,24 @@ int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, } EXPORT_SYMBOL(filemap_fdatawrite_range); +/** + * filemap_fdatawrite_range_kick - start writeback on a range + * @mapping: target address_space + * @start: index to start writeback on + * @end: last (non-inclusive) index for writeback + * + * This is a non-integrity writeback helper, to start writing back folios + * for the indicated range. + * + * Return: %0 on success, negative error code otherwise. + */ +int filemap_fdatawrite_range_kick(struct address_space *mapping, loff_t start, + loff_t end) +{ + return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_NONE); +} +EXPORT_SYMBOL_GPL(filemap_fdatawrite_range_kick); + /** * filemap_flush - mostly a non-blocking flush * @mapping: target address_space From b76a5232eb6eb3ce381b78063e26d4a8281e6af8 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 20 Dec 2024 08:47:49 -0700 Subject: [PATCH 307/504] mm: call filemap_fdatawrite_range_kick() after IOCB_DONTCACHE issue When a buffered write submitted with IOCB_DONTCACHE has been successfully submitted, call filemap_fdatawrite_range_kick() to kick off the IO. File systems call generic_write_sync() for any successful buffered write submission, hence add the logic here rather than needing to modify the file system. Link: https://lkml.kernel.org/r/20241220154831.1086649-12-axboe@kernel.dk Signed-off-by: Jens Axboe Cc: Brian Foster Cc: Chris Mason Cc: Christoph Hellwig Cc: Johannes Weiner Cc: Kirill A. Shutemov Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/fs.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/linux/fs.h b/include/linux/fs.h index 653b5efa3d3f..58a618853574 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2912,6 +2912,11 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count) (iocb->ki_flags & IOCB_SYNC) ? 0 : 1); if (ret) return ret; + } else if (iocb->ki_flags & IOCB_DONTCACHE) { + struct address_space *mapping = iocb->ki_filp->f_mapping; + + filemap_fdatawrite_range_kick(mapping, iocb->ki_pos, + iocb->ki_pos + count); } return count; From 8fbed73d404c499a3908b9914a234e48f751c0df Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Fri, 20 Dec 2024 08:47:50 -0700 Subject: [PATCH 308/504] mm: add FGP_DONTCACHE folio creation flag Callers can pass this in for uncached folio creation, in which case if a folio is newly created it gets marked as uncached. If a folio exists for this index and lookup succeeds, then it will not get marked as uncached. If an !uncached lookup finds a cached folio, clear the flag. 
For that case, there are competeting uncached and cached users of the folio, and it should not get pruned. Link: https://lkml.kernel.org/r/20241220154831.1086649-13-axboe@kernel.dk Signed-off-by: Jens Axboe Cc: Brian Foster Cc: Chris Mason Cc: Christoph Hellwig Cc: Johannes Weiner Cc: Kirill A. Shutemov Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 2 ++ mm/filemap.c | 5 +++++ 2 files changed, 7 insertions(+) diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index db816b7ab4fc..d0be5f36082a 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -721,6 +721,7 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping, * * %FGP_NOFS - __GFP_FS will get cleared in gfp. * * %FGP_NOWAIT - Don't block on the folio lock. * * %FGP_STABLE - Wait for the folio to be stable (finished writeback) + * * %FGP_DONTCACHE - Uncached buffered IO * * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin() * implementation. */ @@ -734,6 +735,7 @@ typedef unsigned int __bitwise fgf_t; #define FGP_NOWAIT ((__force fgf_t)0x00000020) #define FGP_FOR_MMAP ((__force fgf_t)0x00000040) #define FGP_STABLE ((__force fgf_t)0x00000080) +#define FGP_DONTCACHE ((__force fgf_t)0x00000100) #define FGF_GET_ORDER(fgf) (((__force unsigned)fgf) >> 26) /* top 6 bits */ #define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE) diff --git a/mm/filemap.c b/mm/filemap.c index 0aa3861aed45..279959cf9300 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1973,6 +1973,8 @@ no_page: /* Init accessed so avoid atomic mark_page_accessed later */ if (fgp_flags & FGP_ACCESSED) __folio_set_referenced(folio); + if (fgp_flags & FGP_DONTCACHE) + __folio_set_dropbehind(folio); err = filemap_add_folio(mapping, folio, index, gfp); if (!err) @@ -1995,6 +1997,9 @@ no_page: if (!folio) return ERR_PTR(-ENOENT); + /* not an uncached lookup, clear uncached if set */ + if (folio_test_dropbehind(folio) && !(fgp_flags & FGP_DONTCACHE)) + folio_clear_dropbehind(folio); return folio; } EXPORT_SYMBOL(__filemap_get_folio); From 841131f4476985d9c54b93cc5d14f5d91840cf9c Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:39:56 -0500 Subject: [PATCH 309/504] mm/hugetlb: fix avoid_reserve to allow taking folio from subpool Patch series "mm/hugetlb: Refactor hugetlb allocation resv accounting", v2. This is a follow up on Ackerley's series here as replacement: https://lore.kernel.org/r/cover.1728684491.git.ackerleytng@google.com The goal of this series is to cleanup hugetlb resv accounting, especially during folio allocation, to decouple a few things: - Hugetlb folios v.s. Hugetlbfs: IOW, the hope is in the future hugetlb folios can be allocated completely without hugetlbfs. - Decouple VMA v.s. hugetlb folio allocations: allocating a hugetlb folio should not always require a hugetlbfs VMA. For example, either it got allocated from the inode level (see hugetlbfs_fallocate() where it used a pesudo VMA for allocation), or it can be allocated by other kernel subsystems. It paves way for other users to allocate hugetlb folios out of either system reservations, or subpools (instead of hugetlbfs, as a file system). For longer term, this prepares hugetlb as a separate concept versus hugetlbfs, so that hugetlb folios can be allocated by not only hugetlbfs and other things. Tests I've done: - I had a reproducer in patch 1 for the bug I found, this will start to work after patch 1 or the whole set applied. 
- Hugetlb regression tests (on x86_64 2MBs), includes: - All vmtests on hugetlbfs - libhugetlbfs test suite (which may fail some tests, but no new failures will be introduced by this series, so all such failures happen before this series so shouldn't be relevant). This patch (of 7): Since commit 04f2cbe35699 ("hugetlb: guarantee that COW faults for a process that called mmap(MAP_PRIVATE) on hugetlbfs will succeed"), avoid_reserve was introduced for a special case of CoW on hugetlb private mappings, and only if the owner VMA is trying to allocate yet another hugetlb folio that is not reserved within the private vma reserved map. Later on, in commit d85f69b0b533 ("mm/hugetlb: alloc_huge_page handle areas hole punched by fallocate"), alloc_huge_page() enforced to not consume any global reservation as long as avoid_reserve=true. This operation doesn't look correct, because even if it will enforce the allocation to not use global reservation at all, it will still try to take one reservation from the spool (if the subpool existed). Then since the spool reserved pages take from global reservation, it'll also take one reservation globally. Logically it can cause global reservation to go wrong. I wrote a reproducer below, trigger this special path, and every run of such program will cause global reservation count to increment by one, until it hits the number of free pages: #define _GNU_SOURCE /* See feature_test_macros(7) */ #include #include #include #include #include #include #define MSIZE (2UL << 20) int main(int argc, char *argv[]) { const char *path; int *buf; int fd, ret; pid_t child; if (argc < 2) { printf("usage: %s \n", argv[0]); return -1; } path = argv[1]; fd = open(path, O_RDWR | O_CREAT, 0666); if (fd < 0) { perror("open failed"); return -1; } ret = fallocate(fd, 0, 0, MSIZE); if (ret != 0) { perror("fallocate"); return -1; } buf = mmap(NULL, MSIZE, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0); if (buf == MAP_FAILED) { perror("mmap() failed"); return -1; } /* Allocate a page */ *buf = 1; child = fork(); if (child == 0) { /* child doesn't need to do anything */ exit(0); } /* Trigger CoW from owner */ *buf = 2; munmap(buf, MSIZE); close(fd); unlink(path); return 0; } It can only reproduce with a sub-mount when there're reserved pages on the spool, like: # sysctl vm.nr_hugepages=128 # mkdir ./hugetlb-pool # mount -t hugetlbfs -o min_size=8M,pagesize=2M none ./hugetlb-pool Then run the reproducer on the mountpoint: # ./reproducer ./hugetlb-pool/test Fix it by taking the reservation from spool if available. In general, avoid_reserve is IMHO more about "avoid vma resv map", not spool's. I copied stable, however I have no intention for backporting if it's not a clean cherry-pick, because private hugetlb mapping, and then fork() on top is too rare to hit. 
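A note on the reproducer quoted above: its #include directives and the placeholder in the usage string did not survive formatting. A restored, self-contained copy follows; the header list is an assumption (whatever provides open(2), fallocate(2), mmap(2) and fork(2) on the test system), the logic is unchanged:

#define _GNU_SOURCE		/* for fallocate() */
#include <stdio.h>		/* assumed header list: printf(), perror() */
#include <stdlib.h>		/* exit() */
#include <unistd.h>		/* fork(), close(), unlink() */
#include <fcntl.h>		/* open(), fallocate() */
#include <sys/types.h>
#include <sys/mman.h>		/* mmap(), munmap() */

#define MSIZE (2UL << 20)

int main(int argc, char *argv[])
{
	const char *path;
	int *buf;
	int fd, ret;
	pid_t child;

	if (argc < 2) {
		/* "<hugetlbfs file>" is an assumed placeholder */
		printf("usage: %s <hugetlbfs file>\n", argv[0]);
		return -1;
	}

	path = argv[1];
	fd = open(path, O_RDWR | O_CREAT, 0666);
	if (fd < 0) {
		perror("open failed");
		return -1;
	}

	ret = fallocate(fd, 0, 0, MSIZE);
	if (ret != 0) {
		perror("fallocate");
		return -1;
	}

	buf = mmap(NULL, MSIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (buf == MAP_FAILED) {
		perror("mmap() failed");
		return -1;
	}

	/* Allocate a page */
	*buf = 1;

	child = fork();
	if (child == 0) {
		/* child doesn't need to do anything */
		exit(0);
	}

	/* Trigger CoW from owner */
	*buf = 2;

	munmap(buf, MSIZE);
	close(fd);
	unlink(path);
	return 0;
}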
Link: https://lkml.kernel.org/r/20250107204002.2683356-1-peterx@redhat.com Link: https://lkml.kernel.org/r/20250107204002.2683356-2-peterx@redhat.com Fixes: d85f69b0b533 ("mm/hugetlb: alloc_huge_page handle areas hole punched by fallocate") Signed-off-by: Peter Xu Reviewed-by: Ackerley Tng Tested-by: Ackerley Tng Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Oscar Salvador Cc: Rik van Riel Cc: Roman Gushchin Cc: Signed-off-by: Andrew Morton --- mm/hugetlb.c | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 312ed27b9721..a10d376cb1a8 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1398,8 +1398,7 @@ static unsigned long available_huge_pages(struct hstate *h) static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, - unsigned long address, int avoid_reserve, - long chg) + unsigned long address, long chg) { struct folio *folio = NULL; struct mempolicy *mpol; @@ -1415,10 +1414,6 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, if (!vma_has_reserves(vma, chg) && !available_huge_pages(h)) goto err; - /* If reserves cannot be used, ensure enough pages are in the pool */ - if (avoid_reserve && !available_huge_pages(h)) - goto err; - gfp_mask = htlb_alloc_mask(h); nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask); @@ -1434,7 +1429,7 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask); - if (folio && !avoid_reserve && vma_has_reserves(vma, chg)) { + if (folio && vma_has_reserves(vma, chg)) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } @@ -3051,17 +3046,6 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, gbl_chg = hugepage_subpool_get_pages(spool, 1); if (gbl_chg < 0) goto out_end_reservation; - - /* - * Even though there was no reservation in the region/reserve - * map, there could be reservations associated with the - * subpool that can be used. This would be indicated if the - * return value of hugepage_subpool_get_pages() is zero. - * However, if avoid_reserve is specified we still avoid even - * the subpool reservations. - */ - if (avoid_reserve) - gbl_chg = 1; } /* If this allocation is not consuming a reservation, charge it now. @@ -3084,7 +3068,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, * from the global free pool (global change). gbl_chg == 0 indicates * a reservation exists for the allocation. */ - folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg); + folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg); if (!folio) { spin_unlock_irq(&hugetlb_lock); folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr); From 077907025766ea0296fda22c6e082b13955bfde6 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:39:57 -0500 Subject: [PATCH 310/504] mm/hugetlb: stop using avoid_reserve flag in fork() When fork() and stumble on top of a dma-pinned hugetlb private page, CoW must happen during fork() to guarantee dma coherency. In this specific path, hugetlb pages need to be allocated for the child process. Stop using avoid_reserve=1 flag here: it's not required to be used here, as dest_vma (which is destined to be a MAP_PRIVATE hugetlb vma) will have no private vma resv map, and that will make sure it won't be able to use a vma reservation later. No functional change intended with this change. 
Said that, it's still wanted to do this, so as to reduce the usage of avoid_reserve to the only one user, which is also why this flag was introduced initially in commit 04f2cbe35699 ("hugetlb: guarantee that COW faults for a process that called mmap(MAP_PRIVATE) on hugetlbfs will succeed"). I don't see whoever else should set it at all. Further patch will clean up resv accounting based on this. Link: https://lkml.kernel.org/r/20250107204002.2683356-3-peterx@redhat.com Signed-off-by: Peter Xu Cc: Ackerley Tng Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Oscar Salvador Cc: Rik van Riel Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/hugetlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a10d376cb1a8..7df19b5f956c 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5373,7 +5373,7 @@ again: spin_unlock(src_ptl); spin_unlock(dst_ptl); /* Do not use reserve as it's private owned */ - new_folio = alloc_hugetlb_folio(dst_vma, addr, 1); + new_folio = alloc_hugetlb_folio(dst_vma, addr, 0); if (IS_ERR(new_folio)) { folio_put(pte_folio); ret = PTR_ERR(new_folio); From 0f020e2c6d9e8ac4a504bef11368f6cc021d75f2 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:39:58 -0500 Subject: [PATCH 311/504] mm/hugetlb: rename avoid_reserve to cow_from_owner The old name "avoid_reserve" can be too generic and can be used wrongly in the new call sites that want to allocate a hugetlb folio. It's confusing on two things: (1) whether one can opt-in to avoid global reservation, and (2) whether it should take more than one count. In reality, this flag is only used in an extremely hacky path, in an extremely hacky way in hugetlb CoW path only, and always use with 1 saying "skip global reservation". Rename the flag to avoid future abuse of this flag, making it a boolean so as to reflect its true representation that it's not a counter. To make it even harder to abuse, add a comment above the function to explain it. Link: https://lkml.kernel.org/r/20250107204002.2683356-4-peterx@redhat.com Signed-off-by: Peter Xu Cc: Ackerley Tng Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Oscar Salvador Cc: Rik van Riel Cc: Roman Gushchin Signed-off-by: Andrew Morton --- fs/hugetlbfs/inode.c | 2 +- include/linux/hugetlb.h | 4 ++-- mm/hugetlb.c | 33 ++++++++++++++++++++------------- 3 files changed, 23 insertions(+), 16 deletions(-) diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 62fb0cbc93ab..0fc179a59830 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -814,7 +814,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, * folios in these areas, we need to consume the reserves * to keep reservation accounting consistent. 
*/ - folio = alloc_hugetlb_folio(&pseudo_vma, addr, 0); + folio = alloc_hugetlb_folio(&pseudo_vma, addr, false); if (IS_ERR(folio)) { mutex_unlock(&hugetlb_fault_mutex_table[hash]); error = PTR_ERR(folio); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 10faf42ca96a..49ec2362ce92 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -683,7 +683,7 @@ struct huge_bootmem_page { int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn); struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, - unsigned long addr, int avoid_reserve); + unsigned long addr, bool cow_from_owner); struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback); @@ -1068,7 +1068,7 @@ static inline int replace_free_hugepage_folios(unsigned long start_pfn, static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, unsigned long addr, - int avoid_reserve) + bool cow_from_owner) { return NULL; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 7df19b5f956c..e59b734b2c95 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3012,8 +3012,15 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn) return ret; } +/* + * NOTE! "cow_from_owner" represents a very hacky usage only used in CoW + * faults of hugetlb private mappings on top of a non-page-cache folio (in + * which case even if there's a private vma resv map it won't cover such + * allocation). New call sites should (probably) never set it to true!! + * When it's set, the allocation will bypass all vma level reservations. + */ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, - unsigned long addr, int avoid_reserve) + unsigned long addr, bool cow_from_owner) { struct hugepage_subpool *spool = subpool_vma(vma); struct hstate *h = hstate_vma(vma); @@ -3042,7 +3049,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, * Allocations for MAP_NORESERVE mappings also need to be * checked against any subpool limit. */ - if (map_chg || avoid_reserve) { + if (map_chg || cow_from_owner) { gbl_chg = hugepage_subpool_get_pages(spool, 1); if (gbl_chg < 0) goto out_end_reservation; @@ -3050,7 +3057,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, /* If this allocation is not consuming a reservation, charge it now. 
*/ - deferred_reserve = map_chg || avoid_reserve; + deferred_reserve = map_chg || cow_from_owner; if (deferred_reserve) { ret = hugetlb_cgroup_charge_cgroup_rsvd( idx, pages_per_huge_page(h), &h_cg); @@ -3075,7 +3082,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, if (!folio) goto out_uncharge_cgroup; spin_lock_irq(&hugetlb_lock); - if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) { + if (!cow_from_owner && vma_has_reserves(vma, gbl_chg)) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } @@ -3142,7 +3149,7 @@ out_uncharge_cgroup_reservation: hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), h_cg); out_subpool_put: - if (map_chg || avoid_reserve) + if (map_chg || cow_from_owner) hugepage_subpool_put_pages(spool, 1); out_end_reservation: vma_end_reservation(h, vma, addr); @@ -5373,7 +5380,7 @@ again: spin_unlock(src_ptl); spin_unlock(dst_ptl); /* Do not use reserve as it's private owned */ - new_folio = alloc_hugetlb_folio(dst_vma, addr, 0); + new_folio = alloc_hugetlb_folio(dst_vma, addr, false); if (IS_ERR(new_folio)) { folio_put(pte_folio); ret = PTR_ERR(new_folio); @@ -5839,7 +5846,7 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio, struct hstate *h = hstate_vma(vma); struct folio *old_folio; struct folio *new_folio; - int outside_reserve = 0; + bool cow_from_owner = 0; vm_fault_t ret = 0; struct mmu_notifier_range range; @@ -5902,7 +5909,7 @@ retry_avoidcopy: */ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && old_folio != pagecache_folio) - outside_reserve = 1; + cow_from_owner = true; folio_get(old_folio); @@ -5911,7 +5918,7 @@ retry_avoidcopy: * be acquired again before returning to the caller, as expected. */ spin_unlock(vmf->ptl); - new_folio = alloc_hugetlb_folio(vma, vmf->address, outside_reserve); + new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner); if (IS_ERR(new_folio)) { /* @@ -5921,7 +5928,7 @@ retry_avoidcopy: * reliability, unmap the page from child processes. The child * may get SIGKILLed if it later faults. */ - if (outside_reserve) { + if (cow_from_owner) { struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx; u32 hash; @@ -6172,7 +6179,7 @@ static vm_fault_t hugetlb_no_page(struct address_space *mapping, goto out; } - folio = alloc_hugetlb_folio(vma, vmf->address, 0); + folio = alloc_hugetlb_folio(vma, vmf->address, false); if (IS_ERR(folio)) { /* * Returning error will result in faulting task being @@ -6638,7 +6645,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, goto out; } - folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); + folio = alloc_hugetlb_folio(dst_vma, dst_addr, false); if (IS_ERR(folio)) { ret = -ENOMEM; goto out; @@ -6680,7 +6687,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, goto out; } - folio = alloc_hugetlb_folio(dst_vma, dst_addr, 0); + folio = alloc_hugetlb_folio(dst_vma, dst_addr, false); if (IS_ERR(folio)) { folio_put(*foliop); ret = -ENOMEM; From b4ae0c66c798e7a4e6a42003423b7b13399c2f1c Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:39:59 -0500 Subject: [PATCH 312/504] mm/hugetlb: clean up map/global resv accounting when allocate alloc_hugetlb_folio() isn't a function easy to read, especially on reservation accountings for either VMA or globally (majorly, spool only). The 1st complexity lies in the special private CoW path, aka, cow_from_owner=true case. The 2nd complexity may be the confusing updates of gbl_chg after it's set once, which looks like they can change anytime on the fly. 
Logically, cow_from_user is only about vma reservation. We could already decouple the flag and consolidate it into map charge flag very early. Then we don't need to keep checking the CoW special flag every time. This patch does it by making map_chg a tri-state flag. Tri-state needed is unfortunate, and it's because currently vma_needs_reservation() has a side effect internally, that it must be followed by either a end() or commit(). We keep the same semantic as before on one thing: "if (map_chg)" means we need a separate per-vma resv count. It keeps most of the old code like before untouched with the new enum. After this patch, we take these steps to decide these variables, hopefully slightly easier to follow: - First, decide map_chg. This will take cow_from_owner into account, once and for all. It's about whether we could take a resv count from the vma, no matter it's shared, private, etc. - Then, decide gbl_chg. The only diff here is spool, comparing to map_chg. Now only update each flag once and for all, instead of keep any of them flipping which can be very hard to follow. With cow_from_owner merged into map_chg, we could remove quite a few such checks all over. Side benefit of such is that we can get rid of one more confusing flag, which is deferred_reserve. Cleanup the comments a bit too. E.g., MAP_NORESERVE may not need to check against spool limit, AFAIU, if it's on a shared mapping, and if the page cache folio has its inode's resv map available (in which case map_chg would have been set zero, hence the code should be correct, not the comment). There's one trivial detail that needs attention that this patch touched, which is this check right after vma_commit_reservation(): if (map_chg > map_commit) It changes to: if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) It should behave the same like before, because previously the only way to make "map_chg > map_commit" happen is map_chg=1 && map_commit=0. That's exactly the rewritten line. Meanwhile, either commit() or end() will need to be skipped if ENFORCE, to keep the old behavior. Even though it looks a lot changed, but no functional change expected. Link: https://lkml.kernel.org/r/20250107204002.2683356-5-peterx@redhat.com Signed-off-by: Peter Xu Cc: Ackerley Tng Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Oscar Salvador Cc: Rik van Riel Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/hugetlb.c | 110 +++++++++++++++++++++++++++++++++++---------------- 1 file changed, 77 insertions(+), 33 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e59b734b2c95..bac6ee9ede4d 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3012,6 +3012,25 @@ int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn) return ret; } +typedef enum { + /* + * For either 0/1: we checked the per-vma resv map, and one resv + * count either can be reused (0), or an extra needed (1). + */ + MAP_CHG_REUSE = 0, + MAP_CHG_NEEDED = 1, + /* + * Cannot use per-vma resv count can be used, hence a new resv + * count is enforced. + * + * NOTE: This is mostly identical to MAP_CHG_NEEDED, except + * that currently vma_needs_reservation() has an unwanted side + * effect to either use end() or commit() to complete the + * transaction. Hence it needs to differenciate from NEEDED. + */ + MAP_CHG_ENFORCED = 2, +} map_chg_state; + /* * NOTE! 
"cow_from_owner" represents a very hacky usage only used in CoW * faults of hugetlb private mappings on top of a non-page-cache folio (in @@ -3025,40 +3044,59 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, struct hugepage_subpool *spool = subpool_vma(vma); struct hstate *h = hstate_vma(vma); struct folio *folio; - long map_chg, map_commit; - long gbl_chg; + long retval, gbl_chg; + map_chg_state map_chg; int ret, idx; struct hugetlb_cgroup *h_cg = NULL; - bool deferred_reserve; gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL; idx = hstate_index(h); - /* - * Examine the region/reserve map to determine if the process - * has a reservation for the page to be allocated. A return - * code of zero indicates a reservation exists (no change). - */ - map_chg = gbl_chg = vma_needs_reservation(h, vma, addr); - if (map_chg < 0) - return ERR_PTR(-ENOMEM); + + /* Whether we need a separate per-vma reservation? */ + if (cow_from_owner) { + /* + * Special case! Since it's a CoW on top of a reserved + * page, the private resv map doesn't count. So it cannot + * consume the per-vma resv map even if it's reserved. + */ + map_chg = MAP_CHG_ENFORCED; + } else { + /* + * Examine the region/reserve map to determine if the process + * has a reservation for the page to be allocated. A return + * code of zero indicates a reservation exists (no change). + */ + retval = vma_needs_reservation(h, vma, addr); + if (retval < 0) + return ERR_PTR(-ENOMEM); + map_chg = retval ? MAP_CHG_NEEDED : MAP_CHG_REUSE; + } /* + * Whether we need a separate global reservation? + * * Processes that did not create the mapping will have no * reserves as indicated by the region/reserve map. Check * that the allocation will not exceed the subpool limit. - * Allocations for MAP_NORESERVE mappings also need to be - * checked against any subpool limit. + * Or if it can get one from the pool reservation directly. */ - if (map_chg || cow_from_owner) { + if (map_chg) { gbl_chg = hugepage_subpool_get_pages(spool, 1); if (gbl_chg < 0) goto out_end_reservation; + } else { + /* + * If we have the vma reservation ready, no need for extra + * global reservation. + */ + gbl_chg = 0; } - /* If this allocation is not consuming a reservation, charge it now. + /* + * If this allocation is not consuming a per-vma reservation, + * charge the hugetlb cgroup now. */ - deferred_reserve = map_chg || cow_from_owner; - if (deferred_reserve) { + if (map_chg) { ret = hugetlb_cgroup_charge_cgroup_rsvd( idx, pages_per_huge_page(h), &h_cg); if (ret) @@ -3082,7 +3120,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, if (!folio) goto out_uncharge_cgroup; spin_lock_irq(&hugetlb_lock); - if (!cow_from_owner && vma_has_reserves(vma, gbl_chg)) { + if (vma_has_reserves(vma, gbl_chg)) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } @@ -3095,7 +3133,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, /* If allocation is not consuming a reservation, also store the * hugetlb_cgroup pointer on the page. 
*/ - if (deferred_reserve) { + if (map_chg) { hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h), h_cg, folio); } @@ -3104,26 +3142,31 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, hugetlb_set_folio_subpool(folio, spool); - map_commit = vma_commit_reservation(h, vma, addr); - if (unlikely(map_chg > map_commit)) { + if (map_chg != MAP_CHG_ENFORCED) { + /* commit() is only needed if the map_chg is not enforced */ + retval = vma_commit_reservation(h, vma, addr); /* + * Check for possible race conditions. When it happens.. * The page was added to the reservation map between * vma_needs_reservation and vma_commit_reservation. * This indicates a race with hugetlb_reserve_pages. * Adjust for the subpool count incremented above AND - * in hugetlb_reserve_pages for the same page. Also, + * in hugetlb_reserve_pages for the same page. Also, * the reservation count added in hugetlb_reserve_pages * no longer applies. */ - long rsv_adjust; + if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) { + long rsv_adjust; - rsv_adjust = hugepage_subpool_put_pages(spool, 1); - hugetlb_acct_memory(h, -rsv_adjust); - if (deferred_reserve) { - spin_lock_irq(&hugetlb_lock); - hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h), - pages_per_huge_page(h), folio); - spin_unlock_irq(&hugetlb_lock); + rsv_adjust = hugepage_subpool_put_pages(spool, 1); + hugetlb_acct_memory(h, -rsv_adjust); + if (map_chg) { + spin_lock_irq(&hugetlb_lock); + hugetlb_cgroup_uncharge_folio_rsvd( + hstate_index(h), pages_per_huge_page(h), + folio); + spin_unlock_irq(&hugetlb_lock); + } } } @@ -3145,14 +3188,15 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, out_uncharge_cgroup: hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg); out_uncharge_cgroup_reservation: - if (deferred_reserve) + if (map_chg) hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h), h_cg); out_subpool_put: - if (map_chg || cow_from_owner) + if (map_chg) hugepage_subpool_put_pages(spool, 1); out_end_reservation: - vma_end_reservation(h, vma, addr); + if (map_chg != MAP_CHG_ENFORCED) + vma_end_reservation(h, vma, addr); return ERR_PTR(-ENOSPC); } From 94b47d6ad78ab4aac2df657bf42424fa3a47450c Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:40:00 -0500 Subject: [PATCH 313/504] mm/hugetlb: simplify vma_has_reserves() vma_has_reserves() is a helper "trying" to know whether the vma should consume one reservation when allocating the hugetlb folio. However it's not clear on why we need such complexity, as such information is already represented in the "chg" variable. From alloc_hugetlb_folio() context, "chg" (or in the function's context, "gbl_chg") is defined as: - If gbl_chg=1, the allocation cannot reuse an existing reservation - If gbl_chg=0, the allocation should reuse an existing reservation Firstly, map_chg is defined as following, to cover all cases of hugetlb reservation scenarios (mostly, via vma_needs_reservation(), but cow_from_owner is an outlier): CONDITION HAS RESERVATION? ========= ================ - SHARED: always check against per-inode resv_map (ignore NONRESERVE) - If resv exists ==> YES [1] - If not ==> NO [2] - PRIVATE: complicated... - Request came from a CoW from owner resv map ==> NO [3] (when cow_from_owner==true) - If does not own a resv_map at all.. 
==> NO [4] (examples: VM_NORESERVE, private fork()) - If owns a resv_map, but resv donsn't exists ==> NO [5] - If owns a resv_map, and resv exists ==> YES [6] Further on, gbl_chg considered spool setup, so that is a decision based on all the context. If we look at vma_has_reserves(), it almost does check that has already been processed by map_chg accounting (I marked each return value to the case above): static bool vma_has_reserves(struct vm_area_struct *vma, long chg) { if (vma->vm_flags & VM_NORESERVE) { if (vma->vm_flags & VM_MAYSHARE && chg == 0) return true; ==> [1] else return false; ==> [2] or [4] } if (vma->vm_flags & VM_MAYSHARE) { if (chg) return false; ==> [2] else return true; ==> [1] } if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { if (chg) return false; ==> [5] else return true; ==> [6] } return false; ==> [4] } It didn't check [3], but [3] case was actually already covered now by the "chg" / "gbl_chg" / "map_chg" calculations. In short, vma_has_reserves() doesn't provide anything more than return "!chg".. so just simplify all the things. There're a lot of comments describing truncation races, IIUC there should have no race as long as map_chg is properly done. Link: https://lkml.kernel.org/r/20250107204002.2683356-6-peterx@redhat.com Signed-off-by: Peter Xu Cc: Ackerley Tng Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Oscar Salvador Cc: Rik van Riel Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/hugetlb.c | 67 ++++++---------------------------------------------- 1 file changed, 7 insertions(+), 60 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index bac6ee9ede4d..efd8e9f9bf0e 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1248,66 +1248,13 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma) } /* Returns true if the VMA has associated reserve pages */ -static bool vma_has_reserves(struct vm_area_struct *vma, long chg) +static bool vma_has_reserves(long chg) { - if (vma->vm_flags & VM_NORESERVE) { - /* - * This address is already reserved by other process(chg == 0), - * so, we should decrement reserved count. Without decrementing, - * reserve count remains after releasing inode, because this - * allocated page will go into page cache and is regarded as - * coming from reserved pool in releasing step. Currently, we - * don't have any other solution to deal with this situation - * properly, so add work-around here. - */ - if (vma->vm_flags & VM_MAYSHARE && chg == 0) - return true; - else - return false; - } - - /* Shared mappings always use reserves */ - if (vma->vm_flags & VM_MAYSHARE) { - /* - * We know VM_NORESERVE is not set. Therefore, there SHOULD - * be a region map for all pages. The only situation where - * there is no region map is if a hole was punched via - * fallocate. In this case, there really are no reserves to - * use. This situation is indicated if chg != 0. - */ - if (chg) - return false; - else - return true; - } - /* - * Only the process that called mmap() has reserves for - * private mappings. + * Now "chg" has all the conditions considered for whether we + * should use an existing reservation. */ - if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) { - /* - * Like the shared case above, a hole punch or truncate - * could have been performed on the private mapping. - * Examine the value of chg to determine if reserves - * actually exist or were previously consumed. - * Very Subtle - The value of chg comes from a previous - * call to vma_needs_reserves(). 
The reserve map for - * private mappings has different (opposite) semantics - * than that of shared mappings. vma_needs_reserves() - * has already taken this difference in semantics into - * account. Therefore, the meaning of chg is the same - * as in the shared case above. Code could easily be - * combined, but keeping it separate draws attention to - * subtle differences. - */ - if (chg) - return false; - else - return true; - } - - return false; + return chg == 0; } static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio) @@ -1411,7 +1358,7 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, * have no page reserves. This check ensures that reservations are * not "stolen". The child may still get SIGKILLed */ - if (!vma_has_reserves(vma, chg) && !available_huge_pages(h)) + if (!vma_has_reserves(chg) && !available_huge_pages(h)) goto err; gfp_mask = htlb_alloc_mask(h); @@ -1429,7 +1376,7 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask); - if (folio && vma_has_reserves(vma, chg)) { + if (folio && vma_has_reserves(chg)) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } @@ -3120,7 +3067,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, if (!folio) goto out_uncharge_cgroup; spin_lock_irq(&hugetlb_lock); - if (vma_has_reserves(vma, gbl_chg)) { + if (vma_has_reserves(gbl_chg)) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } From 92f8931502567821cd0f6a92a6ff35be914ea388 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:40:01 -0500 Subject: [PATCH 314/504] mm/hugetlb: drop vma_has_reserves() After the previous cleanup, vma_has_reserves() is mostly an empty helper except that it says "use reserve count" is inverted meaning from "needs a global reserve count", which is still true. To avoid confusions on having two inverted ways to ask the same question, always use the gbl_chg everywhere, and drop the function. When at it, rename "chg" to "gbl_chg" in dequeue_hugetlb_folio_vma(). It might be helpful for readers to see that the "chg" here is the global reserve count, not the vma resv count. Link: https://lkml.kernel.org/r/20250107204002.2683356-7-peterx@redhat.com Signed-off-by: Peter Xu Cc: Ackerley Tng Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Oscar Salvador Cc: Rik van Riel Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/hugetlb.c | 23 ++++++----------------- 1 file changed, 6 insertions(+), 17 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index efd8e9f9bf0e..8e46798a9dfc 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1247,16 +1247,6 @@ void clear_vma_resv_huge_pages(struct vm_area_struct *vma) hugetlb_dup_vma_private(vma); } -/* Returns true if the VMA has associated reserve pages */ -static bool vma_has_reserves(long chg) -{ - /* - * Now "chg" has all the conditions considered for whether we - * should use an existing reservation. 
- */ - return chg == 0; -} - static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio) { int nid = folio_nid(folio); @@ -1345,7 +1335,7 @@ static unsigned long available_huge_pages(struct hstate *h) static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma, - unsigned long address, long chg) + unsigned long address, long gbl_chg) { struct folio *folio = NULL; struct mempolicy *mpol; @@ -1354,11 +1344,10 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, int nid; /* - * A child process with MAP_PRIVATE mappings created by their parent - * have no page reserves. This check ensures that reservations are - * not "stolen". The child may still get SIGKILLed + * gbl_chg==1 means the allocation requires a new page that was not + * reserved before. Making sure there's at least one free page. */ - if (!vma_has_reserves(chg) && !available_huge_pages(h)) + if (gbl_chg && !available_huge_pages(h)) goto err; gfp_mask = htlb_alloc_mask(h); @@ -1376,7 +1365,7 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask); - if (folio && vma_has_reserves(chg)) { + if (folio && !gbl_chg) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } @@ -3067,7 +3056,7 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, if (!folio) goto out_uncharge_cgroup; spin_lock_irq(&hugetlb_lock); - if (vma_has_reserves(gbl_chg)) { + if (!gbl_chg) { folio_set_hugetlb_restore_reserve(folio); h->resv_huge_pages--; } From a18c096265af3da952c362ee633359593379dea0 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Tue, 7 Jan 2025 15:40:02 -0500 Subject: [PATCH 315/504] mm/hugetlb: unify restore reserve accounting for new allocations Either hugetlb pages dequeued from hstate, or newly allocated from buddy, would require restore-reserve accounting to be managed properly. Merge the two paths on it. Add a small comment to make it slightly nicer. Link: https://lkml.kernel.org/r/20250107204002.2683356-8-peterx@redhat.com Signed-off-by: Peter Xu Cc: Ackerley Tng Cc: Breno Leitao Cc: Muchun Song Cc: Naoya Horiguchi Cc: Oscar Salvador Cc: Rik van Riel Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/hugetlb.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 8e46798a9dfc..58c2c5498207 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1365,11 +1365,6 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h, folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, nid, nodemask); - if (folio && !gbl_chg) { - folio_set_hugetlb_restore_reserve(folio); - h->resv_huge_pages--; - } - mpol_cond_put(mpol); return folio; @@ -3056,15 +3051,20 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, if (!folio) goto out_uncharge_cgroup; spin_lock_irq(&hugetlb_lock); - if (!gbl_chg) { - folio_set_hugetlb_restore_reserve(folio); - h->resv_huge_pages--; - } list_add(&folio->lru, &h->hugepage_activelist); folio_ref_unfreeze(folio, 1); /* Fall through */ } + /* + * Either dequeued or buddy-allocated folio needs to add special + * mark to the folio when it consumes a global reservation. + */ + if (!gbl_chg) { + folio_set_hugetlb_restore_reserve(folio); + h->resv_huge_pages--; + } + hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); /* If allocation is not consuming a reservation, also store the * hugetlb_cgroup pointer on the page. 
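Taken together, the hugetlb patches above leave gbl_chg as the single reservation signal in the allocation path. Below is a condensed sketch of the resulting logic, not literal mm/hugetlb.c code: the two wrapper names are invented for illustration, the real dequeue path picks nid/nodemask via mempolicy, and locking, surplus and cgroup handling are omitted. The helpers it calls (available_huge_pages(), htlb_alloc_mask(), dequeue_hugetlb_folio_nodemask(), folio_set_hugetlb_restore_reserve()) are the existing ones visible in the diffs.

/* Sketch only: gbl_chg == 0 means "consume an existing global reservation". */
static struct folio *dequeue_sketch(struct hstate *h,
		struct vm_area_struct *vma, unsigned long address,
		long gbl_chg)
{
	/*
	 * gbl_chg != 0: the request needs a page that was never reserved,
	 * so give up early when the free pool is empty.
	 */
	if (gbl_chg && !available_huge_pages(h))
		return NULL;

	return dequeue_hugetlb_folio_nodemask(h, htlb_alloc_mask(h),
					      numa_node_id(), NULL);
}

static void consume_reservation_sketch(struct hstate *h, struct folio *folio,
				       long gbl_chg)
{
	/* Runs once, for both dequeued and buddy-allocated folios. */
	if (!gbl_chg) {
		folio_set_hugetlb_restore_reserve(folio);
		h->resv_huge_pages--;
	}
}
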
From dfc7b1e2ad87c459dadb1b5880d5675cc9683637 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Tue, 7 Jan 2025 14:47:53 +0000 Subject: [PATCH 316/504] selftests/mm: introduce uffd-wp-mremap regression test Introduce a test that registers a range of memory for UFFDIO_WRITEPROTECT_MODE_WP without UFFD_FEATURE_EVENT_REMAP. First check that the uffd-wp bit is set for every PTE in the range. Then mremap() the range to a new location and check that the uffd-wp bit is clear for every PTE in the range. Run the test for small folios, all supported THP sizes and all supported hugetlb sizes, and for swapped out memory, shared and private. There was previously a bug in the kernel where the uffd-wp bits remained set in all PTEs for this case, after fixing the kernel, the tests all pass. Link: https://lkml.kernel.org/r/20250107144755.1871363-3-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Cc: David Hildenbrand Cc: Jann Horn Cc: Liam R. Howlett Cc: Lorenzo Stoakes Cc: Mark Rutland Cc: Muchun Song Cc: Peter Xu Cc: Shuah Khan Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/.gitignore | 1 + tools/testing/selftests/mm/Makefile | 2 + tools/testing/selftests/mm/run_vmtests.sh | 1 + tools/testing/selftests/mm/uffd-wp-mremap.c | 380 ++++++++++++++++++++ 4 files changed, 384 insertions(+) create mode 100644 tools/testing/selftests/mm/uffd-wp-mremap.c diff --git a/tools/testing/selftests/mm/.gitignore b/tools/testing/selftests/mm/.gitignore index a51a947b2d1d..121000c28c10 100644 --- a/tools/testing/selftests/mm/.gitignore +++ b/tools/testing/selftests/mm/.gitignore @@ -27,6 +27,7 @@ protection_keys_64 madv_populate uffd-stress uffd-unit-tests +uffd-wp-mremap mlock-intersect-test mlock-random-test virtual_address_range diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index f430c4303c0d..63ce39d024bb 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -88,6 +88,7 @@ TEST_GEN_FILES += thuge-gen TEST_GEN_FILES += transhuge-stress TEST_GEN_FILES += uffd-stress TEST_GEN_FILES += uffd-unit-tests +TEST_GEN_FILES += uffd-wp-mremap TEST_GEN_FILES += split_huge_page_test TEST_GEN_FILES += ksm_tests TEST_GEN_FILES += ksm_functional_tests @@ -158,6 +159,7 @@ $(TEST_GEN_FILES): vm_util.c thp_settings.c $(OUTPUT)/uffd-stress: uffd-common.c $(OUTPUT)/uffd-unit-tests: uffd-common.c +$(OUTPUT)/uffd-wp-mremap: uffd-common.c $(OUTPUT)/protection_keys: pkey_util.c $(OUTPUT)/pkey_sighandler_tests: pkey_util.c diff --git a/tools/testing/selftests/mm/run_vmtests.sh b/tools/testing/selftests/mm/run_vmtests.sh index 00c3f07ea100..333c468c2699 100755 --- a/tools/testing/selftests/mm/run_vmtests.sh +++ b/tools/testing/selftests/mm/run_vmtests.sh @@ -309,6 +309,7 @@ CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb "$half_ufd_size_MB" 3 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} hugetlb-private "$half_ufd_size_MB" 32 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem 20 16 CATEGORY="userfaultfd" run_test ${uffd_stress_bin} shmem-private 20 16 +CATEGORY="userfaultfd" run_test ./uffd-wp-mremap #cleanup echo "$nr_hugepgs" > /proc/sys/vm/nr_hugepages diff --git a/tools/testing/selftests/mm/uffd-wp-mremap.c b/tools/testing/selftests/mm/uffd-wp-mremap.c new file mode 100644 index 000000000000..2c4f984bd73c --- /dev/null +++ b/tools/testing/selftests/mm/uffd-wp-mremap.c @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#define _GNU_SOURCE +#include +#include +#include +#include +#include +#include 
+#include "../kselftest.h" +#include "thp_settings.h" +#include "uffd-common.h" + +static int pagemap_fd; +static size_t pagesize; +static int nr_pagesizes = 1; +static int nr_thpsizes; +static size_t thpsizes[20]; +static int nr_hugetlbsizes; +static size_t hugetlbsizes[10]; + +static int sz2ord(size_t size) +{ + return __builtin_ctzll(size / pagesize); +} + +static int detect_thp_sizes(size_t sizes[], int max) +{ + int count = 0; + unsigned long orders; + size_t kb; + int i; + + /* thp not supported at all. */ + if (!read_pmd_pagesize()) + return 0; + + orders = thp_supported_orders(); + + for (i = 0; orders && count < max; i++) { + if (!(orders & (1UL << i))) + continue; + orders &= ~(1UL << i); + kb = (pagesize >> 10) << i; + sizes[count++] = kb * 1024; + ksft_print_msg("[INFO] detected THP size: %zu KiB\n", kb); + } + + return count; +} + +static void *mmap_aligned(size_t size, int prot, int flags) +{ + size_t mmap_size = size * 2; + char *mmap_mem, *mem; + + mmap_mem = mmap(NULL, mmap_size, prot, flags, -1, 0); + if (mmap_mem == MAP_FAILED) + return mmap_mem; + + mem = (char *)(((uintptr_t)mmap_mem + size - 1) & ~(size - 1)); + munmap(mmap_mem, mem - mmap_mem); + munmap(mem + size, mmap_mem + mmap_size - mem - size); + + return mem; +} + +static void *alloc_one_folio(size_t size, bool private, bool hugetlb) +{ + bool thp = !hugetlb && size > pagesize; + int flags = MAP_ANONYMOUS; + int prot = PROT_READ | PROT_WRITE; + char *mem, *addr; + + assert((size & (size - 1)) == 0); + + if (private) + flags |= MAP_PRIVATE; + else + flags |= MAP_SHARED; + + /* + * For THP, we must explicitly enable the THP size, allocate twice the + * required space then manually align. + */ + if (thp) { + struct thp_settings settings = *thp_current_settings(); + + if (private) + settings.hugepages[sz2ord(size)].enabled = THP_ALWAYS; + else + settings.shmem_hugepages[sz2ord(size)].enabled = SHMEM_ALWAYS; + + thp_push_settings(&settings); + + mem = mmap_aligned(size, prot, flags); + } else { + if (hugetlb) { + flags |= MAP_HUGETLB; + flags |= __builtin_ctzll(size) << MAP_HUGE_SHIFT; + } + + mem = mmap(NULL, size, prot, flags, -1, 0); + } + + if (mem == MAP_FAILED) { + mem = NULL; + goto out; + } + + assert(((uintptr_t)mem & (size - 1)) == 0); + + /* + * Populate the folio by writing the first byte and check that all pages + * are populated. Finally set the whole thing to non-zero data to avoid + * kernel from mapping it back to the zero page. + */ + mem[0] = 1; + for (addr = mem; addr < mem + size; addr += pagesize) { + if (!pagemap_is_populated(pagemap_fd, addr)) { + munmap(mem, size); + mem = NULL; + goto out; + } + } + memset(mem, 1, size); +out: + if (thp) + thp_pop_settings(); + + return mem; +} + +static bool check_uffd_wp_state(void *mem, size_t size, bool expect) +{ + uint64_t pte; + void *addr; + + for (addr = mem; addr < mem + size; addr += pagesize) { + pte = pagemap_get_entry(pagemap_fd, addr); + if (!!(pte & PM_UFFD_WP) != expect) { + ksft_test_result_fail("uffd-wp not %s for pte %lu!\n", + expect ? 
"set" : "clear", + (addr - mem) / pagesize); + return false; + } + } + + return true; +} + +static bool range_is_swapped(void *addr, size_t size) +{ + for (; size; addr += pagesize, size -= pagesize) + if (!pagemap_is_swapped(pagemap_fd, addr)) + return false; + return true; +} + +static void test_one_folio(size_t size, bool private, bool swapout, bool hugetlb) +{ + struct uffdio_writeprotect wp_prms; + uint64_t features = 0; + void *addr = NULL; + void *mem = NULL; + + assert(!(hugetlb && swapout)); + + ksft_print_msg("[RUN] %s(size=%zu, private=%s, swapout=%s, hugetlb=%s)\n", + __func__, + size, + private ? "true" : "false", + swapout ? "true" : "false", + hugetlb ? "true" : "false"); + + /* Allocate a folio of required size and type. */ + mem = alloc_one_folio(size, private, hugetlb); + if (!mem) { + ksft_test_result_fail("alloc_one_folio() failed\n"); + goto out; + } + + /* Register range for uffd-wp. */ + if (userfaultfd_open(&features)) { + ksft_test_result_fail("userfaultfd_open() failed\n"); + goto out; + } + if (uffd_register(uffd, mem, size, false, true, false)) { + ksft_test_result_fail("uffd_register() failed\n"); + goto out; + } + wp_prms.mode = UFFDIO_WRITEPROTECT_MODE_WP; + wp_prms.range.start = (uintptr_t)mem; + wp_prms.range.len = size; + if (ioctl(uffd, UFFDIO_WRITEPROTECT, &wp_prms)) { + ksft_test_result_fail("ioctl(UFFDIO_WRITEPROTECT) failed\n"); + goto out; + } + + if (swapout) { + madvise(mem, size, MADV_PAGEOUT); + if (!range_is_swapped(mem, size)) { + ksft_test_result_skip("MADV_PAGEOUT did not work, is swap enabled?\n"); + goto out; + } + } + + /* Check that uffd-wp is set for all PTEs in range. */ + if (!check_uffd_wp_state(mem, size, true)) + goto out; + + /* + * Move the mapping to a new, aligned location. Since + * UFFD_FEATURE_EVENT_REMAP is not set, we expect the uffd-wp bit for + * each PTE to be cleared in the new mapping. + */ + addr = mmap_aligned(size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS); + if (addr == MAP_FAILED) { + ksft_test_result_fail("mmap_aligned() failed\n"); + goto out; + } + if (mremap(mem, size, size, MREMAP_FIXED | MREMAP_MAYMOVE, addr) == MAP_FAILED) { + ksft_test_result_fail("mremap() failed\n"); + munmap(addr, size); + goto out; + } + mem = addr; + + /* Check that uffd-wp is cleared for all PTEs in range. */ + if (!check_uffd_wp_state(mem, size, false)) + goto out; + + ksft_test_result_pass("%s(size=%zu, private=%s, swapout=%s, hugetlb=%s)\n", + __func__, + size, + private ? "true" : "false", + swapout ? "true" : "false", + hugetlb ? "true" : "false"); +out: + if (mem) + munmap(mem, size); + if (uffd >= 0) { + close(uffd); + uffd = -1; + } +} + +struct testcase { + size_t *sizes; + int *nr_sizes; + bool private; + bool swapout; + bool hugetlb; +}; + +static const struct testcase testcases[] = { + /* base pages. */ + { + .sizes = &pagesize, + .nr_sizes = &nr_pagesizes, + .private = false, + .swapout = false, + .hugetlb = false, + }, + { + .sizes = &pagesize, + .nr_sizes = &nr_pagesizes, + .private = true, + .swapout = false, + .hugetlb = false, + }, + { + .sizes = &pagesize, + .nr_sizes = &nr_pagesizes, + .private = false, + .swapout = true, + .hugetlb = false, + }, + { + .sizes = &pagesize, + .nr_sizes = &nr_pagesizes, + .private = true, + .swapout = true, + .hugetlb = false, + }, + + /* thp. 
*/ + { + .sizes = thpsizes, + .nr_sizes = &nr_thpsizes, + .private = false, + .swapout = false, + .hugetlb = false, + }, + { + .sizes = thpsizes, + .nr_sizes = &nr_thpsizes, + .private = true, + .swapout = false, + .hugetlb = false, + }, + { + .sizes = thpsizes, + .nr_sizes = &nr_thpsizes, + .private = false, + .swapout = true, + .hugetlb = false, + }, + { + .sizes = thpsizes, + .nr_sizes = &nr_thpsizes, + .private = true, + .swapout = true, + .hugetlb = false, + }, + + /* hugetlb. */ + { + .sizes = hugetlbsizes, + .nr_sizes = &nr_hugetlbsizes, + .private = false, + .swapout = false, + .hugetlb = true, + }, + { + .sizes = hugetlbsizes, + .nr_sizes = &nr_hugetlbsizes, + .private = true, + .swapout = false, + .hugetlb = true, + }, +}; + +int main(int argc, char **argv) +{ + struct thp_settings settings; + int i, j, plan = 0; + + pagesize = getpagesize(); + nr_thpsizes = detect_thp_sizes(thpsizes, ARRAY_SIZE(thpsizes)); + nr_hugetlbsizes = detect_hugetlb_page_sizes(hugetlbsizes, + ARRAY_SIZE(hugetlbsizes)); + + /* If THP is supported, save THP settings and initially disable THP. */ + if (nr_thpsizes) { + thp_save_settings(); + thp_read_settings(&settings); + for (i = 0; i < NR_ORDERS; i++) { + settings.hugepages[i].enabled = THP_NEVER; + settings.shmem_hugepages[i].enabled = SHMEM_NEVER; + } + thp_push_settings(&settings); + } + + for (i = 0; i < ARRAY_SIZE(testcases); i++) + plan += *testcases[i].nr_sizes; + ksft_set_plan(plan); + + pagemap_fd = open("/proc/self/pagemap", O_RDONLY); + if (pagemap_fd < 0) + ksft_exit_fail_msg("opening pagemap failed\n"); + + for (i = 0; i < ARRAY_SIZE(testcases); i++) { + const struct testcase *tc = &testcases[i]; + + for (j = 0; j < *tc->nr_sizes; j++) + test_one_folio(tc->sizes[j], tc->private, tc->swapout, + tc->hugetlb); + } + + /* If THP is supported, restore original THP settings. */ + if (nr_thpsizes) + thp_restore_settings(); + + i = ksft_get_fail_cnt(); + if (i) + ksft_exit_fail_msg("%d out of %d tests failed\n", + i, ksft_test_num()); + ksft_exit_pass(); +} From 6fb4820f4c4cde7b0a4f4436187e6040354dd7d7 Mon Sep 17 00:00:00 2001 From: Luiz Capitulino Date: Mon, 23 Dec 2024 17:00:37 -0500 Subject: [PATCH 317/504] mm: alloc_pages_bulk_noprof: drop page_list argument Patch series "mm: alloc_pages_bulk: small API refactor", v2. Today, alloc_pages_bulk_noprof() supports two arguments to return allocated pages: a linked list and an array. There are also higher level APIs for both. However, the linked list API has apparently never been used. So, this series removes it along with the list API and also refactors the remaining API naming for consistency. This patch (of 2): commit 387ba26fb1cb ("mm/page_alloc: add a bulk page allocator") added __alloc_pages_bulk() along with the page_list argument. The next commit 0f87d9d30f21 ("mm/page_alloc: add an array-based interface to the bulk page allocator") added the array-based argument. As it turns out, the page_list argument has no users in the current tree (if it ever had any). Dropping it allows for a slight simplification and eliminates some unnecessary checks, now that page_array is required. Also, note that the removal of the page_list argument was proposed before in the thread below, where Matthew Wilcox mentions that: """ Iterating a linked list is _expensive_. It is about 10x quicker to iterate an array than a linked list. 
""" (https://lore.kernel.org/linux-mm/20231025093254.xvomlctwhcuerzky@techsingularity.net) Link: https://lkml.kernel.org/r/cover.1734991165.git.luizcap@redhat.com Link: https://lkml.kernel.org/r/f1c75db91d08cafd211eca6a3b199b629d4ffe16.1734991165.git.luizcap@redhat.com Signed-off-by: Luiz Capitulino Acked-by: David Hildenbrand Cc: Matthew Wilcox (Oracle) Cc: Mel Gorman Cc: Yunsheng Lin Signed-off-by: Andrew Morton --- include/linux/gfp.h | 8 ++------ mm/mempolicy.c | 14 +++++++------- mm/page_alloc.c | 39 ++++++++++++--------------------------- 3 files changed, 21 insertions(+), 40 deletions(-) diff --git a/include/linux/gfp.h b/include/linux/gfp.h index c96d5d7f7b89..f8b33c5e7a14 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -212,7 +212,6 @@ struct folio *__folio_alloc_noprof(gfp_t gfp, unsigned int order, int preferred_ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, nodemask_t *nodemask, int nr_pages, - struct list_head *page_list, struct page **page_array); #define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__)) @@ -223,11 +222,8 @@ unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp, alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__)) /* Bulk allocate order-0 pages */ -#define alloc_pages_bulk_list(_gfp, _nr_pages, _list) \ - __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _list, NULL) - #define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array) \ - __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, NULL, _page_array) + __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array) static inline unsigned long alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages, @@ -236,7 +232,7 @@ alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages, if (nid == NUMA_NO_NODE) nid = numa_mem_id(); - return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, NULL, page_array); + return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array); } #define alloc_pages_bulk_array_node(...) 
\ diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 305aa3012173..0da6cf950f7b 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2391,13 +2391,13 @@ static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp, if (delta) { nr_allocated = alloc_pages_bulk_noprof(gfp, interleave_nodes(pol), NULL, - nr_pages_per_node + 1, NULL, + nr_pages_per_node + 1, page_array); delta--; } else { nr_allocated = alloc_pages_bulk_noprof(gfp, interleave_nodes(pol), NULL, - nr_pages_per_node, NULL, page_array); + nr_pages_per_node, page_array); } page_array += nr_allocated; @@ -2446,7 +2446,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp, if (weight && node_isset(node, nodes)) { node_pages = min(rem_pages, weight); nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages, - NULL, page_array); + page_array); page_array += nr_allocated; total_allocated += nr_allocated; /* if that's all the pages, no need to interleave */ @@ -2509,7 +2509,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp, if (!node_pages) break; nr_allocated = __alloc_pages_bulk(gfp, node, NULL, node_pages, - NULL, page_array); + page_array); page_array += nr_allocated; total_allocated += nr_allocated; if (total_allocated == nr_pages) @@ -2533,11 +2533,11 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid, preferred_gfp &= ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL); nr_allocated = alloc_pages_bulk_noprof(preferred_gfp, nid, &pol->nodes, - nr_pages, NULL, page_array); + nr_pages, page_array); if (nr_allocated < nr_pages) nr_allocated += alloc_pages_bulk_noprof(gfp, numa_node_id(), NULL, - nr_pages - nr_allocated, NULL, + nr_pages - nr_allocated, page_array + nr_allocated); return nr_allocated; } @@ -2573,7 +2573,7 @@ unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp, nid = numa_node_id(); nodemask = policy_nodemask(gfp, pol, NO_INTERLEAVE_INDEX, &nid); return alloc_pages_bulk_noprof(gfp, nid, nodemask, - nr_pages, NULL, page_array); + nr_pages, page_array); } int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index aa70d0e73d6d..c9d5f2450a08 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4531,28 +4531,23 @@ static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, } /* - * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array + * __alloc_pages_bulk - Allocate a number of order-0 pages to an array * @gfp: GFP flags for the allocation * @preferred_nid: The preferred NUMA node ID to allocate from * @nodemask: Set of nodes to allocate from, may be NULL - * @nr_pages: The number of pages desired on the list or array - * @page_list: Optional list to store the allocated pages - * @page_array: Optional array to store the pages + * @nr_pages: The number of pages desired in the array + * @page_array: Array to store the pages * * This is a batched version of the page allocator that attempts to - * allocate nr_pages quickly. Pages are added to page_list if page_list - * is not NULL, otherwise it is assumed that the page_array is valid. + * allocate nr_pages quickly. Pages are added to the page_array. * - * For lists, nr_pages is the number of pages that should be allocated. - * - * For arrays, only NULL elements are populated with pages and nr_pages + * Note that only NULL elements are populated with pages and nr_pages * is the maximum number of pages that will be stored in the array. 
* - * Returns the number of pages on the list or array. + * Returns the number of pages in the array. */ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, nodemask_t *nodemask, int nr_pages, - struct list_head *page_list, struct page **page_array) { struct page *page; @@ -4570,7 +4565,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, * Skip populated array elements to determine if any pages need * to be allocated before disabling IRQs. */ - while (page_array && nr_populated < nr_pages && page_array[nr_populated]) + while (nr_populated < nr_pages && page_array[nr_populated]) nr_populated++; /* No pages requested? */ @@ -4578,7 +4573,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, goto out; /* Already populated array? */ - if (unlikely(page_array && nr_pages - nr_populated == 0)) + if (unlikely(nr_pages - nr_populated == 0)) goto out; /* Bulk allocator does not support memcg accounting. */ @@ -4660,7 +4655,7 @@ retry_this_zone: while (nr_populated < nr_pages) { /* Skip existing pages */ - if (page_array && page_array[nr_populated]) { + if (page_array[nr_populated]) { nr_populated++; continue; } @@ -4679,11 +4674,7 @@ retry_this_zone: prep_new_page(page, 0, gfp, 0); set_page_refcounted(page); - if (page_list) - list_add(&page->lru, page_list); - else - page_array[nr_populated] = page; - nr_populated++; + page_array[nr_populated++] = page; } pcp_spin_unlock(pcp); @@ -4700,14 +4691,8 @@ failed_irq: failed: page = __alloc_pages_noprof(gfp, 0, preferred_nid, nodemask); - if (page) { - if (page_list) - list_add(&page->lru, page_list); - else - page_array[nr_populated] = page; - nr_populated++; - } - + if (page) + page_array[nr_populated++] = page; goto out; } EXPORT_SYMBOL_GPL(alloc_pages_bulk_noprof); From 655b9a0a16f0fe6d12faea6148e0e4bdcfedd5e0 Mon Sep 17 00:00:00 2001 From: Luiz Capitulino Date: Mon, 23 Dec 2024 17:00:38 -0500 Subject: [PATCH 318/504] mm: alloc_pages_bulk: rename API The previous commit removed the page_list argument from alloc_pages_bulk_noprof() along with the alloc_pages_bulk_list() function. 
Now that only the *_array() flavour of the API remains, we can do the following renaming (along with the _noprof() ones): alloc_pages_bulk_array -> alloc_pages_bulk alloc_pages_bulk_array_mempolicy -> alloc_pages_bulk_mempolicy alloc_pages_bulk_array_node -> alloc_pages_bulk_node Link: https://lkml.kernel.org/r/275a3bbc0be20fbe9002297d60045e67ab3d4ada.1734991165.git.luizcap@redhat.com Signed-off-by: Luiz Capitulino Acked-by: David Hildenbrand Cc: Matthew Wilcox (Oracle) Cc: Mel Gorman Cc: Yunsheng Lin Signed-off-by: Andrew Morton --- drivers/staging/media/atomisp/pci/hmm/hmm_bo.c | 4 ++-- drivers/vfio/pci/mlx5/cmd.c | 14 +++++++------- drivers/vfio/pci/virtio/migrate.c | 6 +++--- fs/btrfs/extent_io.c | 2 +- fs/erofs/zutil.c | 4 ++-- fs/splice.c | 2 +- fs/xfs/xfs_buf.c | 4 ++-- include/linux/gfp.h | 14 +++++++------- kernel/bpf/arena.c | 2 +- lib/alloc_tag.c | 4 ++-- lib/kunit_iov_iter.c | 2 +- lib/test_vmalloc.c | 2 +- mm/mempolicy.c | 14 +++++++------- mm/vmalloc.c | 4 ++-- net/core/page_pool.c | 7 +++---- net/sunrpc/svc.c | 4 ++-- net/sunrpc/svc_xprt.c | 3 +-- 17 files changed, 45 insertions(+), 47 deletions(-) diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c index 07ed33464d71..224ca8d42721 100644 --- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c +++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c @@ -624,10 +624,10 @@ static int alloc_private_pages(struct hmm_buffer_object *bo) const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS; int ret; - ret = alloc_pages_bulk_array(gfp, bo->pgnr, bo->pages); + ret = alloc_pages_bulk(gfp, bo->pgnr, bo->pages); if (ret != bo->pgnr) { free_pages_bulk_array(ret, bo->pages); - dev_err(atomisp_dev, "alloc_pages_bulk_array() failed\n"); + dev_err(atomisp_dev, "alloc_pages_bulk() failed\n"); return -ENOMEM; } diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c index eb7387ee6ebd..11eda6b207f1 100644 --- a/drivers/vfio/pci/mlx5/cmd.c +++ b/drivers/vfio/pci/mlx5/cmd.c @@ -408,7 +408,7 @@ void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf) buf->dma_dir, 0); } - /* Undo alloc_pages_bulk_array() */ + /* Undo alloc_pages_bulk() */ for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0) __free_page(sg_page_iter_page(&sg_iter)); sg_free_append_table(&buf->table); @@ -431,8 +431,8 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf, return -ENOMEM; do { - filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill, - page_list); + filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill, + page_list); if (!filled) { ret = -ENOMEM; goto err; @@ -1342,7 +1342,7 @@ static void free_recv_pages(struct mlx5_vhca_recv_buf *recv_buf) { int i; - /* Undo alloc_pages_bulk_array() */ + /* Undo alloc_pages_bulk() */ for (i = 0; i < recv_buf->npages; i++) __free_page(recv_buf->page_list[i]); @@ -1361,9 +1361,9 @@ static int alloc_recv_pages(struct mlx5_vhca_recv_buf *recv_buf, return -ENOMEM; for (;;) { - filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, - npages - done, - recv_buf->page_list + done); + filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, + npages - done, + recv_buf->page_list + done); if (!filled) goto err; diff --git a/drivers/vfio/pci/virtio/migrate.c b/drivers/vfio/pci/virtio/migrate.c index ee54f4c17857..ba92bb4e9af9 100644 --- a/drivers/vfio/pci/virtio/migrate.c +++ b/drivers/vfio/pci/virtio/migrate.c @@ -77,8 +77,8 @@ static int virtiovf_add_migration_pages(struct virtiovf_data_buffer *buf, return -ENOMEM; do { - filled = 
alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill, - page_list); + filled = alloc_pages_bulk(GFP_KERNEL_ACCOUNT, to_fill, + page_list); if (!filled) { ret = -ENOMEM; goto err; @@ -112,7 +112,7 @@ static void virtiovf_free_data_buffer(struct virtiovf_data_buffer *buf) { struct sg_page_iter sg_iter; - /* Undo alloc_pages_bulk_array() */ + /* Undo alloc_pages_bulk() */ for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0) __free_page(sg_page_iter_page(&sg_iter)); sg_free_append_table(&buf->table); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index b923d0cec61c..d70e9461fea8 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -632,7 +632,7 @@ int btrfs_alloc_page_array(unsigned int nr_pages, struct page **page_array, for (allocated = 0; allocated < nr_pages;) { unsigned int last = allocated; - allocated = alloc_pages_bulk_array(gfp, nr_pages, page_array); + allocated = alloc_pages_bulk(gfp, nr_pages, page_array); if (unlikely(allocated == last)) { /* No progress, fail and do cleanup. */ for (int i = 0; i < allocated; i++) { diff --git a/fs/erofs/zutil.c b/fs/erofs/zutil.c index 0dd65cefce33..9c5aa9d53682 100644 --- a/fs/erofs/zutil.c +++ b/fs/erofs/zutil.c @@ -87,8 +87,8 @@ int z_erofs_gbuf_growsize(unsigned int nrpages) tmp_pages[j] = gbuf->pages[j]; do { last = j; - j = alloc_pages_bulk_array(GFP_KERNEL, nrpages, - tmp_pages); + j = alloc_pages_bulk(GFP_KERNEL, nrpages, + tmp_pages); if (last == j) goto out; } while (j != nrpages); diff --git a/fs/splice.c b/fs/splice.c index 2898fa1e9e63..28cfa63aa236 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -342,7 +342,7 @@ ssize_t copy_splice_read(struct file *in, loff_t *ppos, return -ENOMEM; pages = (struct page **)(bv + npages); - npages = alloc_pages_bulk_array(GFP_USER, npages, pages); + npages = alloc_pages_bulk(GFP_USER, npages, pages); if (!npages) { kfree(bv); return -ENOMEM; diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c index aa63b8efd782..82db3ab0e8b4 100644 --- a/fs/xfs/xfs_buf.c +++ b/fs/xfs/xfs_buf.c @@ -395,8 +395,8 @@ xfs_buf_alloc_pages( for (;;) { long last = filled; - filled = alloc_pages_bulk_array(gfp_mask, bp->b_page_count, - bp->b_pages); + filled = alloc_pages_bulk(gfp_mask, bp->b_page_count, + bp->b_pages); if (filled == bp->b_page_count) { XFS_STATS_INC(bp->b_mount, xb_page_found); break; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index f8b33c5e7a14..6bb1a5a7a4ae 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -215,18 +215,18 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid, struct page **page_array); #define __alloc_pages_bulk(...) alloc_hooks(alloc_pages_bulk_noprof(__VA_ARGS__)) -unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp, +unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp, unsigned long nr_pages, struct page **page_array); -#define alloc_pages_bulk_array_mempolicy(...) \ - alloc_hooks(alloc_pages_bulk_array_mempolicy_noprof(__VA_ARGS__)) +#define alloc_pages_bulk_mempolicy(...) 
\ + alloc_hooks(alloc_pages_bulk_mempolicy_noprof(__VA_ARGS__)) /* Bulk allocate order-0 pages */ -#define alloc_pages_bulk_array(_gfp, _nr_pages, _page_array) \ +#define alloc_pages_bulk(_gfp, _nr_pages, _page_array) \ __alloc_pages_bulk(_gfp, numa_mem_id(), NULL, _nr_pages, _page_array) static inline unsigned long -alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages, +alloc_pages_bulk_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages, struct page **page_array) { if (nid == NUMA_NO_NODE) @@ -235,8 +235,8 @@ alloc_pages_bulk_array_node_noprof(gfp_t gfp, int nid, unsigned long nr_pages, return alloc_pages_bulk_noprof(gfp, nid, NULL, nr_pages, page_array); } -#define alloc_pages_bulk_array_node(...) \ - alloc_hooks(alloc_pages_bulk_array_node_noprof(__VA_ARGS__)) +#define alloc_pages_bulk_node(...) \ + alloc_hooks(alloc_pages_bulk_node_noprof(__VA_ARGS__)) static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask) { diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c index 945a5680f6a5..9927cd4c9e0e 100644 --- a/kernel/bpf/arena.c +++ b/kernel/bpf/arena.c @@ -443,7 +443,7 @@ static long arena_alloc_pages(struct bpf_arena *arena, long uaddr, long page_cnt return 0; } - /* zeroing is needed, since alloc_pages_bulk_array() only fills in non-zero entries */ + /* zeroing is needed, since alloc_pages_bulk() only fills in non-zero entries */ pages = kvcalloc(page_cnt, sizeof(struct page *), GFP_KERNEL); if (!pages) return 0; diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 4e5d7af3eaa2..19b45617bdcf 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -425,8 +425,8 @@ static int vm_module_tags_populate(void) unsigned long nr; more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT; - nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN, - NUMA_NO_NODE, more_pages, next_page); + nr = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN, + NUMA_NO_NODE, more_pages, next_page); if (nr < more_pages || vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL, next_page, PAGE_SHIFT) < 0) { diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c index 13e15687675a..830bf3eca4c2 100644 --- a/lib/kunit_iov_iter.c +++ b/lib/kunit_iov_iter.c @@ -57,7 +57,7 @@ static void *__init iov_kunit_create_buffer(struct kunit *test, KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages); *ppages = pages; - got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages); + got = alloc_pages_bulk(GFP_KERNEL, npages, pages); if (got != npages) { release_pages(pages, got); KUNIT_ASSERT_EQ(test, got, npages); diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index 4ddf769861ff..f585949ff696 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -373,7 +373,7 @@ vm_map_ram_test(void) if (!pages) return -1; - nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages); + nr_allocated = alloc_pages_bulk(GFP_KERNEL, map_nr_pages, pages); if (nr_allocated != map_nr_pages) goto cleanup; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 0da6cf950f7b..f83b73236ffe 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -2372,7 +2372,7 @@ struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order) } EXPORT_SYMBOL(folio_alloc_noprof); -static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp, +static unsigned long alloc_pages_bulk_interleave(gfp_t gfp, struct mempolicy *pol, unsigned long nr_pages, struct page **page_array) { @@ -2407,7 +2407,7 @@ static unsigned long alloc_pages_bulk_array_interleave(gfp_t gfp, return total_allocated; } -static unsigned 
long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp, +static unsigned long alloc_pages_bulk_weighted_interleave(gfp_t gfp, struct mempolicy *pol, unsigned long nr_pages, struct page **page_array) { @@ -2522,7 +2522,7 @@ static unsigned long alloc_pages_bulk_array_weighted_interleave(gfp_t gfp, return total_allocated; } -static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid, +static unsigned long alloc_pages_bulk_preferred_many(gfp_t gfp, int nid, struct mempolicy *pol, unsigned long nr_pages, struct page **page_array) { @@ -2548,7 +2548,7 @@ static unsigned long alloc_pages_bulk_array_preferred_many(gfp_t gfp, int nid, * It can accelerate memory allocation especially interleaving * allocate memory. */ -unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp, +unsigned long alloc_pages_bulk_mempolicy_noprof(gfp_t gfp, unsigned long nr_pages, struct page **page_array) { struct mempolicy *pol = &default_policy; @@ -2559,15 +2559,15 @@ unsigned long alloc_pages_bulk_array_mempolicy_noprof(gfp_t gfp, pol = get_task_policy(current); if (pol->mode == MPOL_INTERLEAVE) - return alloc_pages_bulk_array_interleave(gfp, pol, + return alloc_pages_bulk_interleave(gfp, pol, nr_pages, page_array); if (pol->mode == MPOL_WEIGHTED_INTERLEAVE) - return alloc_pages_bulk_array_weighted_interleave( + return alloc_pages_bulk_weighted_interleave( gfp, pol, nr_pages, page_array); if (pol->mode == MPOL_PREFERRED_MANY) - return alloc_pages_bulk_array_preferred_many(gfp, + return alloc_pages_bulk_preferred_many(gfp, numa_node_id(), pol, nr_pages, page_array); nid = numa_node_id(); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 5c88d0e90c20..a6e7acebe9ad 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3562,11 +3562,11 @@ vm_area_alloc_pages(gfp_t gfp, int nid, * but mempolicy wants to alloc memory by interleaving. 
*/ if (IS_ENABLED(CONFIG_NUMA) && nid == NUMA_NO_NODE) - nr = alloc_pages_bulk_array_mempolicy_noprof(gfp, + nr = alloc_pages_bulk_mempolicy_noprof(gfp, nr_pages_request, pages + nr_allocated); else - nr = alloc_pages_bulk_array_node_noprof(gfp, nid, + nr = alloc_pages_bulk_node_noprof(gfp, nid, nr_pages_request, pages + nr_allocated); diff --git a/net/core/page_pool.c b/net/core/page_pool.c index f89cf93f6eb4..8a91c1972dc5 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -532,12 +532,11 @@ static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool, if (unlikely(pool->alloc.count > 0)) return pool->alloc.cache[--pool->alloc.count]; - /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */ + /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk */ memset(&pool->alloc.cache, 0, sizeof(void *) * bulk); - nr_pages = alloc_pages_bulk_array_node(gfp, - pool->p.nid, bulk, - (struct page **)pool->alloc.cache); + nr_pages = alloc_pages_bulk_node(gfp, pool->p.nid, bulk, + (struct page **)pool->alloc.cache); if (unlikely(!nr_pages)) return 0; diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 79879b7d39cb..e7f9c295d13c 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -651,8 +651,8 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node) if (pages > RPCSVC_MAXPAGES) pages = RPCSVC_MAXPAGES; - ret = alloc_pages_bulk_array_node(GFP_KERNEL, node, pages, - rqstp->rq_pages); + ret = alloc_pages_bulk_node(GFP_KERNEL, node, pages, + rqstp->rq_pages); return ret == pages; } diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 43c57124de52..aebc0d8ddff5 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -671,8 +671,7 @@ static bool svc_alloc_arg(struct svc_rqst *rqstp) } for (filled = 0; filled < pages; filled = ret) { - ret = alloc_pages_bulk_array(GFP_KERNEL, pages, - rqstp->rq_pages); + ret = alloc_pages_bulk(GFP_KERNEL, pages, rqstp->rq_pages); if (ret > filled) /* Made progress, don't sleep yet */ continue; From c6ab6b0ff1f77827a57bf7f8116f31bac96168ce Mon Sep 17 00:00:00 2001 From: yangge Date: Wed, 8 Jan 2025 19:30:54 +0800 Subject: [PATCH 319/504] mm: compaction: skip memory compaction when there are not enough migratable pages There are 4 NUMA nodes on my machine, and each NUMA node has 32GB of memory. I have configured 16GB of CMA memory on each NUMA node, and starting a 32GB virtual machine with device passthrough is extremely slow, taking almost an hour. During the startup of the virtual machine, it will call pin_user_pages_remote(..., FOLL_LONGTERM, ...) to allocate memory. Long term GUP cannot allocate memory from CMA area, so a maximum of 16 GB of no-CMA memory on a NUMA node can be used as virtual machine memory. There is 16GB of free CMA memory on a NUMA node, which is sufficient to pass the order-0 watermark check, causing the __compaction_suitable() function to consistently return true. However, if there aren't enough migratable pages available, performing memory compaction is also meaningless. Besides checking whether the order-0 watermark is met, __compaction_suitable() also needs to determine whether there are sufficient migratable pages available for memory compaction. For costly allocations, because __compaction_suitable() always returns true, __alloc_pages_slowpath() can't exit at the appropriate place, resulting in excessively long virtual machine startup times. 
Call trace: __alloc_pages_slowpath if (compact_result == COMPACT_SKIPPED || compact_result == COMPACT_DEFERRED) goto nopage; // should exit __alloc_pages_slowpath() from here When the 16G of non-CMA memory on a single node is exhausted, we will fallback to allocating memory on other nodes. In order to quickly fallback to remote nodes, we should skip memory compaction when migratable pages are insufficient. After this fix, it only takes a few tens of seconds to start a 32GB virtual machine with device passthrough functionality. Link: https://lkml.kernel.org/r/1736335854-548-1-git-send-email-yangge1116@126.com Signed-off-by: yangge Cc: Baolin Wang Cc: David Hildenbrand Cc: Johannes Weiner Signed-off-by: Andrew Morton --- mm/compaction.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/mm/compaction.c b/mm/compaction.c index 07bd22789f07..a9f1261972c8 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2383,7 +2383,27 @@ static bool __compaction_suitable(struct zone *zone, int order, int highest_zoneidx, unsigned long wmark_target) { + pg_data_t __maybe_unused *pgdat = zone->zone_pgdat; + unsigned long sum, nr_pinned; unsigned long watermark; + + sum = node_page_state(pgdat, NR_INACTIVE_FILE) + + node_page_state(pgdat, NR_INACTIVE_ANON) + + node_page_state(pgdat, NR_ACTIVE_FILE) + + node_page_state(pgdat, NR_ACTIVE_ANON) + + node_page_state(pgdat, NR_UNEVICTABLE); + + nr_pinned = node_page_state(pgdat, NR_FOLL_PIN_ACQUIRED) - + node_page_state(pgdat, NR_FOLL_PIN_RELEASED); + + /* + * Gup-pinned pages are non-migratable. After subtracting these pages, + * we need to check if the remaining pages are sufficient for memory + * compaction. + */ + if ((sum - nr_pinned) < (1 << order)) + return false; + /* * Watermarks for order-0 must be met for compaction to be able to * isolate free pages for migration targets. This means that the From 7c294d04ee7e58bee3158de096adf22cc00404f7 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 9 Jan 2025 09:51:17 -0800 Subject: [PATCH 320/504] mm/damon: fixup damos_filter kernel-doc Patch series "mm/damon: extend DAMOS filters for inclusion", v2. DAMOS fitlers are exclusive filters. It only excludes memory of given criterias from the DAMOS action targets. This has below limitations. First, the name is not explicitly explaining the behavior. This actually resulted in users' confusions[1]. Secondly, combined uses of multiple filters provide only restriced coverages. For example, building a DAMOS scheme that applies the action to memory that belongs to cgroup A "or" cgroup B is impossible. A workaround would be using two schemes that fitlers out memory that not belong to cgroup A and cgroup B, respectively. It is cumbersome, and difficult to control quota-like per-scheme features in an orchestration. Monitoring of filters-passed memory statistic will also be complicated. Extend DAMOS filters to support not only exclusion (rejecting), but also inclusion (allowing) behavior. For this, add a new damos_filter struct field called 'allow' for DAMON kernel API users. The filter works as an inclusion or exclusion filter when it is set or unset, respectively. For DAMON user-space ABI users, add a DAMON sysfs file of same name under DAMOS filter sysfs directory. To prevent exposing a behavioral change to old users, set rejecting as the default behavior. Note that allow-filters work for only inclusion, not exclusion of memory that not satisfying the criteria. 
And the default behavior of DAMOS for memory that no filter has involved is that the action can be applied to those memory. Also, filters-passed memory statistics are for any memory that passed through the DAMOS filters check stage. These implies installing allow-filters at the endof the filter list is useless. Refer to the design doc change of this series for more details. [1] https://lore.kernel.org/20240320165619.71478-1-sj@kernel.org This patch (of 10): The comment is slightly wrong. DAMOS filters are not only for pages, but general bytes of memory. Also the description of 'matching' is bit confusing, since DAMOS filters do only filtering out. Update the comments to be less confusing. Link: https://lkml.kernel.org/r/20250109175126.57878-1-sj@kernel.org Link: https://lkml.kernel.org/r/20250109175126.57878-2-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index 298b1a831e62..72afba74ac6d 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -347,8 +347,8 @@ enum damos_filter_type { /** * struct damos_filter - DAMOS action target memory filter. - * @type: Type of the page. - * @matching: If the matching page should filtered out or in. + * @type: Type of the target memory. + * @matching: If the @type-matching memory should be filtered out. * @memcg_id: Memcg id of the question if @type is DAMOS_FILTER_MEMCG. * @addr_range: Address range if @type is DAMOS_FILTER_TYPE_ADDR. * @target_idx: Index of the &struct damon_target of @@ -357,9 +357,10 @@ enum damos_filter_type { * @list: List head for siblings. * * Before applying the &damos->action to a memory region, DAMOS checks if each - * page of the region matches to this and avoid applying the action if so. - * Support of each filter type depends on the running &struct damon_operations - * and the type. Refer to &enum damos_filter_type for more detai. + * byte of the region matches to this given condition and avoid applying the + * action if so. Support of each filter type depends on the running &struct + * damon_operations and the type. Refer to &enum damos_filter_type for more + * details. */ struct damos_filter { enum damos_filter_type type; From 800267536c9df6994f88c957f458dc1ca25ec008 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 9 Jan 2025 09:51:18 -0800 Subject: [PATCH 321/504] mm/damon/core: add damos_filter->allow field DAMOS filters work as only exclusive (reject) filters. This makes it easy to be confused, and restrictive at combining multiple filters for covering various types of memory. Add a field named 'allow' to damos_filter. The field will be used to indicate whether the filter should work for inclusion or exclusion. To keep the old behavior, set it as 'false' (work as exclusive filter) by default, from damos_new_filter(). Following two commits will make the core and operations set layers, which handles damos_filter objects, respect the field, respectively. 
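For kernel-space DAMON API users, a minimal fragment sketch of building an inclusion filter at this stage of the series (it assumes an existing struct damos *scheme; a later patch in the series folds 'allow' into damos_new_filter() itself):

	struct damos_filter *filter;

	/* At this point 'allow' is still set manually after creation. */
	filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true);
	if (filter) {
		filter->allow = true;	/* include anon memory rather than reject it */
		damos_add_filter(scheme, filter);
	}
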
Link: https://lkml.kernel.org/r/20250109175126.57878-3-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 4 +++- mm/damon/core.c | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index 72afba74ac6d..8a2d104df5a3 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -348,7 +348,8 @@ enum damos_filter_type { /** * struct damos_filter - DAMOS action target memory filter. * @type: Type of the target memory. - * @matching: If the @type-matching memory should be filtered out. + * @matching: Whether this is for @type-matching memory. + * @allow: Whether to include or exclude the @matching memory. * @memcg_id: Memcg id of the question if @type is DAMOS_FILTER_MEMCG. * @addr_range: Address range if @type is DAMOS_FILTER_TYPE_ADDR. * @target_idx: Index of the &struct damon_target of @@ -365,6 +366,7 @@ enum damos_filter_type { struct damos_filter { enum damos_filter_type type; bool matching; + bool allow; union { unsigned short memcg_id; struct damon_addr_range addr_range; diff --git a/mm/damon/core.c b/mm/damon/core.c index 52e50f183ffe..bdde532ebbc8 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -275,6 +275,7 @@ struct damos_filter *damos_new_filter(enum damos_filter_type type, return NULL; filter->type = type; filter->matching = matching; + filter->allow = false; INIT_LIST_HEAD(&filter->list); return filter; } From d8918fa2ac1831f9189d3a9008e2ff0cce686fc9 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 9 Jan 2025 09:51:19 -0800 Subject: [PATCH 322/504] mm/damon/core: support damos_filter->allow DAMOS filters supports allowing behavior, but the core layer's DAMOS filters handling logic still assumes only rejecting (filtering-out) behavior. Update the logic to aware of and respect the behavioral decision by reading damos_filter->allow when making the decision to exclude a region or not. 
Link: https://lkml.kernel.org/r/20250109175126.57878-4-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/core.c | 6 +++--- mm/damon/tests/core-kunit.h | 10 +++++----- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/mm/damon/core.c b/mm/damon/core.c index bdde532ebbc8..76707b0635d6 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -1374,7 +1374,7 @@ static void damos_update_stat(struct damos *s, s->stat.sz_ops_filter_passed += sz_ops_filter_passed; } -static bool __damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, +static bool damos_filter_match(struct damon_ctx *ctx, struct damon_target *t, struct damon_region *r, struct damos_filter *filter) { bool matched = false; @@ -1428,8 +1428,8 @@ static bool damos_filter_out(struct damon_ctx *ctx, struct damon_target *t, struct damos_filter *filter; damos_for_each_filter(filter, s) { - if (__damos_filter_out(ctx, t, r, filter)) - return true; + if (damos_filter_match(ctx, t, r, filter)) + return !filter->allow; } return false; } diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h index cf22e09a3507..8f58d3424c21 100644 --- a/mm/damon/tests/core-kunit.h +++ b/mm/damon/tests/core-kunit.h @@ -434,25 +434,25 @@ static void damos_test_filter_out(struct kunit *test) damon_add_region(r, t); /* region in the range */ - KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f)); + KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f)); KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); /* region before the range */ r->ar.start = DAMON_MIN_REGION * 1; r->ar.end = DAMON_MIN_REGION * 2; - KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f)); + KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f)); KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); /* region after the range */ r->ar.start = DAMON_MIN_REGION * 6; r->ar.end = DAMON_MIN_REGION * 8; - KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f)); + KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f)); KUNIT_EXPECT_EQ(test, damon_nr_regions(t), 1); /* region started before the range */ r->ar.start = DAMON_MIN_REGION * 1; r->ar.end = DAMON_MIN_REGION * 4; - KUNIT_EXPECT_FALSE(test, __damos_filter_out(NULL, t, r, f)); + KUNIT_EXPECT_FALSE(test, damos_filter_match(NULL, t, r, f)); /* filter should have split the region */ KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 1); KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 2); @@ -465,7 +465,7 @@ static void damos_test_filter_out(struct kunit *test) /* region started in the range */ r->ar.start = DAMON_MIN_REGION * 2; r->ar.end = DAMON_MIN_REGION * 8; - KUNIT_EXPECT_TRUE(test, __damos_filter_out(NULL, t, r, f)); + KUNIT_EXPECT_TRUE(test, damos_filter_match(NULL, t, r, f)); /* filter should have split the region */ KUNIT_EXPECT_EQ(test, r->ar.start, DAMON_MIN_REGION * 2); KUNIT_EXPECT_EQ(test, r->ar.end, DAMON_MIN_REGION * 6); From d2c6c57617a2c8970df99be7ec4b345ce2ff5751 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 9 Jan 2025 09:51:20 -0800 Subject: [PATCH 323/504] mm/damon/paddr: support damos_filter->allow Respect damos_filter->allow from 'paddr', which is a DAMON operations set implementation for the physical address space and supports a few types of region-internal DAMOS filters (anon, memcg and young). The change is similar to that of the previous commit for core layer update. 
Link: https://lkml.kernel.org/r/20250109175126.57878-5-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/paddr.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index b0c283808ba6..817acfd4f8a2 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -198,7 +198,7 @@ static unsigned int damon_pa_check_accesses(struct damon_ctx *ctx) return max_nr_accesses; } -static bool __damos_pa_filter_out(struct damos_filter *filter, +static bool damos_pa_filter_match(struct damos_filter *filter, struct folio *folio) { bool matched = false; @@ -237,8 +237,8 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio) struct damos_filter *filter; damos_for_each_filter(filter, scheme) { - if (__damos_pa_filter_out(filter, folio)) - return true; + if (damos_pa_filter_match(filter, folio)) + return !filter->allow; } return false; } From 982c07e0711eb105d3fd9d15775e7aa0282a84c3 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 9 Jan 2025 09:51:21 -0800 Subject: [PATCH 324/504] mm/damon: add 'allow' argument to damos_new_filter() DAMON API users should set damos_filter->allow manually to use a DAMOS allow-filter, since damos_new_filter() unsets the field always. It is cumbersome and easy to mistake. Add an arugment for setting the field to damos_new_filter(). Link: https://lkml.kernel.org/r/20250109175126.57878-6-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 2 +- mm/damon/core.c | 7 ++++--- mm/damon/paddr.c | 3 ++- mm/damon/reclaim.c | 2 +- mm/damon/sysfs-schemes.c | 2 +- mm/damon/tests/core-kunit.h | 4 ++-- 6 files changed, 11 insertions(+), 9 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index 8a2d104df5a3..0834d7ffcb84 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -801,7 +801,7 @@ void damon_update_region_access_rate(struct damon_region *r, bool accessed, struct damon_attrs *attrs); struct damos_filter *damos_new_filter(enum damos_filter_type type, - bool matching); + bool matching, bool allow); void damos_add_filter(struct damos *s, struct damos_filter *f); void damos_destroy_filter(struct damos_filter *f); diff --git a/mm/damon/core.c b/mm/damon/core.c index 76707b0635d6..55a435bdd89d 100644 --- a/mm/damon/core.c +++ b/mm/damon/core.c @@ -266,7 +266,7 @@ int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges, } struct damos_filter *damos_new_filter(enum damos_filter_type type, - bool matching) + bool matching, bool allow) { struct damos_filter *filter; @@ -275,7 +275,7 @@ struct damos_filter *damos_new_filter(enum damos_filter_type type, return NULL; filter->type = type; filter->matching = matching; - filter->allow = false; + filter->allow = allow; INIT_LIST_HEAD(&filter->list); return filter; } @@ -806,7 +806,8 @@ static int damos_commit_filters(struct damos *dst, struct damos *src) continue; new_filter = damos_new_filter( - src_filter->type, src_filter->matching); + src_filter->type, src_filter->matching, + src_filter->allow); if (!new_filter) return -ENOMEM; damos_commit_filter_arg(new_filter, src_filter); diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 817acfd4f8a2..6b4397de4199 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -259,7 +259,8 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s, } } if (install_young_filter) { - filter = damos_new_filter(DAMOS_FILTER_TYPE_YOUNG, true); 
+ filter = damos_new_filter( + DAMOS_FILTER_TYPE_YOUNG, true, false); if (!filter) return 0; damos_add_filter(s, filter); diff --git a/mm/damon/reclaim.c b/mm/damon/reclaim.c index 9e0077a9404e..a675150965e0 100644 --- a/mm/damon/reclaim.c +++ b/mm/damon/reclaim.c @@ -221,7 +221,7 @@ static int damon_reclaim_apply_parameters(void) } if (skip_anon) { - filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true); + filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false); if (!filter) goto out; damos_add_filter(scheme, filter); diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index deeaf23c1fcf..9a883e8aea1c 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -1901,7 +1901,7 @@ static int damon_sysfs_add_scheme_filters(struct damos *scheme, sysfs_filters->filters_arr[i]; struct damos_filter *filter = damos_new_filter(sysfs_filter->type, - sysfs_filter->matching); + sysfs_filter->matching, false); int err; if (!filter) diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h index 8f58d3424c21..532c6a6f21f9 100644 --- a/mm/damon/tests/core-kunit.h +++ b/mm/damon/tests/core-kunit.h @@ -411,7 +411,7 @@ static void damos_test_new_filter(struct kunit *test) { struct damos_filter *filter; - filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true); + filter = damos_new_filter(DAMOS_FILTER_TYPE_ANON, true, false); KUNIT_EXPECT_EQ(test, filter->type, DAMOS_FILTER_TYPE_ANON); KUNIT_EXPECT_EQ(test, filter->matching, true); KUNIT_EXPECT_PTR_EQ(test, filter->list.prev, &filter->list); @@ -425,7 +425,7 @@ static void damos_test_filter_out(struct kunit *test) struct damon_region *r, *r2; struct damos_filter *f; - f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true); + f = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true, false); f->addr_range = (struct damon_addr_range){ .start = DAMON_MIN_REGION * 2, .end = DAMON_MIN_REGION * 6}; From 2b04211a82ebd3de2e574b7f9848ae9e5035e56f Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 9 Jan 2025 09:51:22 -0800 Subject: [PATCH 325/504] mm/damon/sysfs-schemes: add a file for setting damos_filter->allow Only kernel-space DAMON API users can use inclusive DAMOS filters. Add a sysfs file named 'allow' under DAMOS filter directory of DAMON sysfs interface, to let the user-space users use inclusive DAMOS filters. Link: https://lkml.kernel.org/r/20250109175126.57878-7-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- mm/damon/sysfs-schemes.c | 32 +++++++++++++++++++++++++++++++- 1 file changed, 31 insertions(+), 1 deletion(-) diff --git a/mm/damon/sysfs-schemes.c b/mm/damon/sysfs-schemes.c index 9a883e8aea1c..98f93ae9f59e 100644 --- a/mm/damon/sysfs-schemes.c +++ b/mm/damon/sysfs-schemes.c @@ -313,6 +313,7 @@ struct damon_sysfs_scheme_filter { struct kobject kobj; enum damos_filter_type type; bool matching; + bool allow; char *memcg_path; struct damon_addr_range addr_range; int target_idx; @@ -385,6 +386,30 @@ static ssize_t matching_store(struct kobject *kobj, return count; } +static ssize_t allow_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + + return sysfs_emit(buf, "%c\n", filter->allow ? 
'Y' : 'N'); +} + +static ssize_t allow_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, size_t count) +{ + struct damon_sysfs_scheme_filter *filter = container_of(kobj, + struct damon_sysfs_scheme_filter, kobj); + bool allow; + int err = kstrtobool(buf, &allow); + + if (err) + return err; + + filter->allow = allow; + return count; +} + static ssize_t memcg_path_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) { @@ -482,6 +507,9 @@ static struct kobj_attribute damon_sysfs_scheme_filter_type_attr = static struct kobj_attribute damon_sysfs_scheme_filter_matching_attr = __ATTR_RW_MODE(matching, 0600); +static struct kobj_attribute damon_sysfs_scheme_filter_allow_attr = + __ATTR_RW_MODE(allow, 0600); + static struct kobj_attribute damon_sysfs_scheme_filter_memcg_path_attr = __ATTR_RW_MODE(memcg_path, 0600); @@ -497,6 +525,7 @@ static struct kobj_attribute damon_sysfs_scheme_filter_damon_target_idx_attr = static struct attribute *damon_sysfs_scheme_filter_attrs[] = { &damon_sysfs_scheme_filter_type_attr.attr, &damon_sysfs_scheme_filter_matching_attr.attr, + &damon_sysfs_scheme_filter_allow_attr.attr, &damon_sysfs_scheme_filter_memcg_path_attr.attr, &damon_sysfs_scheme_filter_addr_start_attr.attr, &damon_sysfs_scheme_filter_addr_end_attr.attr, @@ -1901,7 +1930,8 @@ static int damon_sysfs_add_scheme_filters(struct damos *scheme, sysfs_filters->filters_arr[i]; struct damos_filter *filter = damos_new_filter(sysfs_filter->type, - sysfs_filter->matching, false); + sysfs_filter->matching, + sysfs_filter->allow); int err; if (!filter) From 166a805992446e5a305f2f2165ef01a8e3f6b5c7 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 9 Jan 2025 09:51:23 -0800 Subject: [PATCH 326/504] Docs/mm/damon/design: document allow/reject DAMOS filter behaviors Update DAMOS filters design document to describe the allow/reject behavior of filters. Link: https://lkml.kernel.org/r/20250109175126.57878-8-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 33 +++++++++++++++++++++++++++---- 1 file changed, 29 insertions(+), 4 deletions(-) diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index 449eb33688c2..667775bab86c 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -504,9 +504,34 @@ have a list of latency-critical processes. To let users optimize DAMOS schemes with such special knowledge, DAMOS provides a feature called DAMOS filters. The feature allows users to set an arbitrary -number of filters for each scheme. Each filter specifies the type of target -memory, and whether it should exclude the memory of the type (filter-out), or -all except the memory of the type (filter-in). +number of filters for each scheme. Each filter specifies + +- a type of memory (``type``), +- whether it is for the memory of the type or all except the type + (``matching``), and +- whether it is to allow (include) or reject (exclude) applying + the scheme's action to the memory (``allow``). + +When multiple filters are installed, each filter is evaluated in the installed +order. If a part of memory is matched to one of the filter, next filters are +ignored. If the memory passes through the filters evaluation stage because it +is not matched to any of the filters, applying the scheme's action to it is +allowed, same to the behavior when no filter exists. 
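As a rough illustration of the evaluation order described above, the following standalone C sketch (simplified placeholder types, not the kernel's real damos structures) mirrors the damos_pa_filter_out() change earlier in this series: the first matching filter decides the outcome, and memory that matches no filter stays eligible for the action.

    #include <stdbool.h>
    #include <stddef.h>

    /* Simplified stand-ins; only the allow/reject decision logic is shown. */
    struct filter {
            bool allow;     /* allow (include) or reject (exclude) on a match */
            bool matched;   /* stand-in for the real type/matching check */
    };

    /* Return true if the scheme's action should be skipped for this memory. */
    static bool filters_reject(const struct filter *filters, size_t nr)
    {
            for (size_t i = 0; i < nr; i++) {
                    if (!filters[i].matched)
                            continue;
                    /* The first matching filter decides; later ones are ignored. */
                    return !filters[i].allow;
            }
            /* No filter matched: applying the action stays allowed. */
            return false;
    }

    int main(void)
    {
            const struct filter f[] = {
                    { .allow = true,  .matched = true },    /* allow-filter, matches */
                    { .allow = false, .matched = true },    /* reject-filter, never reached */
            };

            return filters_reject(f, 2) ? 1 : 0;    /* exits 0: action allowed */
    }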
+ +For example, let's assume 1) a filter for allowing anonymous pages and 2) +another filter for rejecting young pages are installed in the order. If a page +of a region that eligible to apply the scheme's action is an anonymous page, +the scheme's action will be applied to the page regardless of whether it is +young or not, since it matches with the first allow-filter. If the page is +not anonymous but young, the scheme's action will not be applied, since the +second reject-filter blocks it. If the page is neither anonymous nor young, +the page will pass through the filters evaluation stage since there is no +matching filter, and the action will be applied to the page. + +Note that the action can equally be applied to memory that either explicitly +filter-allowed or filters evaluation stage passed. It means that installing +allow-filters at the end of the list makes no practical change but only +filters-checking overhead. For efficient handling of filters, some types of filters are handled by the core layer, while others are handled by operations set. In the latter case, @@ -516,7 +541,7 @@ filter are not counted as the scheme has tried to the region. In contrast, if a memory regions is filtered by an operations set layer-handled filter, it is counted as the scheme has tried. This difference affects the statistics. -Below types of filters are currently supported. +Below ``type`` of filters are currently supported. - anonymous page - Applied to pages that containing data that not stored in files. From 7b169d15b9ab6da4ccadbd92d45116ea55f1e9e3 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 9 Jan 2025 09:51:24 -0800 Subject: [PATCH 327/504] Docs/ABI/damon: document DAMOS filter allow sysfs file Update DAMON ABI document for added DAMOS filter 'allow' file. Link: https://lkml.kernel.org/r/20250109175126.57878-9-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/ABI/testing/sysfs-kernel-mm-damon | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-damon b/Documentation/ABI/testing/sysfs-kernel-mm-damon index 8c0acb31638b..b057eddefbfc 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-damon +++ b/Documentation/ABI/testing/sysfs-kernel-mm-damon @@ -355,10 +355,15 @@ Description: If 'target' is written to the 'type' file, writing to or What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//filters//matching Date: Dec 2022 Contact: SeongJae Park -Description: Writing 'Y' or 'N' to this file sets whether to filter out - pages that do or do not match to the 'type' and 'memcg_path', - respectively. Filter out means the action of the scheme will - not be applied to. +Description: Writing 'Y' or 'N' to this file sets whether the filter is for + the memory of the 'type', or all except the 'type'. + +What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//filters//allow +Date: Jan 2025 +Contact: SeongJae Park +Description: Writing 'Y' or 'N' to this file sets whether to allow or reject + applying the scheme's action to the memory that satisfies the + 'type' and the 'matching' of the directory. 
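As a rough usage sketch of this file from a program (the kdamond, context, scheme and filter indices in the path below are hypothetical and depend on the local DAMON setup), writing 'N' to 'allow' turns the filter into a reject-filter:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Hypothetical indices; adjust to the actual sysfs layout in use. */
            const char *path = "/sys/kernel/mm/damon/admin/kdamonds/0/contexts/0/schemes/0/filters/0/allow";
            int fd = open(path, O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (write(fd, "N", 1) != 1)
                    perror("write");
            close(fd);
            return 0;
    }

Writing 'Y' instead makes it an allow-filter.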
What: /sys/kernel/mm/damon/admin/kdamonds//contexts//schemes//stats/nr_tried Date: Mar 2022 From 31d092ea9a557053d3b46ff38e33915e7ad1e9df Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 9 Jan 2025 09:51:25 -0800 Subject: [PATCH 328/504] Docs/admin-guide/mm/damon/usage: omit DAMOS filter details in favor of design doc DAMON usage document is describing some details about DAMOS filters, which are also documented on the design doc. Deduplicate the details in favor of the design doc. Link: https://lkml.kernel.org/r/20250109175126.57878-10-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/damon/usage.rst | 29 ++++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index 71cf29ae8502..8df3357dcfa3 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -411,13 +411,17 @@ Each filter directory contains six files, namely ``type``, ``matcing``, file, you can write one of five special keywords: ``anon`` for anonymous pages, ``memcg`` for specific memory cgroup, ``young`` for young pages, ``addr`` for specific address range (an open-ended interval), or ``target`` for specific -DAMON monitoring target filtering. In case of the memory cgroup filtering, you -can specify the memory cgroup of the interest by writing the path of the memory -cgroup from the cgroups mount point to ``memcg_path`` file. In case of the -address range filtering, you can specify the start and end address of the range -to ``addr_start`` and ``addr_end`` files, respectively. For the DAMON -monitoring target filtering, you can specify the index of the target between -the list of the DAMON context's monitoring targets list to ``target_idx`` file. +DAMON monitoring target filtering. Meaning of the types are same to the +description on the :ref:`design doc `. + +In case of the memory cgroup filtering, you can specify the memory cgroup of +the interest by writing the path of the memory cgroup from the cgroups mount +point to ``memcg_path`` file. In case of the address range filtering, you can +specify the start and end address of the range to ``addr_start`` and +``addr_end`` files, respectively. For the DAMON monitoring target filtering, +you can specify the index of the target between the list of the DAMON context's +monitoring targets list to ``target_idx`` file. + You can write ``Y`` or ``N`` to ``matching`` file to filter out pages that does or does not match to the type, respectively. Then, the scheme's action will not be applied to the pages that specified to be filtered out. @@ -434,14 +438,9 @@ pages of all memory cgroups except ``/having_care_already``.:: echo /having_care_already > 1/memcg_path echo Y > 1/matching -Note that ``anon`` and ``memcg`` filters are currently supported only when -``paddr`` :ref:`implementation ` is being used. - -Also, memory regions that are filtered out by ``addr`` or ``target`` filters -are not counted as the scheme has tried to those, while regions that filtered -out by other type filters are counted as the scheme has tried to. The -difference is applied to :ref:`stats ` and -:ref:`tried regions `. +Refer to the :ref:`DAMOS filters design documentation +` for more details including when each of the +filters are supported and differences on stats. .. 
_sysfs_schemes_stats: From c4b55c06239acb3530a541d0504101d26629157a Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 9 Jan 2025 09:51:26 -0800 Subject: [PATCH 329/504] Docs/admin-guide/mm/damon/usage: document DAMOS filter 'allow' sysfs file Update DAMON usage document for the newly added 'allow' sysfs file for DAMOS filters. Link: https://lkml.kernel.org/r/20250109175126.57878-11-sj@kernel.org Signed-off-by: SeongJae Park Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/damon/usage.rst | 33 +++++++++++--------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index 8df3357dcfa3..f0d0c20711d6 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -83,7 +83,7 @@ comma (","). │ │ │ │ │ │ │ │ │ 0/target_metric,target_value,current_value │ │ │ │ │ │ │ :ref:`watermarks `/metric,interval_us,high,mid,low │ │ │ │ │ │ │ :ref:`filters `/nr_filters - │ │ │ │ │ │ │ │ 0/type,matching,memcg_id + │ │ │ │ │ │ │ │ 0/type,matching,memcg_id,allow │ │ │ │ │ │ │ :ref:`stats `/nr_tried,sz_tried,nr_applied,sz_applied,sz_ops_filter_passed,qt_exceeds │ │ │ │ │ │ │ :ref:`tried_regions `/total_bytes │ │ │ │ │ │ │ │ 0/start,end,nr_accesses,age,sz_filter_passed @@ -406,13 +406,14 @@ number (``N``) to the file creates the number of child directories named ``0`` to ``N-1``. Each directory represents each filter. The filters are evaluated in the numeric order. -Each filter directory contains six files, namely ``type``, ``matcing``, -``memcg_path``, ``addr_start``, ``addr_end``, and ``target_idx``. To ``type`` -file, you can write one of five special keywords: ``anon`` for anonymous pages, -``memcg`` for specific memory cgroup, ``young`` for young pages, ``addr`` for -specific address range (an open-ended interval), or ``target`` for specific -DAMON monitoring target filtering. Meaning of the types are same to the -description on the :ref:`design doc `. +Each filter directory contains seven files, namely ``type``, ``matching``, +``allow``, ``memcg_path``, ``addr_start``, ``addr_end``, and ``target_idx``. +To ``type`` file, you can write one of five special keywords: ``anon`` for +anonymous pages, ``memcg`` for specific memory cgroup, ``young`` for young +pages, ``addr`` for specific address range (an open-ended interval), or +``target`` for specific DAMON monitoring target filtering. Meaning of the +types are same to the description on the :ref:`design doc +`. In case of the memory cgroup filtering, you can specify the memory cgroup of the interest by writing the path of the memory cgroup from the cgroups mount @@ -422,25 +423,29 @@ specify the start and end address of the range to ``addr_start`` and you can specify the index of the target between the list of the DAMON context's monitoring targets list to ``target_idx`` file. -You can write ``Y`` or ``N`` to ``matching`` file to filter out pages that does -or does not match to the type, respectively. Then, the scheme's action will -not be applied to the pages that specified to be filtered out. +You can write ``Y`` or ``N`` to ``matching`` file to specify whether the filter +is for memory that matches the ``type``. You can write ``Y`` or ``N`` to +``allow`` file to specify if applying the action to the memory that satisfies +the ``type`` and ``matching`` should be allowed or not. 
For example, below restricts a DAMOS action to be applied to only non-anonymous pages of all memory cgroups except ``/having_care_already``.:: # echo 2 > nr_filters - # # filter out anonymous pages + # # disallow anonymous pages echo anon > 0/type echo Y > 0/matching + echo N > 0/allow # # further filter out all cgroups except one at '/having_care_already' echo memcg > 1/type echo /having_care_already > 1/memcg_path echo Y > 1/matching + echo N > 1/allow Refer to the :ref:`DAMOS filters design documentation -` for more details including when each of the -filters are supported and differences on stats. +` for more details including how multiple filters +of different ``allow`` works, when each of the filters are supported, and +differences on stats. .. _sysfs_schemes_stats: From afedcc5bdc3cf9cb1e2c99b880af78c6256d722b Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:27 +0500 Subject: [PATCH 330/504] selftests/mm: remove argc and argv unused parameters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "selftest/mm: Remove warnings found by adding compiler flags". Recently, I reviewed a patch on the mm/kselftest mailing list about a test which had obvious type mismatch fix in it. It was strange why that wasn't caught during development and when patch was accepted. This led me to discover that those extra compiler options to catch these warnings aren't being used. When I added them, I found tens of warnings in just mm suite. In this series, I'm fixing those warnings. The last check adds the compiler flags with which the warnings have been caught. This patch (of 16): Remove the following warnings by removing unused argc and argv parameters: In function `main': warning: unused parameter `argc' [-Wunused-parameter] 158 | int main(int argc, char *argv[]) | ~~~~^~~~ warning: unused parameter `argv' [-Wunused-parameter] 158 | int main(int argc, char *argv[]) | ~~~~~~^~~~~~ Link: https://lkml.kernel.org/r/20250109173842.1142376-1-usama.anjum@collabora.com Link: https://lkml.kernel.org/r/20250109173842.1142376-2-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/compaction_test.c | 2 +- tools/testing/selftests/mm/cow.c | 2 +- tools/testing/selftests/mm/droppable.c | 2 +- tools/testing/selftests/mm/gup_longterm.c | 2 +- tools/testing/selftests/mm/hugepage-vmemmap.c | 2 +- tools/testing/selftests/mm/hugetlb-madvise.c | 2 +- tools/testing/selftests/mm/hugetlb-soft-offline.c | 2 +- tools/testing/selftests/mm/madv_populate.c | 2 +- tools/testing/selftests/mm/map_populate.c | 2 +- tools/testing/selftests/mm/memfd_secret.c | 2 +- tools/testing/selftests/mm/mlock-random-test.c | 2 +- tools/testing/selftests/mm/mlock2-tests.c | 2 +- tools/testing/selftests/mm/on-fault-limit.c | 2 +- tools/testing/selftests/mm/pkey_sighandler_tests.c | 2 +- tools/testing/selftests/mm/soft-dirty.c | 2 +- tools/testing/selftests/mm/uffd-wp-mremap.c | 2 +- tools/testing/selftests/mm/virtual_address_range.c | 2 +- 17 files changed, 17 insertions(+), 17 deletions(-) diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c index 2c3a0eb6b22d..8d23b698ce9d 100644 --- a/tools/testing/selftests/mm/compaction_test.c +++ b/tools/testing/selftests/mm/compaction_test.c @@ -194,7 +194,7 @@ int set_zero_hugepages(unsigned long *initial_nr_hugepages) return ret; 
} -int main(int argc, char **argv) +int main(void) { struct rlimit lim; struct map_list *list = NULL, *entry; diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c index 1238e1c5aae1..ea00c85c76ca 100644 --- a/tools/testing/selftests/mm/cow.c +++ b/tools/testing/selftests/mm/cow.c @@ -1769,7 +1769,7 @@ static int tests_per_non_anon_test_case(void) return tests; } -int main(int argc, char **argv) +int main(void) { int err; struct thp_settings default_settings; diff --git a/tools/testing/selftests/mm/droppable.c b/tools/testing/selftests/mm/droppable.c index f3d9ecf96890..90ea6377810c 100644 --- a/tools/testing/selftests/mm/droppable.c +++ b/tools/testing/selftests/mm/droppable.c @@ -15,7 +15,7 @@ #include "../kselftest.h" -int main(int argc, char *argv[]) +int main(void) { size_t alloc_size = 134217728; size_t page_size = getpagesize(); diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c index 9423ad439a61..03a31dcb5757 100644 --- a/tools/testing/selftests/mm/gup_longterm.c +++ b/tools/testing/selftests/mm/gup_longterm.c @@ -444,7 +444,7 @@ static int tests_per_test_case(void) return 3 + nr_hugetlbsizes; } -int main(int argc, char **argv) +int main(void) { int i, err; diff --git a/tools/testing/selftests/mm/hugepage-vmemmap.c b/tools/testing/selftests/mm/hugepage-vmemmap.c index df366a4d1b92..23e97e552057 100644 --- a/tools/testing/selftests/mm/hugepage-vmemmap.c +++ b/tools/testing/selftests/mm/hugepage-vmemmap.c @@ -87,7 +87,7 @@ static int check_page_flags(unsigned long pfn) return 0; } -int main(int argc, char **argv) +int main(void) { void *addr; unsigned long pfn; diff --git a/tools/testing/selftests/mm/hugetlb-madvise.c b/tools/testing/selftests/mm/hugetlb-madvise.c index e74107185324..43f16c12c8e9 100644 --- a/tools/testing/selftests/mm/hugetlb-madvise.c +++ b/tools/testing/selftests/mm/hugetlb-madvise.c @@ -58,7 +58,7 @@ void read_fault_pages(void *addr, unsigned long nr_pages) } } -int main(int argc, char **argv) +int main(int __attribute__((unused)) argc, char **argv) { unsigned long free_hugepages; void *addr, *addr2; diff --git a/tools/testing/selftests/mm/hugetlb-soft-offline.c b/tools/testing/selftests/mm/hugetlb-soft-offline.c index f086f0e04756..cb087303f5ed 100644 --- a/tools/testing/selftests/mm/hugetlb-soft-offline.c +++ b/tools/testing/selftests/mm/hugetlb-soft-offline.c @@ -216,7 +216,7 @@ static void test_soft_offline_common(int enable_soft_offline) enable_soft_offline); } -int main(int argc, char **argv) +int main(void) { ksft_print_header(); ksft_set_plan(2); diff --git a/tools/testing/selftests/mm/madv_populate.c b/tools/testing/selftests/mm/madv_populate.c index ef7d911da13e..c6a3ee56a54a 100644 --- a/tools/testing/selftests/mm/madv_populate.c +++ b/tools/testing/selftests/mm/madv_populate.c @@ -281,7 +281,7 @@ static int system_has_softdirty(void) #endif } -int main(int argc, char **argv) +int main(void) { int nr_tests = 16; int err; diff --git a/tools/testing/selftests/mm/map_populate.c b/tools/testing/selftests/mm/map_populate.c index 5c8a53869b1b..0dd849b4affa 100644 --- a/tools/testing/selftests/mm/map_populate.c +++ b/tools/testing/selftests/mm/map_populate.c @@ -74,7 +74,7 @@ static int child_f(int sock, unsigned long *smap, int fd) return ksft_cnt.ksft_pass; } -int main(int argc, char **argv) +int main(void) { int sock[2], child, ret; FILE *ftmp; diff --git a/tools/testing/selftests/mm/memfd_secret.c b/tools/testing/selftests/mm/memfd_secret.c index 74c911aa3aea..b9659fa35737 100644 
--- a/tools/testing/selftests/mm/memfd_secret.c +++ b/tools/testing/selftests/mm/memfd_secret.c @@ -297,7 +297,7 @@ static void prepare(void) #define NUM_TESTS 6 -int main(int argc, char *argv[]) +int main(void) { int fd; diff --git a/tools/testing/selftests/mm/mlock-random-test.c b/tools/testing/selftests/mm/mlock-random-test.c index 1cd80b0f76c3..0d95d630d045 100644 --- a/tools/testing/selftests/mm/mlock-random-test.c +++ b/tools/testing/selftests/mm/mlock-random-test.c @@ -236,7 +236,7 @@ static void test_mlock_outof_limit(char *p, int alloc_size) ksft_test_result_pass("%s\n", __func__); } -int main(int argc, char **argv) +int main(void) { char *p = NULL; diff --git a/tools/testing/selftests/mm/mlock2-tests.c b/tools/testing/selftests/mm/mlock2-tests.c index 7f0d50fa361d..358711e8191f 100644 --- a/tools/testing/selftests/mm/mlock2-tests.c +++ b/tools/testing/selftests/mm/mlock2-tests.c @@ -425,7 +425,7 @@ static void test_mlockall(void) munlockall(); } -int main(int argc, char **argv) +int main(void) { int ret, size = 3 * getpagesize(); void *map; diff --git a/tools/testing/selftests/mm/on-fault-limit.c b/tools/testing/selftests/mm/on-fault-limit.c index 431c1277d83a..ade160966c92 100644 --- a/tools/testing/selftests/mm/on-fault-limit.c +++ b/tools/testing/selftests/mm/on-fault-limit.c @@ -28,7 +28,7 @@ static void test_limit(void) munlockall(); } -int main(int argc, char **argv) +int main(void) { ksft_print_header(); ksft_set_plan(1); diff --git a/tools/testing/selftests/mm/pkey_sighandler_tests.c b/tools/testing/selftests/mm/pkey_sighandler_tests.c index 1ac8c8809880..249989f8b7a2 100644 --- a/tools/testing/selftests/mm/pkey_sighandler_tests.c +++ b/tools/testing/selftests/mm/pkey_sighandler_tests.c @@ -528,7 +528,7 @@ static void (*pkey_tests[])(void) = { test_pkru_sigreturn }; -int main(int argc, char *argv[]) +int main(void) { int i; diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c index 8e1462ce0532..7286c90fff1a 100644 --- a/tools/testing/selftests/mm/soft-dirty.c +++ b/tools/testing/selftests/mm/soft-dirty.c @@ -187,7 +187,7 @@ static void test_mprotect_file(int pagemap_fd, int pagesize) test_mprotect(pagemap_fd, pagesize, false); } -int main(int argc, char **argv) +int main(void) { int pagemap_fd; int pagesize; diff --git a/tools/testing/selftests/mm/uffd-wp-mremap.c b/tools/testing/selftests/mm/uffd-wp-mremap.c index 2c4f984bd73c..f548b1e1f197 100644 --- a/tools/testing/selftests/mm/uffd-wp-mremap.c +++ b/tools/testing/selftests/mm/uffd-wp-mremap.c @@ -331,7 +331,7 @@ static const struct testcase testcases[] = { }, }; -int main(int argc, char **argv) +int main(void) { struct thp_settings settings; int i, j, plan = 0; diff --git a/tools/testing/selftests/mm/virtual_address_range.c b/tools/testing/selftests/mm/virtual_address_range.c index 484f82c7b7c8..6e4269b9b54d 100644 --- a/tools/testing/selftests/mm/virtual_address_range.c +++ b/tools/testing/selftests/mm/virtual_address_range.c @@ -160,7 +160,7 @@ static int validate_complete_va_space(void) return 0; } -int main(int argc, char *argv[]) +int main(void) { char *ptr[NR_CHUNKS_LOW]; char **hptr; From da61c9b3006fac275f9ad81b0cd728130c331278 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:28 +0500 Subject: [PATCH 331/504] selftests/mm: fix unused parameter warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix warnings generated by unused parameters. 
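A minimal standalone sketch of the pattern this patch applies (the worker() function is only for illustration): callback-style signatures such as pthread start routines must keep their parameters, so the parameter is marked with __attribute__((unused)) rather than removed, which silences -Wunused-parameter without changing the function's signature.

    #include <stdio.h>

    /* The void * parameter is required by the callback signature but unused. */
    static void *worker(void __attribute__((unused)) *arg)
    {
            puts("signature kept, parameter intentionally unused");
            return NULL;
    }

    int main(void)
    {
            return worker(NULL) == NULL ? 0 : 1;
    }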
Link: https://lkml.kernel.org/r/20250109173842.1142376-3-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- .../selftests/mm/hugetlb_fault_after_madv.c | 4 +- .../selftests/mm/hugetlb_madv_vs_map.c | 6 +-- tools/testing/selftests/mm/ksm_tests.c | 17 +++--- tools/testing/selftests/mm/memfd_secret.c | 4 +- tools/testing/selftests/mm/soft-dirty.c | 4 +- .../selftests/mm/split_huge_page_test.c | 4 +- tools/testing/selftests/mm/uffd-common.c | 18 ++++--- tools/testing/selftests/mm/uffd-common.h | 2 +- tools/testing/selftests/mm/uffd-stress.c | 2 +- tools/testing/selftests/mm/uffd-unit-tests.c | 52 +++++++++---------- 10 files changed, 57 insertions(+), 56 deletions(-) diff --git a/tools/testing/selftests/mm/hugetlb_fault_after_madv.c b/tools/testing/selftests/mm/hugetlb_fault_after_madv.c index e2640529dbb2..e62f4e1388f2 100644 --- a/tools/testing/selftests/mm/hugetlb_fault_after_madv.c +++ b/tools/testing/selftests/mm/hugetlb_fault_after_madv.c @@ -28,7 +28,7 @@ static void signal_handler(int signal) } /* Touch the memory while it is being madvised() */ -void *touch(void *unused) +void *touch(void __attribute__((unused)) *unused) { char *ptr = (char *)huge_ptr; @@ -41,7 +41,7 @@ void *touch(void *unused) return NULL; } -void *madv(void *unused) +void *madv(void __attribute__((unused)) *unused) { usleep(rand() % 10); diff --git a/tools/testing/selftests/mm/hugetlb_madv_vs_map.c b/tools/testing/selftests/mm/hugetlb_madv_vs_map.c index 8f122a0f0828..6c326cf3dcf6 100644 --- a/tools/testing/selftests/mm/hugetlb_madv_vs_map.c +++ b/tools/testing/selftests/mm/hugetlb_madv_vs_map.c @@ -33,7 +33,7 @@ size_t mmap_size; char *huge_ptr; /* Touch the memory while it is being madvised() */ -void *touch(void *unused) +void *touch(void __attribute__((unused)) *unused) { for (int i = 0; i < INLOOP_ITER; i++) huge_ptr[0] = '.'; @@ -41,7 +41,7 @@ void *touch(void *unused) return NULL; } -void *madv(void *unused) +void *madv(void __attribute__((unused)) *unused) { for (int i = 0; i < INLOOP_ITER; i++) madvise(huge_ptr, mmap_size, MADV_DONTNEED); @@ -54,7 +54,7 @@ void *madv(void *unused) * The other hugepage should be flipping from used <-> reserved, because * of madvise(DONTNEED). 
*/ -void *map_extra(void *unused) +void *map_extra(void __attribute__((unused)) *unused) { void *ptr; diff --git a/tools/testing/selftests/mm/ksm_tests.c b/tools/testing/selftests/mm/ksm_tests.c index dcdd5bb20f3d..323cfcb14e4d 100644 --- a/tools/testing/selftests/mm/ksm_tests.c +++ b/tools/testing/selftests/mm/ksm_tests.c @@ -265,8 +265,7 @@ static int ksm_merge_pages(int merge_type, void *addr, size_t size, return 0; } -static int ksm_unmerge_pages(void *addr, size_t size, - struct timespec start_time, int timeout) +static int ksm_unmerge_pages(void *addr, size_t size) { if (madvise(addr, size, MADV_UNMERGEABLE)) { perror("madvise"); @@ -483,7 +482,7 @@ static int get_first_mem_node(void) return get_next_mem_node(numa_max_node()); } -static int check_ksm_numa_merge(int merge_type, int mapping, int prot, int timeout, +static int check_ksm_numa_merge(int merge_type, int timeout, bool merge_across_nodes, size_t page_size) { void *numa1_map_ptr, *numa2_map_ptr; @@ -547,8 +546,7 @@ err_out: return KSFT_FAIL; } -static int ksm_merge_hugepages_time(int merge_type, int mapping, int prot, - int timeout, size_t map_size) +static int ksm_merge_hugepages_time(int merge_type, int timeout, size_t map_size) { void *map_ptr, *map_ptr_orig; struct timespec start_time, end_time; @@ -678,7 +676,7 @@ static int ksm_unmerge_time(int merge_type, int mapping, int prot, int timeout, perror("clock_gettime"); goto err_out; } - if (ksm_unmerge_pages(map_ptr, map_size, start_time, timeout)) + if (ksm_unmerge_pages(map_ptr, map_size)) goto err_out; if (clock_gettime(CLOCK_MONOTONIC_RAW, &end_time)) { perror("clock_gettime"); @@ -906,8 +904,8 @@ int main(int argc, char *argv[]) page_size); break; case CHECK_KSM_NUMA_MERGE: - ret = check_ksm_numa_merge(merge_type, MAP_PRIVATE | MAP_ANONYMOUS, prot, - ksm_scan_limit_sec, merge_across_nodes, page_size); + ret = check_ksm_numa_merge(merge_type, ksm_scan_limit_sec, merge_across_nodes, + page_size); break; case KSM_MERGE_TIME: if (size_MB == 0) { @@ -922,8 +920,7 @@ int main(int argc, char *argv[]) printf("Option '-s' is required.\n"); return KSFT_FAIL; } - ret = ksm_merge_hugepages_time(merge_type, MAP_PRIVATE | MAP_ANONYMOUS, prot, - ksm_scan_limit_sec, size_MB); + ret = ksm_merge_hugepages_time(merge_type, ksm_scan_limit_sec, size_MB); break; case KSM_UNMERGE_TIME: if (size_MB == 0) { diff --git a/tools/testing/selftests/mm/memfd_secret.c b/tools/testing/selftests/mm/memfd_secret.c index b9659fa35737..f9d728e18678 100644 --- a/tools/testing/selftests/mm/memfd_secret.c +++ b/tools/testing/selftests/mm/memfd_secret.c @@ -121,7 +121,7 @@ close_pipe: close(pipefd[1]); } -static void try_process_vm_read(int fd, int pipefd[2]) +static void try_process_vm_read(int __attribute__((unused)) fd, int pipefd[2]) { struct iovec liov, riov; char buf[64]; @@ -145,7 +145,7 @@ static void try_process_vm_read(int fd, int pipefd[2]) exit(KSFT_FAIL); } -static void try_ptrace(int fd, int pipefd[2]) +static void try_ptrace(int __attribute__((unused)) fd, int pipefd[2]) { pid_t ppid = getppid(); int status; diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c index 7286c90fff1a..b6eb5c4642ce 100644 --- a/tools/testing/selftests/mm/soft-dirty.c +++ b/tools/testing/selftests/mm/soft-dirty.c @@ -74,7 +74,7 @@ static void test_vma_reuse(int pagemap_fd, int pagesize) munmap(map2, pagesize); } -static void test_hugepage(int pagemap_fd, int pagesize) +static void test_hugepage(int pagemap_fd) { char *map; int i, ret; @@ -203,7 +203,7 @@ int main(void) 
test_simple(pagemap_fd, pagesize); test_vma_reuse(pagemap_fd, pagesize); - test_hugepage(pagemap_fd, pagesize); + test_hugepage(pagemap_fd); test_mprotect_anon(pagemap_fd, pagesize); test_mprotect_file(pagemap_fd, pagesize); diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index eb6d1b9fc362..84b1251666aa 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -103,7 +103,7 @@ static char *allocate_zero_filled_hugepage(size_t len) return result; } -static void verify_rss_anon_split_huge_page_all_zeroes(char *one_page, int nr_hpages, size_t len) +static void verify_rss_anon_split_huge_page_all_zeroes(char *one_page, size_t len) { unsigned long rss_anon_before, rss_anon_after; size_t i; @@ -149,7 +149,7 @@ void split_pmd_zero_pages(void) size_t len = nr_hpages * pmd_pagesize; one_page = allocate_zero_filled_hugepage(len); - verify_rss_anon_split_huge_page_all_zeroes(one_page, nr_hpages, len); + verify_rss_anon_split_huge_page_all_zeroes(one_page, len); printf("Split zero filled huge pages successful\n"); free(one_page); } diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c index 717539eddf98..c15674a60f29 100644 --- a/tools/testing/selftests/mm/uffd-common.c +++ b/tools/testing/selftests/mm/uffd-common.c @@ -46,7 +46,7 @@ static void anon_release_pages(char *rel_area) err("madvise(MADV_DONTNEED) failed"); } -static int anon_allocate_area(void **alloc_area, bool is_src) +static int anon_allocate_area(void **alloc_area, bool __attribute__((unused)) is_src) { *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); @@ -57,7 +57,9 @@ static int anon_allocate_area(void **alloc_area, bool is_src) return 0; } -static void noop_alias_mapping(__u64 *start, size_t len, unsigned long offset) +static void noop_alias_mapping(__u64 __attribute__((unused)) *start, + size_t __attribute__((unused)) len, + unsigned long __attribute__((unused)) offset) { } @@ -108,7 +110,8 @@ static int hugetlb_allocate_area(void **alloc_area, bool is_src) return 0; } -static void hugetlb_alias_mapping(__u64 *start, size_t len, unsigned long offset) +static void hugetlb_alias_mapping(__u64 *start, size_t __attribute__((unused)) len, + unsigned long offset) { if (!map_shared) return; @@ -167,12 +170,13 @@ static int shmem_allocate_area(void **alloc_area, bool is_src) return 0; } -static void shmem_alias_mapping(__u64 *start, size_t len, unsigned long offset) +static void shmem_alias_mapping(__u64 *start, size_t __attribute__((unused)) len, + unsigned long offset) { *start = (unsigned long)area_dst_alias + offset; } -static void shmem_check_pmd_mapping(void *p, int expect_nr_hpages) +static void shmem_check_pmd_mapping(void __attribute__((unused)) *p, int expect_nr_hpages) { if (!check_huge_shmem(area_dst_alias, expect_nr_hpages, read_pmd_pagesize())) @@ -416,7 +420,7 @@ static void continue_range(int ufd, __u64 start, __u64 len, bool wp) ret, (int64_t) req.mapped); } -int uffd_read_msg(int ufd, struct uffd_msg *msg) +int uffd_read_msg(struct uffd_msg *msg) { int ret = read(uffd, msg, sizeof(*msg)); @@ -537,7 +541,7 @@ void *uffd_poll_thread(void *arg) } if (!(pollfd[0].revents & POLLIN)) err("pollfd[0].revents %d", pollfd[0].revents); - if (uffd_read_msg(uffd, &msg)) + if (uffd_read_msg(&msg)) continue; switch (msg.event) { default: diff --git a/tools/testing/selftests/mm/uffd-common.h 
b/tools/testing/selftests/mm/uffd-common.h index a70ae10b5f62..4a5d5b37107c 100644 --- a/tools/testing/selftests/mm/uffd-common.h +++ b/tools/testing/selftests/mm/uffd-common.h @@ -117,7 +117,7 @@ void uffd_stats_report(struct uffd_args *args, int n_cpus); int uffd_test_ctx_init(uint64_t features, const char **errmsg); void uffd_test_ctx_clear(void); int userfaultfd_open(uint64_t *features); -int uffd_read_msg(int ufd, struct uffd_msg *msg); +int uffd_read_msg(struct uffd_msg *msg); void wp_range(int ufd, __u64 start, __u64 len, bool wp); void uffd_handle_page_fault(struct uffd_msg *msg, struct uffd_args *args); int __copy_page(int ufd, unsigned long offset, bool retry, bool wp); diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c index a4b83280998a..1270ff3104cc 100644 --- a/tools/testing/selftests/mm/uffd-stress.c +++ b/tools/testing/selftests/mm/uffd-stress.c @@ -136,7 +136,7 @@ static void *uffd_read_thread(void *arg) /* from here cancellation is ok */ for (;;) { - if (uffd_read_msg(uffd, &msg)) + if (uffd_read_msg(&msg)) continue; uffd_handle_page_fault(&msg, args); } diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c index 9ff71fa1f9bf..81b315bedb74 100644 --- a/tools/testing/selftests/mm/uffd-unit-tests.c +++ b/tools/testing/selftests/mm/uffd-unit-tests.c @@ -244,7 +244,7 @@ static void *fork_event_consumer(void *data) ready_for_fork = true; /* Read until a full msg received */ - while (uffd_read_msg(args->parent_uffd, &msg)); + while (uffd_read_msg(&msg)); if (msg.event != UFFD_EVENT_FORK) err("wrong message: %u\n", msg.event); @@ -357,7 +357,7 @@ static int pagemap_test_fork(int uffd, bool with_event, bool test_pin) return result; } -static void uffd_wp_unpopulated_test(uffd_test_args_t *args) +static void uffd_wp_unpopulated_test(uffd_test_args_t __attribute__((unused)) *args) { uint64_t value; int pagemap_fd; @@ -483,8 +483,7 @@ static void uffd_wp_fork_with_event_test(uffd_test_args_t *args) uffd_wp_fork_test_common(args, true); } -static void uffd_wp_fork_pin_test_common(uffd_test_args_t *args, - bool with_event) +static void uffd_wp_fork_pin_test_common(bool with_event) { int pagemap_fd; pin_args pin_args = {}; @@ -535,14 +534,14 @@ out: close(pagemap_fd); } -static void uffd_wp_fork_pin_test(uffd_test_args_t *args) +static void uffd_wp_fork_pin_test(uffd_test_args_t __attribute__((unused)) *args) { - uffd_wp_fork_pin_test_common(args, false); + uffd_wp_fork_pin_test_common(false); } -static void uffd_wp_fork_pin_with_event_test(uffd_test_args_t *args) +static void uffd_wp_fork_pin_with_event_test(uffd_test_args_t __attribute__((unused)) *args) { - uffd_wp_fork_pin_test_common(args, true); + uffd_wp_fork_pin_test_common(true); } static void check_memory_contents(char *p) @@ -627,24 +626,25 @@ static void uffd_minor_test_common(bool test_collapse, bool test_wp) uffd_test_pass(); } -void uffd_minor_test(uffd_test_args_t *args) +void uffd_minor_test(uffd_test_args_t __attribute__((unused)) *args) { uffd_minor_test_common(false, false); } -void uffd_minor_wp_test(uffd_test_args_t *args) +void uffd_minor_wp_test(uffd_test_args_t __attribute__((unused)) *args) { uffd_minor_test_common(false, true); } -void uffd_minor_collapse_test(uffd_test_args_t *args) +void uffd_minor_collapse_test(uffd_test_args_t __attribute__((unused)) *args) { uffd_minor_test_common(true, false); } static sigjmp_buf jbuf, *sigbuf; -static void sighndl(int sig, siginfo_t *siginfo, void *ptr) +static void 
sighndl(int sig, siginfo_t __attribute__((unused)) *siginfo, + void __attribute__((unused)) *ptr) { if (sig == SIGBUS) { if (sigbuf) @@ -820,12 +820,12 @@ static void uffd_sigbus_test_common(bool wp) uffd_test_pass(); } -static void uffd_sigbus_test(uffd_test_args_t *args) +static void uffd_sigbus_test(uffd_test_args_t __attribute__((unused)) *args) { uffd_sigbus_test_common(false); } -static void uffd_sigbus_wp_test(uffd_test_args_t *args) +static void uffd_sigbus_wp_test(uffd_test_args_t __attribute__((unused)) *args) { uffd_sigbus_test_common(true); } @@ -873,12 +873,12 @@ static void uffd_events_test_common(bool wp) uffd_test_pass(); } -static void uffd_events_test(uffd_test_args_t *args) +static void uffd_events_test(uffd_test_args_t __attribute__((unused)) *args) { uffd_events_test_common(false); } -static void uffd_events_wp_test(uffd_test_args_t *args) +static void uffd_events_wp_test(uffd_test_args_t __attribute__((unused)) *args) { uffd_events_test_common(true); } @@ -946,7 +946,7 @@ uffd_register_detect_zeropage(int uffd, void *addr, uint64_t len) } /* exercise UFFDIO_ZEROPAGE */ -static void uffd_zeropage_test(uffd_test_args_t *args) +static void uffd_zeropage_test(uffd_test_args_t __attribute__((unused)) *args) { bool has_zeropage; int i; @@ -1002,7 +1002,7 @@ static void do_uffdio_poison(int uffd, unsigned long offset) } static void uffd_poison_handle_fault( - struct uffd_msg *msg, struct uffd_args *args) + struct uffd_msg *msg, struct uffd_args __attribute__((unused)) *args) { unsigned long offset; @@ -1023,7 +1023,7 @@ static void uffd_poison_handle_fault( do_uffdio_poison(uffd, offset); } -static void uffd_poison_test(uffd_test_args_t *targs) +static void uffd_poison_test(uffd_test_args_t __attribute__((unused)) *targs) { pthread_t uffd_mon; char c; @@ -1114,7 +1114,7 @@ static void uffd_move_pmd_handle_fault(struct uffd_msg *msg, } static void -uffd_move_test_common(uffd_test_args_t *targs, unsigned long chunk_size, +uffd_move_test_common(unsigned long chunk_size, void (*handle_fault)(struct uffd_msg *msg, struct uffd_args *args)) { unsigned long nr; @@ -1206,24 +1206,24 @@ uffd_move_test_common(uffd_test_args_t *targs, unsigned long chunk_size, uffd_test_pass(); } -static void uffd_move_test(uffd_test_args_t *targs) +static void uffd_move_test(uffd_test_args_t __attribute__((unused)) *targs) { - uffd_move_test_common(targs, page_size, uffd_move_handle_fault); + uffd_move_test_common(page_size, uffd_move_handle_fault); } -static void uffd_move_pmd_test(uffd_test_args_t *targs) +static void uffd_move_pmd_test(uffd_test_args_t __attribute__((unused)) *targs) { if (madvise(area_dst, nr_pages * page_size, MADV_HUGEPAGE)) err("madvise(MADV_HUGEPAGE) failure"); - uffd_move_test_common(targs, read_pmd_pagesize(), + uffd_move_test_common(read_pmd_pagesize(), uffd_move_pmd_handle_fault); } -static void uffd_move_pmd_split_test(uffd_test_args_t *targs) +static void uffd_move_pmd_split_test(uffd_test_args_t __attribute__((unused)) *targs) { if (madvise(area_dst, nr_pages * page_size, MADV_NOHUGEPAGE)) err("madvise(MADV_NOHUGEPAGE) failure"); - uffd_move_test_common(targs, read_pmd_pagesize(), + uffd_move_test_common(read_pmd_pagesize(), uffd_move_pmd_handle_fault); } From 4d53cdc24113a867e418b0950085f4d9fe6dd8f9 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:30 +0500 Subject: [PATCH 332/504] selftests/mm: fix type mismatch warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix type mismatch warnings in 
different tests. Link: https://lkml.kernel.org/r/20250109173842.1142376-5-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/compaction_test.c | 2 +- tools/testing/selftests/mm/gup_longterm.c | 3 ++- tools/testing/selftests/mm/hugetlb_dio.c | 2 +- tools/testing/selftests/mm/hugetlb_fault_after_madv.c | 2 +- tools/testing/selftests/mm/hugetlb_madv_vs_map.c | 2 +- tools/testing/selftests/mm/ksm_functional_tests.c | 6 +++--- tools/testing/selftests/mm/mlock-random-test.c | 4 ++-- tools/testing/selftests/mm/pkey_sighandler_tests.c | 2 +- tools/testing/selftests/mm/soft-dirty.c | 2 +- 9 files changed, 13 insertions(+), 12 deletions(-) diff --git a/tools/testing/selftests/mm/compaction_test.c b/tools/testing/selftests/mm/compaction_test.c index 8d23b698ce9d..f6f32a5732e9 100644 --- a/tools/testing/selftests/mm/compaction_test.c +++ b/tools/testing/selftests/mm/compaction_test.c @@ -134,7 +134,7 @@ int check_compaction(unsigned long mem_free, unsigned long hugepage_size, lseek(fd, 0, SEEK_SET); if (write(fd, init_nr_hugepages, strlen(init_nr_hugepages)) - != strlen(init_nr_hugepages)) { + != (signed long int)strlen(init_nr_hugepages)) { ksft_print_msg("Failed to write value to /proc/sys/vm/nr_hugepages: %s\n", strerror(errno)); goto close_fd; diff --git a/tools/testing/selftests/mm/gup_longterm.c b/tools/testing/selftests/mm/gup_longterm.c index 03a31dcb5757..7f1b4ad7fcae 100644 --- a/tools/testing/selftests/mm/gup_longterm.c +++ b/tools/testing/selftests/mm/gup_longterm.c @@ -446,7 +446,8 @@ static int tests_per_test_case(void) int main(void) { - int i, err; + unsigned int i; + int err; pagesize = getpagesize(); nr_hugetlbsizes = detect_hugetlb_page_sizes(hugetlbsizes, diff --git a/tools/testing/selftests/mm/hugetlb_dio.c b/tools/testing/selftests/mm/hugetlb_dio.c index db63abe5ee5e..62f368d4c8c1 100644 --- a/tools/testing/selftests/mm/hugetlb_dio.c +++ b/tools/testing/selftests/mm/hugetlb_dio.c @@ -63,7 +63,7 @@ void run_dio_using_hugetlb(unsigned int start_off, unsigned int end_off) memset(buffer, 'A', writesize); /* Write the buffer to the file */ - if (write(fd, buffer, writesize) != (writesize)) { + if (write(fd, buffer, writesize) != (signed int)writesize) { munmap(orig_buffer, h_pagesize); close(fd); ksft_exit_fail_perror("Error writing to file\n"); diff --git a/tools/testing/selftests/mm/hugetlb_fault_after_madv.c b/tools/testing/selftests/mm/hugetlb_fault_after_madv.c index e62f4e1388f2..2b5acb13ee0b 100644 --- a/tools/testing/selftests/mm/hugetlb_fault_after_madv.c +++ b/tools/testing/selftests/mm/hugetlb_fault_after_madv.c @@ -88,7 +88,7 @@ int main(void) MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0); - if ((unsigned long)huge_ptr == -1) + if (huge_ptr == MAP_FAILED) ksft_exit_skip("Failed to allocated huge page\n"); pthread_create(&thread1, NULL, madv, NULL); diff --git a/tools/testing/selftests/mm/hugetlb_madv_vs_map.c b/tools/testing/selftests/mm/hugetlb_madv_vs_map.c index 6c326cf3dcf6..eda38b63e9e8 100644 --- a/tools/testing/selftests/mm/hugetlb_madv_vs_map.c +++ b/tools/testing/selftests/mm/hugetlb_madv_vs_map.c @@ -100,7 +100,7 @@ int main(void) MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0); - if ((unsigned long)huge_ptr == -1) { + if (huge_ptr == MAP_FAILED) { ksft_test_result_fail("Failed to allocate huge page\n"); return KSFT_FAIL; } diff --git a/tools/testing/selftests/mm/ksm_functional_tests.c 
b/tools/testing/selftests/mm/ksm_functional_tests.c index 66b4e111b5a2..4f96126e4e1f 100644 --- a/tools/testing/selftests/mm/ksm_functional_tests.c +++ b/tools/testing/selftests/mm/ksm_functional_tests.c @@ -306,7 +306,7 @@ static void test_unmerge_zero_pages(void) /* Check if ksm_zero_pages is updated correctly after KSM merging */ pages_expected = size / pagesize; - if (pages_expected != get_my_ksm_zero_pages()) { + if ((signed long)pages_expected != get_my_ksm_zero_pages()) { ksft_test_result_fail("'ksm_zero_pages' updated after merging\n"); goto unmap; } @@ -319,7 +319,7 @@ static void test_unmerge_zero_pages(void) /* Check if ksm_zero_pages is updated correctly after unmerging */ pages_expected /= 2; - if (pages_expected != get_my_ksm_zero_pages()) { + if ((signed long)pages_expected != get_my_ksm_zero_pages()) { ksft_test_result_fail("'ksm_zero_pages' updated after unmerging\n"); goto unmap; } @@ -625,7 +625,7 @@ static void test_prot_none(void) { const unsigned int size = 2 * MiB; char *map; - int i; + unsigned int i; ksft_print_msg("[RUN] %s\n", __func__); diff --git a/tools/testing/selftests/mm/mlock-random-test.c b/tools/testing/selftests/mm/mlock-random-test.c index 0d95d630d045..f410699458f2 100644 --- a/tools/testing/selftests/mm/mlock-random-test.c +++ b/tools/testing/selftests/mm/mlock-random-test.c @@ -138,7 +138,7 @@ static void test_mlock_within_limit(char *p, int alloc_size) int page_size = 0; getrlimit(RLIMIT_MEMLOCK, &cur); - if (cur.rlim_cur < alloc_size) + if (cur.rlim_cur < (unsigned int)alloc_size) ksft_exit_fail_msg("alloc_size[%d] < %u rlimit,lead to mlock failure\n", alloc_size, (unsigned int)cur.rlim_cur); @@ -204,7 +204,7 @@ static void test_mlock_outof_limit(char *p, int alloc_size) struct rlimit cur; getrlimit(RLIMIT_MEMLOCK, &cur); - if (cur.rlim_cur >= alloc_size) + if (cur.rlim_cur >= (unsigned int)alloc_size) ksft_exit_fail_msg("alloc_size[%d] >%u rlimit, violates test condition\n", alloc_size, (unsigned int)cur.rlim_cur); diff --git a/tools/testing/selftests/mm/pkey_sighandler_tests.c b/tools/testing/selftests/mm/pkey_sighandler_tests.c index 249989f8b7a2..600ef57f4baa 100644 --- a/tools/testing/selftests/mm/pkey_sighandler_tests.c +++ b/tools/testing/selftests/mm/pkey_sighandler_tests.c @@ -530,7 +530,7 @@ static void (*pkey_tests[])(void) = { int main(void) { - int i; + unsigned int i; ksft_print_header(); ksft_set_plan(ARRAY_SIZE(pkey_tests)); diff --git a/tools/testing/selftests/mm/soft-dirty.c b/tools/testing/selftests/mm/soft-dirty.c index b6eb5c4642ce..68edb2475ccd 100644 --- a/tools/testing/selftests/mm/soft-dirty.c +++ b/tools/testing/selftests/mm/soft-dirty.c @@ -77,7 +77,7 @@ static void test_vma_reuse(int pagemap_fd, int pagesize) static void test_hugepage(int pagemap_fd) { char *map; - int i, ret; + unsigned int i, ret; size_t hpage_len = read_pmd_pagesize(); if (!hpage_len) From aa6ecc94d07955a69a320b6543e578ddb4787a31 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:31 +0500 Subject: [PATCH 333/504] selftests/mm: kselftest_harness: Fix warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Found warnings through hmm-tests and mdwe_test. Fix following warnings: - Mark unused variable with unused attribute - __EXPECT is causing types mismatch warnings when __exp is unsigned and _seen is equal to a constant number, __typeof__(_seen) returns signed type. 
hmm-tests.c: In function `hmm_anon_read': ../kselftest_harness.h:523:52: warning: comparison of integer expressions of different signedness: `long unsigned int' and `int' [-Wsign-compare] 523 | __EXPECT(expected, #expected, seen, #seen, !=, 1) | ^~ ../kselftest_harness.h:759:21: note: in definition of macro `__EXPECT' 759 | if (!(__exp _t __seen)) { \ | ^~ hmm-tests.c:303:9: note: in expansion of macro `ASSERT_NE' 303 | ASSERT_NE(npages, 0); | ^~~~~~~~~ - Mark variant as unused: mdwe_test.c: In function `wrapper_prctl_flags': ../kselftest_harness.h:177:52: warning: unused parameter `variant' [-Wunused-parameter] 177 | struct __fixture_variant_metadata *variant) \ | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~ ../kselftest_harness.h:152:25: note: in expansion of macro `__TEST_IMPL' 152 | #define TEST(test_name) __TEST_IMPL(test_name, -1) | ^~~~~~~~~~~ mdwe_test.c:23:1: note: in expansion of macro `TEST' 23 | TEST(prctl_flags) | ^~~~ Link: https://lkml.kernel.org/r/20250109173842.1142376-6-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/kselftest_harness.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h index a5a72415e37b..66d444ae1676 100644 --- a/tools/testing/selftests/kselftest_harness.h +++ b/tools/testing/selftests/kselftest_harness.h @@ -174,7 +174,7 @@ static void test_name(struct __test_metadata *_metadata); \ static inline void wrapper_##test_name( \ struct __test_metadata *_metadata, \ - struct __fixture_variant_metadata *variant) \ + struct __fixture_variant_metadata __attribute__((unused)) *variant) \ { \ _metadata->setup_completed = true; \ if (setjmp(_metadata->env) == 0) \ @@ -756,7 +756,7 @@ /* Avoid multiple evaluation of the cases */ \ __typeof__(_expected) __exp = (_expected); \ __typeof__(_seen) __seen = (_seen); \ - if (!(__exp _t __seen)) { \ + if (!(__exp _t (__typeof__(_expected)) __seen)) { \ /* Report with actual signedness to avoid weird output. 
*/ \ switch (is_signed_type(__exp) * 2 + is_signed_type(__seen)) { \ case 0: { \ @@ -965,7 +965,7 @@ static inline void __test_check_assert(struct __test_metadata *t) } struct __test_metadata *__active_test; -static void __timeout_handler(int sig, siginfo_t *info, void *ucontext) +static void __timeout_handler(int sig, siginfo_t *info, void __attribute__((unused)) *ucontext) { struct __test_metadata *t = __active_test; From e8581553b3d3cb620f4a45f56752037e71bb97f7 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:32 +0500 Subject: [PATCH 334/504] selftests/mm: cow: remove unused variables and fix type mismatch errors MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix following warnings: - Remove unused variables - Use __attribute__(unused) with unused variables which aren't being used and cannot be removed because of function pointer declaration - Fix type mismatches Link: https://lkml.kernel.org/r/20250109173842.1142376-7-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/cow.c | 46 +++++++++++++++++++------------- 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/tools/testing/selftests/mm/cow.c b/tools/testing/selftests/mm/cow.c index ea00c85c76ca..e37eb863e66c 100644 --- a/tools/testing/selftests/mm/cow.c +++ b/tools/testing/selftests/mm/cow.c @@ -84,7 +84,7 @@ static void detect_huge_zeropage(void) return; ret = pread(fd, buf, sizeof(buf), 0); - if (ret > 0 && ret < sizeof(buf)) { + if (ret > 0 && (unsigned int)ret < sizeof(buf)) { buf[ret] = 0; enabled = strtoul(buf, NULL, 10); @@ -263,12 +263,14 @@ close_comm_pipes: close_comm_pipes(&comm_pipes); } -static void test_cow_in_parent(char *mem, size_t size, bool is_hugetlb) +static void test_cow_in_parent(char *mem, size_t size, + bool __attribute__((unused)) is_hugetlb) { do_test_cow_in_parent(mem, size, false, child_memcmp_fn, false); } -static void test_cow_in_parent_mprotect(char *mem, size_t size, bool is_hugetlb) +static void test_cow_in_parent_mprotect(char *mem, size_t size, + bool __attribute__((unused)) is_hugetlb) { do_test_cow_in_parent(mem, size, true, child_memcmp_fn, false); } @@ -408,10 +410,11 @@ static void do_test_iouring(char *mem, size_t size, bool use_fork) struct io_uring_cqe *cqe; struct io_uring_sqe *sqe; struct io_uring ring; - ssize_t cur, total; struct iovec iov; char *buf, *tmp; + size_t total; int ret, fd; + ssize_t cur; FILE *file; ret = setup_comm_pipes(&comm_pipes); @@ -515,7 +518,7 @@ static void do_test_iouring(char *mem, size_t size, bool use_fork) goto quit_child; } - if (cqe->res != size) { + if ((unsigned int) cqe->res != size) { ksft_test_result_fail("write_fixed failed\n"); goto quit_child; } @@ -529,7 +532,7 @@ static void do_test_iouring(char *mem, size_t size, bool use_fork) ksft_test_result_fail("pread() failed\n"); goto quit_child; } - total += cur; + total += (size_t)cur; } /* Finally, check if we read what we expected. 
*/ @@ -553,12 +556,14 @@ close_comm_pipes: close_comm_pipes(&comm_pipes); } -static void test_iouring_ro(char *mem, size_t size, bool is_hugetlb) +static void test_iouring_ro(char *mem, size_t size, + bool __attribute__((unused)) is_hugetlb) { do_test_iouring(mem, size, false); } -static void test_iouring_fork(char *mem, size_t size, bool is_hugetlb) +static void test_iouring_fork(char *mem, size_t size, + bool __attribute__((unused)) is_hugetlb) { do_test_iouring(mem, size, true); } @@ -702,36 +707,38 @@ free_tmp: free(tmp); } -static void test_ro_pin_on_shared(char *mem, size_t size, bool is_hugetlb) +static void test_ro_pin_on_shared(char *mem, size_t size, + bool __attribute__((unused)) is_hugetlb) { do_test_ro_pin(mem, size, RO_PIN_TEST_SHARED, false); } -static void test_ro_fast_pin_on_shared(char *mem, size_t size, bool is_hugetlb) +static void test_ro_fast_pin_on_shared(char *mem, size_t size, + bool __attribute__((unused)) is_hugetlb) { do_test_ro_pin(mem, size, RO_PIN_TEST_SHARED, true); } static void test_ro_pin_on_ro_previously_shared(char *mem, size_t size, - bool is_hugetlb) + bool __attribute__((unused)) is_hugetlb) { do_test_ro_pin(mem, size, RO_PIN_TEST_PREVIOUSLY_SHARED, false); } static void test_ro_fast_pin_on_ro_previously_shared(char *mem, size_t size, - bool is_hugetlb) + bool __attribute__((unused)) is_hugetlb) { do_test_ro_pin(mem, size, RO_PIN_TEST_PREVIOUSLY_SHARED, true); } static void test_ro_pin_on_ro_exclusive(char *mem, size_t size, - bool is_hugetlb) + bool __attribute__((unused)) is_hugetlb) { do_test_ro_pin(mem, size, RO_PIN_TEST_RO_EXCLUSIVE, false); } static void test_ro_fast_pin_on_ro_exclusive(char *mem, size_t size, - bool is_hugetlb) + bool __attribute__((unused)) is_hugetlb) { do_test_ro_pin(mem, size, RO_PIN_TEST_RO_EXCLUSIVE, true); } @@ -1192,7 +1199,7 @@ static void run_anon_test_case(struct test_case const *test_case) static void run_anon_test_cases(void) { - int i; + unsigned int i; ksft_print_msg("[INFO] Anonymous memory tests in private mappings\n"); @@ -1420,7 +1427,7 @@ static const struct test_case anon_thp_test_cases[] = { static void run_anon_thp_test_cases(void) { - int i; + unsigned int i; if (!pmdsize) return; @@ -1457,13 +1464,14 @@ static void test_cow(char *mem, const char *smem, size_t size) "Other mapping not modified\n"); free(old); } +//typedef void (*non_anon_test_fn)(char *mem, const char *smem, size_t size); -static void test_ro_pin(char *mem, const char *smem, size_t size) +static void test_ro_pin(char *mem, const char __attribute__((unused)) *smem, size_t size) { do_test_ro_pin(mem, size, RO_PIN_TEST, false); } -static void test_ro_fast_pin(char *mem, const char *smem, size_t size) +static void test_ro_fast_pin(char *mem, const char __attribute__((unused)) *smem, size_t size) { do_test_ro_pin(mem, size, RO_PIN_TEST, true); } @@ -1752,7 +1760,7 @@ static void run_non_anon_test_case(struct non_anon_test_case const *test_case) static void run_non_anon_test_cases(void) { - int i; + unsigned int i; ksft_print_msg("[RUN] Non-anonymous memory tests in private mappings\n"); From 66d2b7ccd9ef7c0219046b595d56afdf4680d73e Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:33 +0500 Subject: [PATCH 335/504] selftests/mm: hmm-tests: remove always false expressions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As default_hsize is unsigned, it is always greater than equal to zero. Remove expression which is checking if it is less than 0. 
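A minimal standalone sketch of the reasoning above (the variable and values are only for illustration): for an unsigned variable the "less than zero" test can never be true, so compilers warn about it under -Wextra/-Wtype-limits, while the multiplication overflow check that the patch keeps remains meaningful.

    #include <stdio.h>

    int main(void)
    {
            unsigned long hsize = 2048;     /* e.g. a page size read in KB */

            if (hsize < 0)                  /* always false for unsigned types */
                    puts("never reached");
            if (hsize * 1024 < hsize)       /* the overflow check that is kept */
                    puts("overflow");
            printf("%lu bytes\n", hsize * 1024);
            return 0;
    }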
Link: https://lkml.kernel.org/r/20250109173842.1142376-8-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/hmm-tests.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/mm/hmm-tests.c b/tools/testing/selftests/mm/hmm-tests.c index 141bf63cbe05..3b4db583bd3b 100644 --- a/tools/testing/selftests/mm/hmm-tests.c +++ b/tools/testing/selftests/mm/hmm-tests.c @@ -796,7 +796,7 @@ TEST_F(hmm, anon_write_hugetlbfs) int ret; default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:"); - if (default_hsize < 0 || default_hsize*1024 < default_hsize) + if (default_hsize*1024 < default_hsize) SKIP(return, "Huge page size could not be determined"); default_hsize = default_hsize*1024; /* KB to B */ @@ -1579,7 +1579,7 @@ TEST_F(hmm, compound) /* Skip test if we can't allocate a hugetlbfs page. */ default_hsize = file_read_ulong("/proc/meminfo", "Hugepagesize:"); - if (default_hsize < 0 || default_hsize*1024 < default_hsize) + if (default_hsize*1024 < default_hsize) SKIP(return, "Huge page size could not be determined"); default_hsize = default_hsize*1024; /* KB to B */ From 730225f8f6ff6b39470ca237e5bded43fe1f672f Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:34 +0500 Subject: [PATCH 336/504] selftests/mm: guard-pages: fix type mismatch warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix type mismatch warnings. Link: https://lkml.kernel.org/r/20250109173842.1142376-9-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/guard-pages.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/mm/guard-pages.c b/tools/testing/selftests/mm/guard-pages.c index ece37212a8a2..fc1165ef2015 100644 --- a/tools/testing/selftests/mm/guard-pages.c +++ b/tools/testing/selftests/mm/guard-pages.c @@ -142,7 +142,7 @@ TEST_F(guard_pages, basic) const unsigned long NUM_PAGES = 10; const unsigned long page_size = self->page_size; char *ptr; - int i; + unsigned int i; ptr = mmap(NULL, NUM_PAGES * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); @@ -1044,7 +1044,7 @@ TEST_F(guard_pages, fork_cow) const unsigned long page_size = self->page_size; char *ptr; pid_t pid; - int i; + unsigned int i; /* Map 10 pages. 
*/ ptr = mmap(NULL, 10 * page_size, PROT_READ | PROT_WRITE, From 308f440bc1160b913a842b8d6a098347c8990cbf Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:35 +0500 Subject: [PATCH 337/504] selftests/mm: hugetlb-madvise: fix type mismatch issues MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix type mismatch warnings: hugetlb-madvise.c:30:25: warning: comparison of integer expressions of different signedness: `int' and `long unsigned int' [-Wsign-compare] 30 | if (fhp != (exp_free)) { \ | ^~ hugetlb-madvise.c:114:9: note: in expansion of macro `validate_free_pages' 114 | validate_free_pages(free_hugepages - NR_HUGE_PAGES); | ^~~~~~~~~~~~~~~~~~~ Link: https://lkml.kernel.org/r/20250109173842.1142376-10-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/hugetlb-madvise.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/hugetlb-madvise.c b/tools/testing/selftests/mm/hugetlb-madvise.c index 43f16c12c8e9..8f527084858d 100644 --- a/tools/testing/selftests/mm/hugetlb-madvise.c +++ b/tools/testing/selftests/mm/hugetlb-madvise.c @@ -26,7 +26,7 @@ #define validate_free_pages(exp_free) \ do { \ - int fhp = get_free_hugepages(); \ + unsigned int fhp = get_free_hugepages(); \ if (fhp != (exp_free)) { \ printf("Unexpected number of free huge " \ "pages line %d\n", __LINE__); \ From f7c3c4b3e6e3a1f3e64edddd9a3d5a6e33173111 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:36 +0500 Subject: [PATCH 338/504] selftests/mm: hugepage-vmemmap: fix type mismatch warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix type mismatch warnings. Link: https://lkml.kernel.org/r/20250109173842.1142376-11-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/hugepage-vmemmap.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/mm/hugepage-vmemmap.c b/tools/testing/selftests/mm/hugepage-vmemmap.c index 23e97e552057..8d30ebfc9b86 100644 --- a/tools/testing/selftests/mm/hugepage-vmemmap.c +++ b/tools/testing/selftests/mm/hugepage-vmemmap.c @@ -51,7 +51,8 @@ static unsigned long virt_to_pfn(void *addr) static int check_page_flags(unsigned long pfn) { - int fd, i; + int fd; + unsigned int i; unsigned long pageflags; fd = open("/proc/kpageflags", O_RDONLY); From 9c914431f6257e616c4986905f7f9cff694cce3e Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:37 +0500 Subject: [PATCH 339/504] selftests/mm: hugetlb-read-hwpoison: fix type mismatch warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix type mismatch warnings. 
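
The shape of the change, as a simplified sketch rather than the test code
itself (the helper name is made up): the per-call return value stays signed so
read() errors remain detectable, while the running total and the length are
sizes and can both be unsigned, so comparing them no longer mixes signedness:

        static bool read_len_bytes(int fd, char *buf, size_t chunk, size_t len)
        {
                size_t total = 0;       /* unsigned: only accumulates valid byte counts */

                while (total < len) {   /* size_t vs size_t: no -Wsign-compare */
                        ssize_t ret = read(fd, buf, chunk);     /* signed: -1 signals an error */

                        if (ret <= 0)
                                return false;
                        total += ret;   /* ret is known non-negative here */
                }
                return true;
        }
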
Link: https://lkml.kernel.org/r/20250109173842.1142376-12-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/hugetlb-read-hwpoison.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/mm/hugetlb-read-hwpoison.c b/tools/testing/selftests/mm/hugetlb-read-hwpoison.c index ba6cc6f9cabc..e2a2bb1989d5 100644 --- a/tools/testing/selftests/mm/hugetlb-read-hwpoison.c +++ b/tools/testing/selftests/mm/hugetlb-read-hwpoison.c @@ -72,7 +72,7 @@ static bool seek_read_hugepage_filemap(int fd, size_t len, size_t wr_chunk_size, { char buf[MAX_WRITE_READ_CHUNK_SIZE]; ssize_t ret_count = 0; - ssize_t total_ret_count = 0; + size_t total_ret_count = 0; char val = offset / wr_chunk_size + offset % wr_chunk_size; printf(PREFIX PREFIX "init val=%u with offset=0x%lx\n", val, offset); @@ -83,7 +83,7 @@ static bool seek_read_hugepage_filemap(int fd, size_t len, size_t wr_chunk_size, return false; } - while (offset + total_ret_count < len) { + while ((unsigned long)offset + total_ret_count < len) { ret_count = read(fd, buf, wr_chunk_size); if (ret_count == 0) { printf(PREFIX PREFIX "read reach end of the file\n"); @@ -109,7 +109,7 @@ static bool read_hugepage_filemap(int fd, size_t len, { char buf[MAX_WRITE_READ_CHUNK_SIZE]; ssize_t ret_count = 0; - ssize_t total_ret_count = 0; + size_t total_ret_count = 0; char val = 0; printf(PREFIX PREFIX "expect to read 0x%lx bytes of data in total\n", From 74558db236b298fdd144a7bece27017968ee0a65 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:38 +0500 Subject: [PATCH 340/504] selftests/mm: khugepaged: fix type mismatch warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix type mismatch warnings. 
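
For context, a simplified sketch of the warning class being addressed (not the
test code; the buffer and the device numbers are placeholders): snprintf()
returns a signed int while sizeof() yields an unsigned size_t, so the usual
truncation check needs a cast to compare like with like:

        char path[PATH_MAX];
        int maj = 8, min = 0;   /* placeholder device numbers */

        if (snprintf(path, sizeof(path), "/sys/dev/block/%d:%d/uevent",
                     maj, min) >= (signed int)sizeof(path)) {
                printf("Pathname is too long\n");
                exit(EXIT_FAILURE);
        }
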
Link: https://lkml.kernel.org/r/20250109173842.1142376-13-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/khugepaged.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/mm/khugepaged.c b/tools/testing/selftests/mm/khugepaged.c index 8a4d34cce36b..dde7c7fbbac2 100644 --- a/tools/testing/selftests/mm/khugepaged.c +++ b/tools/testing/selftests/mm/khugepaged.c @@ -140,7 +140,7 @@ static void get_finfo(const char *dir) exit(EXIT_FAILURE); } if (snprintf(finfo.path, sizeof(finfo.path), "%s/" TEST_FILE, - finfo.dir) >= sizeof(finfo.path)) { + finfo.dir) >= (signed int)sizeof(finfo.path)) { printf("%s: Pathname is too long\n", __func__); exit(EXIT_FAILURE); } @@ -155,7 +155,7 @@ static void get_finfo(const char *dir) /* Find owning device's queue/read_ahead_kb control */ if (snprintf(path, sizeof(path), "/sys/dev/block/%d:%d/uevent", major(path_stat.st_dev), minor(path_stat.st_dev)) - >= sizeof(path)) { + >= (signed int)sizeof(path)) { printf("%s: Pathname is too long\n", __func__); exit(EXIT_FAILURE); } @@ -169,7 +169,7 @@ static void get_finfo(const char *dir) sizeof(finfo.dev_queue_read_ahead_path), "/sys/dev/block/%d:%d/queue/read_ahead_kb", major(path_stat.st_dev), minor(path_stat.st_dev)) - >= sizeof(finfo.dev_queue_read_ahead_path)) { + >= (signed int)sizeof(finfo.dev_queue_read_ahead_path)) { printf("%s: Pathname is too long\n", __func__); exit(EXIT_FAILURE); } @@ -197,7 +197,7 @@ static void get_finfo(const char *dir) if (snprintf(finfo.dev_queue_read_ahead_path, sizeof(finfo.dev_queue_read_ahead_path), "/sys/block/%s/queue/read_ahead_kb", - str) >= sizeof(finfo.dev_queue_read_ahead_path)) { + str) >= (signed int)sizeof(finfo.dev_queue_read_ahead_path)) { printf("%s: Pathname is too long\n", __func__); exit(EXIT_FAILURE); } @@ -271,7 +271,7 @@ static void *alloc_mapping(int nr) static void fill_memory(int *p, unsigned long start, unsigned long end) { - int i; + unsigned int i; for (i = start / page_size; i < end / page_size; i++) p[i * page_size / sizeof(*p)] = i + 0xdead0000; @@ -333,10 +333,10 @@ static void *alloc_hpage(struct mem_ops *ops) static void validate_memory(int *p, unsigned long start, unsigned long end) { - int i; + unsigned int i; for (i = start / page_size; i < end / page_size; i++) { - if (p[i * page_size / sizeof(*p)] != i + 0xdead0000) { + if ((unsigned int)p[i * page_size / sizeof(*p)] != i + 0xdead0000) { printf("Page %d is corrupted: %#x\n", i, p[i * page_size / sizeof(*p)]); exit(EXIT_FAILURE); @@ -537,7 +537,7 @@ static void madvise_collapse(const char *msg, char *p, int nr_hpages, static bool wait_for_scan(const char *msg, char *p, int nr_hpages, struct mem_ops *ops) { - int full_scans; + unsigned int full_scans; int timeout = 6; /* 3 seconds */ /* Sanity check */ From e519cc0f5734f5a045ef7b9d52894418df2899d1 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:39 +0500 Subject: [PATCH 341/504] selftests/mm: protection_keys: fix variables types mismatch warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix variable type mismatch warnings. 
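
One of the adjusted spots, shown as an illustrative sketch (pkey_assert() and
xsave_size are the test's own helper and variable): the old assertion compared
a pointer against an integer, which compilers flag as an ordered comparison of
a pointer with integer zero; an explicit NULL test states the actual intent:

        xsave = (void *)malloc(xsave_size);
        /* old: pkey_assert(xsave > 0);  - pointer compared against an integer */
        pkey_assert(xsave != NULL);     /* allocation-failure check, no warning */
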
Link: https://lkml.kernel.org/r/20250109173842.1142376-14-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/protection_keys.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/mm/protection_keys.c b/tools/testing/selftests/mm/protection_keys.c index a4683f2476f2..28960634044c 100644 --- a/tools/testing/selftests/mm/protection_keys.c +++ b/tools/testing/selftests/mm/protection_keys.c @@ -78,7 +78,7 @@ static void cat_into_file(char *str, char *file) } ret = write(fd, str, strlen(str)); - if (ret != strlen(str)) { + if (ret != (signed int)strlen(str)) { perror("write to file failed"); fprintf(stderr, "filename: '%s' str: '%s'\n", file, str); exit(__LINE__); @@ -597,10 +597,10 @@ struct pkey_malloc_record *pkey_last_malloc_record; static long nr_pkey_malloc_records; void record_pkey_malloc(void *ptr, long size, int prot) { - long i; + unsigned long i; struct pkey_malloc_record *rec = NULL; - for (i = 0; i < nr_pkey_malloc_records; i++) { + for (i = 0; i < (unsigned long)nr_pkey_malloc_records; i++) { rec = &pkey_malloc_records[i]; /* find a free record */ if (rec) @@ -866,7 +866,7 @@ static int nr_test_fds; static void __save_test_fd(int fd) { pkey_assert(fd >= 0); - pkey_assert(nr_test_fds < ARRAY_SIZE(test_fds)); + pkey_assert(nr_test_fds < (signed int)ARRAY_SIZE(test_fds)); test_fds[nr_test_fds] = fd; nr_test_fds++; } @@ -897,7 +897,7 @@ static void test_pkey_alloc_free_attach_pkey0(int *ptr, u16 pkey) int max_nr_pkey_allocs; int alloced_pkeys[NR_PKEYS]; int nr_alloced = 0; - long size; + unsigned long size; pkey_assert(pkey_last_malloc_record); size = pkey_last_malloc_record->size; @@ -1280,7 +1280,7 @@ static void test_pkey_init_state(int *ptr, u16 pkey) */ static void test_mprotect_with_pkey_0(int *ptr, u16 pkey) { - long size; + unsigned long size; int prot; assert(pkey_last_malloc_record); @@ -1528,7 +1528,7 @@ static void test_ptrace_modifies_pkru(int *ptr, u16 pkey) pkey_assert(WIFSTOPPED(status) && WSTOPSIG(status) == SIGSTOP); xsave = (void *)malloc(xsave_size); - pkey_assert(xsave > 0); + pkey_assert(xsave != NULL); /* Modify the PKRU register directly */ iov.iov_base = xsave; @@ -1725,7 +1725,7 @@ static void run_tests_once(void) int *ptr; int prot = PROT_READ|PROT_WRITE; - for (test_nr = 0; test_nr < ARRAY_SIZE(pkey_tests); test_nr++) { + for (test_nr = 0; test_nr < (signed int)ARRAY_SIZE(pkey_tests); test_nr++) { int pkey; int orig_pkey_faults = pkey_faults; From 5dcb0e45e6a01916157ef6825c1d8fe81e88dba9 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:40 +0500 Subject: [PATCH 342/504] selftests/mm: thuge-gen: fix type mismatch warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix all the found type mismatch warnings. 
Link: https://lkml.kernel.org/r/20250109173842.1142376-15-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/thuge-gen.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/mm/thuge-gen.c b/tools/testing/selftests/mm/thuge-gen.c index e4370b79b62f..515b89ac4eb5 100644 --- a/tools/testing/selftests/mm/thuge-gen.c +++ b/tools/testing/selftests/mm/thuge-gen.c @@ -65,7 +65,7 @@ void show(unsigned long ps) { char buf[100]; - if (ps == getpagesize()) + if ((signed long)ps == getpagesize()) return; ksft_print_msg("%luMB: ", ps >> 20); @@ -106,7 +106,7 @@ unsigned long read_sysfs(int warn, char *fmt, ...) unsigned long read_free(unsigned long ps) { - return read_sysfs(ps != getpagesize(), + return read_sysfs((signed long)ps != getpagesize(), "/sys/kernel/mm/hugepages/hugepages-%lukB/free_hugepages", ps >> 10); } @@ -126,7 +126,7 @@ void test_mmap(unsigned long size, unsigned flags) after = read_free(size); show(size); - ksft_test_result(size == getpagesize() || (before - after) == NUM_PAGES, + ksft_test_result((signed long)size == getpagesize() || (before - after) == NUM_PAGES, "%s mmap\n", __func__); if (munmap(map, size * NUM_PAGES)) @@ -164,7 +164,7 @@ void test_shmget(unsigned long size, unsigned flags) after = read_free(size); show(size); - ksft_test_result(size == getpagesize() || (before - after) == NUM_PAGES, + ksft_test_result((signed long)size == getpagesize() || (before - after) == NUM_PAGES, "%s: mmap\n", __func__); if (shmdt(map)) ksft_exit_fail_msg("%s: shmdt: %s\n", __func__, strerror(errno)); @@ -173,7 +173,7 @@ void test_shmget(unsigned long size, unsigned flags) void find_pagesizes(void) { unsigned long largest = getpagesize(); - int i; + unsigned int i; glob_t g; glob("/sys/kernel/mm/hugepages/hugepages-*kB", 0, NULL, &g); From 288daefae84c06113aa50dfef38351bbc3d947e7 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:41 +0500 Subject: [PATCH 343/504] selftests/mm: uffd-*: fix all type mismatch warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix all type mismatch warnings in all uffd-* files. 
Link: https://lkml.kernel.org/r/20250109173842.1142376-16-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/uffd-common.c | 6 +++--- tools/testing/selftests/mm/uffd-stress.c | 2 +- tools/testing/selftests/mm/uffd-unit-tests.c | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/mm/uffd-common.c b/tools/testing/selftests/mm/uffd-common.c index c15674a60f29..47bdcb47481a 100644 --- a/tools/testing/selftests/mm/uffd-common.c +++ b/tools/testing/selftests/mm/uffd-common.c @@ -450,7 +450,7 @@ void uffd_handle_page_fault(struct uffd_msg *msg, struct uffd_args *args) args->wp_faults++; } else if (msg->arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_MINOR) { uint8_t *area; - int b; + unsigned int b; /* * Minor page faults @@ -621,7 +621,7 @@ int __copy_page(int ufd, unsigned long offset, bool retry, bool wp) err("UFFDIO_COPY error: %"PRId64, (int64_t)uffdio_copy.copy); wake_range(ufd, uffdio_copy.dst, page_size); - } else if (uffdio_copy.copy != page_size) { + } else if (uffdio_copy.copy != (signed long)page_size) { err("UFFDIO_COPY error: %"PRId64, (int64_t)uffdio_copy.copy); } else { if (test_uffdio_copy_eexist && retry) { @@ -655,7 +655,7 @@ int move_page(int ufd, unsigned long offset, unsigned long len) err("UFFDIO_MOVE error: %"PRId64, (int64_t)uffdio_move.move); wake_range(ufd, uffdio_move.dst, len); - } else if (uffdio_move.move != len) { + } else if (uffdio_move.move != (signed long)len) { err("UFFDIO_MOVE error: %"PRId64, (int64_t)uffdio_move.move); } else return 1; diff --git a/tools/testing/selftests/mm/uffd-stress.c b/tools/testing/selftests/mm/uffd-stress.c index 1270ff3104cc..5509ec32c329 100644 --- a/tools/testing/selftests/mm/uffd-stress.c +++ b/tools/testing/selftests/mm/uffd-stress.c @@ -77,7 +77,7 @@ static void usage(void) static void uffd_stats_reset(struct uffd_args *args, unsigned long n_cpus) { - int i; + unsigned int i; for (i = 0; i < n_cpus; i++) { args[i].cpu = i; diff --git a/tools/testing/selftests/mm/uffd-unit-tests.c b/tools/testing/selftests/mm/uffd-unit-tests.c index 81b315bedb74..c3d59ec75404 100644 --- a/tools/testing/selftests/mm/uffd-unit-tests.c +++ b/tools/testing/selftests/mm/uffd-unit-tests.c @@ -917,7 +917,7 @@ static bool do_uffdio_zeropage(int ufd, bool has_zeropage) else if (res != -EINVAL) err("UFFDIO_ZEROPAGE not -EINVAL"); } else if (has_zeropage) { - if (res != page_size) + if (res != (signed long)page_size) err("UFFDIO_ZEROPAGE unexpected size"); else retry_uffdio_zeropage(ufd, &uffdio_zeropage); @@ -949,7 +949,7 @@ uffd_register_detect_zeropage(int uffd, void *addr, uint64_t len) static void uffd_zeropage_test(uffd_test_args_t __attribute__((unused)) *args) { bool has_zeropage; - int i; + unsigned int i; has_zeropage = uffd_register_detect_zeropage(uffd, area_dst, page_size); if (area_dst_alias) @@ -997,7 +997,7 @@ static void do_uffdio_poison(int uffd, unsigned long offset) if (ret) err("UFFDIO_POISON error: %"PRId64, (int64_t)res); - else if (res != page_size) + else if (res != (signed long)page_size) err("UFFDIO_POISON unexpected size: %"PRId64, (int64_t)res); } From 8e5dbd4647ba071be75b6c0dfec77ac4484876d7 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 9 Jan 2025 22:38:42 +0500 Subject: [PATCH 344/504] selftests/mm: Makefile: add the compiler flags MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 
Add the compiler flags to catch warnings during development. Link: https://lkml.kernel.org/r/20250109173842.1142376-17-usama.anjum@collabora.com Signed-off-by: Muhammad Usama Anjum Cc: Andy Lutomirski Cc: Jérôme Glisse Cc: Kees Cook Cc: Shuah Khan Cc: Will Drewry Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/selftests/mm/Makefile b/tools/testing/selftests/mm/Makefile index 63ce39d024bb..006ed2e8df87 100644 --- a/tools/testing/selftests/mm/Makefile +++ b/tools/testing/selftests/mm/Makefile @@ -34,6 +34,7 @@ endif MAKEFLAGS += --no-builtin-rules CFLAGS = -Wall -O2 -I $(top_srcdir) $(EXTRA_CFLAGS) $(KHDR_INCLUDES) $(TOOLS_INCLUDES) +CFLAGS += -Wunreachable-code -Wunused -Wunused-parameter -Wunused-function -Wunused-variable LDLIBS = -lrt -lpthread -lm # Some distributions (such as Ubuntu) configure GCC so that _FORTIFY_SOURCE is From 5b9022e8c160ddf25546da97ff47132a2f630347 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Thu, 9 Jan 2025 15:22:21 +0000 Subject: [PATCH 345/504] mm: remove PageTransTail() The last caller was removed in October. Also remove the FALSE definition of PageTransCompoundMap(); the normal definition was removed a few years ago. Link: https://lkml.kernel.org/r/20250109152245.1591914-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Acked-by: David Hildenbrand Acked-by: Zi Yan Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 2414e7921eea..14226d6bd6f8 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -936,21 +936,9 @@ static inline int PageTransCompound(const struct page *page) { return PageCompound(page); } - -/* - * PageTransTail returns true for both transparent huge pages - * and hugetlbfs pages, so it should only be called when it's known - * that hugetlbfs pages aren't involved. - */ -static inline int PageTransTail(const struct page *page) -{ - return PageTail(page); -} #else TESTPAGEFLAG_FALSE(TransHuge, transhuge) TESTPAGEFLAG_FALSE(TransCompound, transcompound) -TESTPAGEFLAG_FALSE(TransCompoundMap, transcompoundmap) -TESTPAGEFLAG_FALSE(TransTail, transtail) #endif #if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_TRANSPARENT_HUGEPAGE) From f49997a3930c104bd4b5e136fb382fa2790bdcd3 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 10 Jan 2025 10:52:28 -0800 Subject: [PATCH 346/504] Docs/mm/damon/design: add monitoring parameters tuning guide Patch series "Docs/mm/damon: add tuning guide and misc updates". Add DAMON monitoring parameters tuning guide (patches 1 and 2), with misc documentation fixes (patch 3), updates (patch 4) and clarifications (patch 5). This patch (of 5): DAMON monitoring parameters including sampling and aggregation intervals should be tuned for given workloads. However, the fact is not explicitly documented. Also there is no official guide to help the tuning. This apparently confused a number of people[1] at best, or made people forgive DAMON without tuning. Add a guide on the design document. 
[1] https://lore.kernel.org/20241202175459.2005526-1-sj@kernel.org

Link: https://lkml.kernel.org/r/20250110185232.54907-1-sj@kernel.org
Link: https://lkml.kernel.org/r/20250110185232.54907-2-sj@kernel.org
Signed-off-by: SeongJae Park
Cc: Jonathan Corbet
Cc: Honggyu Kim
Cc: Yunjeong Mun
Signed-off-by: Andrew Morton
---
 Documentation/mm/damon/design.rst | 48 +++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst
index 667775bab86c..dd7e0f63a69a 100644
--- a/Documentation/mm/damon/design.rst
+++ b/Documentation/mm/damon/design.rst
@@ -203,6 +203,8 @@ This scheme, however, cannot preserve the quality of the output if the
 assumption is not guaranteed.
 
+.. _damon_design_adaptive_regions_adjustment:
+
 Adaptive Regions Adjustment
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -264,6 +266,52 @@ tracepoints. For more details, please refer to the documentations for
 respectively.
 
+Monitoring Parameters Tuning Guide
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In short, set ``aggregation interval`` to capture a meaningful amount of accesses
+for the purpose. The amount of accesses can be measured using ``nr_accesses``
+and ``age`` of regions in the aggregated monitoring results snapshot. The
+default value of the interval, ``100ms``, turns out to be too short in many
+cases. Set ``sampling interval`` proportional to ``aggregation interval``. By
+default, ``1/20`` is recommended as the ratio.
+
+``Aggregation interval`` should be set as the time interval that the workload
+can make an amount of accesses for the monitoring purpose, within the interval.
+If the interval is too short, only a small number of accesses are captured. As a
+result, the monitoring results look as if everything is accessed equally rarely.
+For many purposes, that would be useless. If it is too long, however, the time
+to converge regions with the :ref:`regions adjustment mechanism
+` can be too long, depending on the
+time scale of the given purpose. This could happen if the workload is actually
+making only rare accesses but the user thinks the amount of accesses for the
+monitoring purpose is too high. For such cases, the target amount of access to
+capture per ``aggregation interval`` should be carefully reconsidered. Also, note
+that the captured amount of accesses is represented with not only
+``nr_accesses``, but also ``age``. For example, even if every region in the
+monitoring results shows zero ``nr_accesses``, regions could still be
+distinguished using ``age`` values as the recency information.
+
+Hence the optimum value of ``aggregation interval`` depends on the access
+intensiveness of the workload. The user should tune the interval based on the
+amount of access that is captured in each aggregated snapshot of the monitoring
+results.
+
+Note that the default value of the interval is 100 milliseconds, which is too
+short in many cases, especially on large systems.
+
+``Sampling interval`` defines the resolution of each aggregation. If it is set
+too large, monitoring results will look like every region was equally rarely
+accessed, or equally frequently accessed. That is, regions become
+indistinguishable based on access pattern, and therefore the results will be
+useless in many use cases. If ``sampling interval`` is too small, it will not
+degrade the resolution, but will increase the monitoring overhead.
If it is +appropriate enough to provide a resolution of the monitoring results that +sufficient for the given purpose, it shouldn't be unnecessarily further +lowered. It is recommended to be set proportional to ``aggregation interval``. +By default, the ratio is set as ``1/20``, and it is still recommended. + + .. _damon_design_damos: Operation Schemes From 01bdeb9c4586aec707c80541f7c75288b117557a Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 10 Jan 2025 10:52:29 -0800 Subject: [PATCH 347/504] Docs/mm/damon: add an example monitoring intervals tuning Add a DAMON monitoring intervals tuning example that contains output from a demonstration of the guide on a real server workload system. The example with real world numbers will help users better understanding the guide instructions and what outputs they can expect and verify. Those will again help finding the rooms for improvements on the guide. Link: https://lkml.kernel.org/r/20250110185232.54907-3-sj@kernel.org Signed-off-by: SeongJae Park Cc: Honggyu Kim Cc: Jonathan Corbet Cc: Yunjeong Mun Signed-off-by: Andrew Morton --- Documentation/mm/damon/design.rst | 9 + .../monitoring_intervals_tuning_example.rst | 247 ++++++++++++++++++ 2 files changed, 256 insertions(+) create mode 100644 Documentation/mm/damon/monitoring_intervals_tuning_example.rst diff --git a/Documentation/mm/damon/design.rst b/Documentation/mm/damon/design.rst index dd7e0f63a69a..e28c6a1b40ae 100644 --- a/Documentation/mm/damon/design.rst +++ b/Documentation/mm/damon/design.rst @@ -266,6 +266,8 @@ tracepoints. For more details, please refer to the documentations for respectively. +.. _damon_design_monitoring_params_tuning_guide: + Monitoring Parameters Tuning Guide ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -311,6 +313,13 @@ sufficient for the given purpose, it shouldn't be unnecessarily further lowered. It is recommended to be set proportional to ``aggregation interval``. By default, the ratio is set as ``1/20``, and it is still recommended. +Refer to below documents for an example tuning based on the above guide. + +.. toctree:: + :maxdepth: 1 + + monitoring_intervals_tuning_example + .. _damon_design_damos: diff --git a/Documentation/mm/damon/monitoring_intervals_tuning_example.rst b/Documentation/mm/damon/monitoring_intervals_tuning_example.rst new file mode 100644 index 000000000000..334a854efb40 --- /dev/null +++ b/Documentation/mm/damon/monitoring_intervals_tuning_example.rst @@ -0,0 +1,247 @@ +.. SPDX-License-Identifier: GPL-2.0 + +================================================= +DAMON Moniting Interval Parameters Tuning Example +================================================= + +DAMON's monitoring parameters need tuning based on given workload and the +monitoring purpose. There is a :ref:`tuning guide +` for that. This document +provides an example tuning based on the guide. + +Setup +===== + +For below example, DAMON of Linux kernel v6.11 and `damo +`_ (DAMON user-space tool) v2.5.9 was used to +monitor and visualize access patterns on the physical address space of a system +running a real-world server workload. + +5ms/100ms intervals: Too Short Interval +======================================= + +Let's start by capturing the access pattern snapshot on the physical address +space of the system using DAMON, with the default interval parameters (5 +milliseconds and 100 milliseconds for the sampling and the aggregation +intervals, respectively). 
Wait ten minutes between the start of DAMON and +the capturing of the snapshot, to show a meaningful time-wise access patterns. +:: + + # damo start + # sleep 600 + # damo record --snapshot 0 1 + # damo stop + +Then, list the DAMON-found regions of different access patterns, sorted by the +"access temperature". "Access temperature" is a metric representing the +access-hotness of a region. It is calculated as a weighted sum of the access +frequency and the age of the region. If the access frequency is 0 %, the +temperature is multipled by minus one. That is, if a region is not accessed, +it gets minus temperature and it gets lower as not accessed for longer time. +The sorting is in temperature-ascendint order, so the region at the top of the +list is the coldest, and the one at the bottom is the hottest one. :: + + # damo report access --sort_regions_by temperature + 0 addr 16.052 GiB size 5.985 GiB access 0 % age 5.900 s # coldest + 1 addr 22.037 GiB size 6.029 GiB access 0 % age 5.300 s + 2 addr 28.065 GiB size 6.045 GiB access 0 % age 5.200 s + 3 addr 10.069 GiB size 5.983 GiB access 0 % age 4.500 s + 4 addr 4.000 GiB size 6.069 GiB access 0 % age 4.400 s + 5 addr 62.008 GiB size 3.992 GiB access 0 % age 3.700 s + 6 addr 56.795 GiB size 5.213 GiB access 0 % age 3.300 s + 7 addr 39.393 GiB size 6.096 GiB access 0 % age 2.800 s + 8 addr 50.782 GiB size 6.012 GiB access 0 % age 2.800 s + 9 addr 34.111 GiB size 5.282 GiB access 0 % age 2.300 s + 10 addr 45.489 GiB size 5.293 GiB access 0 % age 1.800 s # hottest + total size: 62.000 GiB + +The list shows not seemingly hot regions, and only minimum access pattern +diversity. Every region has zero access frequency. The number of region is +10, which is the default ``min_nr_regions value``. Size of each region is also +nearly idential. We can suspect this is because “adaptive regions adjustment” +mechanism was not well working. As the guide suggested, we can get relative +hotness of regions using ``age`` as the recency information. That would be +better than nothing, but given the fact that the longest age is only about 6 +seconds while we waited about ten minuts, it is unclear how useful this will +be. + +The temperature ranges to total size of regions of each range histogram +visualization of the results also shows no interesting distribution pattern. :: + + # damo report access --style temperature-sz-hist + + [-,590,000,000, -,549,000,000) 5.985 GiB |********** | + [-,549,000,000, -,508,000,000) 12.074 GiB |********************| + [-,508,000,000, -,467,000,000) 0 B | | + [-,467,000,000, -,426,000,000) 12.052 GiB |********************| + [-,426,000,000, -,385,000,000) 0 B | | + [-,385,000,000, -,344,000,000) 3.992 GiB |******* | + [-,344,000,000, -,303,000,000) 5.213 GiB |********* | + [-,303,000,000, -,262,000,000) 12.109 GiB |********************| + [-,262,000,000, -,221,000,000) 5.282 GiB |********* | + [-,221,000,000, -,180,000,000) 0 B | | + [-,180,000,000, -,139,000,000) 5.293 GiB |********* | + total size: 62.000 GiB + +In short, the parameters provide poor quality monitoring results for hot +regions detection. According to the :ref:`guide +`, this is due to the too short +aggregation interval. + +100ms/2s intervals: Starts Showing Small Hot Regions +==================================================== + +Following the guide, increase the interval 20 times (100 milliseocnds and 2 +seconds for sampling and aggregation intervals, respectively). 
:: + + # damo start -s 100ms -a 2s + # sleep 600 + # damo record --snapshot 0 1 + # damo stop + # damo report access --sort_regions_by temperature + 0 addr 10.180 GiB size 6.117 GiB access 0 % age 7 m 8 s # coldest + 1 addr 49.275 GiB size 6.195 GiB access 0 % age 6 m 14 s + 2 addr 62.421 GiB size 3.579 GiB access 0 % age 6 m 4 s + 3 addr 40.154 GiB size 6.127 GiB access 0 % age 5 m 40 s + 4 addr 16.296 GiB size 6.182 GiB access 0 % age 5 m 32 s + 5 addr 34.254 GiB size 5.899 GiB access 0 % age 5 m 24 s + 6 addr 46.281 GiB size 2.995 GiB access 0 % age 5 m 20 s + 7 addr 28.420 GiB size 5.835 GiB access 0 % age 5 m 6 s + 8 addr 4.000 GiB size 6.180 GiB access 0 % age 4 m 16 s + 9 addr 22.478 GiB size 5.942 GiB access 0 % age 3 m 58 s + 10 addr 55.470 GiB size 915.645 MiB access 0 % age 3 m 6 s + 11 addr 56.364 GiB size 6.056 GiB access 0 % age 2 m 8 s + 12 addr 56.364 GiB size 4.000 KiB access 95 % age 16 s + 13 addr 49.275 GiB size 4.000 KiB access 100 % age 8 m 24 s # hottest + total size: 62.000 GiB + # damo report access --style temperature-sz-hist + + [-42,800,000,000, -33,479,999,000) 22.018 GiB |***************** | + [-33,479,999,000, -24,159,998,000) 27.090 GiB |********************| + [-24,159,998,000, -14,839,997,000) 6.836 GiB |****** | + [-14,839,997,000, -5,519,996,000) 6.056 GiB |***** | + [-5,519,996,000, 3,800,005,000) 4.000 KiB |* | + [3,800,005,000, 13,120,006,000) 0 B | | + [13,120,006,000, 22,440,007,000) 0 B | | + [22,440,007,000, 31,760,008,000) 0 B | | + [31,760,008,000, 41,080,009,000) 0 B | | + [41,080,009,000, 50,400,010,000) 0 B | | + [50,400,010,000, 59,720,011,000) 4.000 KiB |* | + total size: 62.000 GiB + +DAMON found two distinct 4 KiB regions that pretty hot. The regions are also +well aged. The hottest 4 KiB region was keeping the access frequency for about +8 minutes, and the coldest region was keeping no access for about 7 minutes. +The distribution on the histogram also looks like having a pattern. + +Especially, the finding of the 4 KiB regions among the 62 GiB total memory +shows DAMON’s adaptive regions adjustment is working as designed. + +Still the number of regions is close to the ``min_nr_regions``, and sizes of +cold regions are similar, though. Apparently it is improved, but it still has +rooms to improve. + +400ms/8s intervals: Pretty Improved Results +=========================================== + +Increase the intervals four times (400 milliseconds and 8 seconds +for sampling and aggregation intervals, respectively). :: + + # damo start -s 400ms -a 8s + # sleep 600 + # damo record --snapshot 0 1 + # damo stop + # damo report access --sort_regions_by temperature + 0 addr 64.492 GiB size 1.508 GiB access 0 % age 6 m 48 s # coldest + 1 addr 21.749 GiB size 5.674 GiB access 0 % age 6 m 8 s + 2 addr 27.422 GiB size 5.801 GiB access 0 % age 6 m + 3 addr 49.431 GiB size 8.675 GiB access 0 % age 5 m 28 s + 4 addr 33.223 GiB size 5.645 GiB access 0 % age 5 m 12 s + 5 addr 58.321 GiB size 6.170 GiB access 0 % age 5 m 4 s + [...] + 25 addr 6.615 GiB size 297.531 MiB access 15 % age 0 ns + 26 addr 9.513 GiB size 12.000 KiB access 20 % age 0 ns + 27 addr 9.511 GiB size 108.000 KiB access 25 % age 0 ns + 28 addr 9.513 GiB size 20.000 KiB access 25 % age 0 ns + 29 addr 9.511 GiB size 12.000 KiB access 30 % age 0 ns + 30 addr 9.520 GiB size 4.000 KiB access 40 % age 0 ns + [...] 
+ 41 addr 9.520 GiB size 4.000 KiB access 80 % age 56 s + 42 addr 9.511 GiB size 12.000 KiB access 100 % age 6 m 16 s + 43 addr 58.321 GiB size 4.000 KiB access 100 % age 6 m 24 s + 44 addr 9.512 GiB size 4.000 KiB access 100 % age 6 m 48 s + 45 addr 58.106 GiB size 4.000 KiB access 100 % age 6 m 48 s # hottest + total size: 62.000 GiB + # damo report access --style temperature-sz-hist + + [-40,800,000,000, -32,639,999,000) 21.657 GiB |********************| + [-32,639,999,000, -24,479,998,000) 17.938 GiB |***************** | + [-24,479,998,000, -16,319,997,000) 16.885 GiB |**************** | + [-16,319,997,000, -8,159,996,000) 586.879 MiB |* | + [-8,159,996,000, 5,000) 4.946 GiB |***** | + [5,000, 8,160,006,000) 260.000 KiB |* | + [8,160,006,000, 16,320,007,000) 0 B | | + [16,320,007,000, 24,480,008,000) 0 B | | + [24,480,008,000, 32,640,009,000) 0 B | | + [32,640,009,000, 40,800,010,000) 16.000 KiB |* | + [40,800,010,000, 48,960,011,000) 8.000 KiB |* | + total size: 62.000 GiB + +The number of regions having different access patterns has significantly +increased. Size of each region is also more varied. Total size of non-zero +access frequency regions is also significantly increased. Maybe this is already +good enough to make some meaningful memory management efficieny changes. + +800ms/16s intervals: Another bias +================================= + +Further double the intervals (800 milliseconds and 16 seconds for sampling +and aggregation intervals, respectively). The results is more improved for the +hot regions detection, but starts looking degrading cold regions detection. :: + + # damo start -s 800ms -a 16s + # sleep 600 + # damo record --snapshot 0 1 + # damo stop + # damo report access --sort_regions_by temperature + 0 addr 64.781 GiB size 1.219 GiB access 0 % age 4 m 48 s + 1 addr 24.505 GiB size 2.475 GiB access 0 % age 4 m 16 s + 2 addr 26.980 GiB size 504.273 MiB access 0 % age 4 m + 3 addr 29.443 GiB size 2.462 GiB access 0 % age 4 m + 4 addr 37.264 GiB size 5.645 GiB access 0 % age 4 m + 5 addr 31.905 GiB size 5.359 GiB access 0 % age 3 m 44 s + [...] + 20 addr 8.711 GiB size 40.000 KiB access 5 % age 2 m 40 s + 21 addr 27.473 GiB size 1.970 GiB access 5 % age 4 m + 22 addr 48.185 GiB size 4.625 GiB access 5 % age 4 m + 23 addr 47.304 GiB size 902.117 MiB access 10 % age 4 m + 24 addr 8.711 GiB size 4.000 KiB access 100 % age 4 m + 25 addr 20.793 GiB size 3.713 GiB access 5 % age 4 m 16 s + 26 addr 8.773 GiB size 4.000 KiB access 100 % age 4 m 16 s + total size: 62.000 GiB + # damo report access --style temperature-sz-hist + + [-28,800,000,000, -23,359,999,000) 12.294 GiB |***************** | + [-23,359,999,000, -17,919,998,000) 9.753 GiB |************* | + [-17,919,998,000, -12,479,997,000) 15.131 GiB |********************| + [-12,479,997,000, -7,039,996,000) 0 B | | + [-7,039,996,000, -1,599,995,000) 7.506 GiB |********** | + [-1,599,995,000, 3,840,006,000) 6.127 GiB |********* | + [3,840,006,000, 9,280,007,000) 0 B | | + [9,280,007,000, 14,720,008,000) 136.000 KiB |* | + [14,720,008,000, 20,160,009,000) 40.000 KiB |* | + [20,160,009,000, 25,600,010,000) 11.188 GiB |*************** | + [25,600,010,000, 31,040,011,000) 4.000 KiB |* | + total size: 62.000 GiB + +It found more non-zero access frequency regions. The number of regions is still +much higher than the ``min_nr_regions``, but it is reduced from that of the +previous setup. And apparently the distribution seems bit biased to hot +regions. 
+ +Conclusion +========== + +With the above experimental tuning results, we can conclude the theory and the +guide makes sense to at least this workload, and could be applied to similar +cases. From 3efbb9b225f04ebbad224706486cc4176e822e7c Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 10 Jan 2025 10:52:30 -0800 Subject: [PATCH 348/504] Docs/admin-guide/mm/damon/usage: fix and add missing DAMOS filter sysfs files on files hierarchy DAMOS filter directory part of DAMON sysfs files hierarchy on the usage document is wrong. 'memcg_path' file under the directory is wrongly written as 'memcg_id'. Also the directory has 'addr_start', 'addr_end', and 'target_idx' files, but the list is missing those. Fix the wrong name and add missing files. Link: https://lkml.kernel.org/r/20250110185232.54907-4-sj@kernel.org Signed-off-by: SeongJae Park Cc: Honggyu Kim Cc: Jonathan Corbet Cc: Yunjeong Mun Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/damon/usage.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/admin-guide/mm/damon/usage.rst b/Documentation/admin-guide/mm/damon/usage.rst index f0d0c20711d6..47a44bd348ab 100644 --- a/Documentation/admin-guide/mm/damon/usage.rst +++ b/Documentation/admin-guide/mm/damon/usage.rst @@ -83,7 +83,7 @@ comma (","). │ │ │ │ │ │ │ │ │ 0/target_metric,target_value,current_value │ │ │ │ │ │ │ :ref:`watermarks `/metric,interval_us,high,mid,low │ │ │ │ │ │ │ :ref:`filters `/nr_filters - │ │ │ │ │ │ │ │ 0/type,matching,memcg_id,allow + │ │ │ │ │ │ │ │ 0/type,matching,allow,memcg_path,addr_start,addr_end,target_idx │ │ │ │ │ │ │ :ref:`stats `/nr_tried,sz_tried,nr_applied,sz_applied,sz_ops_filter_passed,qt_exceeds │ │ │ │ │ │ │ :ref:`tried_regions `/total_bytes │ │ │ │ │ │ │ │ 0/start,end,nr_accesses,age,sz_filter_passed From cdaa732edc806e223df680decb3b8c3285dc3ea6 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 10 Jan 2025 10:52:31 -0800 Subject: [PATCH 349/504] Docs/admin-guide/mm/damon/start: update snapshot example Two of DAMON user-space tool (damo) commands that are used for examples on DAMON getting started document, namely 'damo show' and 'damo report heats' are deprecated[1,2], and replaced by new commands that provides same functions with unified and simplified user interfaces. Also the example output of 'damo show' is outdated. 'damo schemes' command is not deprecated, but users are recommended to use 'damo start' or 'damo tune' instead. Update the examples to use the replacements, recommendations, and up-to-date output formats. [1] https://git.kernel.org/sj/damo/c/3272e0ac94ecc5e1 [2] https://git.kernel.org/sj/damo/c/da3ec66bbdd9e87d Link: https://lkml.kernel.org/r/20250110185232.54907-5-sj@kernel.org Signed-off-by: SeongJae Park Cc: Honggyu Kim Cc: Jonathan Corbet Cc: Yunjeong Mun Signed-off-by: Andrew Morton --- Documentation/admin-guide/mm/damon/start.rst | 65 ++++++++++++-------- 1 file changed, 39 insertions(+), 26 deletions(-) diff --git a/Documentation/admin-guide/mm/damon/start.rst b/Documentation/admin-guide/mm/damon/start.rst index c4dddf6733cd..ede14b679d02 100644 --- a/Documentation/admin-guide/mm/damon/start.rst +++ b/Documentation/admin-guide/mm/damon/start.rst @@ -42,32 +42,45 @@ the execution. 
:: $ git clone https://github.com/sjp38/masim; cd masim; make $ sudo damo start "./masim ./configs/stairs.cfg --quiet" - $ sudo ./damo show - 0 addr [85.541 TiB , 85.541 TiB ) (57.707 MiB ) access 0 % age 10.400 s - 1 addr [85.541 TiB , 85.542 TiB ) (413.285 MiB) access 0 % age 11.400 s - 2 addr [127.649 TiB , 127.649 TiB) (57.500 MiB ) access 0 % age 1.600 s - 3 addr [127.649 TiB , 127.649 TiB) (32.500 MiB ) access 0 % age 500 ms - 4 addr [127.649 TiB , 127.649 TiB) (9.535 MiB ) access 100 % age 300 ms - 5 addr [127.649 TiB , 127.649 TiB) (8.000 KiB ) access 60 % age 0 ns - 6 addr [127.649 TiB , 127.649 TiB) (6.926 MiB ) access 0 % age 1 s - 7 addr [127.998 TiB , 127.998 TiB) (120.000 KiB) access 0 % age 11.100 s - 8 addr [127.998 TiB , 127.998 TiB) (8.000 KiB ) access 40 % age 100 ms - 9 addr [127.998 TiB , 127.998 TiB) (4.000 KiB ) access 0 % age 11 s - total size: 577.590 MiB - $ sudo ./damo stop + $ sudo damo report access + heatmap: 641111111000000000000000000000000000000000000000000000[...]33333333333333335557984444[...]7 + # min/max temperatures: -1,840,000,000, 370,010,000, column size: 3.925 MiB + 0 addr 86.182 TiB size 8.000 KiB access 0 % age 14.900 s + 1 addr 86.182 TiB size 8.000 KiB access 60 % age 0 ns + 2 addr 86.182 TiB size 3.422 MiB access 0 % age 4.100 s + 3 addr 86.182 TiB size 2.004 MiB access 95 % age 2.200 s + 4 addr 86.182 TiB size 29.688 MiB access 0 % age 14.100 s + 5 addr 86.182 TiB size 29.516 MiB access 0 % age 16.700 s + 6 addr 86.182 TiB size 29.633 MiB access 0 % age 17.900 s + 7 addr 86.182 TiB size 117.652 MiB access 0 % age 18.400 s + 8 addr 126.990 TiB size 62.332 MiB access 0 % age 9.500 s + 9 addr 126.990 TiB size 13.980 MiB access 0 % age 5.200 s + 10 addr 126.990 TiB size 9.539 MiB access 100 % age 3.700 s + 11 addr 126.990 TiB size 16.098 MiB access 0 % age 6.400 s + 12 addr 127.987 TiB size 132.000 KiB access 0 % age 2.900 s + total size: 314.008 MiB + $ sudo damo stop The first command of the above example downloads and builds an artificial memory access generator program called ``masim``. The second command asks DAMO -to execute the artificial generator process start via the given command and -make DAMON monitors the generator process. The third command retrieves the -current snapshot of the monitored access pattern of the process from DAMON and -shows the pattern in a human readable format. +to start the program via the given command and make DAMON monitors the newly +started process. The third command retrieves the current snapshot of the +monitored access pattern of the process from DAMON and shows the pattern in a +human readable format. -Each line of the output shows which virtual address range (``addr [XX, XX)``) -of the process is how frequently (``access XX %``) accessed for how long time -(``age XX``). For example, the fifth region of ~9 MiB size is being most -frequently accessed for last 300 milliseconds. Finally, the fourth command -stops DAMON. +The first line of the output shows the relative access temperature (hotness) of +the regions in a single row hetmap format. Each column on the heatmap +represents regions of same size on the monitored virtual address space. The +position of the colun on the row and the number on the column represents the +relative location and access temperature of the region. ``[...]`` means +unmapped huge regions on the virtual address spaces. The second line shows +additional information for better understanding the heatmap. 
+ +Each line of the output from the third line shows which virtual address range +(``addr XX size XX``) of the process is how frequently (``access XX %``) +accessed for how long time (``age XX``). For example, the evelenth region of +~9.5 MiB size is being most frequently accessed for last 3.7 seconds. Finally, +the fourth command stops DAMON. Note that DAMON can monitor not only virtual address spaces but multiple types of address spaces including the physical address space. @@ -95,7 +108,7 @@ Visualizing Recorded Patterns You can visualize the pattern in a heatmap, showing which memory region (x-axis) got accessed when (y-axis) and how frequently (number).:: - $ sudo damo report heats --heatmap stdout + $ sudo damo report heatmap 22222222222222222222222222222222222222211111111111111111111111111111111111111100 44444444444444444444444444444444444444434444444444444444444444444444444444443200 44444444444444444444444444444444444444433444444444444444444444444444444444444200 @@ -160,6 +173,6 @@ Data Access Pattern Aware Memory Management Below command makes every memory region of size >=4K that has not accessed for >=60 seconds in your workload to be swapped out. :: - $ sudo damo schemes --damos_access_rate 0 0 --damos_sz_region 4K max \ - --damos_age 60s max --damos_action pageout \ - + $ sudo damo start --damos_access_rate 0 0 --damos_sz_region 4K max \ + --damos_age 60s max --damos_action pageout \ + From d39237ea2413126321b48c7f4fbcfa0d0a0186ec Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Fri, 10 Jan 2025 10:52:32 -0800 Subject: [PATCH 350/504] mm/damon: explain "effective quota" on kernel-doc comment The kernel-doc comment for 'struct damos_quota' describes how "effective quota" is calculated, but does not explain what it is. Actually there was an input[1] about it. Add the explanation on the comment. Also, fix a trivial typo on the comment block: s/empt/empty/ [1] https://github.com/damonitor/damo/issues/17#issuecomment-2497525043 Link: https://lkml.kernel.org/r/20250110185232.54907-6-sj@kernel.org Signed-off-by: SeongJae Park Suggested-by: Honggyu Kim Cc: Yunjeong Mun Cc: Honggyu Kim Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/damon.h | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/include/linux/damon.h b/include/linux/damon.h index 0834d7ffcb84..af525252b853 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -193,11 +193,16 @@ struct damos_quota_goal { * size quota is set, DAMON tries to apply the action only up to &sz bytes * within &reset_interval. * - * Internally, the time quota is transformed to a size quota using estimated - * throughput of the scheme's action. DAMON then compares it against &sz and - * uses smaller one as the effective quota. + * To convince the different types of quotas and goals, DAMON internally + * converts those into one single size quota called "effective quota". DAMON + * internally uses it as the only one real quota. The conversion is made as + * follows. * - * If @goals is not empt, DAMON calculates yet another size quota based on the + * The time quota is transformed to a size quota using estimated throughput of + * the scheme's action. DAMON then compares it against &sz and uses smaller + * one as the effective quota. + * + * If @goals is not empty, DAMON calculates yet another size quota based on the * goals using its internal feedback loop algorithm, for every @reset_interval. 
* Then, if the new size quota is smaller than the effective quota, it uses the * new size quota as the effective quota. From 7ed0eedf1b9a70dfd6187380404ea05189126d3d Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 10 Jan 2025 19:21:44 +0100 Subject: [PATCH 351/504] mm/huge_memory: convert has_hwpoisoned into a pure folio flag Patch series "mm/hugetlb: folio and migration cleanups". Some cleanups around more folio conversion and migration handling that I collected working on random stuff. This patch (of 6): Let's stop setting it on pages, there is no need to anymore. Link: https://lkml.kernel.org/r/20250110182149.746551-1-david@redhat.com Link: https://lkml.kernel.org/r/20250110182149.746551-2-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Matthew Wilcox (Oracle) Cc: Muchun Song Signed-off-by: Andrew Morton --- include/linux/page-flags.h | 6 ++---- mm/huge_memory.c | 2 +- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 14226d6bd6f8..3f6a64ff968a 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -948,11 +948,9 @@ TESTPAGEFLAG_FALSE(TransCompound, transcompound) * * This flag is set by hwpoison handler. Cleared by THP split or free page. */ -PAGEFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND) - TESTSCFLAG(HasHWPoisoned, has_hwpoisoned, PF_SECOND) +FOLIO_FLAG(has_hwpoisoned, FOLIO_SECOND_PAGE) #else -PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) - TESTSCFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) +FOLIO_FLAG_FALSE(has_hwpoisoned) #endif /* diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 2654a9548749..3d3ebdc002d5 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -3290,7 +3290,7 @@ static void __split_huge_page(struct page *page, struct list_head *list, /* lock lru list/PageCompound, ref frozen by page_ref_freeze */ lruvec = folio_lruvec_lock(folio); - ClearPageHasHWPoisoned(head); + folio_clear_has_hwpoisoned(folio); for (i = nr - new_nr; i >= new_nr; i -= new_nr) { struct folio *tail; From 446cd09eb8c14fc56e2418d0ea198df892ab5e69 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 10 Jan 2025 19:21:45 +0100 Subject: [PATCH 352/504] mm/hugetlb: rename isolate_hugetlb() to folio_isolate_hugetlb() Let's make the function name match "folio_isolate_lru()", and add some kernel doc. 
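
A minimal caller-side sketch of the pattern the new kernel-doc describes (not
taken from this patch; it assumes "folio" is an allocated hugetlb folio the
caller is looking at):

        LIST_HEAD(pagelist);

        if (folio_isolate_hugetlb(folio, &pagelist)) {
                /* folio is now isolated, on 'pagelist', and carries one extra reference */
                /* ... if it is not handed off (e.g. to migration), undo the isolation: */
                folio_putback_active_hugetlb(folio);
        }
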
Link: https://lkml.kernel.org/r/20250110182149.746551-3-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Matthew Wilcox (Oracle) Cc: Muchun Song Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 4 ++-- mm/gup.c | 2 +- mm/hugetlb.c | 23 ++++++++++++++++++++--- mm/mempolicy.c | 2 +- mm/migrate.c | 6 +++--- 5 files changed, 27 insertions(+), 10 deletions(-) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 49ec2362ce92..c95ad5cd7894 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -153,7 +153,7 @@ bool hugetlb_reserve_pages(struct inode *inode, long from, long to, vm_flags_t vm_flags); long hugetlb_unreserve_pages(struct inode *inode, long start, long end, long freed); -bool isolate_hugetlb(struct folio *folio, struct list_head *list); +bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list); int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison); int get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared); @@ -414,7 +414,7 @@ static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, return NULL; } -static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list) +static inline bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list) { return false; } diff --git a/mm/gup.c b/mm/gup.c index 00a1269cbee0..2cc3a9d28e70 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2344,7 +2344,7 @@ static unsigned long collect_longterm_unpinnable_folios( continue; if (folio_test_hugetlb(folio)) { - isolate_hugetlb(folio, movable_folio_list); + folio_isolate_hugetlb(folio, movable_folio_list); continue; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 58c2c5498207..4c04536549a6 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2812,7 +2812,7 @@ retry: * Fail with -EBUSY if not possible. */ spin_unlock_irq(&hugetlb_lock); - isolated = isolate_hugetlb(old_folio, list); + isolated = folio_isolate_hugetlb(old_folio, list); ret = isolated ? 0 : -EBUSY; spin_lock_irq(&hugetlb_lock); goto free_new; @@ -2897,7 +2897,7 @@ int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list) if (hstate_is_gigantic(h)) return -ENOMEM; - if (folio_ref_count(folio) && isolate_hugetlb(folio, list)) + if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list)) ret = 0; else if (!folio_ref_count(folio)) ret = alloc_and_dissolve_hugetlb_folio(h, folio, list); @@ -7421,7 +7421,24 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h) #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ -bool isolate_hugetlb(struct folio *folio, struct list_head *list) +/** + * folio_isolate_hugetlb: try to isolate an allocated hugetlb folio + * @folio: the folio to isolate + * @list: the list to add the folio to on success + * + * Isolate an allocated (refcount > 0) hugetlb folio, marking it as + * isolated/non-migratable, and moving it from the active list to the + * given list. + * + * Isolation will fail if @folio is not an allocated hugetlb folio, or if + * it is already isolated/non-migratable. + * + * On success, an additional folio reference is taken that must be dropped + * using folio_putback_active_hugetlb() to undo the isolation. + * + * Return: True if isolation worked, otherwise False. 
+ */ +bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list) { bool ret = true; diff --git a/mm/mempolicy.c b/mm/mempolicy.c index f83b73236ffe..bbaadbeeb291 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -647,7 +647,7 @@ static int queue_folios_hugetlb(pte_t *pte, unsigned long hmask, */ if ((flags & MPOL_MF_MOVE_ALL) || (!folio_likely_mapped_shared(folio) && !hugetlb_pmd_shared(pte))) - if (!isolate_hugetlb(folio, qp->pagelist)) + if (!folio_isolate_hugetlb(folio, qp->pagelist)) qp->nr_failed++; unlock: spin_unlock(ptl); diff --git a/mm/migrate.c b/mm/migrate.c index caadbe393aa2..80887cadb277 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -128,7 +128,7 @@ static void putback_movable_folio(struct folio *folio) * * This function shall be used whenever the isolated pageset has been * built from lru, balloon, hugetlbfs page. See isolate_migratepages_range() - * and isolate_hugetlb(). + * and folio_isolate_hugetlb(). */ void putback_movable_pages(struct list_head *l) { @@ -169,7 +169,7 @@ bool isolate_folio_to_list(struct folio *folio, struct list_head *list) bool isolated, lru; if (folio_test_hugetlb(folio)) - return isolate_hugetlb(folio, list); + return folio_isolate_hugetlb(folio, list); lru = !__folio_test_movable(folio); if (lru) @@ -2203,7 +2203,7 @@ static int __add_folio_for_migration(struct folio *folio, int node, return -EACCES; if (folio_test_hugetlb(folio)) { - if (isolate_hugetlb(folio, pagelist)) + if (folio_isolate_hugetlb(folio, pagelist)) return 1; } else if (folio_isolate_lru(folio)) { list_add_tail(&folio->lru, pagelist); From 1a1b57c802a86cc35ffccbe3f00a8e7ad4062c6d Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 10 Jan 2025 15:02:30 -0800 Subject: [PATCH 353/504] mm-hugetlb-rename-isolate_hugetlb-to-folio_isolate_hugetlb-fix fix kernekdoc, per Matthew Cc: David Hildenbrand Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/hugetlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 4c04536549a6..15a689964265 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -7422,7 +7422,7 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h) #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */ /** - * folio_isolate_hugetlb: try to isolate an allocated hugetlb folio + * folio_isolate_hugetlb - try to isolate an allocated hugetlb folio * @folio: the folio to isolate * @list: the list to add the folio to on success * From cba372914a1dacbc5cb8ac82d83ed1813afc63ef Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 10 Jan 2025 19:21:46 +0100 Subject: [PATCH 354/504] mm/migrate: don't call folio_putback_active_hugetlb() on dst hugetlb folio We replaced a simple put_page() by a putback_active_hugepage() call in commit 3aaa76e125c1 ("mm: migrate: hugetlb: putback destination hugepage to active list"), to set the "active" flag on the dst hugetlb folio. Nowadays, we decoupled the "active" list from the flag, by calling the flag "migratable". Calling "putback" on something that wasn't allocated is weird and not future proof, especially if we might reach that path when migration failed and we just want to free the freshly allocated hugetlb folio. Let's simply set the "migratable" flag in move_hugetlb_state(), where we know that allocation succeeded, and use simple folio_put() to return our reference. Do we need the hugetlb_lock for setting that flag? Staring at other users of folio_set_hugetlb_migratable(), it does not look like it. 
After all, the dst folio should already be on the active list, and we are not modifying that list. Link: https://lkml.kernel.org/r/20250110182149.746551-4-david@redhat.com Signed-off-by: David Hildenbrand Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/hugetlb.c | 5 +++++ mm/migrate.c | 8 ++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 15a689964265..1f70cbc1a650 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -7533,6 +7533,11 @@ void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int re } spin_unlock_irq(&hugetlb_lock); } + /* + * Our old folio is isolated and has "migratable" cleared until it + * is putback. As migration succeeded, set the new folio "migratable". + */ + folio_set_hugetlb_migratable(new_folio); } static void hugetlb_unshare_pmds(struct vm_area_struct *vma, diff --git a/mm/migrate.c b/mm/migrate.c index 80887cadb277..7e23e78f1e57 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1542,14 +1542,14 @@ out: list_move_tail(&src->lru, ret); /* - * If migration was not successful and there's a freeing callback, use - * it. Otherwise, put_page() will drop the reference grabbed during - * isolation. + * If migration was not successful and there's a freeing callback, + * return the folio to that special allocator. Otherwise, simply drop + * our additional reference. */ if (put_new_folio) put_new_folio(dst, private); else - folio_putback_active_hugetlb(dst); + folio_put(dst); return rc; } From 90037d158c78cc6eb607438376aeb7b2909c048e Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 10 Jan 2025 19:21:47 +0100 Subject: [PATCH 355/504] mm/hugetlb: rename folio_putback_active_hugetlb() to folio_putback_hugetlb() Now that folio_putback_hugetlb() is only called on folios that were previously isolated through folio_isolate_hugetlb(), let's rename it to match folio_putback_lru(). Add some kernel doc to clarify how this function is supposed to be used. Link: https://lkml.kernel.org/r/20250110182149.746551-5-david@redhat.com Signed-off-by: David Hildenbrand Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 4 ++-- mm/hugetlb.c | 15 +++++++++++++-- mm/migrate.c | 6 +++--- 3 files changed, 18 insertions(+), 7 deletions(-) diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index c95ad5cd7894..ec8c0ccc8f95 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -157,7 +157,7 @@ bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list); int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison); int get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared); -void folio_putback_active_hugetlb(struct folio *folio); +void folio_putback_hugetlb(struct folio *folio); void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason); void hugetlb_fix_reserve_counts(struct inode *inode); extern struct mutex *hugetlb_fault_mutex_table; @@ -430,7 +430,7 @@ static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags, return 0; } -static inline void folio_putback_active_hugetlb(struct folio *folio) +static inline void folio_putback_hugetlb(struct folio *folio) { } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 1f70cbc1a650..a847f0925c73 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -7434,7 +7434,7 @@ __weak unsigned long hugetlb_mask_last_page(struct hstate *h) * it is already isolated/non-migratable. 
* * On success, an additional folio reference is taken that must be dropped - * using folio_putback_active_hugetlb() to undo the isolation. + * using folio_putback_hugetlb() to undo the isolation. * * Return: True if isolation worked, otherwise False. */ @@ -7486,7 +7486,18 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags, return ret; } -void folio_putback_active_hugetlb(struct folio *folio) +/** + * folio_putback_hugetlb: unisolate a hugetlb folio + * @folio: the isolated hugetlb folio + * + * Putback/un-isolate the hugetlb folio that was previous isolated using + * folio_isolate_hugetlb(): marking it non-isolated/migratable and putting it + * back onto the active list. + * + * Will drop the additional folio reference obtained through + * folio_isolate_hugetlb(). + */ +void folio_putback_hugetlb(struct folio *folio) { spin_lock_irq(&hugetlb_lock); folio_set_hugetlb_migratable(folio); diff --git a/mm/migrate.c b/mm/migrate.c index 7e23e78f1e57..be9e3b48cd62 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -137,7 +137,7 @@ void putback_movable_pages(struct list_head *l) list_for_each_entry_safe(folio, folio2, l, lru) { if (unlikely(folio_test_hugetlb(folio))) { - folio_putback_active_hugetlb(folio); + folio_putback_hugetlb(folio); continue; } list_del(&folio->lru); @@ -1454,7 +1454,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio, if (folio_ref_count(src) == 1) { /* page was freed from under us. So we are done. */ - folio_putback_active_hugetlb(src); + folio_putback_hugetlb(src); return MIGRATEPAGE_SUCCESS; } @@ -1537,7 +1537,7 @@ out_unlock: folio_unlock(src); out: if (rc == MIGRATEPAGE_SUCCESS) - folio_putback_active_hugetlb(src); + folio_putback_hugetlb(src); else if (rc != -EAGAIN) list_move_tail(&src->lru, ret); From 7aa91765996ddd95e1cef5ba9df2e5a0abbb1e14 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 10 Jan 2025 19:21:48 +0100 Subject: [PATCH 356/504] mm/hugetlb-cgroup: convert hugetlb_cgroup_css_offline() to work on folios Let's convert hugetlb_cgroup_css_offline() and hugetlb_cgroup_move_parent() to work on folios. hugepage_activelist contains folios, not pages. While at it, rename page_hcg simply to hcg, removing most of the "page" terminology. Also removes an unnecessary call to compound_head(). Link: https://lkml.kernel.org/r/20250110182149.746551-6-david@redhat.com Signed-off-by: David Hildenbrand Reviewed-by: Matthew Wilcox (Oracle) Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/hugetlb_cgroup.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index 89a8ad45a533..bb9578bd99f9 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -195,24 +195,23 @@ static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css) * cannot fail. */ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg, - struct page *page) + struct folio *folio) { unsigned int nr_pages; struct page_counter *counter; - struct hugetlb_cgroup *page_hcg; + struct hugetlb_cgroup *hcg; struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg); - struct folio *folio = page_folio(page); - page_hcg = hugetlb_cgroup_from_folio(folio); + hcg = hugetlb_cgroup_from_folio(folio); /* * We can have pages in active list without any cgroup * ie, hugepage with less than 3 pages. We can safely * ignore those pages. 
*/ - if (!page_hcg || page_hcg != h_cg) + if (!hcg || hcg != h_cg) goto out; - nr_pages = compound_nr(page); + nr_pages = folio_nr_pages(folio); if (!parent) { parent = root_h_cgroup; /* root has no limit */ @@ -235,13 +234,13 @@ static void hugetlb_cgroup_css_offline(struct cgroup_subsys_state *css) { struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css); struct hstate *h; - struct page *page; + struct folio *folio; do { for_each_hstate(h) { spin_lock_irq(&hugetlb_lock); - list_for_each_entry(page, &h->hugepage_activelist, lru) - hugetlb_cgroup_move_parent(hstate_index(h), h_cg, page); + list_for_each_entry(folio, &h->hugepage_activelist, lru) + hugetlb_cgroup_move_parent(hstate_index(h), h_cg, folio); spin_unlock_irq(&hugetlb_lock); } From e498ef0baef76323afa37599b4ef0c9dbd6fadd4 Mon Sep 17 00:00:00 2001 From: David Hildenbrand Date: Fri, 10 Jan 2025 19:21:49 +0100 Subject: [PATCH 357/504] mm/hugetlb: use folio->lru int demote_free_hugetlb_folios() We are demoting hugetlb folios to smaller hugetlb folios; let's avoid messing with pages where avoidable. Link: https://lkml.kernel.org/r/20250110182149.746551-7-david@redhat.com Signed-off-by: David Hildenbrand Cc: Matthew Wilcox (Oracle) Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/hugetlb.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index a847f0925c73..6a0ea28f5bac 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3826,13 +3826,15 @@ static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst, for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) { struct page *page = folio_page(folio, i); + struct folio *new_folio; page->mapping = NULL; clear_compound_head(page); prep_compound_page(page, dst->order); + new_folio = page_folio(page); - init_new_hugetlb_folio(dst, page_folio(page)); - list_add(&page->lru, &dst_list); + init_new_hugetlb_folio(dst, new_folio); + list_add(&new_folio->lru, &dst_list); } } From 2d2fbf1a2701351fa57ec3802d1988f7f93201c2 Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Fri, 10 Jan 2025 08:58:59 -0800 Subject: [PATCH 358/504] mm/memfd: refactor and cleanup the logic in memfd_create() Patch series "Cleanup for memfd_create()", v4. memfd_create() handles all of its logic in a single function. Some of the logic in the function is also somewhat contrived (i.e. copying the memfd name from userpace). This series aims to cleanup memfd_create() by splitting out the logic into helper functions, and simplifying the memfd name copying to make the code easier to follow. This has no intended functional changes. Thank you Alice and Lorenzo for reviewing v3 of this series and for your feedback! This patch (of 2): memfd_create() is a pretty busy function that could be easier to read if some of the logic was split out into helper functions. Therefore, split the flags sanitization, name allocation, and file structure allocation into their own helper functions. No functional change. Link: https://lkml.kernel.org/r/20250110165904.3437374-1-isaacmanjarres@google.com Link: https://lkml.kernel.org/r/20250110165904.3437374-2-isaacmanjarres@google.com Signed-off-by: Isaac J. Manjarres Reviewed-by: Alice Ryhl Reviewed-by: Lorenzo Stoakes Cc: Isaac J. 
Manjarres Cc: John Stultz Cc: Kalesh Singh Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- mm/memfd.c | 81 ++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 58 insertions(+), 23 deletions(-) diff --git a/mm/memfd.c b/mm/memfd.c index 5f5a23c9051d..04d9e2a23df8 100644 --- a/mm/memfd.c +++ b/mm/memfd.c @@ -369,15 +369,9 @@ int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags_ptr) return err; } -SYSCALL_DEFINE2(memfd_create, - const char __user *, uname, - unsigned int, flags) +static int sanitize_flags(unsigned int *flags_ptr) { - unsigned int *file_seals; - struct file *file; - int fd, error; - char *name; - long len; + unsigned int flags = *flags_ptr; if (!(flags & MFD_HUGETLB)) { if (flags & ~(unsigned int)MFD_ALL_FLAGS) @@ -393,20 +387,25 @@ SYSCALL_DEFINE2(memfd_create, if ((flags & MFD_EXEC) && (flags & MFD_NOEXEC_SEAL)) return -EINVAL; - error = check_sysctl_memfd_noexec(&flags); - if (error < 0) - return error; + return check_sysctl_memfd_noexec(flags_ptr); +} + +static char *alloc_name(const char __user *uname) +{ + int error; + char *name; + long len; /* length includes terminating zero */ len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1); if (len <= 0) - return -EFAULT; + return ERR_PTR(-EFAULT); if (len > MFD_NAME_MAX_LEN + 1) - return -EINVAL; + return ERR_PTR(-EINVAL); name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_KERNEL); if (!name) - return -ENOMEM; + return ERR_PTR(-ENOMEM); strcpy(name, MFD_NAME_PREFIX); if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) { @@ -420,23 +419,28 @@ SYSCALL_DEFINE2(memfd_create, goto err_name; } - fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0); - if (fd < 0) { - error = fd; - goto err_name; - } + return name; + +err_name: + kfree(name); + return ERR_PTR(error); +} + +static struct file *alloc_file(const char *name, unsigned int flags) +{ + unsigned int *file_seals; + struct file *file; if (flags & MFD_HUGETLB) { file = hugetlb_file_setup(name, 0, VM_NORESERVE, HUGETLB_ANONHUGE_INODE, (flags >> MFD_HUGE_SHIFT) & MFD_HUGE_MASK); - } else + } else { file = shmem_file_setup(name, 0, VM_NORESERVE); - if (IS_ERR(file)) { - error = PTR_ERR(file); - goto err_fd; } + if (IS_ERR(file)) + return file; file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE; file->f_flags |= O_LARGEFILE; @@ -456,6 +460,37 @@ SYSCALL_DEFINE2(memfd_create, *file_seals &= ~F_SEAL_SEAL; } + return file; +} + +SYSCALL_DEFINE2(memfd_create, + const char __user *, uname, + unsigned int, flags) +{ + struct file *file; + int fd, error; + char *name; + + error = sanitize_flags(&flags); + if (error < 0) + return error; + + name = alloc_name(uname); + if (IS_ERR(name)) + return PTR_ERR(name); + + fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0); + if (fd < 0) { + error = fd; + goto err_name; + } + + file = alloc_file(name, flags); + if (IS_ERR(file)) { + error = PTR_ERR(file); + goto err_fd; + } + fd_install(fd, file); kfree(name); return fd; From b9bd18b66047057ad744e27cfef51a636f6e5ba0 Mon Sep 17 00:00:00 2001 From: "Isaac J. Manjarres" Date: Fri, 10 Jan 2025 08:59:00 -0800 Subject: [PATCH 359/504] mm/memfd: use strncpy_from_user() to read memfd name The existing logic uses strnlen_user() to calculate the length of the memfd name from userspace and then copies the string into a buffer using copy_from_user(). This is error-prone, as the string length could have changed between the time when it was calculated and when the string was copied. 
The existing logic handles this by ensuring that the last byte in the buffer is the terminating zero. This handling is contrived and can better be handled by using strncpy_from_user(), which gets the length of the string and copies it in one shot. Therefore, simplify the logic for copying the memfd name by using strncpy_from_user(). No functional change. Link: https://lkml.kernel.org/r/20250110165904.3437374-3-isaacmanjarres@google.com Signed-off-by: Isaac J. Manjarres Reviewed-by: Alice Ryhl Reviewed-by: Lorenzo Stoakes Cc: John Stultz Cc: Kalesh Singh Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- mm/memfd.c | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/mm/memfd.c b/mm/memfd.c index 04d9e2a23df8..37f7be57c2f5 100644 --- a/mm/memfd.c +++ b/mm/memfd.c @@ -396,26 +396,18 @@ static char *alloc_name(const char __user *uname) char *name; long len; - /* length includes terminating zero */ - len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1); - if (len <= 0) - return ERR_PTR(-EFAULT); - if (len > MFD_NAME_MAX_LEN + 1) - return ERR_PTR(-EINVAL); - - name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_KERNEL); + name = kmalloc(NAME_MAX + 1, GFP_KERNEL); if (!name) return ERR_PTR(-ENOMEM); strcpy(name, MFD_NAME_PREFIX); - if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) { + /* returned length does not include terminating zero */ + len = strncpy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, MFD_NAME_MAX_LEN + 1); + if (len < 0) { error = -EFAULT; goto err_name; - } - - /* terminating-zero may have changed after strnlen_user() returned */ - if (name[len + MFD_NAME_PREFIX_LEN - 1]) { - error = -EFAULT; + } else if (len > MFD_NAME_MAX_LEN) { + error = -EINVAL; goto err_name; } From 58a2a0fa3c0754b2d58ebf86a48d0b81326ecf0a Mon Sep 17 00:00:00 2001 From: xu xin Date: Fri, 10 Jan 2025 17:40:34 +0800 Subject: [PATCH 360/504] ksm: add ksm involvement information for each process In /proc//ksm_stat, add two extra ksm involvement items including KSM_mergeable and KSM_merge_any. It helps administrators to better know the system's KSM behavior at process level. ksm_merge_any: yes/no whether the process'mm is added by prctl() into the candidate list of KSM or not, and fully enabled at process level. ksm_mergeable: yes/no whether any VMAs of the process'mm are currently applicable to KSM. Purpose ======= These two items are just to improve the observability of KSM at process level, so that users can know if a certain process has enable KSM. For example, if without these two items, when we look at /proc//ksm_stat and there's no merging pages found, We are not sure whether it is because KSM was not enabled or because KSM did not successfully merge any pages. Althrough "mg" in /proc//smaps indicate VM_MERGEABLE, it's opaque and not very obvious for non professionals. 
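For illustration only (this is not part of the patch), a minimal user-space sketch that dumps the two new lines from /proc/<pid>/ksm_stat for the calling process; it assumes the fields are emitted exactly as in the seq_printf() calls added below:

#include <stdio.h>
#include <string.h>

/* Print the two new KSM involvement lines for the current process. */
int main(void)
{
	FILE *f = fopen("/proc/self/ksm_stat", "r");
	char line[256];

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (!strncmp(line, "ksm_merge_any:", 14) ||
		    !strncmp(line, "ksm_mergeable:", 14))
			fputs(line, stdout);
	fclose(f);
	return 0;
}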
Link: https://lkml.kernel.org/r/20250110174034304QOb8eDoqtFkp3_t8mqnqc@zte.com.cn Signed-off-by: xu xin Cc: Wang Yaxin Cc: David Hildenbrand Cc: Yang Yang Signed-off-by: Andrew Morton --- Documentation/filesystems/proc.rst | 66 ++++++++++++++++++++++++++++++ fs/proc/base.c | 11 +++++ include/linux/ksm.h | 1 + mm/ksm.c | 19 +++++++++ 4 files changed, 97 insertions(+) diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 6a882c57a7e7..916f83203de0 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -48,6 +48,7 @@ fixes/update part 1.1 Stefani Seibold June 9 2009 3.11 /proc//patch_state - Livepatch patch operation state 3.12 /proc//arch_status - Task architecture specific information 3.13 /proc//fd - List of symlinks to open files + 3.14 /proc//fd for fast access. ------------------------------------------------------- +3.14 /proc//ksm_merging_pages shows. + +ksm_process_profit +^^^^^^^^^^^^^^^^^^ + +The profit that KSM brings (Saved bytes). KSM can save memory by merging +identical pages, but also can consume additional memory, because it needs +to generate a number of rmap_items to save each scanned page's brief rmap +information. Some of these pages may be merged, but some may not be abled +to be merged after being checked several times, which are unprofitable +memory consumed. + +ksm_merge_any +^^^^^^^^^^^^^ + +It specifies whether the process'mm is added by prctl() into the candidate list +of KSM or not, and KSM scanning is fully enabled at process level. + +ksm_mergeable +^^^^^^^^^^^^^ + +It specifies whether any VMAs of the process'mm are currently applicable to KSM. + +More information about KSM can be found at Documentation/admin-guide/mm/ksm.rst. + Chapter 4: Configuring procfs ============================= diff --git a/fs/proc/base.c b/fs/proc/base.c index 0edf14a9840e..a50b222a5917 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -3269,6 +3269,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { struct mm_struct *mm; + int ret = 0; mm = get_task_mm(task); if (mm) { @@ -3276,6 +3277,16 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns, seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm)); seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages); seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm)); + seq_printf(m, "ksm_merge_any: %s\n", + test_bit(MMF_VM_MERGE_ANY, &mm->flags) ? "yes" : "no"); + ret = mmap_read_lock_killable(mm); + if (ret) { + mmput(mm); + return ret; + } + seq_printf(m, "ksm_mergeable: %s\n", + ksm_process_mergeable(mm) ? "yes" : "no"); + mmap_read_unlock(mm); mmput(mm); } diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 6a53ac4885bb..d73095b5cd96 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -93,6 +93,7 @@ void folio_migrate_ksm(struct folio *newfolio, struct folio *folio); void collect_procs_ksm(const struct folio *folio, const struct page *page, struct list_head *to_kill, int force_early); long ksm_process_profit(struct mm_struct *); +bool ksm_process_mergeable(struct mm_struct *mm); #else /* !CONFIG_KSM */ diff --git a/mm/ksm.c b/mm/ksm.c index 31a9bc365437..8be2b144fefd 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -3262,6 +3262,25 @@ static void wait_while_offlining(void) #endif /* CONFIG_MEMORY_HOTREMOVE */ #ifdef CONFIG_PROC_FS +/* + * The process is mergeable only if any VMA is currently + * applicable to KSM. 
+ * + * The mmap lock must be held in read mode. + */ +bool ksm_process_mergeable(struct mm_struct *mm) +{ + struct vm_area_struct *vma; + + mmap_assert_locked(mm); + VMA_ITERATOR(vmi, mm, 0); + for_each_vma(vmi, vma) + if (vma->vm_flags & VM_MERGEABLE) + return true; + + return false; +} + long ksm_process_profit(struct mm_struct *mm) { return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE - From 8732fcec041da0af6df1af873ab683d9d89687ca Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Fri, 10 Jan 2025 16:38:41 -0800 Subject: [PATCH 361/504] Documentation/filesystems/proc.rst: fix possessive form of "process" The possessive form of "process" is "process's". Fix up various misdirected attempts at this. Also reflow some paragraphs. Cc: David Hildenbrand Cc: Wang Yaxin Cc: xu xin Cc: Yang Yang Signed-off-by: Andrew Morton --- Documentation/filesystems/proc.rst | 36 +++++++++++++++++------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/Documentation/filesystems/proc.rst b/Documentation/filesystems/proc.rst index 916f83203de0..238afcb86d1f 100644 --- a/Documentation/filesystems/proc.rst +++ b/Documentation/filesystems/proc.rst @@ -48,7 +48,7 @@ fixes/update part 1.1 Stefani Seibold June 9 2009 3.11 /proc//patch_state - Livepatch patch operation state 3.12 /proc//arch_status - Task architecture specific information 3.13 /proc//fd - List of symlinks to open files - 3.14 /proc//fd for fast access. ------------------------------------------------------- -3.14 /proc// will be fully invisible to other users. It doesn't mean that it hides a fact whether a process with a specific pid value exists (it can be learned by other means, e.g. -by "kill -0 $PID"), but it hides process' uid and gid, which may be learned by +by "kill -0 $PID"), but it hides process's uid and gid, which may be learned by stat()'ing /proc// otherwise. It greatly complicates an intruder's task of gathering information about running processes, whether some daemon runs with elevated privileges, whether other user runs some sensitive program, whether From 6ea263c7bad339f7b8e005803b4f1a9b586dc6aa Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Fri, 10 Jan 2025 18:50:27 -0500 Subject: [PATCH 362/504] selftests/mm: use selftests framework to print test result Otherwise the number of tests does not match the reality. 
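As context (not part of the patch), a minimal sketch of the reporting pattern the test is converted to, assuming the usual ksft_*() helpers from tools/testing/selftests/kselftest.h:

#include <unistd.h>
#include "../kselftest.h"

/*
 * The plan declared with ksft_set_plan() must match the number of results
 * reported; fatal problems go through ksft_exit_fail_msg() rather than
 * printf()/exit(), so the harness can account for them.
 */
int main(void)
{
	ksft_print_header();
	ksft_set_plan(1);

	if (getpagesize() <= 0)
		ksft_exit_fail_msg("cannot determine page size\n");

	ksft_test_result_pass("page size is %d\n", getpagesize());
	ksft_finished();
	return 0;
}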
Link: https://lkml.kernel.org/r/20250110235028.96824-1-ziy@nvidia.com Fixes: 391e86971161 ("mm: selftest to verify zero-filled pages are mapped to zeropage") Signed-off-by: Zi Yan Cc: Alexander Zhu Cc: Rik van Riel Cc: Shuah Khan Cc: Usama Arif Signed-off-by: Andrew Morton --- .../selftests/mm/split_huge_page_test.c | 34 +++++++------------ 1 file changed, 12 insertions(+), 22 deletions(-) diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index 84b1251666aa..128004308233 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -108,38 +108,28 @@ static void verify_rss_anon_split_huge_page_all_zeroes(char *one_page, size_t le unsigned long rss_anon_before, rss_anon_after; size_t i; - if (!check_huge_anon(one_page, 4, pmd_pagesize)) { - printf("No THP is allocated\n"); - exit(EXIT_FAILURE); - } + if (!check_huge_anon(one_page, 4, pmd_pagesize)) + ksft_exit_fail_msg("No THP is allocated\n"); rss_anon_before = rss_anon(); - if (!rss_anon_before) { - printf("No RssAnon is allocated before split\n"); - exit(EXIT_FAILURE); - } + if (!rss_anon_before) + ksft_exit_fail_msg("No RssAnon is allocated before split\n"); /* split all THPs */ write_debugfs(PID_FMT, getpid(), (uint64_t)one_page, (uint64_t)one_page + len, 0); for (i = 0; i < len; i++) - if (one_page[i] != (char)0) { - printf("%ld byte corrupted\n", i); - exit(EXIT_FAILURE); - } + if (one_page[i] != (char)0) + ksft_exit_fail_msg("%ld byte corrupted\n", i); - if (!check_huge_anon(one_page, 0, pmd_pagesize)) { - printf("Still AnonHugePages not split\n"); - exit(EXIT_FAILURE); - } + if (!check_huge_anon(one_page, 0, pmd_pagesize)) + ksft_exit_fail_msg("Still AnonHugePages not split\n"); rss_anon_after = rss_anon(); - if (rss_anon_after >= rss_anon_before) { - printf("Incorrect RssAnon value. Before: %ld After: %ld\n", + if (rss_anon_after >= rss_anon_before) + ksft_exit_fail_msg("Incorrect RssAnon value. Before: %ld After: %ld\n", rss_anon_before, rss_anon_after); - exit(EXIT_FAILURE); - } } void split_pmd_zero_pages(void) @@ -150,7 +140,7 @@ void split_pmd_zero_pages(void) one_page = allocate_zero_filled_hugepage(len); verify_rss_anon_split_huge_page_all_zeroes(one_page, len); - printf("Split zero filled huge pages successful\n"); + ksft_test_result_pass("Split zero filled huge pages successful\n"); free(one_page); } @@ -491,7 +481,7 @@ int main(int argc, char **argv) if (argc > 1) optional_xfs_path = argv[1]; - ksft_set_plan(3+9); + ksft_set_plan(4+9); pagesize = getpagesize(); pageshift = ffs(pagesize) - 1; From f559411a8586c34f16691dd4078b6626caccba44 Mon Sep 17 00:00:00 2001 From: Zi Yan Date: Fri, 10 Jan 2025 18:50:28 -0500 Subject: [PATCH 363/504] selftests/mm: add tests for splitting pmd THPs to all lower orders Kernel already supports splitting a folio to any lower order. Test it. 
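As a companion illustration (not part of the patch), a hedged sketch of what driving the debugfs interface for a non-zero order could look like from user space; the line format is an assumption based on the PID_FMT usage in the selftest and may differ:

#include <stdio.h>
#include <unistd.h>

/*
 * Hypothetical helper (not from the selftest): ask the kernel to split the
 * THPs mapped in [start, end) of the current process down to @order. The
 * "<pid>,0x<vaddr_start>,0x<vaddr_end>,<order>" format is an assumption
 * about the split_huge_pages debugfs ABI that write_debugfs() wraps.
 */
static int request_split(unsigned long start, unsigned long end, int order)
{
	FILE *f = fopen("/sys/kernel/debug/split_huge_pages", "w");

	if (!f)
		return -1;
	fprintf(f, "%d,0x%lx,0x%lx,%d", (int)getpid(), start, end, order);
	fclose(f);
	return 0;
}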
Link: https://lkml.kernel.org/r/20250110235028.96824-2-ziy@nvidia.com Signed-off-by: Zi Yan Cc: Alexander Zhu Cc: Rik van Riel Cc: Shuah Khan Cc: Usama Arif Signed-off-by: Andrew Morton --- tools/testing/selftests/mm/split_huge_page_test.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/mm/split_huge_page_test.c b/tools/testing/selftests/mm/split_huge_page_test.c index 128004308233..3d3bc40a268b 100644 --- a/tools/testing/selftests/mm/split_huge_page_test.c +++ b/tools/testing/selftests/mm/split_huge_page_test.c @@ -144,7 +144,7 @@ void split_pmd_zero_pages(void) free(one_page); } -void split_pmd_thp(void) +void split_pmd_thp_to_order(int order) { char *one_page; size_t len = 4 * pmd_pagesize; @@ -164,7 +164,7 @@ void split_pmd_thp(void) /* split all THPs */ write_debugfs(PID_FMT, getpid(), (uint64_t)one_page, - (uint64_t)one_page + len, 0); + (uint64_t)one_page + len, order); for (i = 0; i < len; i++) if (one_page[i] != (char)i) @@ -174,7 +174,7 @@ void split_pmd_thp(void) if (!check_huge_anon(one_page, 0, pmd_pagesize)) ksft_exit_fail_msg("Still AnonHugePages not split\n"); - ksft_test_result_pass("Split huge pages successful\n"); + ksft_test_result_pass("Split huge pages to order %d successful\n", order); free(one_page); } @@ -481,7 +481,7 @@ int main(int argc, char **argv) if (argc > 1) optional_xfs_path = argv[1]; - ksft_set_plan(4+9); + ksft_set_plan(1+9+2+9); pagesize = getpagesize(); pageshift = ffs(pagesize) - 1; @@ -492,7 +492,10 @@ int main(int argc, char **argv) fd_size = 2 * pmd_pagesize; split_pmd_zero_pages(); - split_pmd_thp(); + + for (i = 0; i < 9; i++) + split_pmd_thp_to_order(i); + split_pte_mapped_thp(); split_file_backed_thp(); From 6eb591f62877e4016aecb70dbb0694b1065916ec Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 10 Jan 2025 22:33:05 -0800 Subject: [PATCH 364/504] mm/zsmalloc: fix function parameter kernel-doc notation Use the correct function parameter name to eliminate a kernel-doc warning: zpdesc.h:90: warning: Function parameter or struct member 'zp' not described in 'zpdesc_folio' zpdesc.h:90: warning: Excess function parameter 'zpdesc' description in 'zpdesc_folio' Link: https://lkml.kernel.org/r/20250111063305.911010-1-rdunlap@infradead.org Signed-off-by: Randy Dunlap Cc: Minchan Kim Cc: Sergey Senozhatsky Signed-off-by: Andrew Morton --- mm/zpdesc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/zpdesc.h b/mm/zpdesc.h index 2da58339ac5b..fa47fece2237 100644 --- a/mm/zpdesc.h +++ b/mm/zpdesc.h @@ -74,7 +74,7 @@ static_assert(sizeof(struct zpdesc) <= sizeof(struct page)); /** * zpdesc_folio - The folio allocated for a zpdesc - * @zpdesc: The zpdesc. + * @zp: The zpdesc. * * Zpdescs are descriptors for zpool memory. The zpool memory itself is * allocated as folios that contain the zpool objects, and zpdesc uses specific From 56ebfd8378ace0c206bb84c2008ca58b7322a67c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 10 Jan 2025 22:32:49 -0800 Subject: [PATCH 365/504] kasan: use correct kernel-doc format Use the correct kernel-doc character following function parameters or struct members (':' instead of '-') to eliminate kernel-doc warnings. 
kasan.h:509: warning: Function parameter or struct member 'addr' not described in 'kasan_poison' kasan.h:509: warning: Function parameter or struct member 'size' not described in 'kasan_poison' kasan.h:509: warning: Function parameter or struct member 'value' not described in 'kasan_poison' kasan.h:509: warning: Function parameter or struct member 'init' not described in 'kasan_poison' kasan.h:522: warning: Function parameter or struct member 'addr' not described in 'kasan_unpoison' kasan.h:522: warning: Function parameter or struct member 'size' not described in 'kasan_unpoison' kasan.h:522: warning: Function parameter or struct member 'init' not described in 'kasan_unpoison' kasan.h:539: warning: Function parameter or struct member 'address' not described in 'kasan_poison_last_granule' kasan.h:539: warning: Function parameter or struct member 'size' not described in 'kasan_poison_last_granule' Link: https://lkml.kernel.org/r/20250111063249.910975-1-rdunlap@infradead.org Signed-off-by: Randy Dunlap Reviewed-by: Andrey Konovalov Cc: Andrey Ryabinin Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: Vincenzo Frascino Signed-off-by: Andrew Morton --- mm/kasan/kasan.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index b7e4b81421b3..129178be5e64 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -501,18 +501,18 @@ static inline bool kasan_byte_accessible(const void *addr) /** * kasan_poison - mark the memory range as inaccessible - * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE - * @size - range size, must be aligned to KASAN_GRANULE_SIZE - * @value - value that's written to metadata for the range - * @init - whether to initialize the memory range (only for hardware tag-based) + * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE + * @size: range size, must be aligned to KASAN_GRANULE_SIZE + * @value: value that's written to metadata for the range + * @init: whether to initialize the memory range (only for hardware tag-based) */ void kasan_poison(const void *addr, size_t size, u8 value, bool init); /** * kasan_unpoison - mark the memory range as accessible - * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE - * @size - range size, can be unaligned - * @init - whether to initialize the memory range (only for hardware tag-based) + * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE + * @size: range size, can be unaligned + * @init: whether to initialize the memory range (only for hardware tag-based) * * For the tag-based modes, the @size gets aligned to KASAN_GRANULE_SIZE before * marking the range. @@ -530,8 +530,8 @@ bool kasan_byte_accessible(const void *addr); /** * kasan_poison_last_granule - mark the last granule of the memory range as * inaccessible - * @addr - range start address, must be aligned to KASAN_GRANULE_SIZE - * @size - range size + * @address: range start address, must be aligned to KASAN_GRANULE_SIZE + * @size: range size * * This function is only available for the generic mode, as it's the only mode * that has partially poisoned memory granules. From 6e012061fd8712abfd188438c979aa760d113a58 Mon Sep 17 00:00:00 2001 From: Mateusz Guzik Date: Tue, 19 Nov 2024 15:35:26 +0100 Subject: [PATCH 366/504] get_task_exe_file: check PF_KTHREAD locklessly Same thing as 8ac5dc66599c ("get_task_mm: check PF_KTHREAD lockless") Nowadays PF_KTHREAD is sticky and it was never protected by ->alloc_lock. 
Move the PF_KTHREAD check outside of task_lock() section to make this code more understandable. Link: https://lkml.kernel.org/r/20241119143526.704986-1-mjguzik@gmail.com Signed-off-by: Mateusz Guzik Acked-by: Oleg Nesterov Signed-off-by: Andrew Morton --- kernel/fork.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/kernel/fork.c b/kernel/fork.c index 9b301180fd41..19d7fe31869b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -1514,12 +1514,13 @@ struct file *get_task_exe_file(struct task_struct *task) struct file *exe_file = NULL; struct mm_struct *mm; + if (task->flags & PF_KTHREAD) + return NULL; + task_lock(task); mm = task->mm; - if (mm) { - if (!(task->flags & PF_KTHREAD)) - exe_file = get_mm_exe_file(mm); - } + if (mm) + exe_file = get_mm_exe_file(mm); task_unlock(task); return exe_file; } From 947363f19e92d41af1b0477f112ed01b1016fff0 Mon Sep 17 00:00:00 2001 From: Pratyush Mittal Date: Sat, 23 Nov 2024 02:29:29 -0800 Subject: [PATCH 367/504] lib/rhashtable: fix the typo for preemptible Fix the spelling of the mis-spelled word Link: https://lkml.kernel.org/r/20241123102929.11660-1-pratyushmittal@gmail.com Signed-off-by: Pratyush Mittal Signed-off-by: Andrew Morton --- lib/rhashtable.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 6c902639728b..0682c9a8de82 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -665,7 +665,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow); * structure outside the hash table. * * This function may be called from any process context, including - * non-preemptable context, but cannot be called from softirq or + * non-preemptible context, but cannot be called from softirq or * hardirq context. * * You must call rhashtable_walk_exit after this function returns. From db675217944a41981162967aa4f553970e37ba70 Mon Sep 17 00:00:00 2001 From: Thorsten Blum Date: Tue, 26 Nov 2024 12:47:26 +0100 Subject: [PATCH 368/504] alpha: remove duplicate included header file Remove duplicate included header file asm/fpu.h Link: https://lkml.kernel.org/r/20241126114728.139029-1-thorsten.blum@linux.dev Signed-off-by: Thorsten Blum Cc: Alexander Viro Cc: Arnd Bergmann Cc: Kent Overstreet Cc: Matt Turner Cc: Pasha Tatashin Cc: Richard Henderson Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- arch/alpha/lib/fpreg.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c index 9a238e7536ae..3d32165043f8 100644 --- a/arch/alpha/lib/fpreg.c +++ b/arch/alpha/lib/fpreg.c @@ -10,7 +10,6 @@ #include #include #include -#include #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) #define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val)); From df088fcb346837efb34f07ffa0ac6b574f24c43a Mon Sep 17 00:00:00 2001 From: Daniel Yang Date: Sun, 17 Nov 2024 13:52:18 -0800 Subject: [PATCH 369/504] ocfs2: heartbeat: replace simple_strtoul with kstrtoul simple_strtoul() is deprecated due to ignoring overflows and also requires clunkier error checking. Replacing with kstrtoul() leads to safer code and cleaner error checking. 
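For reference (not part of the patch), a minimal sketch of the error-checked pattern that kstrtoul() enables; parse_block_bytes() is a made-up name and the 512..4096 bounds are only illustrative:

#include <linux/errno.h>
#include <linux/kstrtox.h>

/*
 * kstrtoul() returns 0 on success or a negative errno (-EINVAL on bad
 * characters, -ERANGE on overflow) and tolerates a single trailing
 * newline, so no manual '\n' check is needed after the conversion.
 */
static int parse_block_bytes(const char *page, unsigned long *bytes)
{
	int ret;

	ret = kstrtoul(page, 0, bytes);
	if (ret)
		return ret;
	/* illustrative bounds check on the converted value */
	if (*bytes < 512 || *bytes > 4096)
		return -ERANGE;
	return 0;
}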
Link: https://lkml.kernel.org/r/20241117215219.4012-1-danielyangkang@gmail.com Signed-off-by: Daniel Yang Reviewed-by: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Cc: Changwei Ge Cc: Jun Piao Signed-off-by: Andrew Morton --- fs/ocfs2/cluster/heartbeat.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 4200a0341343..a67817e0e663 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1535,10 +1535,11 @@ static int o2hb_read_block_input(struct o2hb_region *reg, { unsigned long bytes; char *p = (char *)page; + int ret; - bytes = simple_strtoul(p, &p, 0); - if (!p || (*p && (*p != '\n'))) - return -EINVAL; + ret = kstrtoul(p, 0, &bytes); + if (ret) + return ret; /* Heartbeat and fs min / max block sizes are the same. */ if (bytes > 4096 || bytes < 512) @@ -1622,13 +1623,14 @@ static ssize_t o2hb_region_blocks_store(struct config_item *item, struct o2hb_region *reg = to_o2hb_region(item); unsigned long tmp; char *p = (char *)page; + int ret; if (reg->hr_bdev_file) return -EINVAL; - tmp = simple_strtoul(p, &p, 0); - if (!p || (*p && (*p != '\n'))) - return -EINVAL; + ret = kstrtoul(p, 0, &tmp); + if (ret) + return ret; if (tmp > O2NM_MAX_NODES || tmp == 0) return -ERANGE; @@ -2136,10 +2138,11 @@ static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *ite { unsigned long tmp; char *p = (char *)page; + int ret; - tmp = simple_strtoul(p, &p, 10); - if (!p || (*p && (*p != '\n'))) - return -EINVAL; + ret = kstrtoul(p, 10, &tmp); + if (ret) + return ret; /* this will validate ranges for us. */ o2hb_dead_threshold_set((unsigned int) tmp); From 7b3edd9d2f4dd20bdba8eff2ec79507a5da77ff0 Mon Sep 17 00:00:00 2001 From: Dmitry Antipov Date: Fri, 15 Nov 2024 18:10:13 +0300 Subject: [PATCH 370/504] ocfs2: miscellaneous spelling fixes Correct spelling here and there as suggested by codespell. Link: https://lkml.kernel.org/r/20241115151013.1404929-1-dmantipov@yandex.ru Signed-off-by: Dmitry Antipov Acked-by: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Cc: Changwei Ge Cc: Jun Piao Signed-off-by: Andrew Morton --- fs/ocfs2/alloc.c | 10 +++++----- fs/ocfs2/aops.c | 2 +- fs/ocfs2/cluster/heartbeat.c | 2 +- fs/ocfs2/cluster/masklog.h | 2 +- fs/ocfs2/cluster/quorum.c | 6 +++--- fs/ocfs2/cluster/tcp.c | 8 ++++---- fs/ocfs2/dlm/dlmapi.h | 2 +- fs/ocfs2/dlm/dlmrecovery.c | 6 +++--- fs/ocfs2/dlmglue.c | 12 ++++++------ fs/ocfs2/inode.c | 4 ++-- fs/ocfs2/ioctl.c | 2 +- fs/ocfs2/journal.c | 2 +- fs/ocfs2/move_extents.c | 8 ++++---- fs/ocfs2/ocfs2_fs.h | 8 ++++---- fs/ocfs2/ocfs2_ioctl.h | 2 +- fs/ocfs2/ocfs2_lockid.h | 2 +- fs/ocfs2/refcounttree.c | 6 +++--- fs/ocfs2/reservations.h | 4 ++-- fs/ocfs2/stack_o2cb.c | 2 +- fs/ocfs2/stackglue.h | 2 +- fs/ocfs2/super.c | 2 +- fs/ocfs2/xattr.c | 10 +++++----- 22 files changed, 52 insertions(+), 52 deletions(-) diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 395e23920632..b3fa953e5637 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -566,7 +566,7 @@ static void ocfs2_adjust_rightmost_records(handle_t *handle, struct ocfs2_path *path, struct ocfs2_extent_rec *insert_rec); /* - * Reset the actual path elements so that we can re-use the structure + * Reset the actual path elements so that we can reuse the structure * to build another path. Generally, this involves freeing the buffer * heads. 
*/ @@ -1182,7 +1182,7 @@ static int ocfs2_add_branch(handle_t *handle, /* * If there is a gap before the root end and the real end - * of the righmost leaf block, we need to remove the gap + * of the rightmost leaf block, we need to remove the gap * between new_cpos and root_end first so that the tree * is consistent after we add a new branch(it will start * from new_cpos). @@ -1238,7 +1238,7 @@ static int ocfs2_add_branch(handle_t *handle, /* Note: new_eb_bhs[new_blocks - 1] is the guy which will be * linked with the rest of the tree. - * conversly, new_eb_bhs[0] is the new bottommost leaf. + * conversely, new_eb_bhs[0] is the new bottommost leaf. * * when we leave the loop, new_last_eb_blk will point to the * newest leaf, and next_blkno will point to the topmost extent @@ -3712,7 +3712,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle, * update split_index here. * * When the split_index is zero, we need to merge it to the - * prevoius extent block. It is more efficient and easier + * previous extent block. It is more efficient and easier * if we do merge_right first and merge_left later. */ ret = ocfs2_merge_rec_right(path, handle, et, split_rec, @@ -4517,7 +4517,7 @@ static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et, } /* - * This should only be called against the righmost leaf extent list. + * This should only be called against the rightmost leaf extent list. * * ocfs2_figure_appending_type() will figure out whether we'll have to * insert at the tail of the rightmost leaf. diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index db72b3e924b3..64b3ddeb3555 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -305,7 +305,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio) } /* - * i_size might have just been updated as we grabed the meta lock. We + * i_size might have just been updated as we grabbed the meta lock. We * might now be discovering a truncate that hit on another node. * block_read_full_folio->get_block freaks out if it is asked to read * beyond the end of a file, so we check here. Callers diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index a67817e0e663..467127e361be 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -1020,7 +1020,7 @@ fire_callbacks: if (list_empty(&slot->ds_live_item)) goto out; - /* live nodes only go dead after enough consequtive missed + /* live nodes only go dead after enough consecutive missed * samples.. reset the missed counter whenever we see * activity */ if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) { diff --git a/fs/ocfs2/cluster/masklog.h b/fs/ocfs2/cluster/masklog.h index b73fc42e46ff..630bd5a3dd0d 100644 --- a/fs/ocfs2/cluster/masklog.h +++ b/fs/ocfs2/cluster/masklog.h @@ -29,7 +29,7 @@ * just calling printk() so that this can eventually make its way through * relayfs along with the debugging messages. Everything else gets KERN_DEBUG. * The inline tests and macro dance give GCC the opportunity to quite cleverly - * only emit the appropriage printk() when the caller passes in a constant + * only emit the appropriate printk() when the caller passes in a constant * mask, as is almost always the case. * * All this bitmask nonsense is managed from the files under diff --git a/fs/ocfs2/cluster/quorum.c b/fs/ocfs2/cluster/quorum.c index 8bf17231d7b7..bfb8b456876c 100644 --- a/fs/ocfs2/cluster/quorum.c +++ b/fs/ocfs2/cluster/quorum.c @@ -23,7 +23,7 @@ * race between when we see a node start heartbeating and when we connect * to it. 
* - * So nodes that are in this transtion put a hold on the quorum decision + * So nodes that are in this transition put a hold on the quorum decision * with a counter. As they fall out of this transition they drop the count * and if they're the last, they fire off the decision. */ @@ -189,7 +189,7 @@ static void o2quo_clear_hold(struct o2quo_state *qs, u8 node) } /* as a node comes up we delay the quorum decision until we know the fate of - * the connection. the hold will be droped in conn_up or hb_down. it might be + * the connection. the hold will be dropped in conn_up or hb_down. it might be * perpetuated by con_err until hb_down. if we already have a conn, we might * be dropping a hold that conn_up got. */ void o2quo_hb_up(u8 node) @@ -256,7 +256,7 @@ void o2quo_hb_still_up(u8 node) } /* This is analogous to hb_up. as a node's connection comes up we delay the - * quorum decision until we see it heartbeating. the hold will be droped in + * quorum decision until we see it heartbeating. the hold will be dropped in * hb_up or hb_down. it might be perpetuated by con_err until hb_down. if * it's already heartbeating we might be dropping a hold that conn_up got. * */ diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c index 2b8fa3e782fb..0f46b22561d6 100644 --- a/fs/ocfs2/cluster/tcp.c +++ b/fs/ocfs2/cluster/tcp.c @@ -5,13 +5,13 @@ * * ---- * - * Callers for this were originally written against a very simple synchronus + * Callers for this were originally written against a very simple synchronous * API. This implementation reflects those simple callers. Some day I'm sure * we'll need to move to a more robust posting/callback mechanism. * * Transmit calls pass in kernel virtual addresses and block copying this into * the socket's tx buffers via a usual blocking sendmsg. They'll block waiting - * for a failed socket to timeout. TX callers can also pass in a poniter to an + * for a failed socket to timeout. TX callers can also pass in a pointer to an * 'int' which gets filled with an errno off the wire in response to the * message they send. * @@ -101,7 +101,7 @@ static struct socket *o2net_listen_sock; * o2net_wq. teardown detaches the callbacks before destroying the workqueue. * quorum work is queued as sock containers are shutdown.. stop_listening * tears down all the node's sock containers, preventing future shutdowns - * and queued quroum work, before canceling delayed quorum work and + * and queued quorum work, before canceling delayed quorum work and * destroying the work queue. */ static struct workqueue_struct *o2net_wq; @@ -1419,7 +1419,7 @@ out: return ret; } -/* this work func is triggerd by data ready. it reads until it can read no +/* this work func is triggered by data ready. it reads until it can read no * more. it interprets 0, eof, as fatal. if data_ready hits while we're doing * our work the work struct will be marked and we'll be called again. 
*/ static void o2net_rx_until_empty(struct work_struct *work) diff --git a/fs/ocfs2/dlm/dlmapi.h b/fs/ocfs2/dlm/dlmapi.h index 847a52dcbe7d..1969db8ffa9c 100644 --- a/fs/ocfs2/dlm/dlmapi.h +++ b/fs/ocfs2/dlm/dlmapi.h @@ -118,7 +118,7 @@ struct dlm_lockstatus { #define LKM_VALBLK 0x00000100 /* lock value block request */ #define LKM_NOQUEUE 0x00000200 /* non blocking request */ #define LKM_CONVERT 0x00000400 /* conversion request */ -#define LKM_NODLCKWT 0x00000800 /* this lock wont deadlock (U) */ +#define LKM_NODLCKWT 0x00000800 /* this lock won't deadlock (U) */ #define LKM_UNLOCK 0x00001000 /* deallocate this lock */ #define LKM_CANCEL 0x00002000 /* cancel conversion request */ #define LKM_DEQALL 0x00004000 /* remove all locks held by proc (U) */ diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 50da8af988c1..54c548ef037a 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -207,7 +207,7 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm) * 1) all recovery threads cluster wide will work on recovering * ONE node at a time * 2) negotiate who will take over all the locks for the dead node. - * thats right... ALL the locks. + * that's right... ALL the locks. * 3) once a new master is chosen, everyone scans all locks * and moves aside those mastered by the dead guy * 4) each of these locks should be locked until recovery is done @@ -1469,7 +1469,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, * The first one is handled at the end of this function. The * other two are handled in the worker thread after locks have * been attached. Yes, we don't wait for purge time to match - * kref_init. The lockres will still have atleast one ref + * kref_init. The lockres will still have at least one ref * added because it is in the hash __dlm_insert_lockres() */ extra_refs++; @@ -1735,7 +1735,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, spin_unlock(&res->spinlock); } } else { - /* put.. incase we are not the master */ + /* put.. in case we are not the master */ spin_unlock(&res->spinlock); dlm_lockres_put(res); } diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 764ecbd5ad41..8f08e89be37d 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -794,7 +794,7 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res) /* * Keep a list of processes who have interest in a lockres. - * Note: this is now only uesed for check recursive cluster locking. + * Note: this is now only used for check recursive cluster locking. */ static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres, struct ocfs2_lock_holder *oh) @@ -2532,7 +2532,7 @@ bail: * locks while holding a page lock and the downconvert thread which * blocks dlm lock acquiry while acquiring page locks. * - * ** These _with_page variantes are only intended to be called from aop + * ** These _with_page variants are only intended to be called from aop * methods that hold page locks and return a very specific *positive* error * code that aop methods pass up to the VFS -- test for errors with != 0. ** * @@ -2630,7 +2630,7 @@ void ocfs2_inode_unlock(struct inode *inode, } /* - * This _tracker variantes are introduced to deal with the recursive cluster + * This _tracker variants are introduced to deal with the recursive cluster * locking issue. The idea is to keep track of a lock holder on the stack of * the current process. If there's a lock holder on the stack, we know the * task context is already protected by cluster locking. 
Currently, they're @@ -2735,7 +2735,7 @@ void ocfs2_inode_unlock_tracker(struct inode *inode, struct ocfs2_lock_res *lockres; lockres = &OCFS2_I(inode)->ip_inode_lockres; - /* had_lock means that the currect process already takes the cluster + /* had_lock means that the current process already takes the cluster * lock previously. * If had_lock is 1, we have nothing to do here. * If had_lock is 0, we will release the lock. @@ -3802,9 +3802,9 @@ recheck: * set when the ast is received for an upconvert just before the * OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast * on the heels of the ast, we want to delay the downconvert just - * enough to allow the up requestor to do its task. Because this + * enough to allow the up requester to do its task. Because this * lock is in the blocked queue, the lock will be downconverted - * as soon as the requestor is done with the lock. + * as soon as the requester is done with the lock. */ if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING) goto leave_requeue; diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index 2cc5c99fe941..cd3173062ae3 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -1122,7 +1122,7 @@ static void ocfs2_clear_inode(struct inode *inode) dquot_drop(inode); - /* To preven remote deletes we hold open lock before, now it + /* To prevent remote deletes we hold open lock before, now it * is time to unlock PR and EX open locks. */ ocfs2_open_unlock(inode); @@ -1437,7 +1437,7 @@ static int ocfs2_filecheck_validate_inode_block(struct super_block *sb, * Call ocfs2_validate_meta_ecc() first since it has ecc repair * function, but we should not return error immediately when ecc * validation fails, because the reason is quite likely the invalid - * inode number inputed. + * inode number inputted. */ rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check); if (rc) { diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 71beef7f8a60..7ae96fb8807a 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c @@ -796,7 +796,7 @@ bail: /* * OCFS2_IOC_INFO handles an array of requests passed from userspace. * - * ocfs2_info_handle() recevies a large info aggregation, grab and + * ocfs2_info_handle() receives a large info aggregation, grab and * validate the request count from header, then break it into small * pieces, later specific handlers can handle them one by one. * diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 1bf188b6866a..f1b4b3e611cb 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c @@ -1956,7 +1956,7 @@ bail: /* * Scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some - * randomness to the timeout to minimize multple nodes firing the timer at the + * randomness to the timeout to minimize multiple nodes firing the timer at the * same time. */ static inline unsigned long ocfs2_orphan_scan_timeout(void) diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index f9d6a4f9ca92..369c7d27befd 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c @@ -492,7 +492,7 @@ static int ocfs2_validate_and_adjust_move_goal(struct inode *inode, bg = (struct ocfs2_group_desc *)gd_bh->b_data; /* - * moving goal is not allowd to start with a group desc blok(#0 blk) + * moving goal is not allowed to start with a group desc blok(#0 blk) * let's compromise to the latter cluster. 
*/ if (range->me_goal == le64_to_cpu(bg->bg_blkno)) @@ -658,7 +658,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context, /* * probe the victim cluster group to find a proper - * region to fit wanted movement, it even will perfrom + * region to fit wanted movement, it even will perform * a best-effort attempt by compromising to a threshold * around the goal. */ @@ -920,7 +920,7 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context) } /* - * rememer ip_xattr_sem also needs to be held if necessary + * remember ip_xattr_sem also needs to be held if necessary */ down_write(&OCFS2_I(inode)->ip_alloc_sem); @@ -1022,7 +1022,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp) context->range = ⦥ /* - * ok, the default theshold for the defragmentation + * ok, the default threshold for the defragmentation * is 1M, since our maximum clustersize was 1M also. * any thought? */ diff --git a/fs/ocfs2/ocfs2_fs.h b/fs/ocfs2/ocfs2_fs.h index c93689b568fe..e8e94599e907 100644 --- a/fs/ocfs2/ocfs2_fs.h +++ b/fs/ocfs2/ocfs2_fs.h @@ -132,7 +132,7 @@ * well as the name of the cluster being joined. * mount.ocfs2 must pass in a matching stack name. * - * If not set, the classic stack will be used. This is compatbile with + * If not set, the classic stack will be used. This is compatible with * all older versions. */ #define OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK 0x0080 @@ -143,7 +143,7 @@ /* Support for extended attributes */ #define OCFS2_FEATURE_INCOMPAT_XATTR 0x0200 -/* Support for indexed directores */ +/* Support for indexed directories */ #define OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS 0x0400 /* Metadata checksum and error correction */ @@ -156,7 +156,7 @@ #define OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG 0x2000 /* - * Incompat bit to indicate useable clusterinfo with stackflags for all + * Incompat bit to indicate usable clusterinfo with stackflags for all * cluster stacks (userspace adnd o2cb). If this bit is set, * INCOMPAT_USERSPACE_STACK becomes superfluous and thus should not be set. */ @@ -1083,7 +1083,7 @@ struct ocfs2_xattr_block { struct ocfs2_xattr_header xb_header; /* xattr header if this block contains xattr */ struct ocfs2_xattr_tree_root xb_root;/* xattr tree root if this - block cotains xattr + block contains xattr tree. */ } xb_attrs; }; diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h index 9680797bc531..2de2f8733283 100644 --- a/fs/ocfs2/ocfs2_ioctl.h +++ b/fs/ocfs2/ocfs2_ioctl.h @@ -215,7 +215,7 @@ struct ocfs2_move_extents { movement less likely to fail, may make fs even more fragmented */ -#define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmenation +#define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmentation completely gets done. */ diff --git a/fs/ocfs2/ocfs2_lockid.h b/fs/ocfs2/ocfs2_lockid.h index 8ac357ce6a30..9b234c03d693 100644 --- a/fs/ocfs2/ocfs2_lockid.h +++ b/fs/ocfs2/ocfs2_lockid.h @@ -93,7 +93,7 @@ static char *ocfs2_lock_type_strings[] = { [OCFS2_LOCK_TYPE_DATA] = "Data", [OCFS2_LOCK_TYPE_SUPER] = "Super", [OCFS2_LOCK_TYPE_RENAME] = "Rename", - /* Need to differntiate from [R]ename.. serializing writes is the + /* Need to differentiate from [R]ename.. serializing writes is the * important job it does, anyway. 
*/ [OCFS2_LOCK_TYPE_RW] = "Write/Read", [OCFS2_LOCK_TYPE_DENTRY] = "Dentry", diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 004393b13c0a..73caf991ede5 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2420,7 +2420,7 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb, * * If we will insert a new one, this is easy and only happens * during adding refcounted flag to the extent, so we don't - * have a chance of spliting. We just need one record. + * have a chance of splitting. We just need one record. * * If the refcount rec already exists, that would be a little * complicated. we may have to: @@ -2610,11 +2610,11 @@ static inline unsigned int ocfs2_cow_align_length(struct super_block *sb, /* * Calculate out the start and number of virtual clusters we need to CoW. * - * cpos is vitual start cluster position we want to do CoW in a + * cpos is virtual start cluster position we want to do CoW in a * file and write_len is the cluster length. * max_cpos is the place where we want to stop CoW intentionally. * - * Normal we will start CoW from the beginning of extent record cotaining cpos. + * Normal we will start CoW from the beginning of extent record containing cpos. * We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we * get good I/O from the resulting extent tree. */ diff --git a/fs/ocfs2/reservations.h b/fs/ocfs2/reservations.h index ec8101ef5717..4fce17180342 100644 --- a/fs/ocfs2/reservations.h +++ b/fs/ocfs2/reservations.h @@ -31,7 +31,7 @@ struct ocfs2_alloc_reservation { #define OCFS2_RESV_FLAG_INUSE 0x01 /* Set when r_node is part of a btree */ #define OCFS2_RESV_FLAG_TMP 0x02 /* Temporary reservation, will be - * destroyed immedately after use */ + * destroyed immediately after use */ #define OCFS2_RESV_FLAG_DIR 0x04 /* Reservation is for an unindexed * directory btree */ @@ -125,7 +125,7 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap, /** * ocfs2_resmap_claimed_bits() - Tell the reservation code that bits were used. * @resmap: reservations bitmap - * @resv: optional reservation to recalulate based on new bitmap + * @resv: optional reservation to recalculate based on new bitmap * @cstart: start of allocation in clusters * @clen: end of allocation in clusters. * diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c index 10157d9d7a9c..f58e891aa2da 100644 --- a/fs/ocfs2/stack_o2cb.c +++ b/fs/ocfs2/stack_o2cb.c @@ -227,7 +227,7 @@ static int o2cb_dlm_lock_status(struct ocfs2_dlm_lksb *lksb) } /* - * o2dlm aways has a "valid" LVB. If the dlm loses track of the LVB + * o2dlm always has a "valid" LVB. If the dlm loses track of the LVB * contents, it will zero out the LVB. Thus the caller can always trust * the contents. */ diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h index 02ab072c528a..5486a6dce70a 100644 --- a/fs/ocfs2/stackglue.h +++ b/fs/ocfs2/stackglue.h @@ -210,7 +210,7 @@ struct ocfs2_stack_operations { struct file_lock *fl); /* - * This is an optoinal debugging hook. If provided, the + * This is an optional debugging hook. If provided, the * stack can dump debugging information about this lock. 
*/ void (*dump_lksb)(struct ocfs2_dlm_lksb *lksb); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index c79b4291777f..5a501adb7c39 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -1858,7 +1858,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err) osb = OCFS2_SB(sb); BUG_ON(!osb); - /* Remove file check sysfs related directores/files, + /* Remove file check sysfs related directories/files, * and wait for the pending file check operations */ ocfs2_filecheck_remove_sysfs(osb); diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 73a6f6fd8a8e..d70a20d29e3e 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -648,7 +648,7 @@ int ocfs2_calc_xattr_init(struct inode *dir, * 256(name) + 80(value) + 16(entry) = 352 bytes, * The max space of acl xattr taken inline is * 80(value) + 16(entry) * 2(if directory) = 192 bytes, - * when blocksize = 512, may reserve one more cluser for + * when blocksize = 512, may reserve one more cluster for * xattr bucket, otherwise reserve one metadata block * for them is ok. * If this is a new directory with inline data, @@ -4371,7 +4371,7 @@ static int cmp_xe_offset(const void *a, const void *b) /* * defrag a xattr bucket if we find that the bucket has some - * holes beteen name/value pairs. + * holes between name/value pairs. * We will move all the name/value pairs to the end of the bucket * so that we can spare some space for insertion. */ @@ -5011,7 +5011,7 @@ static int ocfs2_divide_xattr_cluster(struct inode *inode, * 2. If cluster_size == bucket_size: * a) If the previous extent rec has more than one cluster and the insert * place isn't in the last cluster, copy the entire last cluster to the - * new one. This time, we don't need to upate the first_bh and header_bh + * new one. This time, we don't need to update the first_bh and header_bh * since they will not be moved into the new cluster. * b) Otherwise, move the bottom half of the xattrs in the last cluster into * the new one. And we set the extend flag to zero if the insert place is @@ -6189,7 +6189,7 @@ struct ocfs2_xattr_reflink { /* * Given a xattr header and xe offset, * return the proper xv and the corresponding bh. - * xattr in inode, block and xattr tree have different implementaions. + * xattr in inode, block and xattr tree have different implementations. */ typedef int (get_xattr_value_root)(struct super_block *sb, struct buffer_head *bh, @@ -6269,7 +6269,7 @@ static int ocfs2_get_xattr_value_root(struct super_block *sb, } /* - * Lock the meta_ac and caculate how much credits we need for reflink xattrs. + * Lock the meta_ac and calculate how much credits we need for reflink xattrs. * It is only used for inline xattr and xattr block. */ static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb, From e41ac688542f3828225d82b3f82a18548d9847d5 Mon Sep 17 00:00:00 2001 From: Daniel Yang Date: Fri, 15 Nov 2024 00:00:17 -0800 Subject: [PATCH 371/504] ocfs2: replace deprecated simple_strtol with kstrtol simple_strtol() ignores overflows and has an awkward interface for error checking. Replace with the recommended kstrtol function leads to clearer error checking and safer conversions. 
Link: https://lkml.kernel.org/r/20241115080018.5372-1-danielyangkang@gmail.com Signed-off-by: Daniel Yang Reviewed-by: Joseph Qi Cc: Mark Fasheh Cc: Joel Becker Cc: Junxiao Bi Cc: Changwei Ge Cc: Jun Piao Signed-off-by: Andrew Morton --- fs/ocfs2/cluster/heartbeat.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c index 467127e361be..724350925aff 100644 --- a/fs/ocfs2/cluster/heartbeat.c +++ b/fs/ocfs2/cluster/heartbeat.c @@ -3,6 +3,7 @@ * Copyright (C) 2004, 2005 Oracle. All rights reserved. */ +#include "linux/kstrtox.h" #include #include #include @@ -1778,8 +1779,8 @@ static ssize_t o2hb_region_dev_store(struct config_item *item, if (o2nm_this_node() == O2NM_MAX_NODES) return -EINVAL; - fd = simple_strtol(p, &p, 0); - if (!p || (*p && (*p != '\n'))) + ret = kstrtol(p, 0, &fd); + if (ret < 0) return -EINVAL; if (fd < 0 || fd >= INT_MAX) From dd2ca52f75e3b4b85e1b7466dbf547765c111fff Mon Sep 17 00:00:00 2001 From: David Laight Date: Mon, 18 Nov 2024 19:11:24 +0000 Subject: [PATCH 372/504] minmax.h: add whitespace around operators and after commas Patch series "minmax.h: Cleanups and minor optimisations". Some tidyups and minor changes to minmax.h. This patch (of 7): Link: https://lkml.kernel.org/r/c50365d214e04f9ba256d417c8bebbc0@AcuMS.aculab.com Link: https://lkml.kernel.org/r/f04b2e1310244f62826267346fde0553@AcuMS.aculab.com Signed-off-by: David Laight Cc: Andy Shevchenko Cc: Arnd Bergmann Cc: Christoph Hellwig Cc: Dan Carpenter Cc: Jason A. Donenfeld Cc: Jens Axboe Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Pedro Falcato Signed-off-by: Andrew Morton --- include/linux/minmax.h | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/include/linux/minmax.h b/include/linux/minmax.h index 98008dd92153..51b0d988e322 100644 --- a/include/linux/minmax.h +++ b/include/linux/minmax.h @@ -51,10 +51,10 @@ * only need to be careful to not cause warnings for * pointer use. */ -#define __signed_type_use(x,ux) (2+__is_nonneg(x,ux)) -#define __unsigned_type_use(x,ux) (1+2*(sizeof(ux)<4)) -#define __sign_use(x,ux) (is_signed_type(typeof(ux))? \ - __signed_type_use(x,ux):__unsigned_type_use(x,ux)) +#define __signed_type_use(x, ux) (2 + __is_nonneg(x, ux)) +#define __unsigned_type_use(x, ux) (1 + 2 * (sizeof(ux) < 4)) +#define __sign_use(x, ux) (is_signed_type(typeof(ux)) ? 
\ + __signed_type_use(x, ux) : __unsigned_type_use(x, ux)) /* * To avoid warnings about casting pointers to integers @@ -74,15 +74,15 @@ #ifdef CONFIG_64BIT #define __signed_type(ux) long #else - #define __signed_type(ux) typeof(__builtin_choose_expr(sizeof(ux)>4,1LL,1L)) + #define __signed_type(ux) typeof(__builtin_choose_expr(sizeof(ux) > 4, 1LL, 1L)) #endif -#define __is_nonneg(x,ux) statically_true((__signed_type(ux))(x)>=0) +#define __is_nonneg(x, ux) statically_true((__signed_type(ux))(x) >= 0) -#define __types_ok(x,y,ux,uy) \ - (__sign_use(x,ux) & __sign_use(y,uy)) +#define __types_ok(x, y, ux, uy) \ + (__sign_use(x, ux) & __sign_use(y, uy)) -#define __types_ok3(x,y,z,ux,uy,uz) \ - (__sign_use(x,ux) & __sign_use(y,uy) & __sign_use(z,uz)) +#define __types_ok3(x, y, z, ux, uy, uz) \ + (__sign_use(x, ux) & __sign_use(y, uy) & __sign_use(z, uz)) #define __cmp_op_min < #define __cmp_op_max > @@ -97,7 +97,7 @@ #define __careful_cmp_once(op, x, y, ux, uy) ({ \ __auto_type ux = (x); __auto_type uy = (y); \ - BUILD_BUG_ON_MSG(!__types_ok(x,y,ux,uy), \ + BUILD_BUG_ON_MSG(!__types_ok(x, y, ux, uy), \ #op"("#x", "#y") signedness error"); \ __cmp(op, ux, uy); }) @@ -114,7 +114,7 @@ static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \ (lo) <= (hi), true), \ "clamp() low limit " #lo " greater than high limit " #hi); \ - BUILD_BUG_ON_MSG(!__types_ok3(val,lo,hi,uval,ulo,uhi), \ + BUILD_BUG_ON_MSG(!__types_ok3(val, lo, hi, uval, ulo, uhi), \ "clamp("#val", "#lo", "#hi") signedness error"); \ __clamp(uval, ulo, uhi); }) @@ -154,7 +154,7 @@ #define __careful_op3(op, x, y, z, ux, uy, uz) ({ \ __auto_type ux = (x); __auto_type uy = (y);__auto_type uz = (z);\ - BUILD_BUG_ON_MSG(!__types_ok3(x,y,z,ux,uy,uz), \ + BUILD_BUG_ON_MSG(!__types_ok3(x, y, z, ux, uy, uz), \ #op"3("#x", "#y", "#z") signedness error"); \ __cmp(op, ux, __cmp(op, uy, uz)); }) @@ -326,9 +326,9 @@ static inline bool in_range32(u32 val, u32 start, u32 len) * Use these carefully: no type checking, and uses the arguments * multiple times. Use for obvious constants only. */ -#define MIN(a,b) __cmp(min,a,b) -#define MAX(a,b) __cmp(max,a,b) -#define MIN_T(type,a,b) __cmp(min,(type)(a),(type)(b)) -#define MAX_T(type,a,b) __cmp(max,(type)(a),(type)(b)) +#define MIN(a, b) __cmp(min, a, b) +#define MAX(a, b) __cmp(max, a, b) +#define MIN_T(type, a, b) __cmp(min, (type)(a), (type)(b)) +#define MAX_T(type, a, b) __cmp(max, (type)(a), (type)(b)) #endif /* _LINUX_MINMAX_H */ From 574dc928d028fa3e0f8dfa8c7cc5613488df69fd Mon Sep 17 00:00:00 2001 From: David Laight Date: Mon, 18 Nov 2024 19:12:07 +0000 Subject: [PATCH 373/504] minmax.h: update some comments - Change three to several. - Remove the comment about retaining constant expressions, no longer true. - Realign to nearer 80 columns and break on major punctiation. - Add a leading comment to the block before __signed_type() and __is_nonneg() Otherwise the block explaining the cast is a bit 'floating'. Reword the rest of that comment to improve readability. Link: https://lkml.kernel.org/r/85b050c81c1d4076aeb91a6cded45fee@AcuMS.aculab.com Signed-off-by: David Laight Cc: Andy Shevchenko Cc: Arnd Bergmann Cc: Christoph Hellwig Cc: Dan Carpenter Cc: Jason A. 
Donenfeld Cc: Jens Axboe Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Pedro Falcato Signed-off-by: Andrew Morton --- include/linux/minmax.h | 53 +++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 29 deletions(-) diff --git a/include/linux/minmax.h b/include/linux/minmax.h index 51b0d988e322..24e4b372649a 100644 --- a/include/linux/minmax.h +++ b/include/linux/minmax.h @@ -8,13 +8,10 @@ #include /* - * min()/max()/clamp() macros must accomplish three things: + * min()/max()/clamp() macros must accomplish several things: * * - Avoid multiple evaluations of the arguments (so side-effects like * "x++" happen only once) when non-constant. - * - Retain result as a constant expressions when called with only - * constant expressions (to avoid tripping VLA warnings in stack - * allocation usage). * - Perform signed v unsigned type-checking (to generate compile * errors instead of nasty runtime surprises). * - Unsigned char/short are always promoted to signed int and can be @@ -31,25 +28,23 @@ * bit #0 set if ok for unsigned comparisons * bit #1 set if ok for signed comparisons * - * In particular, statically non-negative signed integer - * expressions are ok for both. + * In particular, statically non-negative signed integer expressions + * are ok for both. * - * NOTE! Unsigned types smaller than 'int' are implicitly - * converted to 'int' in expressions, and are accepted for - * signed conversions for now. This is debatable. + * NOTE! Unsigned types smaller than 'int' are implicitly converted to 'int' + * in expressions, and are accepted for signed conversions for now. + * This is debatable. * - * Note that 'x' is the original expression, and 'ux' is - * the unique variable that contains the value. + * Note that 'x' is the original expression, and 'ux' is the unique variable + * that contains the value. * - * We use 'ux' for pure type checking, and 'x' for when - * we need to look at the value (but without evaluating - * it for side effects! Careful to only ever evaluate it - * with sizeof() or __builtin_constant_p() etc). + * We use 'ux' for pure type checking, and 'x' for when we need to look at the + * value (but without evaluating it for side effects! + * Careful to only ever evaluate it with sizeof() or __builtin_constant_p() etc). * - * Pointers end up being checked by the normal C type - * rules at the actual comparison, and these expressions - * only need to be careful to not cause warnings for - * pointer use. + * Pointers end up being checked by the normal C type rules at the actual + * comparison, and these expressions only need to be careful to not cause + * warnings for pointer use. */ #define __signed_type_use(x, ux) (2 + __is_nonneg(x, ux)) #define __unsigned_type_use(x, ux) (1 + 2 * (sizeof(ux) < 4)) @@ -57,19 +52,19 @@ __signed_type_use(x, ux) : __unsigned_type_use(x, ux)) /* - * To avoid warnings about casting pointers to integers - * of different sizes, we need that special sign type. + * Check whether a signed value is always non-negative. * - * On 64-bit we can just always use 'long', since any - * integer or pointer type can just be cast to that. + * A cast is needed to avoid any warnings from values that aren't signed + * integer types (in which case the result doesn't matter). * - * This does not work for 128-bit signed integers since - * the cast would truncate them, but we do not use s128 - * types in the kernel (we do use 'u128', but they will - * be handled by the !is_signed_type() case). 
+ * On 64-bit any integer or pointer type can safely be cast to 'long'. + * But on 32-bit we need to avoid warnings about casting pointers to integers + * of different sizes without truncating 64-bit values so 'long' or 'long long' + * must be used depending on the size of the value. * - * NOTE! The cast is there only to avoid any warnings - * from when values that aren't signed integer types. + * This does not work for 128-bit signed integers since the cast would truncate + * them, but we do not use s128 types in the kernel (we do use 'u128', + * but they are handled by the !is_signed_type() case). */ #ifdef CONFIG_64BIT #define __signed_type(ux) long From 85660ce91b240b222d334ebd7ae94135b68cceb2 Mon Sep 17 00:00:00 2001 From: David Laight Date: Mon, 18 Nov 2024 19:12:50 +0000 Subject: [PATCH 374/504] minmax.h: reduce the #define expansion of min(), max() and clamp() Since the test for signed values being non-negative only relies on __builtion_constant_p() (not is_constexpr()) it can use the 'ux' variable instead of the caller supplied expression. This means that the #define parameters are only expanded twice. Once in the code and once quoted in the error message. Link: https://lkml.kernel.org/r/051afc171806425da991908ed8688a98@AcuMS.aculab.com Signed-off-by: David Laight Cc: Andy Shevchenko Cc: Arnd Bergmann Cc: Christoph Hellwig Cc: Dan Carpenter Cc: Jason A. Donenfeld Cc: Jens Axboe Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Pedro Falcato Signed-off-by: Andrew Morton --- include/linux/minmax.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/include/linux/minmax.h b/include/linux/minmax.h index 24e4b372649a..6f7ea669d305 100644 --- a/include/linux/minmax.h +++ b/include/linux/minmax.h @@ -46,10 +46,10 @@ * comparison, and these expressions only need to be careful to not cause * warnings for pointer use. */ -#define __signed_type_use(x, ux) (2 + __is_nonneg(x, ux)) -#define __unsigned_type_use(x, ux) (1 + 2 * (sizeof(ux) < 4)) -#define __sign_use(x, ux) (is_signed_type(typeof(ux)) ? \ - __signed_type_use(x, ux) : __unsigned_type_use(x, ux)) +#define __signed_type_use(ux) (2 + __is_nonneg(ux)) +#define __unsigned_type_use(ux) (1 + 2 * (sizeof(ux) < 4)) +#define __sign_use(ux) (is_signed_type(typeof(ux)) ? \ + __signed_type_use(ux) : __unsigned_type_use(ux)) /* * Check whether a signed value is always non-negative. 
@@ -71,13 +71,13 @@ #else #define __signed_type(ux) typeof(__builtin_choose_expr(sizeof(ux) > 4, 1LL, 1L)) #endif -#define __is_nonneg(x, ux) statically_true((__signed_type(ux))(x) >= 0) +#define __is_nonneg(ux) statically_true((__signed_type(ux))(ux) >= 0) -#define __types_ok(x, y, ux, uy) \ - (__sign_use(x, ux) & __sign_use(y, uy)) +#define __types_ok(ux, uy) \ + (__sign_use(ux) & __sign_use(uy)) -#define __types_ok3(x, y, z, ux, uy, uz) \ - (__sign_use(x, ux) & __sign_use(y, uy) & __sign_use(z, uz)) +#define __types_ok3(ux, uy, uz) \ + (__sign_use(ux) & __sign_use(uy) & __sign_use(uz)) #define __cmp_op_min < #define __cmp_op_max > @@ -92,7 +92,7 @@ #define __careful_cmp_once(op, x, y, ux, uy) ({ \ __auto_type ux = (x); __auto_type uy = (y); \ - BUILD_BUG_ON_MSG(!__types_ok(x, y, ux, uy), \ + BUILD_BUG_ON_MSG(!__types_ok(ux, uy), \ #op"("#x", "#y") signedness error"); \ __cmp(op, ux, uy); }) @@ -109,7 +109,7 @@ static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \ (lo) <= (hi), true), \ "clamp() low limit " #lo " greater than high limit " #hi); \ - BUILD_BUG_ON_MSG(!__types_ok3(val, lo, hi, uval, ulo, uhi), \ + BUILD_BUG_ON_MSG(!__types_ok3(uval, ulo, uhi), \ "clamp("#val", "#lo", "#hi") signedness error"); \ __clamp(uval, ulo, uhi); }) @@ -149,7 +149,7 @@ #define __careful_op3(op, x, y, z, ux, uy, uz) ({ \ __auto_type ux = (x); __auto_type uy = (y);__auto_type uz = (z);\ - BUILD_BUG_ON_MSG(!__types_ok3(x, y, z, ux, uy, uz), \ + BUILD_BUG_ON_MSG(!__types_ok3(ux, uy, uz), \ #op"3("#x", "#y", "#z") signedness error"); \ __cmp(op, ux, __cmp(op, uy, uz)); }) From e4978fee760fd04c8a82c270042475c5bcf4c931 Mon Sep 17 00:00:00 2001 From: David Laight Date: Mon, 18 Nov 2024 19:13:31 +0000 Subject: [PATCH 375/504] minmax.h: use BUILD_BUG_ON_MSG() for the lo < hi test in clamp() Use BUILD_BUG_ON_MSG(statically_true(ulo > uhi), ...) for the sanity check of the bounds in clamp(). Gives better error coverage and one less expansion of the arguments. Link: https://lkml.kernel.org/r/34d53778977747f19cce2abb287bb3e6@AcuMS.aculab.com Signed-off-by: David Laight Cc: Andy Shevchenko Cc: Arnd Bergmann Cc: Christoph Hellwig Cc: Dan Carpenter Cc: Jason A. Donenfeld Cc: Jens Axboe Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Pedro Falcato Signed-off-by: Andrew Morton --- include/linux/minmax.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/include/linux/minmax.h b/include/linux/minmax.h index 6f7ea669d305..91aa1b90c1bb 100644 --- a/include/linux/minmax.h +++ b/include/linux/minmax.h @@ -106,8 +106,7 @@ __auto_type uval = (val); \ __auto_type ulo = (lo); \ __auto_type uhi = (hi); \ - static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \ - (lo) <= (hi), true), \ + BUILD_BUG_ON_MSG(statically_true(ulo > uhi), \ "clamp() low limit " #lo " greater than high limit " #hi); \ BUILD_BUG_ON_MSG(!__types_ok3(uval, ulo, uhi), \ "clamp("#val", "#lo", "#hi") signedness error"); \ From 6604d4389ce768b9597e93f6a2c36fc3d5ca1c80 Mon Sep 17 00:00:00 2001 From: David Laight Date: Mon, 18 Nov 2024 19:14:19 +0000 Subject: [PATCH 376/504] minmax.h: move all the clamp() definitions after the min/max() ones At some point the definitions for clamp() got added in the middle of the ones for min() and max(). Re-order the definitions so they are more sensibly grouped. Link: https://lkml.kernel.org/r/8bb285818e4846469121c8abc3dfb6e2@AcuMS.aculab.com Signed-off-by: David Laight Cc: Andy Shevchenko Cc: Arnd Bergmann Cc: Christoph Hellwig Cc: Dan Carpenter Cc: Jason A. 
Donenfeld Cc: Jens Axboe Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Pedro Falcato Signed-off-by: Andrew Morton --- include/linux/minmax.h | 131 +++++++++++++++++++---------------------- 1 file changed, 62 insertions(+), 69 deletions(-) diff --git a/include/linux/minmax.h b/include/linux/minmax.h index 91aa1b90c1bb..75fb7a6ad4c6 100644 --- a/include/linux/minmax.h +++ b/include/linux/minmax.h @@ -99,22 +99,6 @@ #define __careful_cmp(op, x, y) \ __careful_cmp_once(op, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_)) -#define __clamp(val, lo, hi) \ - ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val))) - -#define __clamp_once(val, lo, hi, uval, ulo, uhi) ({ \ - __auto_type uval = (val); \ - __auto_type ulo = (lo); \ - __auto_type uhi = (hi); \ - BUILD_BUG_ON_MSG(statically_true(ulo > uhi), \ - "clamp() low limit " #lo " greater than high limit " #hi); \ - BUILD_BUG_ON_MSG(!__types_ok3(uval, ulo, uhi), \ - "clamp("#val", "#lo", "#hi") signedness error"); \ - __clamp(uval, ulo, uhi); }) - -#define __careful_clamp(val, lo, hi) \ - __clamp_once(val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_)) - /** * min - return minimum of two values of the same or compatible types * @x: first value @@ -170,34 +154,6 @@ #define max3(x, y, z) \ __careful_op3(max, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_)) -/** - * min_not_zero - return the minimum that is _not_ zero, unless both are zero - * @x: value1 - * @y: value2 - */ -#define min_not_zero(x, y) ({ \ - typeof(x) __x = (x); \ - typeof(y) __y = (y); \ - __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) - -/** - * clamp - return a value clamped to a given range with strict typechecking - * @val: current value - * @lo: lowest allowable value - * @hi: highest allowable value - * - * This macro does strict typechecking of @lo/@hi to make sure they are of the - * same type as @val. See the unnecessary pointer comparisons. - */ -#define clamp(val, lo, hi) __careful_clamp(val, lo, hi) - -/* - * ..and if you can't take the strict - * types, you can specify one yourself. - * - * Or not use min/max/clamp at all, of course. - */ - /** * min_t - return minimum of two values, using the specified type * @type: data type to use @@ -214,6 +170,68 @@ */ #define max_t(type, x, y) __cmp_once(max, type, x, y) +/** + * min_not_zero - return the minimum that is _not_ zero, unless both are zero + * @x: value1 + * @y: value2 + */ +#define min_not_zero(x, y) ({ \ + typeof(x) __x = (x); \ + typeof(y) __y = (y); \ + __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) + +#define __clamp(val, lo, hi) \ + ((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val))) + +#define __clamp_once(val, lo, hi, uval, ulo, uhi) ({ \ + __auto_type uval = (val); \ + __auto_type ulo = (lo); \ + __auto_type uhi = (hi); \ + BUILD_BUG_ON_MSG(statically_true(ulo > uhi), \ + "clamp() low limit " #lo " greater than high limit " #hi); \ + BUILD_BUG_ON_MSG(!__types_ok3(uval, ulo, uhi), \ + "clamp("#val", "#lo", "#hi") signedness error"); \ + __clamp(uval, ulo, uhi); }) + +#define __careful_clamp(val, lo, hi) \ + __clamp_once(val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_)) + +/** + * clamp - return a value clamped to a given range with strict typechecking + * @val: current value + * @lo: lowest allowable value + * @hi: highest allowable value + * + * This macro does strict typechecking of @lo/@hi to make sure they are of the + * same type as @val. See the unnecessary pointer comparisons. 
+ */ +#define clamp(val, lo, hi) __careful_clamp(val, lo, hi) + +/** + * clamp_t - return a value clamped to a given range using a given type + * @type: the type of variable to use + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of type + * @type to make all the comparisons. + */ +#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi)) + +/** + * clamp_val - return a value clamped to a given range using val's type + * @val: current value + * @lo: minimum allowable value + * @hi: maximum allowable value + * + * This macro does no typechecking and uses temporary variables of whatever + * type the input argument @val is. This is useful when @val is an unsigned + * type and @lo and @hi are literals that will otherwise be assigned a signed + * integer type. + */ +#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) + /* * Do not check the array parameter using __must_be_array(). * In the following legit use-case where the "array" passed is a simple pointer, @@ -257,31 +275,6 @@ */ #define max_array(array, len) __minmax_array(max, array, len) -/** - * clamp_t - return a value clamped to a given range using a given type - * @type: the type of variable to use - * @val: current value - * @lo: minimum allowable value - * @hi: maximum allowable value - * - * This macro does no typechecking and uses temporary variables of type - * @type to make all the comparisons. - */ -#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi)) - -/** - * clamp_val - return a value clamped to a given range using val's type - * @val: current value - * @lo: minimum allowable value - * @hi: maximum allowable value - * - * This macro does no typechecking and uses temporary variables of whatever - * type the input argument @val is. This is useful when @val is an unsigned - * type and @lo and @hi are literals that will otherwise be assigned a signed - * integer type. - */ -#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) - static inline bool in_range64(u64 val, u64 start, u64 len) { return (val - start) < len; From 84df5f6184f5b91342b51476d877c0bbd03d1254 Mon Sep 17 00:00:00 2001 From: David Laight Date: Mon, 18 Nov 2024 19:15:05 +0000 Subject: [PATCH 377/504] minmax.h: simplify the variants of clamp() Always pass a 'type' through to __clamp_once(), pass '__auto_type' from clamp() itself. The expansion of __types_ok3() is reasonable so it isn't worth the added complexity of avoiding it when a fixed type is used for all three values. Link: https://lkml.kernel.org/r/8f69f4deac014f558bab186444bac2e8@AcuMS.aculab.com Signed-off-by: David Laight Cc: Andy Shevchenko Cc: Arnd Bergmann Cc: Christoph Hellwig Cc: Dan Carpenter Cc: Jason A. Donenfeld Cc: Jens Axboe Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Pedro Falcato Signed-off-by: Andrew Morton --- include/linux/minmax.h | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/include/linux/minmax.h b/include/linux/minmax.h index 75fb7a6ad4c6..2bbdd5b5e07e 100644 --- a/include/linux/minmax.h +++ b/include/linux/minmax.h @@ -183,29 +183,29 @@ #define __clamp(val, lo, hi) \ ((val) >= (hi) ? (hi) : ((val) <= (lo) ? 
(lo) : (val))) -#define __clamp_once(val, lo, hi, uval, ulo, uhi) ({ \ - __auto_type uval = (val); \ - __auto_type ulo = (lo); \ - __auto_type uhi = (hi); \ +#define __clamp_once(type, val, lo, hi, uval, ulo, uhi) ({ \ + type uval = (val); \ + type ulo = (lo); \ + type uhi = (hi); \ BUILD_BUG_ON_MSG(statically_true(ulo > uhi), \ "clamp() low limit " #lo " greater than high limit " #hi); \ BUILD_BUG_ON_MSG(!__types_ok3(uval, ulo, uhi), \ "clamp("#val", "#lo", "#hi") signedness error"); \ __clamp(uval, ulo, uhi); }) -#define __careful_clamp(val, lo, hi) \ - __clamp_once(val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_)) +#define __careful_clamp(type, val, lo, hi) \ + __clamp_once(type, val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_)) /** - * clamp - return a value clamped to a given range with strict typechecking + * clamp - return a value clamped to a given range with typechecking * @val: current value * @lo: lowest allowable value * @hi: highest allowable value * - * This macro does strict typechecking of @lo/@hi to make sure they are of the - * same type as @val. See the unnecessary pointer comparisons. + * This macro checks @val/@lo/@hi to make sure they have compatible + * signedness. */ -#define clamp(val, lo, hi) __careful_clamp(val, lo, hi) +#define clamp(val, lo, hi) __careful_clamp(__auto_type, val, lo, hi) /** * clamp_t - return a value clamped to a given range using a given type @@ -217,7 +217,7 @@ * This macro does no typechecking and uses temporary variables of type * @type to make all the comparisons. */ -#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi)) +#define clamp_t(type, val, lo, hi) __careful_clamp(type, val, lo, hi) /** * clamp_val - return a value clamped to a given range using val's type @@ -230,7 +230,7 @@ * type and @lo and @hi are literals that will otherwise be assigned a signed * integer type. */ -#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) +#define clamp_val(val, lo, hi) __careful_clamp(typeof(val), val, lo, hi) /* * Do not check the array parameter using __must_be_array(). From f7756a1a0aec97476524e657ce04f2a61470537a Mon Sep 17 00:00:00 2001 From: David Laight Date: Mon, 18 Nov 2024 19:15:51 +0000 Subject: [PATCH 378/504] minmax.h: remove some #defines that are only expanded once The bodies of __signed_type_use() and __unsigned_type_use() are much the same size as their names - so put the bodies in the only line that expands them. Similarly __signed_type() is defined separately for 64bit and then used exactly once just below. Change the test for __signed_type from CONFIG_64BIT to one based on gcc defined macros so that the code is valid if it gets used outside of a kernel build. Link: https://lkml.kernel.org/r/9386d1ebb8974fbabbed2635160c3975@AcuMS.aculab.com Signed-off-by: David Laight Cc: Andy Shevchenko Cc: Arnd Bergmann Cc: Christoph Hellwig Cc: Dan Carpenter Cc: Jason A. Donenfeld Cc: Jens Axboe Cc: Lorenzo Stoakes Cc: Mateusz Guzik Cc: Matthew Wilcox Cc: Pedro Falcato Signed-off-by: Andrew Morton --- include/linux/minmax.h | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/include/linux/minmax.h b/include/linux/minmax.h index 2bbdd5b5e07e..eaaf5c008e4d 100644 --- a/include/linux/minmax.h +++ b/include/linux/minmax.h @@ -46,10 +46,8 @@ * comparison, and these expressions only need to be careful to not cause * warnings for pointer use. 
*/ -#define __signed_type_use(ux) (2 + __is_nonneg(ux)) -#define __unsigned_type_use(ux) (1 + 2 * (sizeof(ux) < 4)) #define __sign_use(ux) (is_signed_type(typeof(ux)) ? \ - __signed_type_use(ux) : __unsigned_type_use(ux)) + (2 + __is_nonneg(ux)) : (1 + 2 * (sizeof(ux) < 4))) /* * Check whether a signed value is always non-negative. @@ -57,7 +55,7 @@ * A cast is needed to avoid any warnings from values that aren't signed * integer types (in which case the result doesn't matter). * - * On 64-bit any integer or pointer type can safely be cast to 'long'. + * On 64-bit any integer or pointer type can safely be cast to 'long long'. * But on 32-bit we need to avoid warnings about casting pointers to integers * of different sizes without truncating 64-bit values so 'long' or 'long long' * must be used depending on the size of the value. @@ -66,12 +64,12 @@ * them, but we do not use s128 types in the kernel (we do use 'u128', * but they are handled by the !is_signed_type() case). */ -#ifdef CONFIG_64BIT - #define __signed_type(ux) long +#if __SIZEOF_POINTER__ == __SIZEOF_LONG_LONG__ +#define __is_nonneg(ux) statically_true((long long)(ux) >= 0) #else - #define __signed_type(ux) typeof(__builtin_choose_expr(sizeof(ux) > 4, 1LL, 1L)) +#define __is_nonneg(ux) statically_true( \ + (typeof(__builtin_choose_expr(sizeof(ux) > 4, 1LL, 1L)))(ux) >= 0) #endif -#define __is_nonneg(ux) statically_true((__signed_type(ux))(ux) >= 0) #define __types_ok(ux, uy) \ (__sign_use(ux) & __sign_use(uy)) From 15208352b165c438961b9347f10720a6d145fc67 Mon Sep 17 00:00:00 2001 From: Kuan-Wei Chiu Date: Sat, 30 Nov 2024 02:12:19 +0800 Subject: [PATCH 379/504] lib min_heap: improve type safety in min_heap macros by using container_of Patch series "lib min_heap: Improve min_heap safety, testing, and documentation". Improve the min heap implementation by enhancing type safety with container_of, reducing the attack vector by replacing test function calls with inline variants, and adding a brief API introduction in min_heap.h. It also includes author information in Documentation/core-api/min_heap.rst. This patch (of 4): The current implementation of min_heap macros uses explicit casting to min_heap_char *, which prevents the compiler from detecting incorrect pointer types. This can lead to errors if non-min_heap pointers are passed inadvertently. To enhance safety, replace all explicit casts to min_heap_char * with the use of container_of(&(_heap)->nr, min_heap_char, nr). This approach ensures that the _heap parameter is indeed a min_heap_char-compatible structure, allowing the compiler to catch improper usages. 
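The effect is easy to reproduce outside the kernel. A toy standalone sketch (hypothetical struct names and a minimal userspace container_of(); the kernel macro additionally type-checks the member, which is omitted here) of why the blind cast hides bugs while going through container_of() on the shared 'nr' member does not:

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct fake_heap { int nr; int size; void *data; };
	struct unrelated { int count; };

	#define AS_HEAP_CAST(p)	((struct fake_heap *)(p))
	#define AS_HEAP_SAFE(p)	container_of(&(p)->nr, struct fake_heap, nr)

	int main(void)
	{
		struct fake_heap h = { 0 };
		struct unrelated u = { 0 };

		(void)AS_HEAP_CAST(&h);	/* fine */
		(void)AS_HEAP_CAST(&u);	/* also accepted: the bug is silent */
		(void)AS_HEAP_SAFE(&h);	/* fine */
		/* (void)AS_HEAP_SAFE(&u); -- compile error:
		 *	'struct unrelated' has no member named 'nr' */
		return 0;
	}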
Link: https://lkml.kernel.org/r/20241129181222.646855-1-visitorckw@gmail.com Link: https://lore.kernel.org/lkml/CAMuHMdVO5DPuD9HYWBFqKDHphx7+0BEhreUxtVC40A=8p6VAhQ@mail.gmail.com Link: https://lkml.kernel.org/r/20241129181222.646855-2-visitorckw@gmail.com Signed-off-by: Kuan-Wei Chiu Suggested-by: Geert Uytterhoeven Cc: Ching-Chun (Jim) Huang Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/min_heap.h | 61 +++++++++++++++++++++++----------------- 1 file changed, 35 insertions(+), 26 deletions(-) diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h index e781727c8916..456cfbc1b8f5 100644 --- a/include/linux/min_heap.h +++ b/include/linux/min_heap.h @@ -218,7 +218,7 @@ void __min_heap_init_inline(min_heap_char *heap, void *data, int size) } #define min_heap_init_inline(_heap, _data, _size) \ - __min_heap_init_inline((min_heap_char *)_heap, _data, _size) + __min_heap_init_inline(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size) /* Get the minimum element from the heap. */ static __always_inline @@ -228,7 +228,8 @@ void *__min_heap_peek_inline(struct min_heap_char *heap) } #define min_heap_peek_inline(_heap) \ - (__minheap_cast(_heap) __min_heap_peek_inline((min_heap_char *)_heap)) + (__minheap_cast(_heap) \ + __min_heap_peek_inline(container_of(&(_heap)->nr, min_heap_char, nr))) /* Check if the heap is full. */ static __always_inline @@ -238,7 +239,7 @@ bool __min_heap_full_inline(min_heap_char *heap) } #define min_heap_full_inline(_heap) \ - __min_heap_full_inline((min_heap_char *)_heap) + __min_heap_full_inline(container_of(&(_heap)->nr, min_heap_char, nr)) /* Sift the element at pos down the heap. */ static __always_inline @@ -277,8 +278,8 @@ void __min_heap_sift_down_inline(min_heap_char *heap, int pos, size_t elem_size, } #define min_heap_sift_down_inline(_heap, _pos, _func, _args) \ - __min_heap_sift_down_inline((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), \ - _func, _args) + __min_heap_sift_down_inline(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \ + __minheap_obj_size(_heap), _func, _args) /* Sift up ith element from the heap, O(log2(nr)). */ static __always_inline @@ -304,8 +305,8 @@ void __min_heap_sift_up_inline(min_heap_char *heap, size_t elem_size, size_t idx } #define min_heap_sift_up_inline(_heap, _idx, _func, _args) \ - __min_heap_sift_up_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \ - _func, _args) + __min_heap_sift_up_inline(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _idx, _func, _args) /* Floyd's approach to heapification that is O(nr). */ static __always_inline @@ -319,7 +320,8 @@ void __min_heapify_all_inline(min_heap_char *heap, size_t elem_size, } #define min_heapify_all_inline(_heap, _func, _args) \ - __min_heapify_all_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) + __min_heapify_all_inline(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _func, _args) /* Remove minimum element from the heap, O(log2(nr)). */ static __always_inline @@ -340,7 +342,8 @@ bool __min_heap_pop_inline(min_heap_char *heap, size_t elem_size, } #define min_heap_pop_inline(_heap, _func, _args) \ - __min_heap_pop_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) + __min_heap_pop_inline(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _func, _args) /* * Remove the minimum element and then push the given element. 
The @@ -356,8 +359,8 @@ void __min_heap_pop_push_inline(min_heap_char *heap, const void *element, size_t } #define min_heap_pop_push_inline(_heap, _element, _func, _args) \ - __min_heap_pop_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ - _func, _args) + __min_heap_pop_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \ + __minheap_obj_size(_heap), _func, _args) /* Push an element on to the heap, O(log2(nr)). */ static __always_inline @@ -382,8 +385,8 @@ bool __min_heap_push_inline(min_heap_char *heap, const void *element, size_t ele } #define min_heap_push_inline(_heap, _element, _func, _args) \ - __min_heap_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ - _func, _args) + __min_heap_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \ + __minheap_obj_size(_heap), _func, _args) /* Remove ith element from the heap, O(log2(nr)). */ static __always_inline @@ -411,8 +414,8 @@ bool __min_heap_del_inline(min_heap_char *heap, size_t elem_size, size_t idx, } #define min_heap_del_inline(_heap, _idx, _func, _args) \ - __min_heap_del_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \ - _func, _args) + __min_heap_del_inline(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _idx, _func, _args) void __min_heap_init(min_heap_char *heap, void *data, int size); void *__min_heap_peek(struct min_heap_char *heap); @@ -433,25 +436,31 @@ bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx, const struct min_heap_callbacks *func, void *args); #define min_heap_init(_heap, _data, _size) \ - __min_heap_init((min_heap_char *)_heap, _data, _size) + __min_heap_init(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size) #define min_heap_peek(_heap) \ - (__minheap_cast(_heap) __min_heap_peek((min_heap_char *)_heap)) + (__minheap_cast(_heap) __min_heap_peek(container_of(&(_heap)->nr, min_heap_char, nr))) #define min_heap_full(_heap) \ - __min_heap_full((min_heap_char *)_heap) + __min_heap_full(container_of(&(_heap)->nr, min_heap_char, nr)) #define min_heap_sift_down(_heap, _pos, _func, _args) \ - __min_heap_sift_down((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), _func, _args) + __min_heap_sift_down(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_sift_up(_heap, _idx, _func, _args) \ - __min_heap_sift_up((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) + __min_heap_sift_up(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _idx, _func, _args) #define min_heapify_all(_heap, _func, _args) \ - __min_heapify_all((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) + __min_heapify_all(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_pop(_heap, _func, _args) \ - __min_heap_pop((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args) + __min_heap_pop(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_pop_push(_heap, _element, _func, _args) \ - __min_heap_pop_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \ - _func, _args) + __min_heap_pop_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_push(_heap, _element, _func, _args) \ - __min_heap_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args) + 
__min_heap_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \ + __minheap_obj_size(_heap), _func, _args) #define min_heap_del(_heap, _idx, _func, _args) \ - __min_heap_del((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args) + __min_heap_del(container_of(&(_heap)->nr, min_heap_char, nr), \ + __minheap_obj_size(_heap), _idx, _func, _args) #endif /* _LINUX_MIN_HEAP_H */ From 005e3ec8ca2400e4aa29dc97550c9cfeaa882605 Mon Sep 17 00:00:00 2001 From: Kuan-Wei Chiu Date: Sat, 30 Nov 2024 02:12:20 +0800 Subject: [PATCH 380/504] lib/test_min_heap: use inline min heap variants to reduce attack vector To address concerns about increasing the attack vector, remove the select MIN_HEAP dependency from TEST_MIN_HEAP in Kconfig.debug. Additionally, all min heap test function calls in lib/test_min_heap.c are replaced with their inline variants. By exclusively using inline variants, we eliminate the need to enable CONFIG_MIN_HEAP for testing purposes. Link: https://lore.kernel.org/lkml/CAMuHMdVO5DPuD9HYWBFqKDHphx7+0BEhreUxtVC40A=8p6VAhQ@mail.gmail.com Link: https://lkml.kernel.org/r/20241129181222.646855-3-visitorckw@gmail.com Signed-off-by: Kuan-Wei Chiu Suggested-by: Geert Uytterhoeven Cc: Ching-Chun (Jim) Huang Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- lib/Kconfig.debug | 1 - lib/test_min_heap.c | 30 +++++++++++++++--------------- 2 files changed, 15 insertions(+), 16 deletions(-) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index f3d723705879..e968f083d356 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2269,7 +2269,6 @@ config TEST_LIST_SORT config TEST_MIN_HEAP tristate "Min heap test" depends on DEBUG_KERNEL || m - select MIN_HEAP help Enable this to turn on min heap function tests. This test is executed only once during system boot (so affects only boot time), diff --git a/lib/test_min_heap.c b/lib/test_min_heap.c index e6fbb798558b..a9c4a74d3898 100644 --- a/lib/test_min_heap.c +++ b/lib/test_min_heap.c @@ -32,7 +32,7 @@ static __init int pop_verify_heap(bool min_heap, int last; last = values[0]; - min_heap_pop(heap, funcs, NULL); + min_heap_pop_inline(heap, funcs, NULL); while (heap->nr > 0) { if (min_heap) { if (last > values[0]) { @@ -48,7 +48,7 @@ static __init int pop_verify_heap(bool min_heap, } } last = values[0]; - min_heap_pop(heap, funcs, NULL); + min_heap_pop_inline(heap, funcs, NULL); } return err; } @@ -69,7 +69,7 @@ static __init int test_heapify_all(bool min_heap) int i, err; /* Test with known set of values. */ - min_heapify_all(&heap, &funcs, NULL); + min_heapify_all_inline(&heap, &funcs, NULL); err = pop_verify_heap(min_heap, &heap, &funcs); @@ -78,7 +78,7 @@ static __init int test_heapify_all(bool min_heap) for (i = 0; i < heap.nr; i++) values[i] = get_random_u32(); - min_heapify_all(&heap, &funcs, NULL); + min_heapify_all_inline(&heap, &funcs, NULL); err += pop_verify_heap(min_heap, &heap, &funcs); return err; @@ -102,14 +102,14 @@ static __init int test_heap_push(bool min_heap) /* Test with known set of values copied from data. */ for (i = 0; i < ARRAY_SIZE(data); i++) - min_heap_push(&heap, &data[i], &funcs, NULL); + min_heap_push_inline(&heap, &data[i], &funcs, NULL); err = pop_verify_heap(min_heap, &heap, &funcs); /* Test with randomly generated values. 
*/ while (heap.nr < heap.size) { temp = get_random_u32(); - min_heap_push(&heap, &temp, &funcs, NULL); + min_heap_push_inline(&heap, &temp, &funcs, NULL); } err += pop_verify_heap(min_heap, &heap, &funcs); @@ -135,22 +135,22 @@ static __init int test_heap_pop_push(bool min_heap) /* Fill values with data to pop and replace. */ temp = min_heap ? 0x80000000 : 0x7FFFFFFF; for (i = 0; i < ARRAY_SIZE(data); i++) - min_heap_push(&heap, &temp, &funcs, NULL); + min_heap_push_inline(&heap, &temp, &funcs, NULL); /* Test with known set of values copied from data. */ for (i = 0; i < ARRAY_SIZE(data); i++) - min_heap_pop_push(&heap, &data[i], &funcs, NULL); + min_heap_pop_push_inline(&heap, &data[i], &funcs, NULL); err = pop_verify_heap(min_heap, &heap, &funcs); heap.nr = 0; for (i = 0; i < ARRAY_SIZE(data); i++) - min_heap_push(&heap, &temp, &funcs, NULL); + min_heap_push_inline(&heap, &temp, &funcs, NULL); /* Test with randomly generated values. */ for (i = 0; i < ARRAY_SIZE(data); i++) { temp = get_random_u32(); - min_heap_pop_push(&heap, &temp, &funcs, NULL); + min_heap_pop_push_inline(&heap, &temp, &funcs, NULL); } err += pop_verify_heap(min_heap, &heap, &funcs); @@ -163,7 +163,7 @@ static __init int test_heap_del(bool min_heap) -3, -1, -2, -4, 0x8000000, 0x7FFFFFF }; struct min_heap_test heap; - min_heap_init(&heap, values, ARRAY_SIZE(values)); + min_heap_init_inline(&heap, values, ARRAY_SIZE(values)); heap.nr = ARRAY_SIZE(values); struct min_heap_callbacks funcs = { .less = min_heap ? less_than : greater_than, @@ -172,9 +172,9 @@ static __init int test_heap_del(bool min_heap) int i, err; /* Test with known set of values. */ - min_heapify_all(&heap, &funcs, NULL); + min_heapify_all_inline(&heap, &funcs, NULL); for (i = 0; i < ARRAY_SIZE(values) / 2; i++) - min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL); + min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL); err = pop_verify_heap(min_heap, &heap, &funcs); @@ -182,10 +182,10 @@ static __init int test_heap_del(bool min_heap) heap.nr = ARRAY_SIZE(values); for (i = 0; i < heap.nr; i++) values[i] = get_random_u32(); - min_heapify_all(&heap, &funcs, NULL); + min_heapify_all_inline(&heap, &funcs, NULL); for (i = 0; i < ARRAY_SIZE(values) / 2; i++) - min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL); + min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL); err += pop_verify_heap(min_heap, &heap, &funcs); return err; From 0b82cadf640bf2ced5ee82f1a9f39bbb527277cd Mon Sep 17 00:00:00 2001 From: Kuan-Wei Chiu Date: Sat, 30 Nov 2024 02:12:21 +0800 Subject: [PATCH 381/504] lib min_heap: add brief introduction to Min Heap API A short description of the Min Heap API is added to the min_heap.h, explaining its purpose for managing min-heaps and emphasizing the use of macro wrappers instead of direct function calls. For more details, users are directed to the documentation at Documentation/core-api/min_heap.rst. 
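To make the intended calling pattern concrete, a minimal usage sketch modelled on lib/test_min_heap.c (the element type, helper names and the DEFINE_MIN_HEAP() instantiation here are illustrative assumptions, not code from this patch):

	DEFINE_MIN_HEAP(int, min_heap_int);

	static bool int_less(const void *lhs, const void *rhs, void *args)
	{
		return *(const int *)lhs < *(const int *)rhs;
	}

	static void int_swap(void *lhs, void *rhs, void *args)
	{
		int tmp = *(int *)lhs;

		*(int *)lhs = *(int *)rhs;
		*(int *)rhs = tmp;
	}

	static void example(void)
	{
		int backing[8];
		int v = 42;
		struct min_heap_int heap;
		struct min_heap_callbacks funcs = {
			.less = int_less,
			.swp = int_swap,
		};

		min_heap_init_inline(&heap, backing, ARRAY_SIZE(backing));
		min_heap_push_inline(&heap, &v, &funcs, NULL);

		while (heap.nr > 0) {
			int *top = min_heap_peek_inline(&heap);

			/* ... consume *top ... */
			min_heap_pop_inline(&heap, &funcs, NULL);
		}
	}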
Link: https://lkml.kernel.org/r/20241129181222.646855-4-visitorckw@gmail.com Signed-off-by: Kuan-Wei Chiu Cc: Ching-Chun (Jim) Huang Cc: Geert Uytterhoeven Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- include/linux/min_heap.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h index 456cfbc1b8f5..55bfe670bbb9 100644 --- a/include/linux/min_heap.h +++ b/include/linux/min_heap.h @@ -6,6 +6,17 @@ #include #include +/* + * The Min Heap API provides utilities for managing min-heaps, a binary tree + * structure where each node's value is less than or equal to its children's + * values, ensuring the smallest element is at the root. + * + * Users should avoid directly calling functions prefixed with __min_heap_*(). + * Instead, use the provided macro wrappers. + * + * For further details and examples, refer to Documentation/core-api/min_heap.rst. + */ + /** * Data structure to hold a min-heap. * @nr: Number of elements currently in the heap. From ba36f8197d5ec09b8e0540596cb633f2924c8440 Mon Sep 17 00:00:00 2001 From: Kuan-Wei Chiu Date: Sat, 30 Nov 2024 02:12:22 +0800 Subject: [PATCH 382/504] Documentation/core-api: min_heap: add author information As with other documentation files, author information is added to min_heap.rst, providing contact details for any questions regarding the Min Heap API or the document itself. Link: https://lkml.kernel.org/r/20241129181222.646855-5-visitorckw@gmail.com Signed-off-by: Kuan-Wei Chiu Cc: Ching-Chun (Jim) Huang Cc: Geert Uytterhoeven Cc: Jonathan Corbet Signed-off-by: Andrew Morton --- Documentation/core-api/min_heap.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/Documentation/core-api/min_heap.rst b/Documentation/core-api/min_heap.rst index 0c636c8b7aa5..683bc6d09f00 100644 --- a/Documentation/core-api/min_heap.rst +++ b/Documentation/core-api/min_heap.rst @@ -4,6 +4,8 @@ Min Heap API ============ +:Author: Kuan-Wei Chiu + Introduction ============ From 10ea13b0047ad568d6c634dcfb10504678ae8c27 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 13 Nov 2024 10:21:06 +0000 Subject: [PATCH 383/504] scripts/spelling.txt: add more spellings to spelling.txt Add some of the more common spelling mistakes and typos that I've found while fixing up spelling mistakes in the kernel over the past year. 
Link: https://lkml.kernel.org/r/20241113102106.1163050-1-colin.i.king@gmail.com Signed-off-by: Colin Ian King Signed-off-by: Andrew Morton --- scripts/spelling.txt | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/scripts/spelling.txt b/scripts/spelling.txt index 05bd9ca1fbfa..2decc50f5a6e 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt @@ -222,6 +222,7 @@ autonymous||autonomous auxillary||auxiliary auxilliary||auxiliary avaiable||available +avaialable||available avaible||available availabe||available availabled||available @@ -267,6 +268,7 @@ broadcase||broadcast broadcat||broadcast bufer||buffer bufferred||buffered +bufferur||buffer bufufer||buffer cacluated||calculated caculate||calculate @@ -405,6 +407,7 @@ configutation||configuration congiuration||configuration conider||consider conjuction||conjunction +connction||connection connecetd||connected connectinos||connections connetor||connector @@ -413,6 +416,7 @@ connnections||connections consistancy||consistency consistant||consistent consits||consists +constructred||constructed containes||contains containts||contains contaisn||contains @@ -450,6 +454,7 @@ creationg||creating cryptocraphic||cryptographic cummulative||cumulative cunter||counter +curent||current curently||currently cylic||cyclic dafault||default @@ -461,6 +466,7 @@ decendant||descendant decendants||descendants decompres||decompress decsribed||described +decrese||decrease decription||description detault||default dectected||detected @@ -485,6 +491,7 @@ delare||declare delares||declares delaring||declaring delemiter||delimiter +deley||delay delibrately||deliberately delievered||delivered demodualtor||demodulator @@ -551,6 +558,7 @@ disgest||digest disired||desired dispalying||displaying dissable||disable +dissapeared||disappeared diplay||display directon||direction direcly||directly @@ -606,6 +614,7 @@ eigth||eight elementry||elementary eletronic||electronic embeded||embedded +emtpy||empty enabledi||enabled enbale||enable enble||enable @@ -723,10 +732,12 @@ followign||following followings||following follwing||following fonud||found +forcebly||forcibly forseeable||foreseeable forse||force fortan||fortran forwardig||forwarding +forwared||forwarded frambuffer||framebuffer framming||framing framwork||framework @@ -767,6 +778,7 @@ grahpical||graphical granularty||granularity grapic||graphic grranted||granted +grups||groups guage||gauge guarenteed||guaranteed guarentee||guarantee @@ -780,6 +792,7 @@ hardare||hardware harware||hardware hardward||hardware havind||having +heigth||height heirarchically||hierarchically heirarchy||hierarchy heirachy||hierarchy @@ -788,9 +801,11 @@ hearbeat||heartbeat heterogenous||heterogeneous hexdecimal||hexadecimal hybernate||hibernate +hiearchy||hierarchy hierachy||hierarchy hierarchie||hierarchy homogenous||homogeneous +horizental||horizontal howver||however hsould||should hypervior||hypervisor @@ -842,6 +857,7 @@ independed||independent indiate||indicate indicat||indicate inexpect||inexpected +infalte||inflate inferface||interface infinit||infinite infomation||information @@ -861,6 +877,7 @@ initators||initiators initialiazation||initialization initializationg||initialization initializiation||initialization +initializtion||initialization initialze||initialize initialzed||initialized initialzing||initializing @@ -877,6 +894,7 @@ instanciate||instantiate instanciated||instantiated instuments||instruments insufficent||insufficient +intead||instead inteface||interface integreated||integrated 
integrety||integrity @@ -1081,6 +1099,7 @@ notications||notifications notifcations||notifications notifed||notified notity||notify +notfify||notify nubmer||number numebr||number numer||number @@ -1122,6 +1141,7 @@ orientatied||orientated orientied||oriented orignal||original originial||original +orphanded||orphaned otherise||otherwise ouput||output oustanding||outstanding @@ -1184,9 +1204,11 @@ peroid||period persistance||persistence persistant||persistent phoneticly||phonetically +pipline||pipeline plaform||platform plalform||platform platfoem||platform +platfomr||platform platfrom||platform plattform||platform pleaes||please @@ -1211,6 +1233,7 @@ preceeding||preceding preceed||precede precendence||precedence precission||precision +predicition||prediction preemptable||preemptible prefered||preferred prefferably||preferably @@ -1289,6 +1312,7 @@ querrying||querying queus||queues randomally||randomly raoming||roaming +readyness||readiness reasearcher||researcher reasearchers||researchers reasearch||research @@ -1305,8 +1329,10 @@ recieves||receives recieving||receiving recogniced||recognised recognizeable||recognizable +recompte||recompute recommanded||recommended recyle||recycle +redect||reject redircet||redirect redirectrion||redirection redundacy||redundancy @@ -1314,6 +1340,7 @@ reename||rename refcounf||refcount refence||reference refered||referred +referencce||reference referenace||reference refererence||reference refering||referring @@ -1348,11 +1375,13 @@ replys||replies reponse||response representaion||representation repsonse||response +reqested||requested reqeust||request reqister||register requed||requeued requestied||requested requiere||require +requieres||requires requirment||requirement requred||required requried||required @@ -1440,6 +1469,7 @@ sequencial||sequential serivce||service serveral||several servive||service +sesion||session setts||sets settting||setting shapshot||snapshot @@ -1602,11 +1632,13 @@ trys||tries thses||these tiggers||triggers tiggered||triggered +tiggerring||triggering tipically||typically timeing||timing timming||timing timout||timeout tmis||this +tolarance||tolerance toogle||toggle torerable||tolerable torlence||tolerance @@ -1633,6 +1665,7 @@ trasfer||transfer trasmission||transmission trasmitter||transmitter treshold||threshold +trigged||triggered triggerd||triggered trigerred||triggered trigerring||triggering @@ -1648,6 +1681,7 @@ uknown||unknown usccess||success uncommited||uncommitted uncompatible||incompatible +uncomressed||uncompressed unconditionaly||unconditionally undeflow||underflow undelying||underlying @@ -1715,6 +1749,7 @@ utitity||utility utitlty||utility vaid||valid vaild||valid +validationg||validating valide||valid variantions||variations varible||variable @@ -1724,6 +1759,7 @@ verbse||verbose veify||verify verfication||verification veriosn||version +versoin||version verisons||versions verison||version veritical||vertical From e5a5560bcb3d0669a519b1f9dfec937578113430 Mon Sep 17 00:00:00 2001 From: Tamir Duberstein Date: Tue, 12 Nov 2024 14:25:36 -0500 Subject: [PATCH 384/504] xarray: extract xa_zero_to_null Patch series "xarray: extract __xa_cmpxchg_raw". This series reduces duplication between __xa_cmpxchg and __xa_insert by extracting a new function that does not coerce zero entries to null on the return path. The new function may be used by the upcoming Rust xarray abstraction in its reservation API where it is useful to tell the difference between zero entries and null slots. 
This patch (of 2): Reduce code duplication by extracting a static inline function that returns its argument if it is non-zero and NULL otherwise. This changes xas_result to check for errors before checking for zero but this cannot change the behavior of existing callers: - __xa_erase: passes the result of xas_store(_, NULL) which cannot fail. - __xa_store: passes the result of xas_store(_, entry) which may fail. xas_store calls xas_create when entry is not NULL which returns NULL on error, which is immediately checked. This should not change observable behavior. - __xa_cmpxchg: passes the result of xas_load(_) which might be zero. This would previously return NULL regardless of the outcome of xas_store but xas_store cannot fail if xas_load returns zero because there is no need to allocate memory. - xa_store_range: same as __xa_erase. Link: https://lkml.kernel.org/r/20241112-xarray-insert-cmpxchg-v1-0-dc2bdd8c4136@gmail.com Link: https://lkml.kernel.org/r/20241112-xarray-insert-cmpxchg-v1-1-dc2bdd8c4136@gmail.com Signed-off-by: Tamir Duberstein Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Matthew Wilcox Signed-off-by: Andrew Morton --- lib/xarray.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/lib/xarray.c b/lib/xarray.c index 32d4bac8c94c..1b8305bffbff 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -435,6 +435,11 @@ static unsigned long max_index(void *entry) return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1; } +static inline void *xa_zero_to_null(void *entry) +{ + return xa_is_zero(entry) ? NULL : entry; +} + static void xas_shrink(struct xa_state *xas) { struct xarray *xa = xas->xa; @@ -451,8 +456,8 @@ static void xas_shrink(struct xa_state *xas) break; if (!xa_is_node(entry) && node->shift) break; - if (xa_is_zero(entry) && xa_zero_busy(xa)) - entry = NULL; + if (xa_zero_busy(xa)) + entry = xa_zero_to_null(entry); xas->xa_node = XAS_BOUNDS; RCU_INIT_POINTER(xa->xa_head, entry); @@ -1474,9 +1479,7 @@ void *xa_load(struct xarray *xa, unsigned long index) rcu_read_lock(); do { - entry = xas_load(&xas); - if (xa_is_zero(entry)) - entry = NULL; + entry = xa_zero_to_null(xas_load(&xas)); } while (xas_retry(&xas, entry)); rcu_read_unlock(); @@ -1486,11 +1489,9 @@ EXPORT_SYMBOL(xa_load); static void *xas_result(struct xa_state *xas, void *curr) { - if (xa_is_zero(curr)) - return NULL; if (xas_error(xas)) curr = xas->xa_node; - return curr; + return xa_zero_to_null(curr); } /** From 5801c2876b73ead7f10f6ed7f3bf73392e310c1c Mon Sep 17 00:00:00 2001 From: Tamir Duberstein Date: Tue, 12 Nov 2024 14:25:37 -0500 Subject: [PATCH 385/504] xarray: extract helper from __xa_{insert,cmpxchg} Reduce code duplication by extracting a static inline function. This function is identical to __xa_cmpxchg with the exception that it does not coerce zero entries to null on the return path. 
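The behaviour the helper must preserve is visible from the normal API; a short sketch based on the semantics shown in the diffs above and below (xa_load() coerces zero entries to NULL, __xa_insert() reports -EBUSY on any non-empty slot), wrapped in a hypothetical function:

	static void reserved_slot_example(struct xarray *xa, void *item)
	{
		if (xa_reserve(xa, 1, GFP_KERNEL))	/* stores a zero entry */
			return;

		/* The zero entry reads back as NULL for normal users ... */
		WARN_ON(xa_load(xa, 1) != NULL);

		/* ... but insert must still see the slot as occupied, which
		 * is why it needs the non-coercing __xa_cmpxchg_raw(). */
		WARN_ON(xa_insert(xa, 1, item, GFP_KERNEL) != -EBUSY);
	}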
Link: https://lkml.kernel.org/r/20241112-xarray-insert-cmpxchg-v1-2-dc2bdd8c4136@gmail.com Signed-off-by: Tamir Duberstein Cc: Alice Ryhl Cc: Andreas Hindborg Cc: Matthew Wilcox Signed-off-by: Andrew Morton --- lib/xarray.c | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/lib/xarray.c b/lib/xarray.c index 1b8305bffbff..2af86bede3c1 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1491,7 +1491,7 @@ static void *xas_result(struct xa_state *xas, void *curr) { if (xas_error(xas)) curr = xas->xa_node; - return xa_zero_to_null(curr); + return curr; } /** @@ -1568,7 +1568,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) xas_clear_mark(&xas, XA_FREE_MARK); } while (__xas_nomem(&xas, gfp)); - return xas_result(&xas, curr); + return xas_result(&xas, xa_zero_to_null(curr)); } EXPORT_SYMBOL(__xa_store); @@ -1601,6 +1601,9 @@ void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) } EXPORT_SYMBOL(xa_store); +static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp); + /** * __xa_cmpxchg() - Store this entry in the XArray. * @xa: XArray. @@ -1619,6 +1622,13 @@ EXPORT_SYMBOL(xa_store); */ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, void *old, void *entry, gfp_t gfp) +{ + return xa_zero_to_null(__xa_cmpxchg_raw(xa, index, old, entry, gfp)); +} +EXPORT_SYMBOL(__xa_cmpxchg); + +static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index, + void *old, void *entry, gfp_t gfp) { XA_STATE(xas, xa, index); void *curr; @@ -1637,7 +1647,6 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index, return xas_result(&xas, curr); } -EXPORT_SYMBOL(__xa_cmpxchg); /** * __xa_insert() - Store this entry in the XArray if no entry is present. @@ -1657,26 +1666,16 @@ EXPORT_SYMBOL(__xa_cmpxchg); */ int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp) { - XA_STATE(xas, xa, index); void *curr; + int errno; - if (WARN_ON_ONCE(xa_is_advanced(entry))) - return -EINVAL; if (!entry) entry = XA_ZERO_ENTRY; - - do { - curr = xas_load(&xas); - if (!curr) { - xas_store(&xas, entry); - if (xa_track_free(xa)) - xas_clear_mark(&xas, XA_FREE_MARK); - } else { - xas_set_err(&xas, -EBUSY); - } - } while (__xas_nomem(&xas, gfp)); - - return xas_error(&xas); + curr = __xa_cmpxchg_raw(xa, index, NULL, entry, gfp); + errno = xa_err(curr); + if (errno) + return errno; + return (curr != NULL) ? 
-EBUSY : 0; } EXPORT_SYMBOL(__xa_insert); From 8becc9d8196686c4af74c190949cd4d6de304c33 Mon Sep 17 00:00:00 2001 From: Tamir Duberstein Date: Wed, 4 Dec 2024 10:21:59 -0500 Subject: [PATCH 386/504] xarray-extract-helper-from-__xa_insertcmpxchg-fix fix __xa_erase() Link: https://lkml.kernel.org/r/CAJ-ks9kN_qddZ3Ne5d=cADu5POC1rHd4rQcbVSD_spnZOrLLZg@mail.gmail.com Signed-off-by: Tamir Duberstein Reported-by: Cc: Jens Axboe Cc: Matthew Wilcox Cc: Pavel Begunkov Signed-off-by: Andrew Morton --- lib/xarray.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/xarray.c b/lib/xarray.c index 2af86bede3c1..5da8d18899a1 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1509,7 +1509,7 @@ static void *xas_result(struct xa_state *xas, void *curr) void *__xa_erase(struct xarray *xa, unsigned long index) { XA_STATE(xas, xa, index); - return xas_result(&xas, xas_store(&xas, NULL)); + return xas_result(&xas, xa_zero_to_null(xas_store(&xas, NULL))); } EXPORT_SYMBOL(__xa_erase); From 667ee1a0bc76633c454bf7351563663d03660a4e Mon Sep 17 00:00:00 2001 From: Zijun Hu Date: Thu, 17 Oct 2024 23:34:49 +0800 Subject: [PATCH 387/504] kernel/resource: simplify API __devm_release_region() implementation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Simplify __devm_release_region() implementation by dedicated API devres_release() which have below advantages than current __release_region() + devres_destroy(): It is simpler if __devm_release_region() is undoing what __devm_request_region() did, otherwise, it can avoid wrong and undesired __release_region(). Link: https://lkml.kernel.org/r/20241017-release_region_fix-v1-1-84a3e8441284@quicinc.com Signed-off-by: Zijun Hu Cc: Andy Shevchenko Cc: Bjorn Helgaas Cc: Ilpo Järvinen Cc: Mika Westerberg Signed-off-by: Andrew Morton --- kernel/resource.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/resource.c b/kernel/resource.c index b7c0e24d9398..12004452d999 100644 --- a/kernel/resource.c +++ b/kernel/resource.c @@ -1683,8 +1683,7 @@ void __devm_release_region(struct device *dev, struct resource *parent, { struct region_devres match_data = { parent, start, n }; - __release_region(parent, start, n); - WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match, + WARN_ON(devres_release(dev, devm_region_release, devm_region_match, &match_data)); } EXPORT_SYMBOL(__devm_release_region); From 2ef8f1b73f2508a9ed7541ab723505bf41dc0509 Mon Sep 17 00:00:00 2001 From: Wang Yaxin Date: Tue, 3 Dec 2024 16:48:48 +0800 Subject: [PATCH 388/504] delayacct: add delay max to record delay peak Introduce the use cases of delay max, which can help quickly detect potential abnormal delays in the system and record the types and specific details of delay spikes. Problem ======== Delay accounting can track the average delay of processes to show system workload. However, when a process experiences a significant delay, maybe a delay spike, which adversely affects performance, getdelays can only display the average system delay over a period of time. Yet, average delay is unhelpful for diagnosing delay peak. It is not even possible to determine which type of delay has spiked, as this information might be masked by the average delay. Solution ========= the 'delay max' can display delay peak since the system's startup, which can record potential abnormal delays over time, including the type of delay and the maximum delay. This is helpful for quickly identifying crash caused by delay. 
Use case ========= bash# ./getdelays -d -p 244 print delayacct stats ON PID 244 CPU count real total virtual total delay total delay average delay max 68 192000000 213676651 705643 0.010ms 0.306381ms IO count delay total delay average delay max 0 0 0.000ms 0.000000ms SWAP count delay total delay average delay max 0 0 0.000ms 0.000000ms RECLAIM count delay total delay average delay max 0 0 0.000ms 0.000000ms THRASHING count delay total delay average delay max 0 0 0.000ms 0.000000ms COMPACT count delay total delay average delay max 0 0 0.000ms 0.000000ms WPCOPY count delay total delay average delay max 235 15648284 0.067ms 0.263842ms IRQ count delay total delay average delay max 0 0 0.000ms 0.000000ms Link: https://lkml.kernel.org/r/20241203164848805CS62CQPQWG9GLdQj2_BxS@zte.com.cn Co-developed-by: Wang Yong Signed-off-by: Wang Yong Co-developed-by: xu xin Signed-off-by: xu xin Co-developed-by: Wang Yaxin Signed-off-by: Wang Yaxin Signed-off-by: Kun Jiang Cc: Balbir Singh Cc: David Hildenbrand Cc: Fan Yu Cc: Peilin He Cc: tuqiang Cc: Yang Yang Cc: ye xingchen Cc: Yunkai Zhang Signed-off-by: Andrew Morton --- include/linux/delayacct.h | 7 ++++ include/linux/sched.h | 3 ++ include/uapi/linux/taskstats.h | 9 ++++++ kernel/delayacct.c | 35 ++++++++++++++------ kernel/sched/stats.h | 5 ++- tools/accounting/getdelays.c | 59 ++++++++++++++++++++-------------- 6 files changed, 83 insertions(+), 35 deletions(-) diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 6639f48dac36..56fbfa2c2ac5 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -29,25 +29,32 @@ struct task_delay_info { * XXX_delay contains the accumulated delay time in nanoseconds. */ u64 blkio_start; + u64 blkio_delay_max; u64 blkio_delay; /* wait for sync block io completion */ u64 swapin_start; + u64 swapin_delay_max; u64 swapin_delay; /* wait for swapin */ u32 blkio_count; /* total count of the number of sync block */ /* io operations performed */ u32 swapin_count; /* total count of swapin */ u64 freepages_start; + u64 freepages_delay_max; u64 freepages_delay; /* wait for memory reclaim */ u64 thrashing_start; + u64 thrashing_delay_max; u64 thrashing_delay; /* wait for thrashing page */ u64 compact_start; + u64 compact_delay_max; u64 compact_delay; /* wait for memory compact */ u64 wpcopy_start; + u64 wpcopy_delay_max; u64 wpcopy_delay; /* wait for write-protect copy */ + u64 irq_delay_max; u64 irq_delay; /* wait for IRQ/SOFTIRQ */ u32 freepages_count; /* total count of memory reclaim */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 64934e0830af..a0ae3923b41d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -398,6 +398,9 @@ struct sched_info { /* Time spent waiting on a runqueue: */ unsigned long long run_delay; + /* Max time spent waiting on a runqueue: */ + unsigned long long max_run_delay; + /* Timestamps: */ /* When did we last run on a CPU? 
*/ diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h index b50b2eb257a0..e0d1c6fc9f3b 100644 --- a/include/uapi/linux/taskstats.h +++ b/include/uapi/linux/taskstats.h @@ -72,6 +72,7 @@ struct taskstats { */ __u64 cpu_count __attribute__((aligned(8))); __u64 cpu_delay_total; + __u64 cpu_delay_max; /* Following four fields atomically updated using task->delays->lock */ @@ -80,10 +81,12 @@ struct taskstats { */ __u64 blkio_count; __u64 blkio_delay_total; + __u64 blkio_delay_max; /* Delay waiting for page fault I/O (swap in only) */ __u64 swapin_count; __u64 swapin_delay_total; + __u64 swapin_delay_max; /* cpu "wall-clock" running time * On some architectures, value will adjust for cpu time stolen @@ -166,10 +169,12 @@ struct taskstats { /* Delay waiting for memory reclaim */ __u64 freepages_count; __u64 freepages_delay_total; + __u64 freepages_delay_max; /* Delay waiting for thrashing page */ __u64 thrashing_count; __u64 thrashing_delay_total; + __u64 thrashing_delay_max; /* v10: 64-bit btime to avoid overflow */ __u64 ac_btime64; /* 64-bit begin time */ @@ -177,6 +182,7 @@ struct taskstats { /* v11: Delay waiting for memory compact */ __u64 compact_count; __u64 compact_delay_total; + __u64 compact_delay_max; /* v12 begin */ __u32 ac_tgid; /* thread group ID */ @@ -198,10 +204,13 @@ struct taskstats { /* v13: Delay waiting for write-protect copy */ __u64 wpcopy_count; __u64 wpcopy_delay_total; + __u64 wpcopy_delay_max; /* v14: Delay waiting for IRQ/SOFTIRQ */ __u64 irq_count; __u64 irq_delay_total; + __u64 irq_delay_max; + /* v15: add Delay max */ }; diff --git a/kernel/delayacct.c b/kernel/delayacct.c index dead51de8eb5..d64ad2a48b4f 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -95,7 +95,7 @@ void __delayacct_tsk_init(struct task_struct *tsk) * Finish delay accounting for a statistic using its timestamps (@start), * accumalator (@total) and @count */ -static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count) +static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count, u64 *max) { s64 ns = local_clock() - *start; unsigned long flags; @@ -104,6 +104,8 @@ static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *cou raw_spin_lock_irqsave(lock, flags); *total += ns; (*count)++; + if (ns > *max) + *max = ns; raw_spin_unlock_irqrestore(lock, flags); } } @@ -122,7 +124,8 @@ void __delayacct_blkio_end(struct task_struct *p) delayacct_end(&p->delays->lock, &p->delays->blkio_start, &p->delays->blkio_delay, - &p->delays->blkio_count); + &p->delays->blkio_count, + &p->delays->blkio_delay_max); } int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) @@ -153,10 +156,11 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) d->cpu_count += t1; + d->cpu_delay_max = tsk->sched_info.max_run_delay; tmp = (s64)d->cpu_delay_total + t2; d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp; - tmp = (s64)d->cpu_run_virtual_total + t3; + d->cpu_run_virtual_total = (tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp; @@ -164,20 +168,26 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) return 0; /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */ - raw_spin_lock_irqsave(&tsk->delays->lock, flags); + d->blkio_delay_max = tsk->delays->blkio_delay_max; tmp = d->blkio_delay_total + tsk->delays->blkio_delay; d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 
0 : tmp; + d->swapin_delay_max = tsk->delays->swapin_delay_max; tmp = d->swapin_delay_total + tsk->delays->swapin_delay; d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp; + d->freepages_delay_max = tsk->delays->freepages_delay_max; tmp = d->freepages_delay_total + tsk->delays->freepages_delay; d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp; + d->thrashing_delay_max = tsk->delays->thrashing_delay_max; tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay; d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp; + d->compact_delay_max = tsk->delays->compact_delay_max; tmp = d->compact_delay_total + tsk->delays->compact_delay; d->compact_delay_total = (tmp < d->compact_delay_total) ? 0 : tmp; + d->wpcopy_delay_max = tsk->delays->wpcopy_delay_max; tmp = d->wpcopy_delay_total + tsk->delays->wpcopy_delay; d->wpcopy_delay_total = (tmp < d->wpcopy_delay_total) ? 0 : tmp; + d->irq_delay_max = tsk->delays->irq_delay_max; tmp = d->irq_delay_total + tsk->delays->irq_delay; d->irq_delay_total = (tmp < d->irq_delay_total) ? 0 : tmp; d->blkio_count += tsk->delays->blkio_count; @@ -213,7 +223,8 @@ void __delayacct_freepages_end(void) delayacct_end(¤t->delays->lock, ¤t->delays->freepages_start, ¤t->delays->freepages_delay, - ¤t->delays->freepages_count); + ¤t->delays->freepages_count, + ¤t->delays->freepages_delay_max); } void __delayacct_thrashing_start(bool *in_thrashing) @@ -235,7 +246,8 @@ void __delayacct_thrashing_end(bool *in_thrashing) delayacct_end(¤t->delays->lock, ¤t->delays->thrashing_start, ¤t->delays->thrashing_delay, - ¤t->delays->thrashing_count); + ¤t->delays->thrashing_count, + ¤t->delays->thrashing_delay_max); } void __delayacct_swapin_start(void) @@ -248,7 +260,8 @@ void __delayacct_swapin_end(void) delayacct_end(¤t->delays->lock, ¤t->delays->swapin_start, ¤t->delays->swapin_delay, - ¤t->delays->swapin_count); + ¤t->delays->swapin_count, + ¤t->delays->swapin_delay_max); } void __delayacct_compact_start(void) @@ -261,7 +274,8 @@ void __delayacct_compact_end(void) delayacct_end(¤t->delays->lock, ¤t->delays->compact_start, ¤t->delays->compact_delay, - ¤t->delays->compact_count); + ¤t->delays->compact_count, + ¤t->delays->compact_delay_max); } void __delayacct_wpcopy_start(void) @@ -274,7 +288,8 @@ void __delayacct_wpcopy_end(void) delayacct_end(¤t->delays->lock, ¤t->delays->wpcopy_start, ¤t->delays->wpcopy_delay, - ¤t->delays->wpcopy_count); + ¤t->delays->wpcopy_count, + ¤t->delays->wpcopy_delay_max); } void __delayacct_irq(struct task_struct *task, u32 delta) @@ -284,6 +299,8 @@ void __delayacct_irq(struct task_struct *task, u32 delta) raw_spin_lock_irqsave(&task->delays->lock, flags); task->delays->irq_delay += delta; task->delays->irq_count++; + if (delta > task->delays->irq_delay_max) + task->delays->irq_delay_max = delta; raw_spin_unlock_irqrestore(&task->delays->lock, flags); } diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index 8ee0add5a48a..ed72435aef51 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -244,7 +244,8 @@ static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t) delta = rq_clock(rq) - t->sched_info.last_queued; t->sched_info.last_queued = 0; t->sched_info.run_delay += delta; - + if (delta > t->sched_info.max_run_delay) + t->sched_info.max_run_delay = delta; rq_sched_info_dequeue(rq, delta); } @@ -266,6 +267,8 @@ static void sched_info_arrive(struct rq *rq, struct task_struct *t) t->sched_info.run_delay += delta; t->sched_info.last_arrival = now; 
t->sched_info.pcount++; + if (delta > t->sched_info.max_run_delay) + t->sched_info.max_run_delay = delta; rq_sched_info_arrive(rq, delta); } diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c index 1334214546d7..e570bcad185d 100644 --- a/tools/accounting/getdelays.c +++ b/tools/accounting/getdelays.c @@ -192,60 +192,69 @@ static int get_family_id(int sd) } #define average_ms(t, c) (t / 1000000ULL / (c ? c : 1)) +#define delay_max_ms(t) (t / 1000000ULL) static void print_delayacct(struct taskstats *t) { - printf("\n\nCPU %15s%15s%15s%15s%15s\n" - " %15llu%15llu%15llu%15llu%15.3fms\n" - "IO %15s%15s%15s\n" - " %15llu%15llu%15.3fms\n" - "SWAP %15s%15s%15s\n" - " %15llu%15llu%15.3fms\n" - "RECLAIM %12s%15s%15s\n" - " %15llu%15llu%15.3fms\n" - "THRASHING%12s%15s%15s\n" - " %15llu%15llu%15.3fms\n" - "COMPACT %12s%15s%15s\n" - " %15llu%15llu%15.3fms\n" - "WPCOPY %12s%15s%15s\n" - " %15llu%15llu%15.3fms\n" - "IRQ %15s%15s%15s\n" - " %15llu%15llu%15.3fms\n", + printf("\n\nCPU %15s%15s%15s%15s%15s%15s\n" + " %15llu%15llu%15llu%15llu%15.3fms%13.6fms\n" + "IO %15s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n" + "SWAP %15s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n" + "RECLAIM %12s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n" + "THRASHING%12s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n" + "COMPACT %12s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n" + "WPCOPY %12s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n" + "IRQ %15s%15s%15s%15s\n" + " %15llu%15llu%15.3fms%13.6fms\n", "count", "real total", "virtual total", - "delay total", "delay average", + "delay total", "delay average", "delay max", (unsigned long long)t->cpu_count, (unsigned long long)t->cpu_run_real_total, (unsigned long long)t->cpu_run_virtual_total, (unsigned long long)t->cpu_delay_total, average_ms((double)t->cpu_delay_total, t->cpu_count), - "count", "delay total", "delay average", + delay_max_ms((double)t->cpu_delay_max), + "count", "delay total", "delay average", "delay max", (unsigned long long)t->blkio_count, (unsigned long long)t->blkio_delay_total, average_ms((double)t->blkio_delay_total, t->blkio_count), - "count", "delay total", "delay average", + delay_max_ms((double)t->blkio_delay_max), + "count", "delay total", "delay average", "delay max", (unsigned long long)t->swapin_count, (unsigned long long)t->swapin_delay_total, average_ms((double)t->swapin_delay_total, t->swapin_count), - "count", "delay total", "delay average", + delay_max_ms((double)t->swapin_delay_max), + "count", "delay total", "delay average", "delay max", (unsigned long long)t->freepages_count, (unsigned long long)t->freepages_delay_total, average_ms((double)t->freepages_delay_total, t->freepages_count), - "count", "delay total", "delay average", + delay_max_ms((double)t->freepages_delay_max), + "count", "delay total", "delay average", "delay max", (unsigned long long)t->thrashing_count, (unsigned long long)t->thrashing_delay_total, average_ms((double)t->thrashing_delay_total, t->thrashing_count), - "count", "delay total", "delay average", + delay_max_ms((double)t->thrashing_delay_max), + "count", "delay total", "delay average", "delay max", (unsigned long long)t->compact_count, (unsigned long long)t->compact_delay_total, average_ms((double)t->compact_delay_total, t->compact_count), - "count", "delay total", "delay average", + delay_max_ms((double)t->compact_delay_max), + "count", "delay total", "delay average", "delay max", (unsigned long long)t->wpcopy_count, (unsigned long long)t->wpcopy_delay_total, 
average_ms((double)t->wpcopy_delay_total, t->wpcopy_count), - "count", "delay total", "delay average", + delay_max_ms((double)t->wpcopy_delay_max), + "count", "delay total", "delay average", "delay max", (unsigned long long)t->irq_count, (unsigned long long)t->irq_delay_total, - average_ms((double)t->irq_delay_total, t->irq_count)); + average_ms((double)t->irq_delay_total, t->irq_count), + delay_max_ms((double)t->irq_delay_max)); } static void task_context_switch_counts(struct taskstats *t) From 36856eb09890b03fc91e4e78279744124fa091c8 Mon Sep 17 00:00:00 2001 From: Yaxin Wang Date: Fri, 13 Dec 2024 19:27:00 +0800 Subject: [PATCH 389/504] delayacct: update docs and fix some spelling errors Update delay-accounting.rst to include the 'delay max' in the output of getdelays, and fix some spelling errors before. Link: https://lkml.kernel.org/r/20241213192700771XKZ8H30OtHSeziGqRVMs0@zte.com.cn Signed-off-by: Yaxin Wang Signed-off-by: Jiang Kun Cc: Balbir Singh Cc: David Hildenbrand Cc: Fan Yu Cc: Peilin He Cc: tuqiang Cc: Wang Yong Cc: xu xin Cc: ye xingchen Cc: Yunkai Zhang Signed-off-by: Andrew Morton --- Documentation/accounting/delay-accounting.rst | 38 +++++++++---------- kernel/delayacct.c | 2 +- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Documentation/accounting/delay-accounting.rst b/Documentation/accounting/delay-accounting.rst index f61c01fc376e..8a0277428ccf 100644 --- a/Documentation/accounting/delay-accounting.rst +++ b/Documentation/accounting/delay-accounting.rst @@ -100,29 +100,29 @@ Get delays, since system boot, for pid 10:: # ./getdelays -d -p 10 (output similar to next case) -Get sum of delays, since system boot, for all pids with tgid 5:: +Get sum and peak of delays, since system boot, for all pids with tgid 242:: - # ./getdelays -d -t 5 + bash-4.4# ./getdelays -d -t 242 print delayacct stats ON - TGID 5 + TGID 242 - CPU count real total virtual total delay total delay average - 8 7000000 6872122 3382277 0.423ms - IO count delay total delay average - 0 0 0.000ms - SWAP count delay total delay average - 0 0 0.000ms - RECLAIM count delay total delay average - 0 0 0.000ms - THRASHING count delay total delay average - 0 0 0.000ms - COMPACT count delay total delay average - 0 0 0.000ms - WPCOPY count delay total delay average - 0 0 0.000ms - IRQ count delay total delay average - 0 0 0.000ms + CPU count real total virtual total delay total delay average delay max + 239 296000000 307724885 1127792 0.005ms 0.238382ms + IO count delay total delay average delay max + 0 0 0.000ms 0.000000ms + SWAP count delay total delay average delay max + 0 0 0.000ms 0.000000ms + RECLAIM count delay total delay average delay max + 0 0 0.000ms 0.000000ms + THRASHING count delay total delay average delay max + 0 0 0.000ms 0.000000ms + COMPACT count delay total delay average delay max + 0 0 0.000ms 0.000000ms + WPCOPY count delay total delay average delay max + 230 19100476 0.083ms 0.383822ms + IRQ count delay total delay average delay max + 0 0 0.000ms 0.000000ms Get IO accounting for pid 1, it works only with -p:: diff --git a/kernel/delayacct.c b/kernel/delayacct.c index d64ad2a48b4f..23212a0c88e4 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -93,7 +93,7 @@ void __delayacct_tsk_init(struct task_struct *tsk) /* * Finish delay accounting for a statistic using its timestamps (@start), - * accumalator (@total) and @count + * accumulator (@total) and @count */ static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count, u64 *max) { From 
a890a45d41cd14b399e76be0aa9b14237f7df802 Mon Sep 17 00:00:00 2001 From: zhang jiao Date: Tue, 3 Dec 2024 10:05:50 +0800 Subject: [PATCH 390/504] tools/accounting/procacct: fix minor errors The logfile option was documented but not working. Add it and optimized the while loop. Link: https://lkml.kernel.org/r/20241203020550.3145-1-zhangjiao2@cmss.chinamobile.com Signed-off-by: zhang jiao Reviewed-by: Dr. Thomas Orgis Signed-off-by: Andrew Morton --- tools/accounting/procacct.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/tools/accounting/procacct.c b/tools/accounting/procacct.c index 90c4a37f53d9..e8dee05a6264 100644 --- a/tools/accounting/procacct.c +++ b/tools/accounting/procacct.c @@ -274,12 +274,11 @@ int main(int argc, char *argv[]) int maskset = 0; char *logfile = NULL; int cfd = 0; - int forking = 0; struct msgtemplate msg; - while (!forking) { - c = getopt(argc, argv, "m:vr:"); + while (1) { + c = getopt(argc, argv, "m:vr:w:"); if (c < 0) break; From df9f4936ee10cbce677b64bc27855a71fdda23c9 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 5 Dec 2024 14:20:42 +0100 Subject: [PATCH 391/504] checkpatch: update reference to include/asm- Patch series "Update reference to include/asm-". Despite "include/asm-" having been replaced by "arch//include/asm" 15 years ago, there are still several references left. This patch series updates the most visible ones. This patch (of 3): "include/asm-" was replaced by "arch//include/asm" a long time ago. Link: https://lkml.kernel.org/r/cover.1733404444.git.geert+renesas@glider.be Link: https://lkml.kernel.org/r/2c4a75726a976d117055055b68a31c40dcab044e.1733404444.git.geert+renesas@glider.be Signed-off-by: Geert Uytterhoeven Cc: Andy Whitcroft Cc: Arnd Bergmann Cc: Dwaipayan Ray Cc: Joe Perches Cc: Lukas Bulwahn Cc: Masahiro Yamada Cc: Nathan Chancellor Cc: Nicolas Schier Cc: Oleg Nesterov Cc: Rasmus Villemoes Cc: Yury Norov Signed-off-by: Andrew Morton --- scripts/checkpatch.pl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 9eed3683ad76..dbb9c3c6fe30 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2875,7 +2875,7 @@ sub process { if ($realfile =~ m@^include/asm/@) { ERROR("MODIFIED_INCLUDE_ASM", - "do not modify files in include/asm, change architecture specific files in include/asm-\n" . "$here$rawline\n"); + "do not modify files in include/asm, change architecture specific files in arch//include/asm\n" . "$here$rawline\n"); } $found_file = 1; } From 4b833778a96aab76c7550e46777bca5f1ab1c9ed Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 5 Dec 2024 14:20:44 +0100 Subject: [PATCH 392/504] include: update references to include/asm- "include/asm-" was replaced by "arch//include/asm" a long time ago. 
Link: https://lkml.kernel.org/r/541258219b0441fa1da890e2f8458a7ac18c2ef9.1733404444.git.geert+renesas@glider.be Signed-off-by: Geert Uytterhoeven Cc: Andy Whitcroft Cc: Arnd Bergmann Cc: Dwaipayan Ray Cc: Joe Perches Cc: Lukas Bulwahn Cc: Masahiro Yamada Cc: Nathan Chancellor Cc: Nicolas Schier Cc: Oleg Nesterov Cc: Rasmus Villemoes Cc: Yury Norov Signed-off-by: Andrew Morton --- include/asm-generic/syscall.h | 2 +- include/linux/bitmap.h | 2 +- include/linux/types.h | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/include/asm-generic/syscall.h b/include/asm-generic/syscall.h index 5a80fe728dc8..182b039ce5fa 100644 --- a/include/asm-generic/syscall.h +++ b/include/asm-generic/syscall.h @@ -5,7 +5,7 @@ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. * * This file is a stub providing documentation for what functions - * asm-ARCH/syscall.h files need to define. Most arch definitions + * arch/ARCH/include/asm/syscall.h files need to define. Most arch definitions * will be simple inlines. * * All of these functions expect to be called with no locks, diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 262b6596eca5..2026953e2c4e 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h @@ -23,7 +23,7 @@ struct device; * * Function implementations generic to all architectures are in * lib/bitmap.c. Functions implementations that are architecture - * specific are in various include/asm-/bitops.h headers + * specific are in various arch//include/asm/bitops.h headers * and other arch/ specific files. * * See lib/bitmap.c for more details. diff --git a/include/linux/types.h b/include/linux/types.h index 2d7b9ae8714c..1c509ce8f7f6 100644 --- a/include/linux/types.h +++ b/include/linux/types.h @@ -43,7 +43,7 @@ typedef unsigned long uintptr_t; typedef long intptr_t; #ifdef CONFIG_HAVE_UID16 -/* This is defined by include/asm-{arch}/posix_types.h */ +/* This is defined by arch/{arch}/include/asm/posix_types.h */ typedef __kernel_old_uid_t old_uid_t; typedef __kernel_old_gid_t old_gid_t; #endif /* CONFIG_UID16 */ From 31068df16850cdfa550d360c7e4578eab8f3c154 Mon Sep 17 00:00:00 2001 From: Tamir Duberstein Date: Thu, 5 Dec 2024 10:11:26 -0500 Subject: [PATCH 393/504] xarray: port tests to kunit Minimally rewrite the XArray unit tests to use kunit. This integrates nicely with existing kunit tools which produce nicer human-readable output compared to the existing machinery. Running the xarray tests before this change requires an obscure invocation ``` tools/testing/kunit/kunit.py run --arch arm64 --make_options LLVM=1 \ --kconfig_add CONFIG_TEST_XARRAY=y --raw_output=all nothing ``` which on failure produces ``` BUG at check_reserve:513 ... XArray: 6782340 of 6782364 tests passed ``` and exits 0. Running the xarray tests after this change requires a simpler invocation ``` tools/testing/kunit/kunit.py run --arch arm64 --make_options LLVM=1 \ xarray ``` which on failure produces (colors omitted) ``` [09:50:53] ====================== check_reserve ====================== [09:50:53] [FAILED] param-0 [09:50:53] # check_reserve: EXPECTATION FAILED at lib/test_xarray.c:536 [09:50:53] xa_erase(xa, 12345678) != NULL ... [09:50:53] # module: test_xarray [09:50:53] # xarray: pass:26 fail:3 skip:0 total:29 [09:50:53] # Totals: pass:28 fail:3 skip:0 total:31 [09:50:53] ===================== [FAILED] xarray ====================== ``` and exits 1. Use of richer kunit assertions is intentionally omitted to reduce the scope of the change. 
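As context for the conversion below, here is a minimal, self-contained KUnit suite skeleton showing the registration pattern the port adopts. It is illustrative only: the suite, case, and symbol names are invented for this example and are not the ones used by test_xarray.c.
```c
/* Illustrative KUnit skeleton; names are invented for this example. */
#include <kunit/test.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(sketch_xa);

/* One test case: store a value entry and read it back. */
static void sketch_store_load(struct kunit *test)
{
	void *old = xa_store(&sketch_xa, 1, xa_mk_value(42), GFP_KERNEL);

	KUNIT_EXPECT_EQ(test, 0, xa_err(old));
	KUNIT_EXPECT_PTR_EQ(test, xa_mk_value(42), xa_load(&sketch_xa, 1));
	xa_erase(&sketch_xa, 1);
}

static struct kunit_case sketch_cases[] = {
	KUNIT_CASE(sketch_store_load),
	{}
};

static struct kunit_suite sketch_suite = {
	.name = "xarray-sketch",
	.test_cases = sketch_cases,
};
kunit_test_suites(&sketch_suite);
```
Registering the suite by name is what lets kunit.py select it directly, enabling the simpler invocation shown above instead of the CONFIG_TEST_XARRAY boot-time run.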
Link: https://lkml.kernel.org/r/20241205-xarray-kunit-port-v1-1-ee44bc7aa201@gmail.com Signed-off-by: Tamir Duberstein Cc: Bill Wendling Cc: Christophe Leroy Cc: Geert Uytterhoeven Cc: Justin Stitt Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Nathan Chancellor Cc: Naveen N Rao Cc: Nicholas Piggin Cc: Nick Desaulniers Signed-off-by: Andrew Morton --- arch/m68k/configs/amiga_defconfig | 1 - arch/m68k/configs/apollo_defconfig | 1 - arch/m68k/configs/atari_defconfig | 1 - arch/m68k/configs/bvme6000_defconfig | 1 - arch/m68k/configs/hp300_defconfig | 1 - arch/m68k/configs/mac_defconfig | 1 - arch/m68k/configs/multi_defconfig | 1 - arch/m68k/configs/mvme147_defconfig | 1 - arch/m68k/configs/mvme16x_defconfig | 1 - arch/m68k/configs/q40_defconfig | 1 - arch/m68k/configs/sun3_defconfig | 1 - arch/m68k/configs/sun3x_defconfig | 1 - arch/powerpc/configs/ppc64_defconfig | 1 - lib/Kconfig.debug | 18 +- lib/Makefile | 2 +- lib/test_xarray.c | 655 ++++++++++++++++----------- 16 files changed, 402 insertions(+), 286 deletions(-) diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index c705247e7b5b..581f0080814e 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -629,7 +629,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 6d62b9187a58..25628a1e8fa1 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -586,7 +586,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index c3c644df852d..503a9ea526b5 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -606,7 +606,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 20261f819691..3358349898ef 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -578,7 +578,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index ce4fe93a0f70..a5e933a7fdf9 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -588,7 +588,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 040ae75f47c3..a90676c04da6 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -605,7 +605,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 20d877cb4e30..f28f7783b090 100644 --- 
a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -692,7 +692,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 5e1c8d0d3da5..61308c2cd96c 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -578,7 +578,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 5d1409e6a137..9ec8eb9ea614 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -579,7 +579,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index e4c30e2b9bbb..5fd094391b9e 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -595,7 +595,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 980843a9ea1e..5e9c9d704c2e 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -575,7 +575,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 38681cc6b598..b2f5f398fe42 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -576,7 +576,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index f39c0d000c43..bc48063fd860 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -451,7 +451,6 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m -CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index e968f083d356..b53f5da22e40 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2456,8 +2456,22 @@ config TEST_BITMAP config TEST_UUID tristate "Test functions located in the uuid module at runtime" -config TEST_XARRAY - tristate "Test the XArray code at runtime" +config XARRAY_KUNIT + tristate "KUnit test XArray code at runtime" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Enable this option to test the Xarray code at boot. + + KUnit tests run during boot and output the results to the debug log + in TAP format (http://testanything.org/). Only useful for kernel devs + running the KUnit test harness, and not intended for inclusion into a + production build. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. 
+ + If unsure, say N. config TEST_MAPLE_TREE tristate "Test the Maple Tree code at runtime or module load" diff --git a/lib/Makefile b/lib/Makefile index a8155c972f02..c0458ff841fe 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -94,7 +94,6 @@ GCOV_PROFILE_test_bitmap.o := n endif obj-$(CONFIG_TEST_UUID) += test_uuid.o -obj-$(CONFIG_TEST_XARRAY) += test_xarray.o obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o obj-$(CONFIG_TEST_PARMAN) += test_parman.o obj-$(CONFIG_TEST_KMOD) += test_kmod.o @@ -375,6 +374,7 @@ CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN) obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o +obj-$(CONFIG_XARRAY_KUNIT) += test_xarray.o obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o diff --git a/lib/test_xarray.c b/lib/test_xarray.c index d5c5cbba33ed..9d0e797b825f 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -6,11 +6,10 @@ * Author: Matthew Wilcox */ -#include -#include +#include -static unsigned int tests_run; -static unsigned int tests_passed; +#include +#include static const unsigned int order_limit = IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1; @@ -20,15 +19,12 @@ static const unsigned int order_limit = void xa_dump(const struct xarray *xa) { } # endif #undef XA_BUG_ON -#define XA_BUG_ON(xa, x) do { \ - tests_run++; \ - if (x) { \ - printk("BUG at %s:%d\n", __func__, __LINE__); \ - xa_dump(xa); \ - dump_stack(); \ - } else { \ - tests_passed++; \ - } \ +#define XA_BUG_ON(xa, x) do { \ + if (x) { \ + KUNIT_FAIL(test, #x); \ + xa_dump(xa); \ + dump_stack(); \ + } \ } while (0) #endif @@ -42,13 +38,13 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) return xa_store(xa, index, xa_mk_index(index), gfp); } -static void xa_insert_index(struct xarray *xa, unsigned long index) +static void xa_insert_index(struct kunit *test, struct xarray *xa, unsigned long index) { XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index), GFP_KERNEL) != 0); } -static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) +static void xa_alloc_index(struct kunit *test, struct xarray *xa, unsigned long index, gfp_t gfp) { u32 id; @@ -57,7 +53,7 @@ static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) XA_BUG_ON(xa, id != index); } -static void xa_erase_index(struct xarray *xa, unsigned long index) +static void xa_erase_index(struct kunit *test, struct xarray *xa, unsigned long index) { XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index)); XA_BUG_ON(xa, xa_load(xa, index) != NULL); @@ -83,8 +79,15 @@ static void *xa_store_order(struct xarray *xa, unsigned long index, return curr; } -static noinline void check_xa_err(struct xarray *xa) +static inline struct xarray *xa_param(struct kunit *test) { + return *(struct xarray **)test->param_value; +} + +static noinline void check_xa_err(struct kunit *test) +{ + struct xarray *xa = xa_param(test); + XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0); XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0); #ifndef __KERNEL__ @@ -99,8 +102,10 @@ static noinline void check_xa_err(struct xarray *xa) // XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL); } -static noinline void check_xas_retry(struct xarray *xa) +static noinline void check_xas_retry(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 
0); void *entry; @@ -109,7 +114,7 @@ static noinline void check_xas_retry(struct xarray *xa) rcu_read_lock(); XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0)); - xa_erase_index(xa, 1); + xa_erase_index(test, xa, 1); XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas))); XA_BUG_ON(xa, xas_retry(&xas, NULL)); XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0))); @@ -140,12 +145,14 @@ static noinline void check_xas_retry(struct xarray *xa) } xas_unlock(&xas); - xa_erase_index(xa, 0); - xa_erase_index(xa, 1); + xa_erase_index(test, xa, 0); + xa_erase_index(test, xa, 1); } -static noinline void check_xa_load(struct xarray *xa) +static noinline void check_xa_load(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned long i, j; for (i = 0; i < 1024; i++) { @@ -167,13 +174,15 @@ static noinline void check_xa_load(struct xarray *xa) else XA_BUG_ON(xa, entry); } - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); } XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) +static noinline void check_xa_mark_1(struct kunit *test, unsigned long index) { + struct xarray *xa = xa_param(test); + unsigned int order; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1; @@ -193,7 +202,7 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1)); /* Storing NULL clears marks, and they can't be set again */ - xa_erase_index(xa, index); + xa_erase_index(test, xa, index); XA_BUG_ON(xa, !xa_empty(xa)); XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); xa_set_mark(xa, index, XA_MARK_0); @@ -244,15 +253,17 @@ static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1)); XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2)); - xa_erase_index(xa, index); - xa_erase_index(xa, next); + xa_erase_index(test, xa, index); + xa_erase_index(test, xa, next); XA_BUG_ON(xa, !xa_empty(xa)); } XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_xa_mark_2(struct xarray *xa) +static noinline void check_xa_mark_2(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); unsigned long index; unsigned int count = 0; @@ -289,9 +300,11 @@ static noinline void check_xa_mark_2(struct xarray *xa) xa_destroy(xa); } -static noinline void check_xa_mark_3(struct xarray *xa) +static noinline void check_xa_mark_3(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0x41); void *entry; int count = 0; @@ -310,19 +323,21 @@ static noinline void check_xa_mark_3(struct xarray *xa) #endif } -static noinline void check_xa_mark(struct xarray *xa) +static noinline void check_xa_mark(struct kunit *test) { unsigned long index; for (index = 0; index < 16384; index += 4) - check_xa_mark_1(xa, index); + check_xa_mark_1(test, index); - check_xa_mark_2(xa); - check_xa_mark_3(xa); + check_xa_mark_2(test); + check_xa_mark_3(test); } -static noinline void check_xa_shrink(struct xarray *xa) +static noinline void check_xa_shrink(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 1); struct xa_node *node; unsigned int order; @@ -347,7 +362,7 @@ static noinline void check_xa_shrink(struct xarray *xa) XA_BUG_ON(xa, xas_load(&xas) != NULL); xas_unlock(&xas); XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); - xa_erase_index(xa, 0); + xa_erase_index(test, xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); for 
(order = 0; order < max_order; order++) { @@ -364,45 +379,49 @@ static noinline void check_xa_shrink(struct xarray *xa) XA_BUG_ON(xa, xa_head(xa) == node); rcu_read_unlock(); XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); - xa_erase_index(xa, ULONG_MAX); + xa_erase_index(test, xa, ULONG_MAX); XA_BUG_ON(xa, xa->xa_head != node); - xa_erase_index(xa, 0); + xa_erase_index(test, xa, 0); } } -static noinline void check_insert(struct xarray *xa) +static noinline void check_insert(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned long i; for (i = 0; i < 1024; i++) { - xa_insert_index(xa, i); + xa_insert_index(test, xa, i); XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL); XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL); - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); } for (i = 10; i < BITS_PER_LONG; i++) { - xa_insert_index(xa, 1UL << i); + xa_insert_index(test, xa, 1UL << i); XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL); XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL); - xa_erase_index(xa, 1UL << i); + xa_erase_index(test, xa, 1UL << i); - xa_insert_index(xa, (1UL << i) - 1); + xa_insert_index(test, xa, (1UL << i) - 1); XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL); XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL); - xa_erase_index(xa, (1UL << i) - 1); + xa_erase_index(test, xa, (1UL << i) - 1); } - xa_insert_index(xa, ~0UL); + xa_insert_index(test, xa, ~0UL); XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL); XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL); - xa_erase_index(xa, ~0UL); + xa_erase_index(test, xa, ~0UL); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_cmpxchg(struct xarray *xa) +static noinline void check_cmpxchg(struct kunit *test) { + struct xarray *xa = xa_param(test); + void *FIVE = xa_mk_value(5); void *SIX = xa_mk_value(6); void *LOTS = xa_mk_value(12345678); @@ -418,14 +437,16 @@ static noinline void check_cmpxchg(struct xarray *xa) XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY); XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE); XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY); - xa_erase_index(xa, 12345678); - xa_erase_index(xa, 5); + xa_erase_index(test, xa, 12345678); + xa_erase_index(test, xa, 5); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_cmpxchg_order(struct xarray *xa) +static noinline void check_cmpxchg_order(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI + struct xarray *xa = xa_param(test); + void *FIVE = xa_mk_value(5); unsigned int i, order = 3; @@ -476,8 +497,10 @@ static noinline void check_cmpxchg_order(struct xarray *xa) #endif } -static noinline void check_reserve(struct xarray *xa) +static noinline void check_reserve(struct kunit *test) { + struct xarray *xa = xa_param(test); + void *entry; unsigned long index; int count; @@ -494,7 +517,7 @@ static noinline void check_reserve(struct xarray *xa) XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL); xa_release(xa, 12345678); - xa_erase_index(xa, 12345678); + xa_erase_index(test, xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* cmpxchg sees a reserved entry as ZERO */ @@ -502,7 +525,7 @@ static noinline void check_reserve(struct xarray *xa) XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY, xa_mk_value(12345678), GFP_NOWAIT) != NULL); xa_release(xa, 12345678); - xa_erase_index(xa, 12345678); + xa_erase_index(test, xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* xa_insert treats it as busy */ @@ -542,8 +565,10 @@ static noinline void check_reserve(struct xarray 
*xa) xa_destroy(xa); } -static noinline void check_xas_erase(struct xarray *xa) +static noinline void check_xas_erase(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); void *entry; unsigned long i, j; @@ -581,9 +606,11 @@ static noinline void check_xas_erase(struct xarray *xa) } #ifdef CONFIG_XARRAY_MULTI -static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, +static noinline void check_multi_store_1(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, index); unsigned long min = index & ~((1UL << order) - 1); unsigned long max = min + (1UL << order); @@ -602,13 +629,15 @@ static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, XA_BUG_ON(xa, xa_load(xa, max) != NULL); XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); - xa_erase_index(xa, min); + xa_erase_index(test, xa, min); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, +static noinline void check_multi_store_2(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, index); xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL); @@ -620,9 +649,11 @@ static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, +static noinline void check_multi_store_3(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); void *entry; int n = 0; @@ -647,9 +678,11 @@ static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, } #endif -static noinline void check_multi_store(struct xarray *xa) +static noinline void check_multi_store(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI + struct xarray *xa = xa_param(test); + unsigned long i, j, k; unsigned int max_order = (sizeof(long) == 4) ? 
30 : 60; @@ -714,26 +747,28 @@ static noinline void check_multi_store(struct xarray *xa) } for (i = 0; i < 20; i++) { - check_multi_store_1(xa, 200, i); - check_multi_store_1(xa, 0, i); - check_multi_store_1(xa, (1UL << i) + 1, i); + check_multi_store_1(test, 200, i); + check_multi_store_1(test, 0, i); + check_multi_store_1(test, (1UL << i) + 1, i); } - check_multi_store_2(xa, 4095, 9); + check_multi_store_2(test, 4095, 9); for (i = 1; i < 20; i++) { - check_multi_store_3(xa, 0, i); - check_multi_store_3(xa, 1UL << i, i); + check_multi_store_3(test, 0, i); + check_multi_store_3(test, 1UL << i, i); } #endif } #ifdef CONFIG_XARRAY_MULTI /* mimics page cache __filemap_add_folio() */ -static noinline void check_xa_multi_store_adv_add(struct xarray *xa, +static noinline void check_xa_multi_store_adv_add(struct kunit *test, unsigned long index, unsigned int order, void *p) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, index); unsigned int nrpages = 1UL << order; @@ -761,10 +796,12 @@ static noinline void check_xa_multi_store_adv_add(struct xarray *xa, } /* mimics page_cache_delete() */ -static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa, +static noinline void check_xa_multi_store_adv_del_entry(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, index); xas_set_order(&xas, index, order); @@ -772,12 +809,14 @@ static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa, xas_init_marks(&xas); } -static noinline void check_xa_multi_store_adv_delete(struct xarray *xa, +static noinline void check_xa_multi_store_adv_delete(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + xa_lock_irq(xa); - check_xa_multi_store_adv_del_entry(xa, index, order); + check_xa_multi_store_adv_del_entry(test, index, order); xa_unlock_irq(xa); } @@ -814,10 +853,12 @@ static unsigned long some_val = 0xdeadbeef; static unsigned long some_val_2 = 0xdeaddead; /* mimics the page cache usage */ -static noinline void check_xa_multi_store_adv(struct xarray *xa, +static noinline void check_xa_multi_store_adv(struct kunit *test, unsigned long pos, unsigned int order) { + struct xarray *xa = xa_param(test); + unsigned int nrpages = 1UL << order; unsigned long index, base, next_index, next_next_index; unsigned int i; @@ -827,7 +868,7 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, next_index = round_down(base + nrpages, nrpages); next_next_index = round_down(next_index + nrpages, nrpages); - check_xa_multi_store_adv_add(xa, base, order, &some_val); + check_xa_multi_store_adv_add(test, base, order, &some_val); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val); @@ -835,20 +876,20 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL); /* Use order 0 for the next item */ - check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2); + check_xa_multi_store_adv_add(test, next_index, 0, &some_val_2); XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2); /* Remove the next item */ - check_xa_multi_store_adv_delete(xa, next_index, 0); + check_xa_multi_store_adv_delete(test, next_index, 0); /* Now use order for a new pointer */ - check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2); + check_xa_multi_store_adv_add(test, next_index, order, &some_val_2); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, next_index + i) != 
&some_val_2); - check_xa_multi_store_adv_delete(xa, next_index, order); - check_xa_multi_store_adv_delete(xa, base, order); + check_xa_multi_store_adv_delete(test, next_index, order); + check_xa_multi_store_adv_delete(test, base, order); XA_BUG_ON(xa, !xa_empty(xa)); /* starting fresh again */ @@ -856,7 +897,7 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, /* let's test some holes now */ /* hole at base and next_next */ - check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2); + check_xa_multi_store_adv_add(test, next_index, order, &some_val_2); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); @@ -867,12 +908,12 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL); - check_xa_multi_store_adv_delete(xa, next_index, order); + check_xa_multi_store_adv_delete(test, next_index, order); XA_BUG_ON(xa, !xa_empty(xa)); /* hole at base and next */ - check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2); + check_xa_multi_store_adv_add(test, next_next_index, order, &some_val_2); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); @@ -883,12 +924,12 @@ static noinline void check_xa_multi_store_adv(struct xarray *xa, for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2); - check_xa_multi_store_adv_delete(xa, next_next_index, order); + check_xa_multi_store_adv_delete(test, next_next_index, order); XA_BUG_ON(xa, !xa_empty(xa)); } #endif -static noinline void check_multi_store_advanced(struct xarray *xa) +static noinline void check_multi_store_advanced(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
20 : 1; @@ -900,59 +941,59 @@ static noinline void check_multi_store_advanced(struct xarray *xa) */ for (pos = 7; pos < end; pos = (pos * pos) + 564) { for (i = 0; i < max_order; i++) { - check_xa_multi_store_adv(xa, pos, i); - check_xa_multi_store_adv(xa, pos + 157, i); + check_xa_multi_store_adv(test, pos, i); + check_xa_multi_store_adv(test, pos + 157, i); } } #endif } -static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) +static noinline void check_xa_alloc_1(struct kunit *test, struct xarray *xa, unsigned int base) { int i; u32 id; XA_BUG_ON(xa, !xa_empty(xa)); /* An empty array should assign %base to the first alloc */ - xa_alloc_index(xa, base, GFP_KERNEL); + xa_alloc_index(test, xa, base, GFP_KERNEL); /* Erasing it should make the array empty again */ - xa_erase_index(xa, base); + xa_erase_index(test, xa, base); XA_BUG_ON(xa, !xa_empty(xa)); /* And it should assign %base again */ - xa_alloc_index(xa, base, GFP_KERNEL); + xa_alloc_index(test, xa, base, GFP_KERNEL); /* Allocating and then erasing a lot should not lose base */ for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++) - xa_alloc_index(xa, i, GFP_KERNEL); + xa_alloc_index(test, xa, i, GFP_KERNEL); for (i = base; i < 2 * XA_CHUNK_SIZE; i++) - xa_erase_index(xa, i); - xa_alloc_index(xa, base, GFP_KERNEL); + xa_erase_index(test, xa, i); + xa_alloc_index(test, xa, base, GFP_KERNEL); /* Destroying the array should do the same as erasing */ xa_destroy(xa); /* And it should assign %base again */ - xa_alloc_index(xa, base, GFP_KERNEL); + xa_alloc_index(test, xa, base, GFP_KERNEL); /* The next assigned ID should be base+1 */ - xa_alloc_index(xa, base + 1, GFP_KERNEL); - xa_erase_index(xa, base + 1); + xa_alloc_index(test, xa, base + 1, GFP_KERNEL); + xa_erase_index(test, xa, base + 1); /* Storing a value should mark it used */ xa_store_index(xa, base + 1, GFP_KERNEL); - xa_alloc_index(xa, base + 2, GFP_KERNEL); + xa_alloc_index(test, xa, base + 2, GFP_KERNEL); /* If we then erase base, it should be free */ - xa_erase_index(xa, base); - xa_alloc_index(xa, base, GFP_KERNEL); + xa_erase_index(test, xa, base); + xa_alloc_index(test, xa, base, GFP_KERNEL); - xa_erase_index(xa, base + 1); - xa_erase_index(xa, base + 2); + xa_erase_index(test, xa, base + 1); + xa_erase_index(test, xa, base + 2); for (i = 1; i < 5000; i++) { - xa_alloc_index(xa, base + i, GFP_KERNEL); + xa_alloc_index(test, xa, base + i, GFP_KERNEL); } xa_destroy(xa); @@ -978,11 +1019,11 @@ static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), GFP_KERNEL) != -EBUSY); - xa_erase_index(xa, 3); + xa_erase_index(test, xa, 3); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) +static noinline void check_xa_alloc_2(struct kunit *test, struct xarray *xa, unsigned int base) { unsigned int i, id; unsigned long index; @@ -1018,7 +1059,7 @@ static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) XA_BUG_ON(xa, id != 5); xa_for_each(xa, index, entry) { - xa_erase_index(xa, index); + xa_erase_index(test, xa, index); } for (i = base; i < base + 9; i++) { @@ -1033,7 +1074,7 @@ static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) xa_destroy(xa); } -static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) +static noinline void check_xa_alloc_3(struct kunit *test, struct xarray *xa, unsigned int base) { struct 
xa_limit limit = XA_LIMIT(1, 0x3fff); u32 next = 0; @@ -1049,8 +1090,8 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit, &next, GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 0x3ffd); - xa_erase_index(xa, 0x3ffd); - xa_erase_index(xa, 1); + xa_erase_index(test, xa, 0x3ffd); + xa_erase_index(test, xa, 1); XA_BUG_ON(xa, !xa_empty(xa)); for (i = 0x3ffe; i < 0x4003; i++) { @@ -1065,8 +1106,8 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) /* Check wrap-around is handled correctly */ if (base != 0) - xa_erase_index(xa, base); - xa_erase_index(xa, base + 1); + xa_erase_index(test, xa, base); + xa_erase_index(test, xa, base + 1); next = UINT_MAX; XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX), xa_limit_32b, &next, GFP_KERNEL) != 0); @@ -1079,7 +1120,7 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) XA_BUG_ON(xa, id != base + 1); xa_for_each(xa, index, entry) - xa_erase_index(xa, index); + xa_erase_index(test, xa, index); XA_BUG_ON(xa, !xa_empty(xa)); } @@ -1087,19 +1128,21 @@ static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) static DEFINE_XARRAY_ALLOC(xa0); static DEFINE_XARRAY_ALLOC1(xa1); -static noinline void check_xa_alloc(void) +static noinline void check_xa_alloc(struct kunit *test) { - check_xa_alloc_1(&xa0, 0); - check_xa_alloc_1(&xa1, 1); - check_xa_alloc_2(&xa0, 0); - check_xa_alloc_2(&xa1, 1); - check_xa_alloc_3(&xa0, 0); - check_xa_alloc_3(&xa1, 1); + check_xa_alloc_1(test, &xa0, 0); + check_xa_alloc_1(test, &xa1, 1); + check_xa_alloc_2(test, &xa0, 0); + check_xa_alloc_2(test, &xa1, 1); + check_xa_alloc_3(test, &xa0, 0); + check_xa_alloc_3(test, &xa1, 1); } -static noinline void __check_store_iter(struct xarray *xa, unsigned long start, +static noinline void __check_store_iter(struct kunit *test, unsigned long start, unsigned int order, unsigned int present) { + struct xarray *xa = xa_param(test); + XA_STATE_ORDER(xas, xa, start, order); void *entry; unsigned int count = 0; @@ -1123,50 +1166,54 @@ retry: XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start)); XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != xa_mk_index(start)); - xa_erase_index(xa, start); + xa_erase_index(test, xa, start); } -static noinline void check_store_iter(struct xarray *xa) +static noinline void check_store_iter(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned int i, j; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
20 : 1; for (i = 0; i < max_order; i++) { unsigned int min = 1 << i; unsigned int max = (2 << i) - 1; - __check_store_iter(xa, 0, i, 0); + __check_store_iter(test, 0, i, 0); XA_BUG_ON(xa, !xa_empty(xa)); - __check_store_iter(xa, min, i, 0); + __check_store_iter(test, min, i, 0); XA_BUG_ON(xa, !xa_empty(xa)); xa_store_index(xa, min, GFP_KERNEL); - __check_store_iter(xa, min, i, 1); + __check_store_iter(test, min, i, 1); XA_BUG_ON(xa, !xa_empty(xa)); xa_store_index(xa, max, GFP_KERNEL); - __check_store_iter(xa, min, i, 1); + __check_store_iter(test, min, i, 1); XA_BUG_ON(xa, !xa_empty(xa)); for (j = 0; j < min; j++) xa_store_index(xa, j, GFP_KERNEL); - __check_store_iter(xa, 0, i, min); + __check_store_iter(test, 0, i, min); XA_BUG_ON(xa, !xa_empty(xa)); for (j = 0; j < min; j++) xa_store_index(xa, min + j, GFP_KERNEL); - __check_store_iter(xa, min, i, min); + __check_store_iter(test, min, i, min); XA_BUG_ON(xa, !xa_empty(xa)); } #ifdef CONFIG_XARRAY_MULTI xa_store_index(xa, 63, GFP_KERNEL); xa_store_index(xa, 65, GFP_KERNEL); - __check_store_iter(xa, 64, 2, 1); - xa_erase_index(xa, 63); + __check_store_iter(test, 64, 2, 1); + xa_erase_index(test, xa, 63); #endif XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_multi_find_1(struct xarray *xa, unsigned order) +static noinline void check_multi_find_1(struct kunit *test, unsigned int order) { #ifdef CONFIG_XARRAY_MULTI + struct xarray *xa = xa_param(test); + unsigned long multi = 3 << order; unsigned long next = 4 << order; unsigned long index; @@ -1189,15 +1236,17 @@ static noinline void check_multi_find_1(struct xarray *xa, unsigned order) XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL); XA_BUG_ON(xa, index != next); - xa_erase_index(xa, multi); - xa_erase_index(xa, next); - xa_erase_index(xa, next + 1); + xa_erase_index(test, xa, multi); + xa_erase_index(test, xa, next); + xa_erase_index(test, xa, next + 1); XA_BUG_ON(xa, !xa_empty(xa)); #endif } -static noinline void check_multi_find_2(struct xarray *xa) +static noinline void check_multi_find_2(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
10 : 1; unsigned int i, j; void *entry; @@ -1211,17 +1260,19 @@ static noinline void check_multi_find_2(struct xarray *xa) GFP_KERNEL); rcu_read_lock(); xas_for_each(&xas, entry, ULONG_MAX) { - xa_erase_index(xa, index); + xa_erase_index(test, xa, index); } rcu_read_unlock(); - xa_erase_index(xa, index - 1); + xa_erase_index(test, xa, index - 1); XA_BUG_ON(xa, !xa_empty(xa)); } } } -static noinline void check_multi_find_3(struct xarray *xa) +static noinline void check_multi_find_3(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned int order; for (order = 5; order < order_limit; order++) { @@ -1230,12 +1281,14 @@ static noinline void check_multi_find_3(struct xarray *xa) XA_BUG_ON(xa, !xa_empty(xa)); xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL); XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)); - xa_erase_index(xa, 0); + xa_erase_index(test, xa, 0); } } -static noinline void check_find_1(struct xarray *xa) +static noinline void check_find_1(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned long i, j, k; XA_BUG_ON(xa, !xa_empty(xa)); @@ -1272,18 +1325,20 @@ static noinline void check_find_1(struct xarray *xa) else XA_BUG_ON(xa, entry != NULL); } - xa_erase_index(xa, j); + xa_erase_index(test, xa, j); XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0)); XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); } - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0)); } XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_find_2(struct xarray *xa) +static noinline void check_find_2(struct kunit *test) { + struct xarray *xa = xa_param(test); + void *entry; unsigned long i, j, index; @@ -1303,8 +1358,10 @@ static noinline void check_find_2(struct xarray *xa) xa_destroy(xa); } -static noinline void check_find_3(struct xarray *xa) +static noinline void check_find_3(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); unsigned long i, j, k; void *entry; @@ -1328,8 +1385,10 @@ static noinline void check_find_3(struct xarray *xa) xa_destroy(xa); } -static noinline void check_find_4(struct xarray *xa) +static noinline void check_find_4(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned long index = 0; void *entry; @@ -1341,22 +1400,22 @@ static noinline void check_find_4(struct xarray *xa) entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT); XA_BUG_ON(xa, entry); - xa_erase_index(xa, ULONG_MAX); + xa_erase_index(test, xa, ULONG_MAX); } -static noinline void check_find(struct xarray *xa) +static noinline void check_find(struct kunit *test) { unsigned i; - check_find_1(xa); - check_find_2(xa); - check_find_3(xa); - check_find_4(xa); + check_find_1(test); + check_find_2(test); + check_find_3(test); + check_find_4(test); for (i = 2; i < 10; i++) - check_multi_find_1(xa, i); - check_multi_find_2(xa); - check_multi_find_3(xa); + check_multi_find_1(test, i); + check_multi_find_2(test); + check_multi_find_3(test); } /* See find_swap_entry() in mm/shmem.c */ @@ -1382,8 +1441,10 @@ static noinline unsigned long xa_find_entry(struct xarray *xa, void *item) return entry ? 
xas.xa_index : -1; } -static noinline void check_find_entry(struct xarray *xa) +static noinline void check_find_entry(struct kunit *test) { + struct xarray *xa = xa_param(test); + #ifdef CONFIG_XARRAY_MULTI unsigned int order; unsigned long offset, index; @@ -1410,12 +1471,14 @@ static noinline void check_find_entry(struct xarray *xa) xa_store_index(xa, ULONG_MAX, GFP_KERNEL); XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1); - xa_erase_index(xa, ULONG_MAX); + xa_erase_index(test, xa, ULONG_MAX); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_pause(struct xarray *xa) +static noinline void check_pause(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); void *entry; unsigned int order; @@ -1450,8 +1513,10 @@ static noinline void check_pause(struct xarray *xa) xa_destroy(xa); } -static noinline void check_move_tiny(struct xarray *xa) +static noinline void check_move_tiny(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); @@ -1468,12 +1533,14 @@ static noinline void check_move_tiny(struct xarray *xa) XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0)); XA_BUG_ON(xa, xas_prev(&xas) != NULL); rcu_read_unlock(); - xa_erase_index(xa, 0); + xa_erase_index(test, xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_move_max(struct xarray *xa) +static noinline void check_move_max(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); xa_store_index(xa, ULONG_MAX, GFP_KERNEL); @@ -1489,12 +1556,14 @@ static noinline void check_move_max(struct xarray *xa) XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL); rcu_read_unlock(); - xa_erase_index(xa, ULONG_MAX); + xa_erase_index(test, xa, ULONG_MAX); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_move_small(struct xarray *xa, unsigned long idx) +static noinline void check_move_small(struct kunit *test, unsigned long idx) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); unsigned long i; @@ -1536,13 +1605,15 @@ static noinline void check_move_small(struct xarray *xa, unsigned long idx) XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); rcu_read_unlock(); - xa_erase_index(xa, 0); - xa_erase_index(xa, idx); + xa_erase_index(test, xa, 0); + xa_erase_index(test, xa, idx); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_move(struct xarray *xa) +static noinline void check_move(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, (1 << 16) - 1); unsigned long i; @@ -1569,7 +1640,7 @@ static noinline void check_move(struct xarray *xa) rcu_read_unlock(); for (i = (1 << 8); i < (1 << 15); i++) - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); i = xas.xa_index; @@ -1600,17 +1671,17 @@ static noinline void check_move(struct xarray *xa) xa_destroy(xa); - check_move_tiny(xa); - check_move_max(xa); + check_move_tiny(test); + check_move_max(test); for (i = 0; i < 16; i++) - check_move_small(xa, 1UL << i); + check_move_small(test, 1UL << i); for (i = 2; i < 16; i++) - check_move_small(xa, (1UL << i) - 1); + check_move_small(test, (1UL << i) - 1); } -static noinline void xa_store_many_order(struct xarray *xa, +static noinline void xa_store_many_order(struct kunit *test, struct xarray *xa, unsigned long index, unsigned order) { XA_STATE_ORDER(xas, xa, index, order); @@ -1633,30 +1704,34 @@ unlock: XA_BUG_ON(xa, xas_error(&xas)); } -static noinline void check_create_range_1(struct xarray *xa, +static noinline void 
check_create_range_1(struct kunit *test, unsigned long index, unsigned order) { + struct xarray *xa = xa_param(test); + unsigned long i; - xa_store_many_order(xa, index, order); + xa_store_many_order(test, xa, index, order); for (i = index; i < index + (1UL << order); i++) - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_create_range_2(struct xarray *xa, unsigned order) +static noinline void check_create_range_2(struct kunit *test, unsigned int order) { + struct xarray *xa = xa_param(test); + unsigned long i; unsigned long nr = 1UL << order; for (i = 0; i < nr * nr; i += nr) - xa_store_many_order(xa, i, order); + xa_store_many_order(test, xa, i, order); for (i = 0; i < nr * nr; i++) - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_create_range_3(void) +static noinline void check_create_range_3(struct kunit *test) { XA_STATE(xas, NULL, 0); xas_set_err(&xas, -EEXIST); @@ -1664,9 +1739,11 @@ static noinline void check_create_range_3(void) XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST); } -static noinline void check_create_range_4(struct xarray *xa, +static noinline void check_create_range_4(struct kunit *test, unsigned long index, unsigned order) { + struct xarray *xa = xa_param(test); + XA_STATE_ORDER(xas, xa, index, order); unsigned long base = xas.xa_index; unsigned long i = 0; @@ -1692,13 +1769,15 @@ unlock: XA_BUG_ON(xa, xas_error(&xas)); for (i = base; i < base + (1UL << order); i++) - xa_erase_index(xa, i); + xa_erase_index(test, xa, i); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_create_range_5(struct xarray *xa, +static noinline void check_create_range_5(struct kunit *test, unsigned long index, unsigned int order) { + struct xarray *xa = xa_param(test); + XA_STATE_ORDER(xas, xa, index, order); unsigned int i; @@ -1715,44 +1794,46 @@ static noinline void check_create_range_5(struct xarray *xa, xa_destroy(xa); } -static noinline void check_create_range(struct xarray *xa) +static noinline void check_create_range(struct kunit *test) { unsigned int order; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
12 : 1; for (order = 0; order < max_order; order++) { - check_create_range_1(xa, 0, order); - check_create_range_1(xa, 1U << order, order); - check_create_range_1(xa, 2U << order, order); - check_create_range_1(xa, 3U << order, order); - check_create_range_1(xa, 1U << 24, order); + check_create_range_1(test, 0, order); + check_create_range_1(test, 1U << order, order); + check_create_range_1(test, 2U << order, order); + check_create_range_1(test, 3U << order, order); + check_create_range_1(test, 1U << 24, order); if (order < 10) - check_create_range_2(xa, order); + check_create_range_2(test, order); - check_create_range_4(xa, 0, order); - check_create_range_4(xa, 1U << order, order); - check_create_range_4(xa, 2U << order, order); - check_create_range_4(xa, 3U << order, order); - check_create_range_4(xa, 1U << 24, order); + check_create_range_4(test, 0, order); + check_create_range_4(test, 1U << order, order); + check_create_range_4(test, 2U << order, order); + check_create_range_4(test, 3U << order, order); + check_create_range_4(test, 1U << 24, order); - check_create_range_4(xa, 1, order); - check_create_range_4(xa, (1U << order) + 1, order); - check_create_range_4(xa, (2U << order) + 1, order); - check_create_range_4(xa, (2U << order) - 1, order); - check_create_range_4(xa, (3U << order) + 1, order); - check_create_range_4(xa, (3U << order) - 1, order); - check_create_range_4(xa, (1U << 24) + 1, order); + check_create_range_4(test, 1, order); + check_create_range_4(test, (1U << order) + 1, order); + check_create_range_4(test, (2U << order) + 1, order); + check_create_range_4(test, (2U << order) - 1, order); + check_create_range_4(test, (3U << order) + 1, order); + check_create_range_4(test, (3U << order) - 1, order); + check_create_range_4(test, (1U << 24) + 1, order); - check_create_range_5(xa, 0, order); - check_create_range_5(xa, (1U << order), order); + check_create_range_5(test, 0, order); + check_create_range_5(test, (1U << order), order); } - check_create_range_3(); + check_create_range_3(test); } -static noinline void __check_store_range(struct xarray *xa, unsigned long first, +static noinline void __check_store_range(struct kunit *test, unsigned long first, unsigned long last) { + struct xarray *xa = xa_param(test); + #ifdef CONFIG_XARRAY_MULTI xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL); @@ -1767,26 +1848,28 @@ static noinline void __check_store_range(struct xarray *xa, unsigned long first, XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_store_range(struct xarray *xa) +static noinline void check_store_range(struct kunit *test) { unsigned long i, j; for (i = 0; i < 128; i++) { for (j = i; j < 128; j++) { - __check_store_range(xa, i, j); - __check_store_range(xa, 128 + i, 128 + j); - __check_store_range(xa, 4095 + i, 4095 + j); - __check_store_range(xa, 4096 + i, 4096 + j); - __check_store_range(xa, 123456 + i, 123456 + j); - __check_store_range(xa, (1 << 24) + i, (1 << 24) + j); + __check_store_range(test, i, j); + __check_store_range(test, 128 + i, 128 + j); + __check_store_range(test, 4095 + i, 4095 + j); + __check_store_range(test, 4096 + i, 4096 + j); + __check_store_range(test, 123456 + i, 123456 + j); + __check_store_range(test, (1 << 24) + i, (1 << 24) + j); } } } #ifdef CONFIG_XARRAY_MULTI -static void check_split_1(struct xarray *xa, unsigned long index, +static void check_split_1(struct kunit *test, unsigned long index, unsigned int order, unsigned int new_order) { + struct xarray *xa = xa_param(test); + XA_STATE_ORDER(xas, xa, index, 
new_order); unsigned int i, found; void *entry; @@ -1822,26 +1905,30 @@ static void check_split_1(struct xarray *xa, unsigned long index, xa_destroy(xa); } -static noinline void check_split(struct xarray *xa) +static noinline void check_split(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned int order, new_order; XA_BUG_ON(xa, !xa_empty(xa)); for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) { for (new_order = 0; new_order < order; new_order++) { - check_split_1(xa, 0, order, new_order); - check_split_1(xa, 1UL << order, order, new_order); - check_split_1(xa, 3UL << order, order, new_order); + check_split_1(test, 0, order, new_order); + check_split_1(test, 1UL << order, order, new_order); + check_split_1(test, 3UL << order, order, new_order); } } } #else -static void check_split(struct xarray *xa) { } +static void check_split(struct kunit *test) { } #endif -static void check_align_1(struct xarray *xa, char *name) +static void check_align_1(struct kunit *test, char *name) { + struct xarray *xa = xa_param(test); + int i; unsigned int id; unsigned long index; @@ -1861,8 +1948,10 @@ static void check_align_1(struct xarray *xa, char *name) * We should always be able to store without allocating memory after * reserving a slot. */ -static void check_align_2(struct xarray *xa, char *name) +static void check_align_2(struct kunit *test, char *name) { + struct xarray *xa = xa_param(test); + int i; XA_BUG_ON(xa, !xa_empty(xa)); @@ -1881,15 +1970,15 @@ static void check_align_2(struct xarray *xa, char *name) XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_align(struct xarray *xa) +static noinline void check_align(struct kunit *test) { char name[] = "Motorola 68000"; - check_align_1(xa, name); - check_align_1(xa, name + 1); - check_align_1(xa, name + 2); - check_align_1(xa, name + 3); - check_align_2(xa, name); + check_align_1(test, name); + check_align_1(test, name + 1); + check_align_1(test, name + 2); + check_align_1(test, name + 3); + check_align_2(test, name); } static LIST_HEAD(shadow_nodes); @@ -1905,7 +1994,7 @@ static void test_update_node(struct xa_node *node) } } -static noinline void shadow_remove(struct xarray *xa) +static noinline void shadow_remove(struct kunit *test, struct xarray *xa) { struct xa_node *node; @@ -1919,8 +2008,17 @@ static noinline void shadow_remove(struct xarray *xa) xa_unlock(xa); } -static noinline void check_workingset(struct xarray *xa, unsigned long index) +struct workingset_testcase { + struct xarray *xa; + unsigned long index; +}; + +static noinline void check_workingset(struct kunit *test) { + struct workingset_testcase tc = *(struct workingset_testcase *)test->param_value; + struct xarray *xa = tc.xa; + unsigned long index = tc.index; + XA_STATE(xas, xa, index); xas_set_update(&xas, test_update_node); @@ -1943,7 +2041,7 @@ static noinline void check_workingset(struct xarray *xa, unsigned long index) xas_unlock(&xas); XA_BUG_ON(xa, list_empty(&shadow_nodes)); - shadow_remove(xa); + shadow_remove(test, xa); XA_BUG_ON(xa, !list_empty(&shadow_nodes)); XA_BUG_ON(xa, !xa_empty(xa)); } @@ -1952,9 +2050,11 @@ static noinline void check_workingset(struct xarray *xa, unsigned long index) * Check that the pointer / value / sibling entries are accounted the * way we expect them to be. 
*/ -static noinline void check_account(struct xarray *xa) +static noinline void check_account(struct kunit *test) { #ifdef CONFIG_XARRAY_MULTI + struct xarray *xa = xa_param(test); + unsigned int order; for (order = 1; order < 12; order++) { @@ -1981,8 +2081,10 @@ static noinline void check_account(struct xarray *xa) #endif } -static noinline void check_get_order(struct xarray *xa) +static noinline void check_get_order(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; unsigned int order; unsigned long i, j; @@ -2001,8 +2103,10 @@ static noinline void check_get_order(struct xarray *xa) } } -static noinline void check_xas_get_order(struct xarray *xa) +static noinline void check_xas_get_order(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; @@ -2034,8 +2138,10 @@ static noinline void check_xas_get_order(struct xarray *xa) } } -static noinline void check_xas_conflict_get_order(struct xarray *xa) +static noinline void check_xas_conflict_get_order(struct kunit *test) { + struct xarray *xa = xa_param(test); + XA_STATE(xas, xa, 0); void *entry; @@ -2092,8 +2198,10 @@ static noinline void check_xas_conflict_get_order(struct xarray *xa) } -static noinline void check_destroy(struct xarray *xa) +static noinline void check_destroy(struct kunit *test) { + struct xarray *xa = xa_param(test); + unsigned long index; XA_BUG_ON(xa, !xa_empty(xa)); @@ -2126,52 +2234,59 @@ static noinline void check_destroy(struct xarray *xa) } static DEFINE_XARRAY(array); +static struct xarray *arrays[] = { &array }; +KUNIT_ARRAY_PARAM(array, arrays, NULL); -static int xarray_checks(void) -{ - check_xa_err(&array); - check_xas_retry(&array); - check_xa_load(&array); - check_xa_mark(&array); - check_xa_shrink(&array); - check_xas_erase(&array); - check_insert(&array); - check_cmpxchg(&array); - check_cmpxchg_order(&array); - check_reserve(&array); - check_reserve(&xa0); - check_multi_store(&array); - check_multi_store_advanced(&array); - check_get_order(&array); - check_xas_get_order(&array); - check_xas_conflict_get_order(&array); - check_xa_alloc(); - check_find(&array); - check_find_entry(&array); - check_pause(&array); - check_account(&array); - check_destroy(&array); - check_move(&array); - check_create_range(&array); - check_store_range(&array); - check_store_iter(&array); - check_align(&xa0); - check_split(&array); +static struct xarray *xa0s[] = { &xa0 }; +KUNIT_ARRAY_PARAM(xa0, xa0s, NULL); - check_workingset(&array, 0); - check_workingset(&array, 64); - check_workingset(&array, 4096); +static struct workingset_testcase workingset_testcases[] = { + { &array, 0 }, + { &array, 64 }, + { &array, 4096 }, +}; +KUNIT_ARRAY_PARAM(workingset, workingset_testcases, NULL); - printk("XArray: %u of %u tests passed\n", tests_passed, tests_run); - return (tests_run == tests_passed) ? 
0 : -EINVAL; -} +static struct kunit_case xarray_cases[] = { + KUNIT_CASE_PARAM(check_xa_err, array_gen_params), + KUNIT_CASE_PARAM(check_xas_retry, array_gen_params), + KUNIT_CASE_PARAM(check_xa_load, array_gen_params), + KUNIT_CASE_PARAM(check_xa_mark, array_gen_params), + KUNIT_CASE_PARAM(check_xa_shrink, array_gen_params), + KUNIT_CASE_PARAM(check_xas_erase, array_gen_params), + KUNIT_CASE_PARAM(check_insert, array_gen_params), + KUNIT_CASE_PARAM(check_cmpxchg, array_gen_params), + KUNIT_CASE_PARAM(check_cmpxchg_order, array_gen_params), + KUNIT_CASE_PARAM(check_reserve, array_gen_params), + KUNIT_CASE_PARAM(check_reserve, xa0_gen_params), + KUNIT_CASE_PARAM(check_multi_store, array_gen_params), + KUNIT_CASE_PARAM(check_multi_store_advanced, array_gen_params), + KUNIT_CASE_PARAM(check_get_order, array_gen_params), + KUNIT_CASE_PARAM(check_xas_get_order, array_gen_params), + KUNIT_CASE_PARAM(check_xas_conflict_get_order, array_gen_params), + KUNIT_CASE(check_xa_alloc), + KUNIT_CASE_PARAM(check_find, array_gen_params), + KUNIT_CASE_PARAM(check_find_entry, array_gen_params), + KUNIT_CASE_PARAM(check_pause, array_gen_params), + KUNIT_CASE_PARAM(check_account, array_gen_params), + KUNIT_CASE_PARAM(check_destroy, array_gen_params), + KUNIT_CASE_PARAM(check_move, array_gen_params), + KUNIT_CASE_PARAM(check_create_range, array_gen_params), + KUNIT_CASE_PARAM(check_store_range, array_gen_params), + KUNIT_CASE_PARAM(check_store_iter, array_gen_params), + KUNIT_CASE_PARAM(check_align, xa0_gen_params), + KUNIT_CASE_PARAM(check_split, array_gen_params), + KUNIT_CASE_PARAM(check_workingset, workingset_gen_params), + {}, +}; -static void xarray_exit(void) -{ -} +static struct kunit_suite xarray_suite = { + .name = "xarray", + .test_cases = xarray_cases, +}; + +kunit_test_suite(xarray_suite); -module_init(xarray_checks); -module_exit(xarray_exit); MODULE_AUTHOR("Matthew Wilcox "); MODULE_DESCRIPTION("XArray API test module"); MODULE_LICENSE("GPL"); From 41756d11292aca72fcd10e946aa728a9ba8118e9 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 9 Dec 2024 19:06:54 -0800 Subject: [PATCH 394/504] xarray-port-tests-to-kunit-fix Fix cocci warning: lib/test_xarray.c:1019:52-53: WARNING comparing pointer to 0 Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202412081700.YXB3vBbg-lkp@intel.com/ Cc: Tamir Duberstein Signed-off-by: Andrew Morton --- lib/test_xarray.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/test_xarray.c b/lib/test_xarray.c index 9d0e797b825f..b6cac747ec46 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1016,7 +1016,7 @@ static noinline void check_xa_alloc_1(struct kunit *test, struct xarray *xa, uns XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), GFP_KERNEL) != -EBUSY); - XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0); + XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != NULL); XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), GFP_KERNEL) != -EBUSY); xa_erase_index(test, xa, 3); From 5020e704415c7754af79469ac7e676b99b622bde Mon Sep 17 00:00:00 2001 From: MengEn Sun Date: Fri, 6 Dec 2024 12:13:47 +0800 Subject: [PATCH 395/504] ucounts: move kfree() out of critical zone protected by ucounts_lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Although kfree is a non-sleep function, it is possible to enter a long chain of calls probabilistically, so it looks better to move kfree from alloc_ucounts() out of the critical zone of ucounts_lock. 
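
The resulting flow is a common kernel pattern: redo the lookup under the lock, publish the preallocated object if nothing raced in, and defer the kfree() of the losing allocation until the lock has been dropped. A minimal sketch of that pattern, using a hypothetical object type and names (struct obj, obj_install, cur_obj, obj_lock), not the actual ucounts code:

	#include <linux/slab.h>
	#include <linux/spinlock.h>

	struct obj { int key; };

	static DEFINE_SPINLOCK(obj_lock);
	static struct obj *cur_obj;

	/* Install 'new' unless another CPU beat us to it. */
	static struct obj *obj_install(struct obj *new)
	{
		struct obj *found;

		spin_lock_irq(&obj_lock);
		found = cur_obj;
		if (!found)
			cur_obj = new;		/* we won the race, publish it */
		spin_unlock_irq(&obj_lock);

		if (!found)
			return new;

		kfree(new);			/* freed only after the lock is dropped */
		return found;
	}

Whether the critical section or the later kfree() runs, the lock hold time now covers only the lookup and list manipulation.
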
Link: https://lkml.kernel.org/r/1733458427-11794-1-git-send-email-mengensun@tencent.com Signed-off-by: MengEn Sun Reviewed-by: YueHong Wu Reviewed-by: Andrew Morton Cc: Andrei Vagin Cc: Joel Granados Cc: Thomas Weißschuh Signed-off-by: Andrew Morton --- kernel/ucount.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/kernel/ucount.c b/kernel/ucount.c index f950b5e59d63..86c5f1c0bad9 100644 --- a/kernel/ucount.c +++ b/kernel/ucount.c @@ -164,8 +164,8 @@ struct ucounts *get_ucounts(struct ucounts *ucounts) struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid) { struct hlist_head *hashent = ucounts_hashentry(ns, uid); - struct ucounts *ucounts, *new; bool wrapped; + struct ucounts *ucounts, *new = NULL; spin_lock_irq(&ucounts_lock); ucounts = find_ucounts(ns, uid, hashent); @@ -182,17 +182,17 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid) spin_lock_irq(&ucounts_lock); ucounts = find_ucounts(ns, uid, hashent); - if (ucounts) { - kfree(new); - } else { + if (!ucounts) { hlist_add_head(&new->node, hashent); get_user_ns(new->ns); spin_unlock_irq(&ucounts_lock); return new; } } + wrapped = !get_ucounts_or_wrap(ucounts); spin_unlock_irq(&ucounts_lock); + kfree(new); if (wrapped) { put_ucounts(ucounts); return NULL; From bf4c52eb67d56db70de0c84b1b1eff8a794a3375 Mon Sep 17 00:00:00 2001 From: Tamir Duberstein Date: Wed, 4 Dec 2024 13:41:06 -0500 Subject: [PATCH 396/504] checkpatch: check return of `git_commit_info` Avoid string concatenation with an undefined variable when a reference to a missing commit is contained in a `Fixes` tag. Given this patch: : From: Tamir Duberstein : Subject: Test patch : Date: Fri, 25 Oct 2024 19:30:51 -0400 : : This is a test patch. : : Fixes: deadbeef111 : Signed-off-by: Tamir Duberstein : --- /dev/null : +++ b/new-file : @@ -0,0 +1 @@ : +Test. Before: WARNING: Please use correct Fixes: style 'Fixes: <12 chars of sha1> ("")' - ie: 'Fixes: ("commit title")' WARNING: Unknown commit id 'deadbeef111', maybe rebased or not pulled? Use of uninitialized value $cid in concatenation (.) or string at scripts/checkpatch.pl line 3242. After: WARNING: Unknown commit id 'deadbeef111', maybe rebased or not pulled? This patch also reduce duplication slightly. Link: https://lkml.kernel.org/r/20241204-checkpatch-missing-commit-v1-1-68b34c94944e@gmail.com Signed-off-by: Tamir Duberstein <tamird@gmail.com> Cc: Andy Whitcroft <apw@canonical.com> Cc: Dwaipayan Ray <dwaipayanray1@gmail.com> Cc: Joe Perches <joe@perches.com> Cc: Lukas Bulwahn <lukas.bulwahn@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- scripts/checkpatch.pl | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index dbb9c3c6fe30..744328d21eb8 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3237,12 +3237,12 @@ sub process { my ($cid, $ctitle) = git_commit_info($orig_commit, $id, $title); - if ($ctitle ne $title || $tag_case || $tag_space || - $id_length || $id_case || !$title_has_quotes) { + if (defined($cid) && ($ctitle ne $title || $tag_case || $tag_space || $id_length || $id_case || !$title_has_quotes)) { + my $fixed = "Fixes: $cid (\"$ctitle\")"; if (WARN("BAD_FIXES_TAG", - "Please use correct Fixes: style 'Fixes: <12 chars of sha1> (\"<title line>\")' - ie: 'Fixes: $cid (\"$ctitle\")'\n" . $herecurr) && + "Please use correct Fixes: style 'Fixes: <12 chars of sha1> (\"<title line>\")' - ie: '$fixed'\n" . 
$herecurr) && $fix) { - $fixed[$fixlinenr] = "Fixes: $cid (\"$ctitle\")"; + $fixed[$fixlinenr] = $fixed; } } } From 4fb5a9e675a7c67e4e860f73cd6b2887f27207e5 Mon Sep 17 00:00:00 2001 From: Andrew Morton <akpm@linux-foundation.org> Date: Mon, 6 Jan 2025 18:16:23 -0800 Subject: [PATCH 397/504] checkpatch-check-return-of-git_commit_info-fix s/12 chars of sha1/12+ chars of sha1/, per Jon Link: https://lkml.kernel.org/r/87o70kt232.fsf@trenco.lwn.net Cc: Andy Whitcroft <apw@canonical.com> Cc: Dwaipayan Ray <dwaipayanray1@gmail.com> Cc: Joe Perches <joe@perches.com> Cc: Lukas Bulwahn <lukas.bulwahn@gmail.com> Cc: Tamir Duberstein <tamird@gmail.com> Cc: Jonathan Corbet <corbet@lwn.net> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- scripts/checkpatch.pl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 744328d21eb8..2bdc3d169af5 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -3240,7 +3240,7 @@ sub process { if (defined($cid) && ($ctitle ne $title || $tag_case || $tag_space || $id_length || $id_case || !$title_has_quotes)) { my $fixed = "Fixes: $cid (\"$ctitle\")"; if (WARN("BAD_FIXES_TAG", - "Please use correct Fixes: style 'Fixes: <12 chars of sha1> (\"<title line>\")' - ie: '$fixed'\n" . $herecurr) && + "Please use correct Fixes: style 'Fixes: <12+ chars of sha1> (\"<title line>\")' - ie: '$fixed'\n" . $herecurr) && $fix) { $fixed[$fixlinenr] = $fixed; } From dfc929ac377b770349151ed5800710a1a34ca0f3 Mon Sep 17 00:00:00 2001 From: Akinobu Mita <akinobu.mita@gmail.com> Date: Sun, 8 Dec 2024 23:24:15 +0900 Subject: [PATCH 398/504] fault-inject: use prandom where cryptographically secure randomness is not needed Currently get_random*() is used to determine the probability of fault injection, but cryptographically secure random numbers are not required. There is no big problem in using prandom instead of get_random*() to determine the probability of fault injection, and it also avoids acquiring a spinlock, which is unsafe in some contexts. Link: https://lore.kernel.org/lkml/20241129120939.GG35539@noisy.programming.kicks-ass.net Link: https://lkml.kernel.org/r/20241208142415.205960-1-akinobu.mita@gmail.com Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com> Suggested-by: Peter Zijlstra <peterz@infradead.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Masami Hiramatsu <mhiramat@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- lib/fault-inject.c | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/lib/fault-inject.c b/lib/fault-inject.c index 52eb6ba29698..92a54c8a8380 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/kernel.h> #include <linux/init.h> -#include <linux/random.h> +#include <linux/prandom.h> #include <linux/debugfs.h> #include <linux/sched.h> #include <linux/stat.h> @@ -12,6 +12,24 @@ #include <linux/stacktrace.h> #include <linux/fault-inject.h> +/* + * The should_fail() use prandom instead of the normal Linux RNG since they don't + * need cryptographically secure random numbers. 
+ */ +static DEFINE_PER_CPU(struct rnd_state, fault_rnd_state); + +static u32 fault_prandom_u32_below_100(void) +{ + struct rnd_state *state; + u32 res; + + state = &get_cpu_var(fault_rnd_state); + res = prandom_u32_state(state); + put_cpu_var(fault_rnd_state); + + return res % 100; +} + /* * setup_fault_attr() is a helper function for various __setup handlers, so it * returns 0 on error, because that is what __setup handlers do. @@ -31,6 +49,8 @@ int setup_fault_attr(struct fault_attr *attr, char *str) return 0; } + prandom_init_once(&fault_rnd_state); + attr->probability = probability; attr->interval = interval; atomic_set(&attr->times, times); @@ -146,7 +166,7 @@ bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags) return false; } - if (attr->probability <= get_random_u32_below(100)) + if (attr->probability <= fault_prandom_u32_below_100()) return false; fail: @@ -219,6 +239,8 @@ struct dentry *fault_create_debugfs_attr(const char *name, if (IS_ERR(dir)) return dir; + prandom_init_once(&fault_rnd_state); + debugfs_create_ul("probability", mode, dir, &attr->probability); debugfs_create_ul("interval", mode, dir, &attr->interval); debugfs_create_atomic_t("times", mode, dir, &attr->times); @@ -431,6 +453,8 @@ static const struct config_item_type fault_config_type = { void fault_config_init(struct fault_config *config, const char *name) { + prandom_init_once(&fault_rnd_state); + config_group_init_type_name(&config->group, name, &fault_config_type); } EXPORT_SYMBOL_GPL(fault_config_init); From 65be08d27e872d57b3a3f32bb365f35dd53d6af6 Mon Sep 17 00:00:00 2001 From: Andrew Morton <akpm@linux-foundation.org> Date: Mon, 9 Dec 2024 19:12:31 -0800 Subject: [PATCH 399/504] fault-inject-use-prandom-where-cryptographically-secure-randomness-is-not-needed-fix tweak and reflow comment Cc: Akinobu Mita <akinobu.mita@gmail.com> Cc: Masami Hiramatsu <mhiramat@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- lib/fault-inject.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/fault-inject.c b/lib/fault-inject.c index 92a54c8a8380..999053fa133e 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -13,8 +13,8 @@ #include <linux/fault-inject.h> /* - * The should_fail() use prandom instead of the normal Linux RNG since they don't - * need cryptographically secure random numbers. + * The should_fail() functions use prandom instead of the normal Linux RNG + * since they don't need cryptographically secure random numbers. */ static DEFINE_PER_CPU(struct rnd_state, fault_rnd_state); From 0de7948c681779be648dc567c3a778afce9c3e8f Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:32 +0000 Subject: [PATCH 400/504] netfilter: conntrack: cleanup timeout definitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "Converge on using secs_to_jiffies()", v3. This is a series that follows up on my previous series to introduce secs_to_jiffies() and convert a few initial users.[1] In the review for that series, Anna-Maria requested converting other users with Coccinelle. [2] This is part 1 that converts users of msecs_to_jiffies() that use the multiply pattern of either of: - msecs_to_jiffies(N*1000), or - msecs_to_jiffies(N*MSEC_PER_SEC) where N is a constant, to avoid the multiplication. The entire conversion is made with Coccinelle in the script added in patch 2. 
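
For a concrete feel of the two patterns being converted, an illustrative call site (&dwork is a hypothetical delayed_work, not taken from this series) transforms as follows:

	/* before: seconds encoded as a millisecond multiply */
	schedule_delayed_work(&dwork, msecs_to_jiffies(5 * 1000));
	schedule_delayed_work(&dwork, msecs_to_jiffies(5 * MSEC_PER_SEC));

	/* after: the unit is explicit and the multiply disappears */
	schedule_delayed_work(&dwork, secs_to_jiffies(5));
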
Some changes suggested by Coccinelle have been deferred to later parts that will address other possible variant patterns. [1] https://lore.kernel.org/all/20241030-open-coded-timeouts-v3-0-9ba123facf88@linux.microsoft.com/ [2] https://lore.kernel.org/all/8734kngfni.fsf@somnus/ This patch (of 19): None of the higher order definitions are used anymore, so remove definitions for minutes, hours, and days timeouts. Convert the seconds denominated timeouts to secs_to_jiffies() Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-0-ddfefd7e9f2a@linux.microsoft.com Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-1-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com>: Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org>: Cc: Martin K. 
Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- net/netfilter/nf_conntrack_proto_sctp.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 4cc97f971264..7c6f7c9f7332 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -39,20 +39,15 @@ static const char *const sctp_conntrack_names[] = { [SCTP_CONNTRACK_HEARTBEAT_SENT] = "HEARTBEAT_SENT", }; -#define SECS * HZ -#define MINS * 60 SECS -#define HOURS * 60 MINS -#define DAYS * 24 HOURS - static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { - [SCTP_CONNTRACK_CLOSED] = 10 SECS, - [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, - [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, - [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS, - [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS, - [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS, - [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS, - [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS, + [SCTP_CONNTRACK_CLOSED] = secs_to_jiffies(10), + [SCTP_CONNTRACK_COOKIE_WAIT] = secs_to_jiffies(3), + [SCTP_CONNTRACK_COOKIE_ECHOED] = secs_to_jiffies(3), + [SCTP_CONNTRACK_ESTABLISHED] = secs_to_jiffies(210), + [SCTP_CONNTRACK_SHUTDOWN_SENT] = secs_to_jiffies(3), + [SCTP_CONNTRACK_SHUTDOWN_RECD] = secs_to_jiffies(3), + [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = secs_to_jiffies(3), + [SCTP_CONNTRACK_HEARTBEAT_SENT] = secs_to_jiffies(30), }; #define SCTP_FLAG_HEARTBEAT_VTAG_FAILED 1 From 6ff9a75b6219df4bad55f5a29ccbbc85918a7b51 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:33 +0000 Subject: [PATCH 401/504] coccinelle: misc: add secs_to_jiffies script MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This script finds and suggests conversions of timeout patterns that result in seconds-denominated timeouts to use the new secs_to_jiffies() API in include/linux/jiffies.h for better readability. 
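
In rough terms, the script's rules bind a Coccinelle `constant` metavariable, so only literal multiplies are rewritten and a timeout computed at run time is left for a human to convert. Illustrative call sites (the timer `t` and `timeout_secs` are hypothetical):

	/* matched: the seconds value is a constant expression */
	mod_timer(&t, jiffies + msecs_to_jiffies(30 * 1000));
	/* suggested replacement */
	mod_timer(&t, jiffies + secs_to_jiffies(30));

	/* not matched: the multiplier is a runtime value */
	mod_timer(&t, jiffies + msecs_to_jiffies(timeout_secs * MSEC_PER_SEC));
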
Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-2-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Suggested-by: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. 
Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- scripts/coccinelle/misc/secs_to_jiffies.cocci | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 scripts/coccinelle/misc/secs_to_jiffies.cocci diff --git a/scripts/coccinelle/misc/secs_to_jiffies.cocci b/scripts/coccinelle/misc/secs_to_jiffies.cocci new file mode 100644 index 000000000000..8bbb2884ea5d --- /dev/null +++ b/scripts/coccinelle/misc/secs_to_jiffies.cocci @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only +/// +/// Find usages of: +/// - msecs_to_jiffies(value*1000) +/// - msecs_to_jiffies(value*MSEC_PER_SEC) +/// +// Confidence: High +// Copyright: (C) 2024 Easwar Hariharan, Microsoft +// Keywords: secs, seconds, jiffies +// + +virtual patch + +@depends on patch@ constant C; @@ + +- msecs_to_jiffies(C * 1000) ++ secs_to_jiffies(C) + +@depends on patch@ constant C; @@ + +- msecs_to_jiffies(C * MSEC_PER_SEC) ++ secs_to_jiffies(C) From 11a1d555a30009aae5af837f112f45e2a2ba9f42 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:34 +0000 Subject: [PATCH 402/504] arm: pxa: convert timeouts to use secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-3-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. 
Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- arch/arm/mach-pxa/sharpsl_pm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c index 0c8d9000df5a..dd930e3a61a4 100644 --- a/arch/arm/mach-pxa/sharpsl_pm.c +++ b/arch/arm/mach-pxa/sharpsl_pm.c @@ -31,10 +31,10 @@ /* * Constants */ -#define SHARPSL_CHARGE_ON_TIME_INTERVAL (msecs_to_jiffies(1*60*1000)) /* 1 min */ -#define SHARPSL_CHARGE_FINISH_TIME (msecs_to_jiffies(10*60*1000)) /* 10 min */ -#define SHARPSL_BATCHK_TIME (msecs_to_jiffies(15*1000)) /* 15 sec */ -#define SHARPSL_BATCHK_TIME_SUSPEND (60*10) /* 10 min */ +#define SHARPSL_CHARGE_ON_TIME_INTERVAL (secs_to_jiffies(60)) +#define SHARPSL_CHARGE_FINISH_TIME (secs_to_jiffies(10*60)) +#define SHARPSL_BATCHK_TIME (secs_to_jiffies(15)) +#define SHARPSL_BATCHK_TIME_SUSPEND (60*10) /* 10 min */ #define SHARPSL_WAIT_CO_TIME 15 /* 15 sec */ #define SHARPSL_WAIT_DISCHARGE_ON 100 /* 100 msec */ From 251c4dbb1bd9d0682827a807aea215342eb3e113 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: 
Tue, 10 Dec 2024 22:02:35 +0000 Subject: [PATCH 403/504] s390: kernel: convert timeouts to use secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the values here are a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-4-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Acked-by: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. 
Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- arch/s390/kernel/lgr.c | 2 +- arch/s390/kernel/time.c | 4 ++-- arch/s390/kernel/topology.c | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/s390/kernel/lgr.c b/arch/s390/kernel/lgr.c index 6652e54cf3db..6d1ffca5f798 100644 --- a/arch/s390/kernel/lgr.c +++ b/arch/s390/kernel/lgr.c @@ -166,7 +166,7 @@ static struct timer_list lgr_timer; */ static void lgr_timer_set(void) { - mod_timer(&lgr_timer, jiffies + msecs_to_jiffies(LGR_TIMER_INTERVAL_SECS * MSEC_PER_SEC)); + mod_timer(&lgr_timer, jiffies + secs_to_jiffies(LGR_TIMER_INTERVAL_SECS)); } /* diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 34a65c141ea0..e9f47c3a6197 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c @@ -662,12 +662,12 @@ static void stp_check_leap(void) if (ret < 0) pr_err("failed to set leap second flags\n"); /* arm Timer to clear leap second flags */ - mod_timer(&stp_timer, jiffies + msecs_to_jiffies(14400 * MSEC_PER_SEC)); + mod_timer(&stp_timer, jiffies + secs_to_jiffies(14400)); } else { /* The day the leap second is scheduled for hasn't been reached. Retry * in one hour. 
*/ - mod_timer(&stp_timer, jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC)); + mod_timer(&stp_timer, jiffies + secs_to_jiffies(3600)); } } diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 4f9c301a705b..0fd56a1cadbd 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -371,7 +371,7 @@ static void set_topology_timer(void) if (atomic_add_unless(&topology_poll, -1, 0)) mod_timer(&topology_timer, jiffies + msecs_to_jiffies(100)); else - mod_timer(&topology_timer, jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC)); + mod_timer(&topology_timer, jiffies + secs_to_jiffies(60)); } void topology_expect_change(void) From 43f6b20851f71a27a45a8534aa22cc28467374a3 Mon Sep 17 00:00:00 2001 From: Alexander Gordeev <agordeev@linux.ibm.com> Date: Tue, 17 Dec 2024 18:31:16 +0100 Subject: [PATCH 404/504] s390-kernel-convert-timeouts-to-use-secs_to_jiffies-fix simplify cmm_set_timer() Link: https://lkml.kernel.org/r/Z2G1ZPL2cAlQOYlF@li-008a6a4c-3549-11b2-a85c-c5cc2836eea2.ibm.com Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Easwar Hariharan <eahariha@linux.microsoft.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- arch/s390/mm/cmm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index d01724a715d0..7bf0f691827b 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -204,7 +204,7 @@ static void cmm_set_timer(void) del_timer(&cmm_timer); return; } - mod_timer(&cmm_timer, jiffies + msecs_to_jiffies(cmm_timeout_seconds * MSEC_PER_SEC)); + mod_timer(&cmm_timer, jiffies + secs_to_jiffies(cmm_timeout_seconds)); } static void cmm_timer_fn(struct timer_list *unused) From 7dec7e3cb1114f690f28cb623ed662e572d6f837 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:36 +0000 Subject: [PATCH 405/504] powerpc/papr_scm: convert timeouts to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-5-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. 
Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- arch/powerpc/platforms/pseries/papr_scm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/powerpc/platforms/pseries/papr_scm.c b/arch/powerpc/platforms/pseries/papr_scm.c index f84ac9fbe203..f7c9271bda58 100644 --- a/arch/powerpc/platforms/pseries/papr_scm.c +++ b/arch/powerpc/platforms/pseries/papr_scm.c @@ -544,7 +544,7 @@ static int drc_pmem_query_health(struct papr_scm_priv *p) /* Jiffies offset for which the health data is assumed to be same */ cache_timeout = p->lasthealth_jiffies + - msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000); + secs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL); /* Fetch new health info is its older than MIN_HEALTH_QUERY_INTERVAL */ if (time_after(jiffies, cache_timeout)) From cb2d58ba275f0cfc88d4aedbc62e91d7241d3a2b Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:37 +0000 Subject: [PATCH 406/504] mm: kmemleak: convert timeouts to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 
b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-6-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. 
Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- mm/kmemleak.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 820ba3b5cbfc..982bb5ef3233 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c @@ -1855,7 +1855,7 @@ static int kmemleak_scan_thread(void *arg) * Wait before the first scan to allow the system to fully initialize. */ if (first_run) { - signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000); + signed long timeout = secs_to_jiffies(SECS_FIRST_SCAN); first_run = 0; while (timeout && !kthread_should_stop()) timeout = schedule_timeout_interruptible(timeout); @@ -2241,7 +2241,7 @@ void __init kmemleak_init(void) return; jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE); - jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000); + jiffies_scan_wait = secs_to_jiffies(SECS_SCAN_WAIT); object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE); scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE); From d6f9cf9e70e411c265d04d561e9f703458d322c3 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:38 +0000 Subject: [PATCH 407/504] accel/habanalabs: convert timeouts to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-7-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. 
Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- drivers/accel/habanalabs/common/device.c | 2 +- drivers/accel/habanalabs/common/habanalabs_drv.c | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/accel/habanalabs/common/device.c b/drivers/accel/habanalabs/common/device.c index e0cf3b4343bb..30277ae410d4 100644 --- a/drivers/accel/habanalabs/common/device.c +++ b/drivers/accel/habanalabs/common/device.c @@ -817,7 +817,7 @@ static void device_hard_reset_pending(struct work_struct *work) } queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work, - msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000)); + secs_to_jiffies(HL_PENDING_RESET_PER_SEC)); } } diff --git a/drivers/accel/habanalabs/common/habanalabs_drv.c b/drivers/accel/habanalabs/common/habanalabs_drv.c index 708dfd10f39c..5ec13f3a46f9 100644 --- a/drivers/accel/habanalabs/common/habanalabs_drv.c +++ b/drivers/accel/habanalabs/common/habanalabs_drv.c @@ -362,8 +362,7 @@ static void fixup_device_params_per_asic(struct hl_device *hdev, int timeout) * a different default timeout for Gaudi */ if (timeout == 
HL_DEFAULT_TIMEOUT_LOCKED) - hdev->timeout_jiffies = msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED * - MSEC_PER_SEC); + hdev->timeout_jiffies = secs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED); hdev->reset_upon_device_release = 0; break; From c7c8a85db7cdabc1ab90fab16a2a775a058b0d15 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:39 +0000 Subject: [PATCH 408/504] drm/xe: convert timeout to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-8-ddfefd7e9f2a@linux.microsoft.com Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com> Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. 
Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- drivers/gpu/drm/xe/xe_device.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index 06d6db8b50f9..f260e21fa283 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -514,7 +514,7 @@ static int wait_for_lmem_ready(struct xe_device *xe) drm_dbg(&xe->drm, "Waiting for lmem initialization\n"); start = jiffies; - timeout = start + msecs_to_jiffies(60 * 1000); /* 60 sec! */ + timeout = start + secs_to_jiffies(60); /* 60 sec! */ do { if (signal_pending(current)) From 6e5eebd7e46a2da376f068a738fdf84329fc97a2 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:40 +0000 Subject: [PATCH 409/504] scsi: lpfc: convert timeouts to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-9-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. 
Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- drivers/scsi/lpfc/lpfc_init.c | 18 +++++++++--------- drivers/scsi/lpfc/lpfc_nportdisc.c | 8 ++++---- drivers/scsi/lpfc/lpfc_nvme.c | 2 +- drivers/scsi/lpfc/lpfc_sli.c | 4 ++-- drivers/scsi/lpfc/lpfc_vmid.c | 2 +- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 7f57397d91a9..4fed2e1243e0 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -598,7 +598,7 @@ lpfc_config_port_post(struct lpfc_hba *phba) jiffies + msecs_to_jiffies(1000 * timeout)); /* Set up heart beat (HB) timer */ mod_timer(&phba->hb_tmofunc, - jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); + jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL)); clear_bit(HBA_HBEAT_INP, &phba->hba_flag); clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); phba->last_completion_time = jiffies; @@ -1267,7 +1267,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) !test_bit(FC_UNLOADING, &phba->pport->load_flag)) mod_timer(&phba->hb_tmofunc, jiffies + - msecs_to_jiffies(1000 * 
LPFC_HB_MBOX_INTERVAL)); + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL)); return; } @@ -1555,7 +1555,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) /* If IOs are completing, no need to issue a MBX_HEARTBEAT */ spin_lock_irq(&phba->pport->work_port_lock); if (time_after(phba->last_completion_time + - msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL), jiffies)) { spin_unlock_irq(&phba->pport->work_port_lock); if (test_bit(HBA_HBEAT_INP, &phba->hba_flag)) @@ -3354,7 +3354,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) spin_unlock_irqrestore(&phba->hbalock, iflag); if (mbx_action == LPFC_MBX_NO_WAIT) return; - timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; + timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies; spin_lock_irqsave(&phba->hbalock, iflag); if (phba->sli.mbox_active) { actcmd = phba->sli.mbox_active->u.mb.mbxCommand; @@ -4924,14 +4924,14 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) stat = 1; goto finished; } - if (time >= msecs_to_jiffies(30 * 1000)) { + if (time >= secs_to_jiffies(30)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0461 Scanning longer than 30 " "seconds. Continuing initialization\n"); stat = 1; goto finished; } - if (time >= msecs_to_jiffies(15 * 1000) && + if (time >= secs_to_jiffies(15) && phba->link_state <= LPFC_LINK_DOWN) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0465 Link down longer than 15 " @@ -4945,7 +4945,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) if (vport->num_disc_nodes || vport->fc_prli_sent) goto finished; if (!atomic_read(&vport->fc_map_cnt) && - time < msecs_to_jiffies(2 * 1000)) + time < secs_to_jiffies(2)) goto finished; if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) goto finished; @@ -5179,8 +5179,8 @@ lpfc_vmid_poll(struct timer_list *t) lpfc_worker_wake_up(phba); /* restart the timer for the next iteration */ - mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 * - LPFC_VMID_TIMER)); + mod_timer(&phba->inactive_vmid_poll, + jiffies + secs_to_jiffies(LPFC_VMID_TIMER)); } /** diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 4d88cfe71cae..08a7f5c6157f 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -906,7 +906,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, (ndlp->nlp_state >= NLP_STE_ADISC_ISSUE || ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) { mod_timer(&ndlp->nlp_delayfunc, - jiffies + msecs_to_jiffies(1000 * 1)); + jiffies + secs_to_jiffies(1)); set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_printf_vlog(vport, KERN_INFO, @@ -1332,7 +1332,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); + mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1)); set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; @@ -1936,7 +1936,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, - jiffies + msecs_to_jiffies(1000 * 1)); + jiffies + secs_to_jiffies(1)); set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; @@ -2743,7 +2743,7 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if 
(!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { mod_timer(&ndlp->nlp_delayfunc, - jiffies + msecs_to_jiffies(1000 * 1)); + jiffies + secs_to_jiffies(1)); set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 43dc1da4a156..b1adb9f59097 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -2237,7 +2237,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, * wait. Print a message if a 10 second wait expires and renew the * wait. This is unexpected. */ - wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000); + wait_tmo = secs_to_jiffies(LPFC_NVME_WAIT_TMO); while (true) { ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo); if (unlikely(!ret)) { diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 874644b31a3e..3fd9723cd271 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -9012,7 +9012,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Start heart beat timer */ mod_timer(&phba->hb_tmofunc, - jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); + jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL)); clear_bit(HBA_HBEAT_INP, &phba->hba_flag); clear_bit(HBA_HBEAT_TMO, &phba->hba_flag); phba->last_completion_time = jiffies; @@ -13323,7 +13323,7 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action) lpfc_sli_mbox_sys_flush(phba); return; } - timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; + timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies; /* Disable softirqs, including timers from obtaining phba->hbalock */ local_bh_disable(); diff --git a/drivers/scsi/lpfc/lpfc_vmid.c b/drivers/scsi/lpfc/lpfc_vmid.c index cc3e4736f2fe..14dbfe954e42 100644 --- a/drivers/scsi/lpfc/lpfc_vmid.c +++ b/drivers/scsi/lpfc/lpfc_vmid.c @@ -278,7 +278,7 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid, if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) { mod_timer(&vport->phba->inactive_vmid_poll, jiffies + - msecs_to_jiffies(1000 * LPFC_VMID_TIMER)); + secs_to_jiffies(LPFC_VMID_TIMER)); vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD; } } From b19cb2a0c8abe639c5797c7ebcafd751517629fb Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:41 +0000 Subject: [PATCH 410/504] scsi: arcmsr: convert timeouts to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. 
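The transformation is purely mechanical; restated as plain C (taken directly from the arcmsr hunk below), the change is:

  /* before: a 60-second expiry written out as milliseconds */
  pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
  /* after: the same expiry expressed directly in seconds */
  pacb->refresh_timer.expires = jiffies + secs_to_jiffies(60);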
This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-10-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. 
Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- drivers/scsi/arcmsr/arcmsr_hba.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c index 35860c61468b..fd797e278549 100644 --- a/drivers/scsi/arcmsr/arcmsr_hba.c +++ b/drivers/scsi/arcmsr/arcmsr_hba.c @@ -1044,7 +1044,7 @@ static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb) static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb) { timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0); - pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000); + pacb->refresh_timer.expires = jiffies + secs_to_jiffies(60); add_timer(&pacb->refresh_timer); } From 2d933214d3c0fd1ee8a8482476083dba35935c8d Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:42 +0000 Subject: [PATCH 411/504] scsi: pm8001: convert timeouts to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-11-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. 
Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- drivers/scsi/pm8001/pm8001_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index f8c81e53e93f..22e0e79e88ab 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -736,7 +736,7 @@ static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) return -EIO; } time_remaining = wait_for_completion_timeout(&completion, - msecs_to_jiffies(60*1000)); // 1 min + secs_to_jiffies(60)); // 1 min if (!time_remaining) { kfree(payload.func_specific); pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n"); From eadbe03d246def453c18c9c970ea4682885bc116 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:43 +0000 Subject: [PATCH 412/504] xen/blkback: convert timeouts to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). 
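For context, the helper introduced by b35108a51cf7 converts seconds straight to jiffies; a rough sketch of its shape (the exact definition and type handling in include/linux/jiffies.h may differ, so treat this as an assumption rather than a quote):

  /* sketch only; see include/linux/jiffies.h for the real macro */
  #define secs_to_jiffies(_secs) ((_secs) * HZ)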
As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-12-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. 
Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- drivers/block/xen-blkback/blkback.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c index 838064593f62..a7c2b04ab943 100644 --- a/drivers/block/xen-blkback/blkback.c +++ b/drivers/block/xen-blkback/blkback.c @@ -544,7 +544,7 @@ static void print_stats(struct xen_blkif_ring *ring) ring->st_rd_req, ring->st_wr_req, ring->st_f_req, ring->st_ds_req, ring->persistent_gnt_c, max_pgrants); - ring->st_print = jiffies + msecs_to_jiffies(10 * 1000); + ring->st_print = jiffies + secs_to_jiffies(10); ring->st_rd_req = 0; ring->st_wr_req = 0; ring->st_oo_req = 0; From 6fa43581f662a27f2bbdc5e902534fcc8f36ba48 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:45 +0000 Subject: [PATCH 413/504] wifi: ath11k: convert timeouts to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-14-ddfefd7e9f2a@linux.microsoft.com Acked-by: Jeff Johnson <quic_jjohnson@quicinc.com> Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. 
Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- drivers/net/wireless/ath/ath11k/debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index 57281a135dd7..bf192529e3fe 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -178,7 +178,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar, * received 'update stats' event, we keep a 3 seconds timeout in case, * fw_stats_done is not marked yet */ - timeout = jiffies + msecs_to_jiffies(3 * 1000); + timeout = jiffies + secs_to_jiffies(3); ath11k_debugfs_fw_stats_reset(ar); From bd6eef3f951382a9767cd7fcd0ad27b540c76e01 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:46 +0000 Subject: [PATCH 414/504] bluetooth: mgmt: convert timeouts to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). 
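Besides the literal-1000 form, the conversion also covers timeouts scaled with MSEC_PER_SEC (the second Coccinelle rule quoted below), as in the habanalabs hunk earlier in this series; a minimal before/after sketch using that hunk's constant:

  /* before: seconds scaled to milliseconds via MSEC_PER_SEC */
  hdev->timeout_jiffies = msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED * MSEC_PER_SEC);
  /* after: the same timeout passed as seconds */
  hdev->timeout_jiffies = secs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED);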
As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-15-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. 
Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- net/bluetooth/mgmt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index b31192d473d0..8c993763ee0f 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -210,7 +210,7 @@ static const u16 mgmt_untrusted_events[] = { MGMT_EV_EXP_FEATURE_CHANGED, }; -#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) +#define CACHE_TIMEOUT secs_to_jiffies(2) #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ "\x00\x00\x00\x00\x00\x00\x00\x00" From b43886b59408d2a9b1532e02c3cae2c6bb70aff6 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:47 +0000 Subject: [PATCH 415/504] staging: vc04_services: convert timeouts to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-16-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. 
Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c index dc0d715ed970..0dbe76ee5570 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c @@ -59,7 +59,7 @@ static int bcm2835_audio_send_msg_locked(struct bcm2835_audio_instance *instance if (wait) { if (!wait_for_completion_timeout(&instance->msg_avail_comp, - msecs_to_jiffies(10 * 1000))) { + secs_to_jiffies(10))) { dev_err(instance->dev, "vchi message timeout, msg=%d\n", m->type); return -ETIMEDOUT; From 1925f9cac2e1b0eb75f4af22fa32ede6e4e60942 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:48 +0000 Subject: [PATCH 416/504] ceph: convert timeouts to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 
b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-17-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. 
Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ceph/quota.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/quota.c b/fs/ceph/quota.c index 06ee397e0c3a..d90eda19bcc4 100644 --- a/fs/ceph/quota.c +++ b/fs/ceph/quota.c @@ -166,7 +166,7 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc, if (IS_ERR(in)) { doutc(cl, "Can't lookup inode %llx (err: %ld)\n", realm->ino, PTR_ERR(in)); - qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */ + qri->timeout = jiffies + secs_to_jiffies(60); /* XXX */ } else { qri->timeout = 0; qri->inode = in; From f297084e662f956043ef4ce6ad4de75ac443dec8 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:49 +0000 Subject: [PATCH 417/504] livepatch: convert timeouts to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-18-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. 
Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- samples/livepatch/livepatch-callbacks-busymod.c | 3 +-- samples/livepatch/livepatch-shadow-fix1.c | 3 +-- samples/livepatch/livepatch-shadow-mod.c | 15 +++++---------- 3 files changed, 7 insertions(+), 14 deletions(-) diff --git a/samples/livepatch/livepatch-callbacks-busymod.c b/samples/livepatch/livepatch-callbacks-busymod.c index 378e2d40271a..69105596e72e 100644 --- a/samples/livepatch/livepatch-callbacks-busymod.c +++ b/samples/livepatch/livepatch-callbacks-busymod.c @@ -44,8 +44,7 @@ static void busymod_work_func(struct work_struct *work) static int livepatch_callbacks_mod_init(void) { pr_info("%s\n", __func__); - schedule_delayed_work(&work, - msecs_to_jiffies(1000 * 0)); + schedule_delayed_work(&work, 0); return 0; } diff --git a/samples/livepatch/livepatch-shadow-fix1.c b/samples/livepatch/livepatch-shadow-fix1.c index 6701641bf12d..f3f153895d6c 100644 --- a/samples/livepatch/livepatch-shadow-fix1.c +++ b/samples/livepatch/livepatch-shadow-fix1.c @@ -72,8 +72,7 @@ static struct dummy *livepatch_fix1_dummy_alloc(void) if (!d) return NULL; - 
d->jiffies_expire = jiffies + - msecs_to_jiffies(1000 * EXPIRE_PERIOD); + d->jiffies_expire = jiffies + secs_to_jiffies(EXPIRE_PERIOD); /* * Patch: save the extra memory location into a SV_LEAK shadow diff --git a/samples/livepatch/livepatch-shadow-mod.c b/samples/livepatch/livepatch-shadow-mod.c index 7e753b0d2fa6..5d83ad5a8118 100644 --- a/samples/livepatch/livepatch-shadow-mod.c +++ b/samples/livepatch/livepatch-shadow-mod.c @@ -101,8 +101,7 @@ static __used noinline struct dummy *dummy_alloc(void) if (!d) return NULL; - d->jiffies_expire = jiffies + - msecs_to_jiffies(1000 * EXPIRE_PERIOD); + d->jiffies_expire = jiffies + secs_to_jiffies(EXPIRE_PERIOD); /* Oops, forgot to save leak! */ leak = kzalloc(sizeof(*leak), GFP_KERNEL); @@ -152,8 +151,7 @@ static void alloc_work_func(struct work_struct *work) list_add(&d->list, &dummy_list); mutex_unlock(&dummy_list_mutex); - schedule_delayed_work(&alloc_dwork, - msecs_to_jiffies(1000 * ALLOC_PERIOD)); + schedule_delayed_work(&alloc_dwork, secs_to_jiffies(ALLOC_PERIOD)); } /* @@ -184,16 +182,13 @@ static void cleanup_work_func(struct work_struct *work) } mutex_unlock(&dummy_list_mutex); - schedule_delayed_work(&cleanup_dwork, - msecs_to_jiffies(1000 * CLEANUP_PERIOD)); + schedule_delayed_work(&cleanup_dwork, secs_to_jiffies(CLEANUP_PERIOD)); } static int livepatch_shadow_mod_init(void) { - schedule_delayed_work(&alloc_dwork, - msecs_to_jiffies(1000 * ALLOC_PERIOD)); - schedule_delayed_work(&cleanup_dwork, - msecs_to_jiffies(1000 * CLEANUP_PERIOD)); + schedule_delayed_work(&alloc_dwork, secs_to_jiffies(ALLOC_PERIOD)); + schedule_delayed_work(&cleanup_dwork, secs_to_jiffies(CLEANUP_PERIOD)); return 0; } From 1e06793762ace7d8c845fe72c2c64953b51497e4 Mon Sep 17 00:00:00 2001 From: Easwar Hariharan <eahariha@linux.microsoft.com> Date: Tue, 10 Dec 2024 22:02:50 +0000 Subject: [PATCH 418/504] ALSA: line6: convert timeouts to secs_to_jiffies() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b35108a51cf7 ("jiffies: Define secs_to_jiffies()") introduced secs_to_jiffies(). As the value here is a multiple of 1000, use secs_to_jiffies() instead of msecs_to_jiffies to avoid the multiplication. This is converted using scripts/coccinelle/misc/secs_to_jiffies.cocci with the following Coccinelle rules: @@ constant C; @@ - msecs_to_jiffies(C * 1000) + secs_to_jiffies(C) @@ constant C; @@ - msecs_to_jiffies(C * MSEC_PER_SEC) + secs_to_jiffies(C) Link: https://lkml.kernel.org/r/20241210-converge-secs-to-jiffies-v3-19-ddfefd7e9f2a@linux.microsoft.com Signed-off-by: Easwar Hariharan <eahariha@linux.microsoft.com> Cc: Alexander Gordeev <agordeev@linux.ibm.com> Cc: Andrew Lunn <andrew+netdev@lunn.ch> Cc: Anna-Maria Behnsen <anna-maria@linutronix.de> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Christian Borntraeger <borntraeger@linux.ibm.com> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Daniel Mack <daniel@zonque.org> Cc: David Airlie <airlied@gmail.com> Cc: David S. 
Miller <davem@davemloft.net> Cc: Dick Kennedy <dick.kennedy@broadcom.com> Cc: Eric Dumazet <edumazet@google.com> Cc: Florian Fainelli <florian.fainelli@broadcom.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Haojian Zhuang <haojian.zhuang@gmail.com> Cc: Heiko Carstens <hca@linux.ibm.com> Cc: Ilya Dryomov <idryomov@gmail.com> Cc: Jack Wang <jinpu.wang@cloud.ionos.com> Cc: Jakub Kicinski <kuba@kernel.org> Cc: James Bottomley <James.Bottomley@HansenPartnership.com> Cc: James Smart <james.smart@broadcom.com> Cc: Jaroslav Kysela <perex@perex.cz> Cc: Jeff Johnson <jjohnson@kernel.org> Cc: Jeff Johnson <quic_jjohnson@quicinc.com> Cc: Jens Axboe <axboe@kernel.dk> Cc: Jeroen de Borst <jeroendb@google.com> Cc: Jiri Kosina <jikos@kernel.org> Cc: Joe Lawrence <joe.lawrence@redhat.com> Cc: Johan Hedberg <johan.hedberg@gmail.com> Cc: Josh Poimboeuf <jpoimboe@kernel.org> Cc: Jozsef Kadlecsik <kadlec@netfilter.org> Cc: Julia Lawall <julia.lawall@inria.fr> Cc: Kalle Valo <kvalo@kernel.org> Cc: Louis Peens <louis.peens@corigine.com> Cc: Lucas De Marchi <lucas.demarchi@intel.com> Cc: Luiz Augusto von Dentz <luiz.dentz@gmail.com> Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Marcel Holtmann <marcel@holtmann.org> Cc: Martin K. Petersen <martin.petersen@oracle.com> Cc: Maxime Ripard <mripard@kernel.org> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Miroslav Benes <mbenes@suse.cz> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Nicolas Palix <nicolas.palix@imag.fr> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Ofir Bitton <obitton@habana.ai> Cc: Pablo Neira Ayuso <pablo@netfilter.org> Cc: Paolo Abeni <pabeni@redhat.com> Cc: Petr Mladek <pmladek@suse.com> Cc: Praveen Kaligineedi <pkaligineedi@google.com> Cc: Ray Jui <rjui@broadcom.com> Cc: Robert Jarzmik <robert.jarzmik@free.fr> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Russell King <linux@armlinux.org.uk> Cc: Scott Branden <sbranden@broadcom.com> Cc: Shailend Chand <shailend@google.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Simon Horman <horms@kernel.org> Cc: Sven Schnelle <svens@linux.ibm.com> Cc: Takashi Iwai <tiwai@suse.com> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com> Cc: Thomas Zimmermann <tzimmermann@suse.de> Cc: Vasily Gorbik <gor@linux.ibm.com> Cc: Xiubo Li <xiubli@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- sound/usb/line6/toneport.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c index ca2c6f5de407..c073b38cd673 100644 --- a/sound/usb/line6/toneport.c +++ b/sound/usb/line6/toneport.c @@ -386,7 +386,7 @@ static int toneport_setup(struct usb_line6_toneport *toneport) toneport_update_led(toneport); schedule_delayed_work(&toneport->line6.startup_work, - msecs_to_jiffies(TONEPORT_PCM_DELAY * 1000)); + secs_to_jiffies(TONEPORT_PCM_DELAY)); return 0; } From 72b5d1f07d7f14edd49a9596c65708a3913faf65 Mon Sep 17 00:00:00 2001 From: Yunhui Cui <cuiyunhui@bytedance.com> Date: Tue, 10 Dec 2024 17:52:38 +0800 Subject: [PATCH 419/504] watchdog: output this_cpu when printing hard LOCKUP MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When printing "Watchdog detected hard LOCKUP on cpu", also output the detecting CPU. It's more intuitive. 
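For illustration only, the reworked pr_emerg() format in this patch ("CPU%u: Watchdog detected hard LOCKUP on cpu %u\n", this_cpu, cpu) would render along these lines (CPU numbers here are hypothetical, not taken from any real log):

	CPU1: Watchdog detected hard LOCKUP on cpu 5

where CPU1 is the CPU running the hardlockup detector and cpu 5 is the CPU that appears stuck.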
Link: https://lkml.kernel.org/r/20241210095238.63444-1-cuiyunhui@bytedance.com Signed-off-by: Yunhui Cui <cuiyunhui@bytedance.com> Reviewed-by: Douglas Anderson <dianders@chromium.org> Cc: Bitao Hu <yaoma@linux.alibaba.com> Cc: Joel Granados <joel.granados@kernel.org> Cc: John Ogness <john.ogness@linutronix.de> Cc: Liu Song <liusong@linux.alibaba.com> Cc: Song Liu <song@kernel.org> Cc: Thomas Weißschuh <linux@weissschuh.net> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- kernel/watchdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 41e0f7e9fa35..177abb7d0d4e 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -190,7 +190,7 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs) * with printk_cpu_sync_get_irqsave() that we can still at least * get the message about the lockup out. */ - pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", cpu); + pr_emerg("CPU%u: Watchdog detected hard LOCKUP on cpu %u\n", this_cpu, cpu); printk_cpu_sync_get_irqsave(flags); print_modules(); From b48f659f71e74650d8a0602c06c938da1c3c74cd Mon Sep 17 00:00:00 2001 From: Eric Sandeen <sandeen@redhat.com> Date: Mon, 28 Oct 2024 09:41:14 -0500 Subject: [PATCH 420/504] dlmfs: convert to the new mount API Patch series "ocfs2, dlmfs: convert to the new mount API". This patch (of 2): Convert dlmfs to the new mount API. Link: https://lkml.kernel.org/r/20241028144443.609151-1-sandeen@redhat.com Link: https://lkml.kernel.org/r/20241028144443.609151-2-sandeen@redhat.com Signed-off-by: Eric Sandeen <sandeen@redhat.com> Reviewed-by: Goldwyn Rodrigues <rgoldwyn@suse.com> Tested-by: Goldwyn Rodrigues <rgoldwyn@suse.com> Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com> Tested-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Mark Fasheh <mark@fasheh.com> Cc: Joel Becker <jlbec@evilplan.org> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Jun Piao <piaojun@huawei.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/dlmfs/dlmfs.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 7fc0e920eda7..2a7f36643895 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c @@ -20,6 +20,7 @@ #include <linux/module.h> #include <linux/fs.h> +#include <linux/fs_context.h> #include <linux/pagemap.h> #include <linux/types.h> #include <linux/slab.h> @@ -506,9 +507,7 @@ bail: return status; } -static int dlmfs_fill_super(struct super_block * sb, - void * data, - int silent) +static int dlmfs_fill_super(struct super_block *sb, struct fs_context *fc) { sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_blocksize = PAGE_SIZE; @@ -556,17 +555,27 @@ static const struct inode_operations dlmfs_file_inode_operations = { .setattr = dlmfs_file_setattr, }; -static struct dentry *dlmfs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) +static int dlmfs_get_tree(struct fs_context *fc) { - return mount_nodev(fs_type, flags, data, dlmfs_fill_super); + return get_tree_nodev(fc, dlmfs_fill_super); +} + +static const struct fs_context_operations dlmfs_context_ops = { + .get_tree = dlmfs_get_tree, +}; + +static int dlmfs_init_fs_context(struct fs_context *fc) +{ + fc->ops = &dlmfs_context_ops; + + return 0; } static struct file_system_type dlmfs_fs_type = { .owner = THIS_MODULE, .name = "ocfs2_dlmfs", - .mount = dlmfs_mount, .kill_sb = kill_litter_super, + .init_fs_context = 
dlmfs_init_fs_context, }; MODULE_ALIAS_FS("ocfs2_dlmfs"); From a5791b34d68a92cca5054cdaf50c3e34ea886120 Mon Sep 17 00:00:00 2001 From: Eric Sandeen <sandeen@redhat.com> Date: Mon, 28 Oct 2024 09:41:15 -0500 Subject: [PATCH 421/504] ocfs2: convert to the new mount API Convert ocfs2 to the new mount API. Link: https://lkml.kernel.org/r/20241028144443.609151-3-sandeen@redhat.com Signed-off-by: Eric Sandeen <sandeen@redhat.com> Reviewed-by: Goldwyn Rodrigues <rgoldwyn@suse.com> Tested-by: Goldwyn Rodrigues <rgoldwyn@suse.com> Acked-by: Joseph Qi <joseph.qi@linux.alibaba.com> Tested-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/ocfs2_trace.h | 20 +- fs/ocfs2/super.c | 585 +++++++++++++++++++---------------------- 2 files changed, 275 insertions(+), 330 deletions(-) diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h index 0511c69c9fde..54ed1495de9a 100644 --- a/fs/ocfs2/ocfs2_trace.h +++ b/fs/ocfs2/ocfs2_trace.h @@ -1658,34 +1658,34 @@ TRACE_EVENT(ocfs2_remount, ); TRACE_EVENT(ocfs2_fill_super, - TP_PROTO(void *sb, void *data, int silent), - TP_ARGS(sb, data, silent), + TP_PROTO(void *sb, void *fc, int silent), + TP_ARGS(sb, fc, silent), TP_STRUCT__entry( __field(void *, sb) - __field(void *, data) + __field(void *, fc) __field(int, silent) ), TP_fast_assign( __entry->sb = sb; - __entry->data = data; + __entry->fc = fc; __entry->silent = silent; ), TP_printk("%p %p %d", __entry->sb, - __entry->data, __entry->silent) + __entry->fc, __entry->silent) ); TRACE_EVENT(ocfs2_parse_options, - TP_PROTO(int is_remount, char *options), - TP_ARGS(is_remount, options), + TP_PROTO(int is_remount, const char *option), + TP_ARGS(is_remount, option), TP_STRUCT__entry( __field(int, is_remount) - __string(options, options) + __string(option, option) ), TP_fast_assign( __entry->is_remount = is_remount; - __assign_str(options); + __assign_str(option); ), - TP_printk("%d %s", __entry->is_remount, __get_str(options)) + TP_printk("%d %s", __entry->is_remount, __get_str(option)) ); DEFINE_OCFS2_POINTER_EVENT(ocfs2_put_super); diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 5a501adb7c39..e0b91dbaa0ac 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c @@ -19,10 +19,10 @@ #include <linux/blkdev.h> #include <linux/socket.h> #include <linux/inet.h> -#include <linux/parser.h> +#include <linux/fs_parser.h> +#include <linux/fs_context.h> #include <linux/crc32.h> #include <linux/debugfs.h> -#include <linux/mount.h> #include <linux/seq_file.h> #include <linux/quotaops.h> #include <linux/signal.h> @@ -80,17 +80,15 @@ struct mount_options unsigned int resv_level; int dir_resv_level; char cluster_stack[OCFS2_STACK_LABEL_LEN + 1]; + bool user_stack; }; -static int ocfs2_parse_options(struct super_block *sb, char *options, - struct mount_options *mopt, - int is_remount); +static int ocfs2_parse_param(struct fs_context *fc, struct fs_parameter *param); static int ocfs2_check_set_options(struct super_block *sb, struct mount_options *options); static int ocfs2_show_options(struct seq_file *s, struct dentry *root); static void ocfs2_put_super(struct super_block *sb); static int ocfs2_mount_volume(struct super_block *sb); -static int ocfs2_remount(struct super_block *sb, int *flags, char *data); static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err); static int 
ocfs2_initialize_mem_caches(void); static void ocfs2_free_mem_caches(void); @@ -135,7 +133,6 @@ static const struct super_operations ocfs2_sops = { .evict_inode = ocfs2_evict_inode, .sync_fs = ocfs2_sync_fs, .put_super = ocfs2_put_super, - .remount_fs = ocfs2_remount, .show_options = ocfs2_show_options, .quota_read = ocfs2_quota_read, .quota_write = ocfs2_quota_write, @@ -144,15 +141,10 @@ static const struct super_operations ocfs2_sops = { enum { Opt_barrier, - Opt_err_panic, - Opt_err_ro, + Opt_errors, Opt_intr, - Opt_nointr, - Opt_hb_none, - Opt_hb_local, - Opt_hb_global, - Opt_data_ordered, - Opt_data_writeback, + Opt_heartbeat, + Opt_data, Opt_atime_quantum, Opt_slot, Opt_commit, @@ -160,52 +152,64 @@ enum { Opt_localflocks, Opt_stack, Opt_user_xattr, - Opt_nouser_xattr, Opt_inode64, Opt_acl, - Opt_noacl, Opt_usrquota, Opt_grpquota, - Opt_coherency_buffered, - Opt_coherency_full, + Opt_coherency, Opt_resv_level, Opt_dir_resv_level, Opt_journal_async_commit, - Opt_err_cont, - Opt_err, }; -static const match_table_t tokens = { - {Opt_barrier, "barrier=%u"}, - {Opt_err_panic, "errors=panic"}, - {Opt_err_ro, "errors=remount-ro"}, - {Opt_intr, "intr"}, - {Opt_nointr, "nointr"}, - {Opt_hb_none, OCFS2_HB_NONE}, - {Opt_hb_local, OCFS2_HB_LOCAL}, - {Opt_hb_global, OCFS2_HB_GLOBAL}, - {Opt_data_ordered, "data=ordered"}, - {Opt_data_writeback, "data=writeback"}, - {Opt_atime_quantum, "atime_quantum=%u"}, - {Opt_slot, "preferred_slot=%u"}, - {Opt_commit, "commit=%u"}, - {Opt_localalloc, "localalloc=%d"}, - {Opt_localflocks, "localflocks"}, - {Opt_stack, "cluster_stack=%s"}, - {Opt_user_xattr, "user_xattr"}, - {Opt_nouser_xattr, "nouser_xattr"}, - {Opt_inode64, "inode64"}, - {Opt_acl, "acl"}, - {Opt_noacl, "noacl"}, - {Opt_usrquota, "usrquota"}, - {Opt_grpquota, "grpquota"}, - {Opt_coherency_buffered, "coherency=buffered"}, - {Opt_coherency_full, "coherency=full"}, - {Opt_resv_level, "resv_level=%u"}, - {Opt_dir_resv_level, "dir_resv_level=%u"}, - {Opt_journal_async_commit, "journal_async_commit"}, - {Opt_err_cont, "errors=continue"}, - {Opt_err, NULL} +static const struct constant_table ocfs2_param_errors[] = { + {"panic", OCFS2_MOUNT_ERRORS_PANIC}, + {"remount-ro", OCFS2_MOUNT_ERRORS_ROFS}, + {"continue", OCFS2_MOUNT_ERRORS_CONT}, + {} +}; + +static const struct constant_table ocfs2_param_heartbeat[] = { + {"local", OCFS2_MOUNT_HB_LOCAL}, + {"none", OCFS2_MOUNT_HB_NONE}, + {"global", OCFS2_MOUNT_HB_GLOBAL}, + {} +}; + +static const struct constant_table ocfs2_param_data[] = { + {"writeback", OCFS2_MOUNT_DATA_WRITEBACK}, + {"ordered", 0}, + {} +}; + +static const struct constant_table ocfs2_param_coherency[] = { + {"buffered", OCFS2_MOUNT_COHERENCY_BUFFERED}, + {"full", 0}, + {} +}; + +static const struct fs_parameter_spec ocfs2_param_spec[] = { + fsparam_u32 ("barrier", Opt_barrier), + fsparam_enum ("errors", Opt_errors, ocfs2_param_errors), + fsparam_flag_no ("intr", Opt_intr), + fsparam_enum ("heartbeat", Opt_heartbeat, ocfs2_param_heartbeat), + fsparam_enum ("data", Opt_data, ocfs2_param_data), + fsparam_u32 ("atime_quantum", Opt_atime_quantum), + fsparam_u32 ("preferred_slot", Opt_slot), + fsparam_u32 ("commit", Opt_commit), + fsparam_s32 ("localalloc", Opt_localalloc), + fsparam_flag ("localflocks", Opt_localflocks), + fsparam_string ("cluster_stack", Opt_stack), + fsparam_flag_no ("user_xattr", Opt_user_xattr), + fsparam_flag ("inode64", Opt_inode64), + fsparam_flag_no ("acl", Opt_acl), + fsparam_flag ("usrquota", Opt_usrquota), + fsparam_flag ("grpquota", Opt_grpquota), + fsparam_enum 
("coherency", Opt_coherency, ocfs2_param_coherency), + fsparam_u32 ("resv_level", Opt_resv_level), + fsparam_u32 ("dir_resv_level", Opt_dir_resv_level), + fsparam_flag ("journal_async_commit", Opt_journal_async_commit), + {} }; #ifdef CONFIG_DEBUG_FS @@ -600,32 +604,32 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits, return (((unsigned long long)bytes) << bitshift) - trim; } -static int ocfs2_remount(struct super_block *sb, int *flags, char *data) +static int ocfs2_reconfigure(struct fs_context *fc) { int incompat_features; int ret = 0; - struct mount_options parsed_options; + struct mount_options *parsed_options = fc->fs_private; + struct super_block *sb = fc->root->d_sb; struct ocfs2_super *osb = OCFS2_SB(sb); u32 tmp; sync_filesystem(sb); - if (!ocfs2_parse_options(sb, data, &parsed_options, 1) || - !ocfs2_check_set_options(sb, &parsed_options)) { + if (!ocfs2_check_set_options(sb, parsed_options)) { ret = -EINVAL; goto out; } tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | OCFS2_MOUNT_HB_NONE; - if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) { + if ((osb->s_mount_opt & tmp) != (parsed_options->mount_opt & tmp)) { ret = -EINVAL; mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n"); goto out; } if ((osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK) != - (parsed_options.mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) { + (parsed_options->mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) { ret = -EINVAL; mlog(ML_ERROR, "Cannot change data mode on remount\n"); goto out; @@ -634,16 +638,16 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) /* Probably don't want this on remount; it might * mess with other nodes */ if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64) && - (parsed_options.mount_opt & OCFS2_MOUNT_INODE64)) { + (parsed_options->mount_opt & OCFS2_MOUNT_INODE64)) { ret = -EINVAL; mlog(ML_ERROR, "Cannot enable inode64 on remount\n"); goto out; } /* We're going to/from readonly mode. */ - if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) { + if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) { /* Disable quota accounting before remounting RO */ - if (*flags & SB_RDONLY) { + if (fc->sb_flags & SB_RDONLY) { ret = ocfs2_susp_quotas(osb, 0); if (ret < 0) goto out; @@ -657,7 +661,7 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) goto unlock_osb; } - if (*flags & SB_RDONLY) { + if (fc->sb_flags & SB_RDONLY) { sb->s_flags |= SB_RDONLY; osb->osb_flags |= OCFS2_OSB_SOFT_RO; } else { @@ -678,11 +682,11 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data) sb->s_flags &= ~SB_RDONLY; osb->osb_flags &= ~OCFS2_OSB_SOFT_RO; } - trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags); + trace_ocfs2_remount(sb->s_flags, osb->osb_flags, fc->sb_flags); unlock_osb: spin_unlock(&osb->osb_lock); /* Enable quota accounting after remounting RW */ - if (!ret && !(*flags & SB_RDONLY)) { + if (!ret && !(fc->sb_flags & SB_RDONLY)) { if (sb_any_quota_suspended(sb)) ret = ocfs2_susp_quotas(osb, 1); else @@ -701,11 +705,11 @@ unlock_osb: if (!ret) { /* Only save off the new mount options in case of a successful * remount. 
*/ - osb->s_mount_opt = parsed_options.mount_opt; - osb->s_atime_quantum = parsed_options.atime_quantum; - osb->preferred_slot = parsed_options.slot; - if (parsed_options.commit_interval) - osb->osb_commit_interval = parsed_options.commit_interval; + osb->s_mount_opt = parsed_options->mount_opt; + osb->s_atime_quantum = parsed_options->atime_quantum; + osb->preferred_slot = parsed_options->slot; + if (parsed_options->commit_interval) + osb->osb_commit_interval = parsed_options->commit_interval; if (!ocfs2_is_hard_readonly(osb)) ocfs2_set_journal_params(osb); @@ -966,23 +970,18 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb) } } -static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) +static int ocfs2_fill_super(struct super_block *sb, struct fs_context *fc) { struct dentry *root; int status, sector_size; - struct mount_options parsed_options; + struct mount_options *parsed_options = fc->fs_private; struct inode *inode = NULL; struct ocfs2_super *osb = NULL; struct buffer_head *bh = NULL; char nodestr[12]; struct ocfs2_blockcheck_stats stats; - trace_ocfs2_fill_super(sb, data, silent); - - if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) { - status = -EINVAL; - goto out; - } + trace_ocfs2_fill_super(sb, fc, fc->sb_flags & SB_SILENT); /* probe for superblock */ status = ocfs2_sb_probe(sb, &bh, §or_size, &stats); @@ -999,24 +998,24 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent) osb = OCFS2_SB(sb); - if (!ocfs2_check_set_options(sb, &parsed_options)) { + if (!ocfs2_check_set_options(sb, parsed_options)) { status = -EINVAL; goto out_super; } - osb->s_mount_opt = parsed_options.mount_opt; - osb->s_atime_quantum = parsed_options.atime_quantum; - osb->preferred_slot = parsed_options.slot; - osb->osb_commit_interval = parsed_options.commit_interval; + osb->s_mount_opt = parsed_options->mount_opt; + osb->s_atime_quantum = parsed_options->atime_quantum; + osb->preferred_slot = parsed_options->slot; + osb->osb_commit_interval = parsed_options->commit_interval; - ocfs2_la_set_sizes(osb, parsed_options.localalloc_opt); - osb->osb_resv_level = parsed_options.resv_level; - osb->osb_dir_resv_level = parsed_options.resv_level; - if (parsed_options.dir_resv_level == -1) - osb->osb_dir_resv_level = parsed_options.resv_level; + ocfs2_la_set_sizes(osb, parsed_options->localalloc_opt); + osb->osb_resv_level = parsed_options->resv_level; + osb->osb_dir_resv_level = parsed_options->resv_level; + if (parsed_options->dir_resv_level == -1) + osb->osb_dir_resv_level = parsed_options->resv_level; else - osb->osb_dir_resv_level = parsed_options.dir_resv_level; + osb->osb_dir_resv_level = parsed_options->dir_resv_level; - status = ocfs2_verify_userspace_stack(osb, &parsed_options); + status = ocfs2_verify_userspace_stack(osb, parsed_options); if (status) goto out_super; @@ -1180,27 +1179,72 @@ out: return status; } -static struct dentry *ocfs2_mount(struct file_system_type *fs_type, - int flags, - const char *dev_name, - void *data) +static int ocfs2_get_tree(struct fs_context *fc) { - return mount_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super); + return get_tree_bdev(fc, ocfs2_fill_super); +} + +static void ocfs2_free_fc(struct fs_context *fc) +{ + kfree(fc->fs_private); +} + +static const struct fs_context_operations ocfs2_context_ops = { + .parse_param = ocfs2_parse_param, + .get_tree = ocfs2_get_tree, + .reconfigure = ocfs2_reconfigure, + .free = ocfs2_free_fc, +}; + +static int ocfs2_init_fs_context(struct fs_context *fc) +{ + struct 
mount_options *mopt; + + mopt = kzalloc(sizeof(struct mount_options), GFP_KERNEL); + if (!mopt) + return -EINVAL; + + mopt->commit_interval = 0; + mopt->mount_opt = OCFS2_MOUNT_NOINTR; + mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; + mopt->slot = OCFS2_INVALID_SLOT; + mopt->localalloc_opt = -1; + mopt->cluster_stack[0] = '\0'; + mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL; + mopt->dir_resv_level = -1; + + fc->fs_private = mopt; + fc->ops = &ocfs2_context_ops; + + return 0; } static struct file_system_type ocfs2_fs_type = { .owner = THIS_MODULE, .name = "ocfs2", - .mount = ocfs2_mount, .kill_sb = kill_block_super, .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE, - .next = NULL + .next = NULL, + .init_fs_context = ocfs2_init_fs_context, + .parameters = ocfs2_param_spec, }; MODULE_ALIAS_FS("ocfs2"); static int ocfs2_check_set_options(struct super_block *sb, struct mount_options *options) { + if (options->user_stack == 0) { + u32 tmp; + + /* Ensure only one heartbeat mode */ + tmp = options->mount_opt & (OCFS2_MOUNT_HB_LOCAL | + OCFS2_MOUNT_HB_GLOBAL | + OCFS2_MOUNT_HB_NONE); + if (hweight32(tmp) != 1) { + mlog(ML_ERROR, "Invalid heartbeat mount options\n"); + return 0; + } + } if (options->mount_opt & OCFS2_MOUNT_USRQUOTA && !OCFS2_HAS_RO_COMPAT_FEATURE(sb, OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) { @@ -1232,241 +1276,142 @@ static int ocfs2_check_set_options(struct super_block *sb, return 1; } -static int ocfs2_parse_options(struct super_block *sb, - char *options, - struct mount_options *mopt, - int is_remount) +static int ocfs2_parse_param(struct fs_context *fc, struct fs_parameter *param) { - int status, user_stack = 0; - char *p; - u32 tmp; - int token, option; - substring_t args[MAX_OPT_ARGS]; + struct fs_parse_result result; + int opt; + struct mount_options *mopt = fc->fs_private; + bool is_remount = (fc->purpose & FS_CONTEXT_FOR_RECONFIGURE); - trace_ocfs2_parse_options(is_remount, options ? 
options : "(none)"); + trace_ocfs2_parse_options(is_remount, param->key); - mopt->commit_interval = 0; - mopt->mount_opt = OCFS2_MOUNT_NOINTR; - mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM; - mopt->slot = OCFS2_INVALID_SLOT; - mopt->localalloc_opt = -1; - mopt->cluster_stack[0] = '\0'; - mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL; - mopt->dir_resv_level = -1; + opt = fs_parse(fc, ocfs2_param_spec, param, &result); + if (opt < 0) + return opt; - if (!options) { - status = 1; - goto bail; - } - - while ((p = strsep(&options, ",")) != NULL) { - if (!*p) - continue; - - token = match_token(p, tokens, args); - switch (token) { - case Opt_hb_local: - mopt->mount_opt |= OCFS2_MOUNT_HB_LOCAL; - break; - case Opt_hb_none: - mopt->mount_opt |= OCFS2_MOUNT_HB_NONE; - break; - case Opt_hb_global: - mopt->mount_opt |= OCFS2_MOUNT_HB_GLOBAL; - break; - case Opt_barrier: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option) - mopt->mount_opt |= OCFS2_MOUNT_BARRIER; - else - mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER; - break; - case Opt_intr: - mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR; - break; - case Opt_nointr: + switch (opt) { + case Opt_heartbeat: + mopt->mount_opt |= result.uint_32; + break; + case Opt_barrier: + if (result.uint_32) + mopt->mount_opt |= OCFS2_MOUNT_BARRIER; + else + mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER; + break; + case Opt_intr: + if (result.negated) mopt->mount_opt |= OCFS2_MOUNT_NOINTR; - break; - case Opt_err_panic: - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT; - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS; - mopt->mount_opt |= OCFS2_MOUNT_ERRORS_PANIC; - break; - case Opt_err_ro: - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT; - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC; - mopt->mount_opt |= OCFS2_MOUNT_ERRORS_ROFS; - break; - case Opt_err_cont: - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS; - mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC; - mopt->mount_opt |= OCFS2_MOUNT_ERRORS_CONT; - break; - case Opt_data_ordered: - mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK; - break; - case Opt_data_writeback: - mopt->mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK; - break; - case Opt_user_xattr: - mopt->mount_opt &= ~OCFS2_MOUNT_NOUSERXATTR; - break; - case Opt_nouser_xattr: + else + mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR; + break; + case Opt_errors: + mopt->mount_opt &= ~(OCFS2_MOUNT_ERRORS_CONT | + OCFS2_MOUNT_ERRORS_ROFS | + OCFS2_MOUNT_ERRORS_PANIC); + mopt->mount_opt |= result.uint_32; + break; + case Opt_data: + mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK; + mopt->mount_opt |= result.uint_32; + break; + case Opt_user_xattr: + if (result.negated) mopt->mount_opt |= OCFS2_MOUNT_NOUSERXATTR; - break; - case Opt_atime_quantum: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option >= 0) - mopt->atime_quantum = option; - break; - case Opt_slot: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option) - mopt->slot = (u16)option; - break; - case Opt_commit: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option < 0) - return 0; - if (option == 0) - option = JBD2_DEFAULT_MAX_COMMIT_AGE; - mopt->commit_interval = HZ * option; - break; - case Opt_localalloc: - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option >= 0) - mopt->localalloc_opt = option; - break; - case Opt_localflocks: - /* - * Changing this during remount could race - * flock() requests, or "unbalance" existing - * ones (e.g., a lock is taken in one mode but - * dropped in the other). 
If users care enough - * to flip locking modes during remount, we - * could add a "local" flag to individual - * flock structures for proper tracking of - * state. - */ - if (!is_remount) - mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS; - break; - case Opt_stack: - /* Check both that the option we were passed - * is of the right length and that it is a proper - * string of the right length. - */ - if (((args[0].to - args[0].from) != - OCFS2_STACK_LABEL_LEN) || - (strnlen(args[0].from, - OCFS2_STACK_LABEL_LEN) != - OCFS2_STACK_LABEL_LEN)) { - mlog(ML_ERROR, - "Invalid cluster_stack option\n"); - status = 0; - goto bail; - } - memcpy(mopt->cluster_stack, args[0].from, - OCFS2_STACK_LABEL_LEN); - mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; - /* - * Open code the memcmp here as we don't have - * an osb to pass to - * ocfs2_userspace_stack(). - */ - if (memcmp(mopt->cluster_stack, - OCFS2_CLASSIC_CLUSTER_STACK, - OCFS2_STACK_LABEL_LEN)) - user_stack = 1; - break; - case Opt_inode64: - mopt->mount_opt |= OCFS2_MOUNT_INODE64; - break; - case Opt_usrquota: - mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA; - break; - case Opt_grpquota: - mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA; - break; - case Opt_coherency_buffered: - mopt->mount_opt |= OCFS2_MOUNT_COHERENCY_BUFFERED; - break; - case Opt_coherency_full: - mopt->mount_opt &= ~OCFS2_MOUNT_COHERENCY_BUFFERED; - break; - case Opt_acl: - mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL; - mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL; - break; - case Opt_noacl: + else + mopt->mount_opt &= ~OCFS2_MOUNT_NOUSERXATTR; + break; + case Opt_atime_quantum: + mopt->atime_quantum = result.uint_32; + break; + case Opt_slot: + if (result.uint_32) + mopt->slot = (u16)result.uint_32; + break; + case Opt_commit: + if (result.uint_32 == 0) + mopt->commit_interval = HZ * JBD2_DEFAULT_MAX_COMMIT_AGE; + else + mopt->commit_interval = HZ * result.uint_32; + break; + case Opt_localalloc: + if (result.int_32 >= 0) + mopt->localalloc_opt = result.int_32; + break; + case Opt_localflocks: + /* + * Changing this during remount could race flock() requests, or + * "unbalance" existing ones (e.g., a lock is taken in one mode + * but dropped in the other). If users care enough to flip + * locking modes during remount, we could add a "local" flag to + * individual flock structures for proper tracking of state. + */ + if (!is_remount) + mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS; + break; + case Opt_stack: + /* Check both that the option we were passed is of the right + * length and that it is a proper string of the right length. + */ + if (strlen(param->string) != OCFS2_STACK_LABEL_LEN) { + mlog(ML_ERROR, "Invalid cluster_stack option\n"); + return -EINVAL; + } + memcpy(mopt->cluster_stack, param->string, OCFS2_STACK_LABEL_LEN); + mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; + /* + * Open code the memcmp here as we don't have an osb to pass + * to ocfs2_userspace_stack(). 
+ */ + if (memcmp(mopt->cluster_stack, + OCFS2_CLASSIC_CLUSTER_STACK, + OCFS2_STACK_LABEL_LEN)) + mopt->user_stack = 1; + break; + case Opt_inode64: + mopt->mount_opt |= OCFS2_MOUNT_INODE64; + break; + case Opt_usrquota: + mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA; + break; + case Opt_grpquota: + mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA; + break; + case Opt_coherency: + mopt->mount_opt &= ~OCFS2_MOUNT_COHERENCY_BUFFERED; + mopt->mount_opt |= result.uint_32; + break; + case Opt_acl: + if (result.negated) { mopt->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL; mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL; - break; - case Opt_resv_level: - if (is_remount) - break; - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option >= OCFS2_MIN_RESV_LEVEL && - option < OCFS2_MAX_RESV_LEVEL) - mopt->resv_level = option; - break; - case Opt_dir_resv_level: - if (is_remount) - break; - if (match_int(&args[0], &option)) { - status = 0; - goto bail; - } - if (option >= OCFS2_MIN_RESV_LEVEL && - option < OCFS2_MAX_RESV_LEVEL) - mopt->dir_resv_level = option; - break; - case Opt_journal_async_commit: - mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT; - break; - default: - mlog(ML_ERROR, - "Unrecognized mount option \"%s\" " - "or missing value\n", p); - status = 0; - goto bail; + } else { + mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL; + mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL; } + break; + case Opt_resv_level: + if (is_remount) + break; + if (result.uint_32 >= OCFS2_MIN_RESV_LEVEL && + result.uint_32 < OCFS2_MAX_RESV_LEVEL) + mopt->resv_level = result.uint_32; + break; + case Opt_dir_resv_level: + if (is_remount) + break; + if (result.uint_32 >= OCFS2_MIN_RESV_LEVEL && + result.uint_32 < OCFS2_MAX_RESV_LEVEL) + mopt->dir_resv_level = result.uint_32; + break; + case Opt_journal_async_commit: + mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT; + break; + default: + return -EINVAL; } - if (user_stack == 0) { - /* Ensure only one heartbeat mode */ - tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | - OCFS2_MOUNT_HB_GLOBAL | - OCFS2_MOUNT_HB_NONE); - if (hweight32(tmp) != 1) { - mlog(ML_ERROR, "Invalid heartbeat mount options\n"); - status = 0; - goto bail; - } - } - - status = 1; - -bail: - return status; + return 0; } static int ocfs2_show_options(struct seq_file *s, struct dentry *root) From 65c3ffd95ab858cd3759e63422dd2083bb8bb10a Mon Sep 17 00:00:00 2001 From: Shivam Chaudhary <cvam0000@gmail.com> Date: Wed, 11 Dec 2024 21:19:03 +0530 Subject: [PATCH 422/504] kernel-wide: add explicity||explicitly to spelling.txt Correct the spelling dictionary so that future instances will be caught by checkpatch, and fix the instances found. 
Link: https://lkml.kernel.org/r/20241211154903.47027-1-cvam0000@gmail.com Signed-off-by: Shivam Chaudhary <cvam0000@gmail.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Dennis Dalessandro <dennis.dalessandro@cornelisnetworks.com> Cc: Jason Gunthorpe <jgg@ziepe.ca> Cc: Leon Romanovsky <leon@kernel.org> Cc: Madhavan Srinivasan <maddy@linux.ibm.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Naveen N Rao <naveen@kernel.org> Cc: Nicholas Piggin <npiggin@gmail.com> Cc: Shivam Chaudhary <cvam0000@gmail.com> Cc: Colin Ian King <colin.i.king@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- arch/powerpc/kvm/book3s_hv.c | 2 +- drivers/infiniband/hw/hfi1/iowait.h | 2 +- drivers/infiniband/hw/usnic/usnic_abi.h | 2 +- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c | 2 +- drivers/scsi/cxlflash/superpipe.c | 2 +- scripts/spelling.txt | 1 + tools/testing/selftests/pidfd/pidfd_test.c | 2 +- 7 files changed, 7 insertions(+), 6 deletions(-) diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 25429905ae90..86bff159c51e 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c @@ -4957,7 +4957,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, * states are synchronized from L0 to L1. L1 needs to inform L0 about * MER=1 only when there are pending external interrupts. * In the above if check, MER bit is set if there are pending - * external interrupts. Hence, explicity mask off MER bit + * external interrupts. Hence, explicitly mask off MER bit * here as otherwise it may generate spurious interrupts in L2 KVM * causing an endless loop, which results in L2 guest getting hung. */ diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h index 49805a24bb0a..7259f4f55700 100644 --- a/drivers/infiniband/hw/hfi1/iowait.h +++ b/drivers/infiniband/hw/hfi1/iowait.h @@ -92,7 +92,7 @@ struct iowait_work { * * The lock field is used by waiters to record * the seqlock_t that guards the list head. - * Waiters explicity know that, but the destroy + * Waiters explicitly know that, but the destroy * code that unwaits QPs does not. */ struct iowait { diff --git a/drivers/infiniband/hw/usnic/usnic_abi.h b/drivers/infiniband/hw/usnic/usnic_abi.h index 7fe9502ce8d3..86a82a4da0aa 100644 --- a/drivers/infiniband/hw/usnic/usnic_abi.h +++ b/drivers/infiniband/hw/usnic/usnic_abi.h @@ -72,7 +72,7 @@ struct usnic_ib_create_qp_resp { u64 bar_bus_addr; u32 bar_len; /* - * WQ, RQ, CQ are explicity specified bc exposing a generic resources inteface + * WQ, RQ, CQ are explicitly specified bc exposing a generic resources inteface * expands the scope of ABI to many files. 
*/ u32 wq_cnt; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index 0949e7975ff1..b70d20128f98 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -1810,7 +1810,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt) rfi->cur_idx = cur_idx; } } else { - /* explicity window move updating the expected index */ + /* explicitly window move updating the expected index */ exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET]; brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n", diff --git a/drivers/scsi/cxlflash/superpipe.c b/drivers/scsi/cxlflash/superpipe.c index b375509d1470..97631f48e19d 100644 --- a/drivers/scsi/cxlflash/superpipe.c +++ b/drivers/scsi/cxlflash/superpipe.c @@ -966,7 +966,7 @@ static int cxlflash_disk_detach(struct scsi_device *sdev, void *detach) * * This routine is the release handler for the fops registered with * the CXL services on an initial attach for a context. It is called - * when a close (explicity by the user or as part of a process tear + * when a close (explicitly by the user or as part of a process tear * down) is performed on the adapter file descriptor returned to the * user. The user should be aware that explicitly performing a close * considered catastrophic and subsequent usage of the superpipe API diff --git a/scripts/spelling.txt b/scripts/spelling.txt index 2decc50f5a6e..a290db720b0f 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt @@ -678,6 +678,7 @@ exmaple||example expecially||especially experies||expires explicite||explicit +explicity||explicitly explicitely||explicitly explict||explicit explictely||explicitly diff --git a/tools/testing/selftests/pidfd/pidfd_test.c b/tools/testing/selftests/pidfd/pidfd_test.c index 9faa686f90e4..e9728e86b4f2 100644 --- a/tools/testing/selftests/pidfd/pidfd_test.c +++ b/tools/testing/selftests/pidfd/pidfd_test.c @@ -497,7 +497,7 @@ static int child_poll_leader_exit_test(void *args) pthread_create(&t2, NULL, test_pidfd_poll_leader_exit_thread, NULL); /* - * glibc exit calls exit_group syscall, so explicity call exit only + * glibc exit calls exit_group syscall, so explicitly call exit only * so that only the group leader exits, leaving the threads alone. */ *child_exit_secs = time(NULL); From 528a5ef711129d492d72262f35e847637736ce47 Mon Sep 17 00:00:00 2001 From: Kemeng Shi <shikemeng@huaweicloud.com> Date: Fri, 13 Dec 2024 20:25:19 +0800 Subject: [PATCH 423/504] Xarray: do not return sibling entries from xas_find_marked() Patch series "Fixes and cleanups to xarray", v3. This series contains some random fixes and cleanups to xarray. Patch 1-2 are fixes and patch 3-6 are cleanups. More details can be found in respective patches. 
This patch (of 5): Similar to issue fixed in commit cbc02854331ed ("XArray: Do not return sibling entries from xa_load()"), we may return sibling entries from xas_find_marked as following: Thread A: Thread B: xa_store_range(xa, entry, 6, 7, gfp); xa_set_mark(xa, 6, mark) XA_STATE(xas, xa, 6); xas_find_marked(&xas, 7, mark); offset = xas_find_chunk(xas, advance, mark); [offset is 6 which points to a valid entry] xa_store_range(xa, entry, 4, 7, gfp); entry = xa_entry(xa, node, 6); [entry is a sibling of 4] if (!xa_is_node(entry)) return entry; Skip sibling entry like xas_find() does to protect caller from seeing sibling entry from xas_find_marked() or caller may use sibling entry as a valid entry and crash the kernel. Besides, load_race() test is modified to catch mentioned issue and modified load_race() only passes after this fix is merged. Here is an example how this bug could be triggerred in theory in nfs which enables large folio in mapping: Let's take a look at involved racer: 1. How pages could be created and dirtied in nfs. write ksys_write vfs_write new_sync_write nfs_file_write generic_perform_write nfs_write_begin fgf_set_order __filemap_get_folio nfs_write_end nfs_update_folio nfs_writepage_setup nfs_mark_request_dirty filemap_dirty_folio __folio_mark_dirty __xa_set_mark 2. How dirty pages could be deleted in nfs. ioctl do_vfs_ioctl file_ioctl ioctl_preallocate vfs_fallocate nfs42_fallocate nfs42_proc_deallocate truncate_pagecache_range truncate_inode_pages_range truncate_inode_folio filemap_remove_folio page_cache_delete xas_store(&xas, NULL); 3. How dirty pages could be lockless searched sync_file_range ksys_sync_file_range __filemap_fdatawrite_range filemap_fdatawrite_wbc do_writepages writeback_use_writepage writeback_iter writeback_get_folio filemap_get_folios_tag find_get_entry folio = xas_find_marked() folio_try_get(folio) In theory, kernel will crash as following: 1.Create 2.Search 3.Delete /* write page 2,3 */ write ... nfs_write_begin fgf_set_order __filemap_get_folio ... /* index = 2, order = 1 */ xa_store(&xas, folio) nfs_write_end ... __folio_mark_dirty /* sync page 2 and page 3 */ sync_file_range ... find_get_entry folio = xas_find_marked() /* offset will be 2 */ offset = xas_find_chunk() /* delete page 2 and page 3 */ ioctl ... xas_store(&xas, NULL); /* write page 0-3 */ write ... nfs_write_begin fgf_set_order __filemap_get_folio ... /* index = 0, order = 2 */ xa_store(&xas, folio) nfs_write_end ... 
__folio_mark_dirty /* get sibling entry from offset 2 */ entry = xa_entry(.., 2) /* use sibling entry as folio and crash kernel */ folio_try_get(folio) Link: https://lkml.kernel.org/r/20241218154613.58754-2-shikemeng@huaweicloud.com Link: https://lkml.kernel.org/r/20241213122523.12764-1-shikemeng@huaweicloud.com Link: https://lkml.kernel.org/r/20241213122523.12764-2-shikemeng@huaweicloud.com Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- lib/xarray.c | 2 ++ tools/testing/radix-tree/multiorder.c | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/lib/xarray.c b/lib/xarray.c index 5da8d18899a1..19a9ea183c2d 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1387,6 +1387,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark) entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset); if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK)) continue; + if (xa_is_sibling(entry)) + continue; if (!xa_is_node(entry)) return entry; xas->xa_node = xa_to_node(entry); diff --git a/tools/testing/radix-tree/multiorder.c b/tools/testing/radix-tree/multiorder.c index cffaf2245d4f..eaff1b036989 100644 --- a/tools/testing/radix-tree/multiorder.c +++ b/tools/testing/radix-tree/multiorder.c @@ -227,6 +227,7 @@ static void *load_creator(void *ptr) unsigned long index = (3 << RADIX_TREE_MAP_SHIFT) - (1 << order); item_insert_order(tree, index, order); + xa_set_mark(tree, index, XA_MARK_1); item_delete_rcu(tree, index); } } @@ -242,8 +243,11 @@ static void *load_worker(void *ptr) rcu_register_thread(); while (!stop_iteration) { + unsigned long find_index = (2 << RADIX_TREE_MAP_SHIFT) + 1; struct item *item = xa_load(ptr, index); assert(!xa_is_internal(item)); + item = xa_find(ptr, &find_index, index, XA_MARK_1); + assert(!xa_is_internal(item)); } rcu_unregister_thread(); From f22e86663e2a788ec03ce93a836af75e200feda2 Mon Sep 17 00:00:00 2001 From: Kemeng Shi <shikemeng@huaweicloud.com> Date: Fri, 13 Dec 2024 20:25:20 +0800 Subject: [PATCH 424/504] Xarray: move forward index correctly in xas_pause() After xas_load(), xas->index could point to mid of found multi-index entry and xas->index's bits under node->shift maybe non-zero. The afterward xas_pause() will move forward xas->index with xa->node->shift with bits under node->shift un-masked and thus skip some index unexpectedly. Consider following case: Assume XA_CHUNK_SHIFT is 4. xa_store_range(xa, 16, 31, ...) xa_store(xa, 32, ...) XA_STATE(xas, xa, 17); xas_for_each(&xas,...) xas_load(&xas) /* xas->index = 17, xas->xa_offset = 1, xas->xa_node->xa_shift = 4 */ xas_pause() /* xas->index = 33, xas->xa_offset = 2, xas->xa_node->xa_shift = 4 */ As we can see, index of 32 is skipped unexpectedly. Fix this by mask bit under node->xa_shift when move forward index in xas_pause(). For now, this will not cause serious problems. Only minor problem like cachestat return less number of page status could happen. 
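To make the skipped index concrete, the arithmetic for the example above (XA_CHUNK_SHIFT = 4, xas->index = 17, node->shift = 4, xa_offset = 1, offset advanced to 2) works out as follows; this is only an annotated walk-through of the code touched by this patch, not additional kernel code:

	/* before the fix: only the offset delta is applied */
	xas->xa_index += (offset - xas->xa_offset) << node->shift;
		/* 17 + ((2 - 1) << 4) = 33, so index 32 is silently skipped */

	/* after the fix: clear the bits below node->shift first */
	xas->xa_index &= ~0UL << node->shift;	/* 17 & ~15 = 16 */
	xas->xa_index += (offset - xas->xa_offset) << node->shift;
		/* 16 + ((2 - 1) << 4) = 32, the next index to visit */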
Link: https://lkml.kernel.org/r/20241213122523.12764-3-shikemeng@huaweicloud.com Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- lib/test_xarray.c | 35 +++++++++++++++++++++++++++++++++++ lib/xarray.c | 1 + 2 files changed, 36 insertions(+) diff --git a/lib/test_xarray.c b/lib/test_xarray.c index b6cac747ec46..eab5971d0a48 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -1511,6 +1511,41 @@ static noinline void check_pause(struct kunit *test) XA_BUG_ON(xa, count != order_limit); xa_destroy(xa); + + index = 0; + for (order = XA_CHUNK_SHIFT; order > 0; order--) { + XA_BUG_ON(xa, xa_store_order(xa, index, order, + xa_mk_index(index), GFP_KERNEL)); + index += 1UL << order; + } + + index = 0; + count = 0; + xas_set(&xas, 0); + rcu_read_lock(); + xas_for_each(&xas, entry, ULONG_MAX) { + XA_BUG_ON(xa, entry != xa_mk_index(index)); + index += 1UL << (XA_CHUNK_SHIFT - count); + count++; + } + rcu_read_unlock(); + XA_BUG_ON(xa, count != XA_CHUNK_SHIFT); + + index = 0; + count = 0; + xas_set(&xas, XA_CHUNK_SIZE / 2 + 1); + rcu_read_lock(); + xas_for_each(&xas, entry, ULONG_MAX) { + XA_BUG_ON(xa, entry != xa_mk_index(index)); + index += 1UL << (XA_CHUNK_SHIFT - count); + count++; + xas_pause(&xas); + } + rcu_read_unlock(); + XA_BUG_ON(xa, count != XA_CHUNK_SHIFT); + + xa_destroy(xa); + } static noinline void check_move_tiny(struct kunit *test) diff --git a/lib/xarray.c b/lib/xarray.c index 19a9ea183c2d..091e2c927915 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1152,6 +1152,7 @@ void xas_pause(struct xa_state *xas) if (!xa_is_sibling(xa_entry(xas->xa, node, offset))) break; } + xas->xa_index &= ~0UL << node->shift; xas->xa_index += (offset - xas->xa_offset) << node->shift; if (xas->xa_index == 0) xas->xa_node = XAS_BOUNDS; From 2f984bc655b8c3aed4c90feab7cad541196b4e8c Mon Sep 17 00:00:00 2001 From: Kemeng Shi <shikemeng@huaweicloud.com> Date: Fri, 13 Dec 2024 20:25:21 +0800 Subject: [PATCH 425/504] Xarray: distinguish large entries correctly in xas_split_alloc() We don't support large entries which expand two more level xa_node in split. For case "xas->xa_shift + 2 * XA_CHUNK_SHIFT == order", we also need two level of xa_node to expand. Distinguish entry as large entry in case "xas->xa_shift + 2 * XA_CHUNK_SHIFT == order". As max order of folio in pagecache (MAX_PAGECACHE_ORDER) is <= (XA_CHUNK_SHIFT * 2 - 1), this change is more likely a cleanup... 
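As a concrete check of the new boundary, assume the usual XA_CHUNK_SHIFT of 6 and xas->xa_shift == 0 (these values are illustrative, not taken from the patch):

	order == xas->xa_shift + 2 * XA_CHUNK_SHIFT == 0 + 2 * 6 == 12

Splitting such an order-12 entry down to single-index entries would need two extra levels of xa_node, which xas_split_alloc() cannot allocate. The old test (... < order) only rejected order >= 13; the new (... <= order) also rejects this order-12 case. Since MAX_PAGECACHE_ORDER <= XA_CHUNK_SHIFT * 2 - 1 (11 here), the page cache never reaches this boundary, which is why the change is effectively a cleanup/hardening fix.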
Link: https://lkml.kernel.org/r/20241213122523.12764-4-shikemeng@huaweicloud.com Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- lib/xarray.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/xarray.c b/lib/xarray.c index 091e2c927915..ecd2e4f71aa8 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -1027,7 +1027,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order, unsigned int mask = xas->xa_sibs; /* XXX: no support for splitting really large entries yet */ - if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order)) + if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order)) goto nomem; if (xas->xa_shift + XA_CHUNK_SHIFT > order) return; From f51e20d11af3037fbf5dcdf36920015763d3a3d8 Mon Sep 17 00:00:00 2001 From: Kemeng Shi <shikemeng@huaweicloud.com> Date: Fri, 13 Dec 2024 20:25:22 +0800 Subject: [PATCH 426/504] Xarray: remove repeat check in xas_squash_marks() Caller of xas_squash_marks() has ensured xas->xa_sibs is non-zero. Just remove repeat check of xas->xa_sibs in xas_squash_marks(). Link: https://lkml.kernel.org/r/20241213122523.12764-5-shikemeng@huaweicloud.com Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- lib/xarray.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/xarray.c b/lib/xarray.c index ecd2e4f71aa8..2386423865a0 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -128,9 +128,6 @@ static void xas_squash_marks(const struct xa_state *xas) unsigned int mark = 0; unsigned int limit = xas->xa_offset + xas->xa_sibs + 1; - if (!xas->xa_sibs) - return; - do { unsigned long *marks = xas->xa_node->marks[mark]; if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit) From 7e618a7051539aa95cdea873fa72cabd25713a18 Mon Sep 17 00:00:00 2001 From: Kemeng Shi <shikemeng@huaweicloud.com> Date: Fri, 13 Dec 2024 20:25:23 +0800 Subject: [PATCH 427/504] Xarray: use xa_mark_t in xas_squash_marks() to keep code consistent Besides xas_squash_marks(), all functions use xa_mark_t type to iterate all possible marks. Use xa_mark_t in xas_squash_marks() to keep code consistent. 
Link: https://lkml.kernel.org/r/20241213122523.12764-6-shikemeng@huaweicloud.com Signed-off-by: Kemeng Shi <shikemeng@huaweicloud.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Baolin Wang <baolin.wang@linux.alibaba.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- lib/xarray.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/lib/xarray.c b/lib/xarray.c index 2386423865a0..116e9286c64e 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -125,16 +125,20 @@ static inline void node_mark_all(struct xa_node *node, xa_mark_t mark) */ static void xas_squash_marks(const struct xa_state *xas) { - unsigned int mark = 0; + xa_mark_t mark = 0; unsigned int limit = xas->xa_offset + xas->xa_sibs + 1; - do { - unsigned long *marks = xas->xa_node->marks[mark]; - if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit) - continue; - __set_bit(xas->xa_offset, marks); - bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs); - } while (mark++ != (__force unsigned)XA_MARK_MAX); + for (;;) { + unsigned long *marks = node_marks(xas->xa_node, mark); + + if (find_next_bit(marks, limit, xas->xa_offset + 1) != limit) { + __set_bit(xas->xa_offset, marks); + bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs); + } + if (mark == XA_MARK_MAX) + break; + mark_inc(mark); + } } /* extracts the offset within this node from the index */ From 11152f3b721cb17ae1190dd7e2a5f75c4c0718c2 Mon Sep 17 00:00:00 2001 From: Tamir Duberstein <tamird@gmail.com> Date: Tue, 5 Nov 2024 08:03:42 -0400 Subject: [PATCH 428/504] XArray: minor documentation improvements - Replace "they" with "you" where "you" is used in the preceding sentence fragment. - Mention `xa_erase` in discussion of multi-index entries. Split this into a separate sentence. - Add "call" parentheses on "xa_store" for consistency and linkification. - Add caveat that `xa_store` and `xa_erase` are not equivalent in the presence of `XA_FLAGS_ALLOC`. Link: https://lkml.kernel.org/r/20241105-xarray-documentation-v5-1-8e1702321b41@gmail.com Signed-off-by: Tamir Duberstein <tamird@gmail.com> Acked-by: Randy Dunlap <rdunlap@infradead.org> Reviewed-by: Bagas Sanjaya <bagasdotme@gmail.com> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Matthew Wilcox <willy@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- Documentation/core-api/xarray.rst | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/Documentation/core-api/xarray.rst b/Documentation/core-api/xarray.rst index 77e0ece2b1d6..f6a3eef4fe7f 100644 --- a/Documentation/core-api/xarray.rst +++ b/Documentation/core-api/xarray.rst @@ -42,8 +42,8 @@ call xa_tag_pointer() to create an entry with a tag, xa_untag_pointer() to turn a tagged entry back into an untagged pointer and xa_pointer_tag() to retrieve the tag of an entry. Tagged pointers use the same bits that are used to distinguish value entries from normal pointers, so you must -decide whether they want to store value entries or tagged pointers in -any particular XArray. +decide whether you want to store value entries or tagged pointers in any +particular XArray. The XArray does not support storing IS_ERR() pointers as some conflict with value entries or internal entries. @@ -52,8 +52,9 @@ An unusual feature of the XArray is the ability to create entries which occupy a range of indices. Once stored to, looking up any index in the range will return the same entry as looking up any other index in the range. Storing to any index will store to all of them. 
Multi-index -entries can be explicitly split into smaller entries, or storing ``NULL`` -into any entry will cause the XArray to forget about the range. +entries can be explicitly split into smaller entries. Unsetting (using +xa_erase() or xa_store() with ``NULL``) any entry will cause the XArray +to forget about the range. Normal API ========== @@ -63,13 +64,14 @@ for statically allocated XArrays or xa_init() for dynamically allocated ones. A freshly-initialised XArray contains a ``NULL`` pointer at every index. -You can then set entries using xa_store() and get entries -using xa_load(). xa_store will overwrite any entry with the -new entry and return the previous entry stored at that index. You can -use xa_erase() instead of calling xa_store() with a -``NULL`` entry. There is no difference between an entry that has never -been stored to, one that has been erased and one that has most recently -had ``NULL`` stored to it. +You can then set entries using xa_store() and get entries using +xa_load(). xa_store() will overwrite any entry with the new entry and +return the previous entry stored at that index. You can unset entries +using xa_erase() or by setting the entry to ``NULL`` using xa_store(). +There is no difference between an entry that has never been stored to +and one that has been erased with xa_erase(); an entry that has most +recently had ``NULL`` stored to it is also equivalent except if the +XArray was initialized with ``XA_FLAGS_ALLOC``. You can conditionally replace an entry at an index by using xa_cmpxchg(). Like cmpxchg(), it will only succeed if From 243bfc0ca762ff14c2b73acca56e53be155e419d Mon Sep 17 00:00:00 2001 From: Luis Felipe Hernandez <luis.hernandez093@gmail.com> Date: Thu, 12 Dec 2024 23:26:50 -0500 Subject: [PATCH 429/504] lib/math: add int_sqrt test suite Adds test suite for integer based square root function. The test suite is designed to verify the correctness of the int_sqrt() math library function. Link: https://lkml.kernel.org/r/20241213042701.1037467-1-luis.hernandez093@gmail.com Signed-off-by: Luis Felipe Hernandez <luis.hernandez093@gmail.com> Reviewed-by: Kuan-Wei Chiu <visitorckw@gmail.com> Cc: David Gow <davidgow@google.com> Cc: Ricardo B. Marliere <rbm@suse.com> Cc: Shuah Khan <skhan@linuxfoundation.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- lib/Kconfig.debug | 15 ++++++++ lib/math/Makefile | 1 + lib/math/tests/Makefile | 1 + lib/math/tests/int_sqrt_kunit.c | 66 +++++++++++++++++++++++++++++++++ 4 files changed, 83 insertions(+) create mode 100644 lib/math/tests/int_sqrt_kunit.c diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index b53f5da22e40..d597930d381f 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -3174,6 +3174,21 @@ config INT_POW_TEST If unsure, say N +config INT_SQRT_KUNIT_TEST + tristate "Integer square root test" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + This option enables the KUnit test suite for the int_sqrt() function, + which performs square root calculation. The test suite checks + various scenarios, including edge cases, to ensure correctness. + + Enabling this option will include tests that check various scenarios + and edge cases to ensure the accuracy and reliability of the square root + function. 
+ + If unsure, say N + endif # RUNTIME_TESTING_MENU config ARCH_USE_MEMTEST diff --git a/lib/math/Makefile b/lib/math/Makefile index 3ef11305f8d2..853f023ae537 100644 --- a/lib/math/Makefile +++ b/lib/math/Makefile @@ -9,3 +9,4 @@ obj-$(CONFIG_INT_POW_TEST) += tests/int_pow_kunit.o obj-$(CONFIG_TEST_DIV64) += test_div64.o obj-$(CONFIG_TEST_MULDIV64) += test_mul_u64_u64_div_u64.o obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o +obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += tests/int_sqrt_kunit.o \ No newline at end of file diff --git a/lib/math/tests/Makefile b/lib/math/tests/Makefile index 6a169123320a..e1a79f093b2d 100644 --- a/lib/math/tests/Makefile +++ b/lib/math/tests/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_INT_POW_TEST) += int_pow_kunit.o +obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += int_sqrt_kunit.o diff --git a/lib/math/tests/int_sqrt_kunit.c b/lib/math/tests/int_sqrt_kunit.c new file mode 100644 index 000000000000..1798e1312eb7 --- /dev/null +++ b/lib/math/tests/int_sqrt_kunit.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <kunit/test.h> +#include <linux/limits.h> +#include <linux/math.h> +#include <linux/module.h> +#include <linux/string.h> + +struct test_case_params { + unsigned long x; + unsigned long expected_result; + const char *name; +}; + +static const struct test_case_params params[] = { + { 0, 0, "edge case: square root of 0" }, + { 1, 1, "perfect square: square root of 1" }, + { 2, 1, "non-perfect square: square root of 2" }, + { 3, 1, "non-perfect square: square root of 3" }, + { 4, 2, "perfect square: square root of 4" }, + { 5, 2, "non-perfect square: square root of 5" }, + { 6, 2, "non-perfect square: square root of 6" }, + { 7, 2, "non-perfect square: square root of 7" }, + { 8, 2, "non-perfect square: square root of 8" }, + { 9, 3, "perfect square: square root of 9" }, + { 15, 3, "non-perfect square: square root of 15 (N-1 from 16)" }, + { 16, 4, "perfect square: square root of 16" }, + { 17, 4, "non-perfect square: square root of 17 (N+1 from 16)" }, + { 80, 8, "non-perfect square: square root of 80 (N-1 from 81)" }, + { 81, 9, "perfect square: square root of 81" }, + { 82, 9, "non-perfect square: square root of 82 (N+1 from 81)" }, + { 255, 15, "non-perfect square: square root of 255 (N-1 from 256)" }, + { 256, 16, "perfect square: square root of 256" }, + { 257, 16, "non-perfect square: square root of 257 (N+1 from 256)" }, + { 2147483648, 46340, "large input: square root of 2147483648" }, + { 4294967295, 65535, "edge case: ULONG_MAX for 32-bit" }, +}; + +static void get_desc(const struct test_case_params *tc, char *desc) +{ + strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE); +} + +KUNIT_ARRAY_PARAM(int_sqrt, params, get_desc); + +static void int_sqrt_test(struct kunit *test) +{ + const struct test_case_params *tc = (const struct test_case_params *)test->param_value; + + KUNIT_EXPECT_EQ(test, tc->expected_result, int_sqrt(tc->x)); +} + +static struct kunit_case math_int_sqrt_test_cases[] = { + KUNIT_CASE_PARAM(int_sqrt_test, int_sqrt_gen_params), + {} +}; + +static struct kunit_suite int_sqrt_test_suite = { + .name = "math-int_sqrt", + .test_cases = math_int_sqrt_test_cases, +}; + +kunit_test_suites(&int_sqrt_test_suite); + +MODULE_DESCRIPTION("math.int_sqrt KUnit test suite"); +MODULE_LICENSE("GPL"); From cb3662311f911f6fac75d61a9d90ef071d3603e2 Mon Sep 17 00:00:00 2001 From: pangliyuan <pangliyuan1@huawei.com> Date: Tue, 10 Dec 2024 17:08:42 +0800 Subject: [PATCH 430/504] Squashfs: don't allocate fragment 
caches more than fragments Sometimes the actual number of fragments in image is between 0 and SQUASHFS_CACHED_FRAGMENTS, which cause additional fragment caches to be allocated. Sets the number of fragment caches to the minimum of fragments and SQUASHFS_CACHED_FRAGMENTS. Link: https://lkml.kernel.org/r/20241210090842.160853-1-pangliyuan1@huawei.com Signed-off-by: pangliyuan <pangliyuan1@huawei.com> Reviewed-by: Phillip Lougher <phillip@squashfs.org.uk> Cc: <wangfangpeng1@huawei.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/squashfs/super.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 22e812808e5c..269c6d61bc29 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -405,7 +405,7 @@ handle_fragments: goto check_directory_table; msblk->fragment_cache = squashfs_cache_init("fragment", - SQUASHFS_CACHED_FRAGMENTS, msblk->block_size); + min(SQUASHFS_CACHED_FRAGMENTS, fragments), msblk->block_size); if (msblk->fragment_cache == NULL) { err = -ENOMEM; goto failed_mount; From a0651153e35efb953d2c33ab74030d5e57e72283 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" <willy@infradead.org> Date: Thu, 5 Dec 2024 17:16:29 +0000 Subject: [PATCH 431/504] ocfs2: handle a symlink read error correctly Patch series "Convert ocfs2 to use folios". Mark did a conversion of ocfs2 to use folios and sent it to me as a giant patch for review ;-) So I've redone it as individual patches, and credited Mark for the patches where his code is substantially the same. It's not a bad way to do it; his patch had some bugs and my patches had some bugs. Hopefully all our bugs were different from each other. And hopefully Mark likes all the changes I made to his code! This patch (of 23): If we can't read the buffer, be sure to unlock the page before returning. Link: https://lkml.kernel.org/r/20241205171653.3179945-1-willy@infradead.org Link: https://lkml.kernel.org/r/20241205171653.3179945-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Mark Fasheh <mark@fasheh.com> Cc: Joel Becker <jlbec@evilplan.org> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Jun Piao <piaojun@huawei.com> Cc: Mark Tinguely <mark.tinguely@oracle.com> Cc: <stable@vger.kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/symlink.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index d4c5fdcfa1e4..f5cf2255dc09 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -65,7 +65,7 @@ static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio) if (status < 0) { mlog_errno(status); - return status; + goto out; } fe = (struct ocfs2_dinode *) bh->b_data; @@ -76,9 +76,10 @@ static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio) memcpy(kaddr, link, len + 1); kunmap_atomic(kaddr); SetPageUptodate(page); +out: unlock_page(page); brelse(bh); - return 0; + return status; } const struct address_space_operations ocfs2_fast_symlink_aops = { From ee51adb3b92e0844771e6274e2ba945bb3c33f2f Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" <willy@infradead.org> Date: Thu, 5 Dec 2024 17:16:30 +0000 Subject: [PATCH 432/504] ocfs2: convert ocfs2_page_mkwrite() to use a folio Pass the folio into __ocfs2_page_mkwrite() and use it throughout. Does not attempt to support large folios. 
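In outline (a condensed sketch of the hunks below, with the error paths elided), ocfs2_page_mkwrite() resolves the folio from the faulting page once, and __ocfs2_page_mkwrite() then tests folio state directly instead of going through page_offset()/PageUptodate():

        struct folio *folio = page_folio(vmf->page);
        loff_t pos = folio_pos(folio);

        /* Let the VM retry the fault if the folio no longer qualifies. */
        if ((folio->mapping != inode->i_mapping) ||
            !folio_test_uptodate(folio) ||
            (pos >= size))
                goto out;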
Link: https://lkml.kernel.org/r/20241205171653.3179945-3-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Cc: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/mmap.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 6ef4cb045ccd..93b886f16c40 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -44,13 +44,13 @@ static vm_fault_t ocfs2_fault(struct vm_fault *vmf) } static vm_fault_t __ocfs2_page_mkwrite(struct file *file, - struct buffer_head *di_bh, struct page *page) + struct buffer_head *di_bh, struct folio *folio) { int err; vm_fault_t ret = VM_FAULT_NOPAGE; struct inode *inode = file_inode(file); struct address_space *mapping = inode->i_mapping; - loff_t pos = page_offset(page); + loff_t pos = folio_pos(folio); unsigned int len = PAGE_SIZE; pgoff_t last_index; struct folio *locked_folio = NULL; @@ -72,9 +72,9 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file, * * Let VM retry with these cases. */ - if ((page->mapping != inode->i_mapping) || - (!PageUptodate(page)) || - (page_offset(page) >= size)) + if ((folio->mapping != inode->i_mapping) || + !folio_test_uptodate(folio) || + (pos >= size)) goto out; /* @@ -87,11 +87,11 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file, * worry about ocfs2_write_begin() skipping some buffer reads * because the "write" would invalidate their data. */ - if (page->index == last_index) + if (folio->index == last_index) len = ((size - 1) & ~PAGE_MASK) + 1; err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP, - &locked_folio, &fsdata, di_bh, page); + &locked_folio, &fsdata, di_bh, &folio->page); if (err) { if (err != -ENOSPC) mlog_errno(err); @@ -112,7 +112,7 @@ out: static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf) { - struct page *page = vmf->page; + struct folio *folio = page_folio(vmf->page); struct inode *inode = file_inode(vmf->vma->vm_file); struct buffer_head *di_bh = NULL; sigset_t oldset; @@ -141,7 +141,7 @@ static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf) */ down_write(&OCFS2_I(inode)->ip_alloc_sem); - ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page); + ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, folio); up_write(&OCFS2_I(inode)->ip_alloc_sem); From 37fcdec1bc51a4c892f1e94ff8be57db0a06ec9d Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:31 +0000 Subject: [PATCH 433/504] ocfs2: convert w_target_page to w_target_folio Pass a folio around instead of a page. Saves a few hidden calls to compound_head() and removes a call to kmap_atomic(). 
Link: https://lkml.kernel.org/r/20241205171653.3179945-4-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/aops.c | 63 ++++++++++++++++++++++++------------------------- 1 file changed, 31 insertions(+), 32 deletions(-) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 64b3ddeb3555..cbb880ad887a 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -731,22 +731,22 @@ struct ocfs2_write_ctxt { /* * Pages involved in this write. * - * w_target_page is the page being written to by the user. + * w_target_folio is the folio being written to by the user. * * w_pages is an array of pages which always contains - * w_target_page, and in the case of an allocating write with + * w_target_folio, and in the case of an allocating write with * page_size < cluster size, it will contain zero'd and mapped - * pages adjacent to w_target_page which need to be written + * pages adjacent to w_target_folio which need to be written * out in so that future reads from that region will get * zero's. */ unsigned int w_num_pages; struct page *w_pages[OCFS2_MAX_CTXT_PAGES]; - struct page *w_target_page; + struct folio *w_target_folio; /* * w_target_locked is used for page_mkwrite path indicating no unlocking - * against w_target_page in ocfs2_write_end_nolock. + * against w_target_folio in ocfs2_write_end_nolock. */ unsigned int w_target_locked:1; @@ -791,18 +791,18 @@ static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) /* * w_target_locked is only set to true in the page_mkwrite() case. * The intent is to allow us to lock the target page from write_begin() - * to write_end(). The caller must hold a ref on w_target_page. + * to write_end(). The caller must hold a ref on w_target_folio. */ if (wc->w_target_locked) { - BUG_ON(!wc->w_target_page); + BUG_ON(!wc->w_target_folio); for (i = 0; i < wc->w_num_pages; i++) { - if (wc->w_target_page == wc->w_pages[i]) { + if (&wc->w_target_folio->page == wc->w_pages[i]) { wc->w_pages[i] = NULL; break; } } - mark_page_accessed(wc->w_target_page); - put_page(wc->w_target_page); + folio_mark_accessed(wc->w_target_folio); + folio_put(wc->w_target_folio); } ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); } @@ -869,8 +869,9 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, * and dirty so they'll be written out (in order to prevent uninitialised * block data from leaking). And clear the new bit. 
*/ -static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to) +static void ocfs2_zero_new_buffers(struct folio *folio, unsigned from, unsigned to) { + struct page *page = &folio->page; unsigned int block_start, block_end; struct buffer_head *head, *bh; @@ -918,8 +919,8 @@ static void ocfs2_write_failure(struct inode *inode, to = user_pos + user_len; struct page *tmppage; - if (wc->w_target_page) - ocfs2_zero_new_buffers(wc->w_target_page, from, to); + if (wc->w_target_folio) + ocfs2_zero_new_buffers(wc->w_target_folio, from, to); for(i = 0; i < wc->w_num_pages; i++) { tmppage = wc->w_pages[i]; @@ -954,7 +955,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, new = new | ((i_size_read(inode) <= page_offset(page)) && (page_offset(page) <= user_pos)); - if (page == wc->w_target_page) { + if (page == &wc->w_target_folio->page) { map_from = user_pos & (PAGE_SIZE - 1); map_to = map_from + user_len; @@ -1097,7 +1098,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, wait_for_stable_page(wc->w_pages[i]); if (index == target_index) - wc->w_target_page = wc->w_pages[i]; + wc->w_target_folio = page_folio(wc->w_pages[i]); } out: if (ret) @@ -1494,7 +1495,8 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, * If we don't set w_num_pages then this page won't get unlocked * and freed on cleanup of the write context. */ - wc->w_pages[0] = wc->w_target_page = page; + wc->w_target_folio = page_folio(page); + wc->w_pages[0] = page; wc->w_num_pages = 1; ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, @@ -1803,7 +1805,7 @@ try_again: * the operation. */ if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) { - BUG_ON(wc->w_target_page); + BUG_ON(wc->w_target_folio); ret = 0; goto out_quota; } @@ -1826,7 +1828,7 @@ try_again: success: if (foliop) - *foliop = page_folio(wc->w_target_page); + *foliop = wc->w_target_folio; *fsdata = wc; return 0; out_quota: @@ -1924,18 +1926,15 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos, struct ocfs2_dinode *di, struct ocfs2_write_ctxt *wc) { - void *kaddr; - if (unlikely(*copied < len)) { - if (!PageUptodate(wc->w_target_page)) { + if (!folio_test_uptodate(wc->w_target_folio)) { *copied = 0; return; } } - kaddr = kmap_atomic(wc->w_target_page); - memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied); - kunmap_atomic(kaddr); + memcpy_from_folio(di->id2.i_data.id_data + pos, wc->w_target_folio, + pos, *copied); trace_ocfs2_write_end_inline( (unsigned long long)OCFS2_I(inode)->ip_blkno, @@ -1973,15 +1972,15 @@ int ocfs2_write_end_nolock(struct address_space *mapping, goto out_write_size; } - if (unlikely(copied < len) && wc->w_target_page) { + if (unlikely(copied < len) && wc->w_target_folio) { loff_t new_isize; - if (!PageUptodate(wc->w_target_page)) + if (!folio_test_uptodate(wc->w_target_folio)) copied = 0; new_isize = max_t(loff_t, i_size_read(inode), pos + copied); - if (new_isize > page_offset(wc->w_target_page)) - ocfs2_zero_new_buffers(wc->w_target_page, start+copied, + if (new_isize > folio_pos(wc->w_target_folio)) + ocfs2_zero_new_buffers(wc->w_target_folio, start+copied, start+len); else { /* @@ -1991,12 +1990,12 @@ int ocfs2_write_end_nolock(struct address_space *mapping, * put page & buffer dirty bits into inconsistent * state. 
*/ - block_invalidate_folio(page_folio(wc->w_target_page), + block_invalidate_folio(wc->w_target_folio, 0, PAGE_SIZE); } } - if (wc->w_target_page) - flush_dcache_page(wc->w_target_page); + if (wc->w_target_folio) + flush_dcache_folio(wc->w_target_folio); for(i = 0; i < wc->w_num_pages; i++) { tmppage = wc->w_pages[i]; @@ -2005,7 +2004,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping, if (tmppage == NULL) continue; - if (tmppage == wc->w_target_page) { + if (tmppage == &wc->w_target_folio->page) { from = wc->w_target_from; to = wc->w_target_to; From aae808abfdbe6a0415e5813b13c48ed00d79f5c7 Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:32 +0000 Subject: [PATCH 434/504] ocfs2: use a folio in ocfs2_zero_new_buffers() Convert to the new APIs, saving at least one hidden call to compound_head(). Link: https://lkml.kernel.org/r/20241205171653.3179945-5-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/aops.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index cbb880ad887a..76400bba5ab5 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -869,30 +869,30 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp, * and dirty so they'll be written out (in order to prevent uninitialised * block data from leaking). And clear the new bit. */ -static void ocfs2_zero_new_buffers(struct folio *folio, unsigned from, unsigned to) +static void ocfs2_zero_new_buffers(struct folio *folio, size_t from, size_t to) { - struct page *page = &folio->page; unsigned int block_start, block_end; struct buffer_head *head, *bh; - BUG_ON(!PageLocked(page)); - if (!page_has_buffers(page)) + BUG_ON(!folio_test_locked(folio)); + head = folio_buffers(folio); + if (!head) return; - bh = head = page_buffers(page); + bh = head; block_start = 0; do { block_end = block_start + bh->b_size; if (buffer_new(bh)) { if (block_end > from && block_start < to) { - if (!PageUptodate(page)) { + if (!folio_test_uptodate(folio)) { unsigned start, end; start = max(from, block_start); end = min(to, block_end); - zero_user_segment(page, start, end); + folio_zero_segment(folio, start, end); set_buffer_uptodate(bh); } From 61304dd7d1bc02cb492a47c8a52fcb03ea05cf3d Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:33 +0000 Subject: [PATCH 435/504] ocfs2: use a folio in ocfs2_write_begin_inline() Retrieve a folio from the page cache instead of a page and use that folio throught the function. Saves a couple of calls to compound_head(). 
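The key hunk in isolation: __filemap_get_folio() with FGP_LOCK | FGP_ACCESSED | FGP_CREAT takes over from find_or_create_page(), so failure is now reported as an ERR_PTR() rather than a NULL page:

        folio = __filemap_get_folio(mapping, 0,
                        FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
        if (IS_ERR(folio)) {
                ocfs2_commit_trans(osb, handle);
                ret = PTR_ERR(folio);
                mlog_errno(ret);
                goto out;
        }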
Link: https://lkml.kernel.org/r/20241205171653.3179945-6-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/aops.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 76400bba5ab5..46fb2b564367 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -1473,7 +1473,7 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, { int ret; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); - struct page *page; + struct folio *folio; handle_t *handle; struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; @@ -1484,19 +1484,20 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, goto out; } - page = find_or_create_page(mapping, 0, GFP_NOFS); - if (!page) { + folio = __filemap_get_folio(mapping, 0, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); + if (IS_ERR(folio)) { ocfs2_commit_trans(osb, handle); - ret = -ENOMEM; + ret = PTR_ERR(folio); mlog_errno(ret); goto out; } /* - * If we don't set w_num_pages then this page won't get unlocked + * If we don't set w_num_pages then this folio won't get unlocked * and freed on cleanup of the write context. */ - wc->w_target_folio = page_folio(page); - wc->w_pages[0] = page; + wc->w_target_folio = folio; + wc->w_pages[0] = &folio->page; wc->w_num_pages = 1; ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, @@ -1511,8 +1512,8 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)) ocfs2_set_inode_data_inline(inode, di); - if (!PageUptodate(page)) { - ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh); + if (!folio_test_uptodate(folio)) { + ret = ocfs2_read_inline_data(inode, &folio->page, wc->w_di_bh); if (ret) { ocfs2_commit_trans(osb, handle); From a1a369687580c283941b1c93ca990e37acf74031 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" <willy@infradead.org> Date: Thu, 5 Dec 2024 17:16:34 +0000 Subject: [PATCH 436/504] ocfs2: pass mmap_folio around instead of mmap_page Saves a few hidden calls to compound_head() and accesses to page->mapping. 
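The mmap branch of ocfs2_grab_pages_for_write() shows the pattern (condensed from the hunk below): the folio is locked, checked against the mapping and pinned through the folio API, with no hidden compound_head() round trips. wc->w_pages still stores struct page pointers at this point, hence the remaining &mmap_folio->page; that array is converted later in the series.

        folio_lock(mmap_folio);

        /* Exit and let the caller retry */
        if (mmap_folio->mapping != mapping) {
                WARN_ON(mmap_folio->mapping);
                folio_unlock(mmap_folio);
                ret = -EAGAIN;
                goto out;
        }

        folio_get(mmap_folio);
        wc->w_pages[i] = &mmap_folio->page;
        wc->w_target_locked = true;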
Link: https://lkml.kernel.org/r/20241205171653.3179945-7-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Cc: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/aops.c | 37 ++++++++++++++++++------------------- fs/ocfs2/aops.h | 6 +++--- fs/ocfs2/mmap.c | 2 +- 3 files changed, 22 insertions(+), 23 deletions(-) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 46fb2b564367..c26853fb7dc9 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -1023,7 +1023,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos, unsigned user_len, int new, - struct page *mmap_page) + struct folio *mmap_folio) { int ret = 0, i; unsigned long start, target_index, end_index, index; @@ -1068,18 +1068,18 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, * and wants us to directly use the page * passed in. */ - lock_page(mmap_page); + folio_lock(mmap_folio); /* Exit and let the caller retry */ - if (mmap_page->mapping != mapping) { - WARN_ON(mmap_page->mapping); - unlock_page(mmap_page); + if (mmap_folio->mapping != mapping) { + WARN_ON(mmap_folio->mapping); + folio_unlock(mmap_folio); ret = -EAGAIN; goto out; } - get_page(mmap_page); - wc->w_pages[i] = mmap_page; + folio_get(mmap_folio); + wc->w_pages[i] = &mmap_folio->page; wc->w_target_locked = true; } else if (index >= target_index && index <= end_index && wc->w_type == OCFS2_WRITE_DIRECT) { @@ -1536,9 +1536,8 @@ int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size) } static int ocfs2_try_to_write_inline_data(struct address_space *mapping, - struct inode *inode, loff_t pos, - unsigned len, struct page *mmap_page, - struct ocfs2_write_ctxt *wc) + struct inode *inode, loff_t pos, size_t len, + struct folio *mmap_folio, struct ocfs2_write_ctxt *wc) { int ret, written = 0; loff_t end = pos + len; @@ -1553,7 +1552,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping, * Handle inodes which already have inline data 1st. */ if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) { - if (mmap_page == NULL && + if (mmap_folio == NULL && ocfs2_size_fits_inline_data(wc->w_di_bh, end)) goto do_inline_write; @@ -1577,7 +1576,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping, * Check whether the write can fit. 
*/ di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; - if (mmap_page || + if (mmap_folio || end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) return 0; @@ -1644,9 +1643,9 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh, } int ocfs2_write_begin_nolock(struct address_space *mapping, - loff_t pos, unsigned len, ocfs2_write_type_t type, - struct folio **foliop, void **fsdata, - struct buffer_head *di_bh, struct page *mmap_page) + loff_t pos, unsigned len, ocfs2_write_type_t type, + struct folio **foliop, void **fsdata, + struct buffer_head *di_bh, struct folio *mmap_folio) { int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS; unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0; @@ -1669,7 +1668,7 @@ try_again: if (ocfs2_supports_inline_data(osb)) { ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len, - mmap_page, wc); + mmap_folio, wc); if (ret == 1) { ret = 0; goto success; @@ -1721,7 +1720,7 @@ try_again: (unsigned long long)OCFS2_I(inode)->ip_blkno, (long long)i_size_read(inode), le32_to_cpu(di->i_clusters), - pos, len, type, mmap_page, + pos, len, type, mmap_folio, clusters_to_alloc, extents_to_split); /* @@ -1797,7 +1796,7 @@ try_again: * extent. */ ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, - cluster_of_pages, mmap_page); + cluster_of_pages, mmap_folio); if (ret) { /* * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock @@ -1848,7 +1847,7 @@ out: * to VM code. */ if (wc->w_target_locked) - unlock_page(mmap_page); + folio_unlock(mmap_folio); ocfs2_free_write_ctxt(inode, wc); diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 1d1b4b7edba0..1cb46072ad84 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -37,9 +37,9 @@ typedef enum { } ocfs2_write_type_t; int ocfs2_write_begin_nolock(struct address_space *mapping, - loff_t pos, unsigned len, ocfs2_write_type_t type, - struct folio **foliop, void **fsdata, - struct buffer_head *di_bh, struct page *mmap_page); + loff_t pos, unsigned len, ocfs2_write_type_t type, + struct folio **foliop, void **fsdata, + struct buffer_head *di_bh, struct folio *mmap_folio); int ocfs2_read_inline_data(struct inode *inode, struct page *page, struct buffer_head *di_bh); diff --git a/fs/ocfs2/mmap.c b/fs/ocfs2/mmap.c index 93b886f16c40..6a314e9f2b49 100644 --- a/fs/ocfs2/mmap.c +++ b/fs/ocfs2/mmap.c @@ -91,7 +91,7 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file, len = ((size - 1) & ~PAGE_MASK) + 1; err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP, - &locked_folio, &fsdata, di_bh, &folio->page); + &locked_folio, &fsdata, di_bh, folio); if (err) { if (err != -ENOSPC) mlog_errno(err); From 7a33060bff1d0057e69913e15500a318945a7c2d Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:35 +0000 Subject: [PATCH 437/504] ocfs2: convert ocfs2_readpage_inline() to take a folio Save a couple of calls to compound_head() by using a folio throughout this function. 
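The result, condensed from the hunk below with the ocfs2_read_inode_block() error path elided: the locked-folio assertion and the unlock use the folio API, while ocfs2_read_inline_data() still takes a page here, hence the one remaining &folio->page:

        BUG_ON(!folio_test_locked(folio));
        ...
        ret = ocfs2_read_inline_data(inode, &folio->page, di_bh);
out:
        folio_unlock(folio);
        brelse(di_bh);
        return ret;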
Link: https://lkml.kernel.org/r/20241205171653.3179945-8-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/aops.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index c26853fb7dc9..c0a6b82a6a68 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -252,12 +252,12 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page, return 0; } -static int ocfs2_readpage_inline(struct inode *inode, struct page *page) +static int ocfs2_readpage_inline(struct inode *inode, struct folio *folio) { int ret; struct buffer_head *di_bh = NULL; - BUG_ON(!PageLocked(page)); + BUG_ON(!folio_test_locked(folio)); BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)); ret = ocfs2_read_inode_block(inode, &di_bh); @@ -266,9 +266,9 @@ static int ocfs2_readpage_inline(struct inode *inode, struct page *page) goto out; } - ret = ocfs2_read_inline_data(inode, page, di_bh); + ret = ocfs2_read_inline_data(inode, &folio->page, di_bh); out: - unlock_page(page); + folio_unlock(folio); brelse(di_bh); return ret; @@ -322,7 +322,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio) } if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) - ret = ocfs2_readpage_inline(inode, &folio->page); + ret = ocfs2_readpage_inline(inode, folio); else ret = block_read_full_folio(folio, ocfs2_get_block); unlock = 0; From 738ba5d4f9ea6360f7c447d21c8a890672b5f002 Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:36 +0000 Subject: [PATCH 438/504] ocfs2: convert ocfs2_inode_lock_with_page() to ocfs2_inode_lock_with_folio() Saves a hidden call to compound_head(). Link: https://lkml.kernel.org/r/20241205171653.3179945-9-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/aops.c | 2 +- fs/ocfs2/dlmglue.c | 18 ++++++++---------- fs/ocfs2/dlmglue.h | 6 ++---- 3 files changed, 11 insertions(+), 15 deletions(-) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index c0a6b82a6a68..87127a7f2f78 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -283,7 +283,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio) trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index); - ret = ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page); + ret = ocfs2_inode_lock_with_folio(inode, NULL, 0, folio); if (ret != 0) { if (ret == AOP_TRUNCATED_PAGE) unlock = 0; diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 8f08e89be37d..c9b62a6d8673 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c @@ -2529,30 +2529,28 @@ bail: /* * This is working around a lock inversion between tasks acquiring DLM - * locks while holding a page lock and the downconvert thread which - * blocks dlm lock acquiry while acquiring page locks. 
+ * locks while holding a folio lock and the downconvert thread which + * blocks dlm lock acquiry while acquiring folio locks. * - * ** These _with_page variants are only intended to be called from aop - * methods that hold page locks and return a very specific *positive* error + * ** These _with_folio variants are only intended to be called from aop + * methods that hold folio locks and return a very specific *positive* error * code that aop methods pass up to the VFS -- test for errors with != 0. ** * * The DLM is called such that it returns -EAGAIN if it would have * blocked waiting for the downconvert thread. In that case we unlock - * our page so the downconvert thread can make progress. Once we've + * our folio so the downconvert thread can make progress. Once we've * done this we have to return AOP_TRUNCATED_PAGE so the aop method * that called us can bubble that back up into the VFS who will then * immediately retry the aop call. */ -int ocfs2_inode_lock_with_page(struct inode *inode, - struct buffer_head **ret_bh, - int ex, - struct page *page) +int ocfs2_inode_lock_with_folio(struct inode *inode, + struct buffer_head **ret_bh, int ex, struct folio *folio) { int ret; ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK); if (ret == -EAGAIN) { - unlock_page(page); + folio_unlock(folio); /* * If we can't get inode lock immediately, we should not return * directly here, since this will lead to a softlockup problem. diff --git a/fs/ocfs2/dlmglue.h b/fs/ocfs2/dlmglue.h index e5da5809ed95..a3ebd7303ea2 100644 --- a/fs/ocfs2/dlmglue.h +++ b/fs/ocfs2/dlmglue.h @@ -137,10 +137,8 @@ int ocfs2_inode_lock_full_nested(struct inode *inode, int ex, int arg_flags, int subclass); -int ocfs2_inode_lock_with_page(struct inode *inode, - struct buffer_head **ret_bh, - int ex, - struct page *page); +int ocfs2_inode_lock_with_folio(struct inode *inode, + struct buffer_head **ret_bh, int ex, struct folio *folio); /* Variants without special locking class or flags */ #define ocfs2_inode_lock_full(i, r, e, f)\ ocfs2_inode_lock_full_nested(i, r, e, f, OI_LS_NORMAL) From 5f5ea21f8fd0fe6bae7dc134ceabcbbf675be8c0 Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:37 +0000 Subject: [PATCH 439/504] ocfs2: convert w_pages to w_folios Pass around an array of folios instead of an array of pages. Removes a few calls to compound_head(). Link: https://lkml.kernel.org/r/20241205171653.3179945-10-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/aops.c | 118 ++++++++++++++++++++++++++---------------------- fs/ocfs2/aops.h | 1 + 2 files changed, 65 insertions(+), 54 deletions(-) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 87127a7f2f78..f088183fbae2 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -729,19 +729,19 @@ struct ocfs2_write_ctxt { unsigned int w_large_pages; /* - * Pages involved in this write. + * Folios involved in this write. * * w_target_folio is the folio being written to by the user. 
* - * w_pages is an array of pages which always contains + * w_folios is an array of folios which always contains * w_target_folio, and in the case of an allocating write with * page_size < cluster size, it will contain zero'd and mapped * pages adjacent to w_target_folio which need to be written * out in so that future reads from that region will get * zero's. */ - unsigned int w_num_pages; - struct page *w_pages[OCFS2_MAX_CTXT_PAGES]; + unsigned int w_num_folios; + struct folio *w_folios[OCFS2_MAX_CTXT_PAGES]; struct folio *w_target_folio; /* @@ -771,6 +771,19 @@ struct ocfs2_write_ctxt { unsigned int w_unwritten_count; }; +void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios) +{ + int i; + + for(i = 0; i < num_folios; i++) { + if (!folios[i]) + continue; + folio_unlock(folios[i]); + folio_mark_accessed(folios[i]); + folio_put(folios[i]); + } +} + void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) { int i; @@ -784,7 +797,7 @@ void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) } } -static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) +static void ocfs2_unlock_folios(struct ocfs2_write_ctxt *wc) { int i; @@ -795,16 +808,16 @@ static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc) */ if (wc->w_target_locked) { BUG_ON(!wc->w_target_folio); - for (i = 0; i < wc->w_num_pages; i++) { - if (&wc->w_target_folio->page == wc->w_pages[i]) { - wc->w_pages[i] = NULL; + for (i = 0; i < wc->w_num_folios; i++) { + if (wc->w_target_folio == wc->w_folios[i]) { + wc->w_folios[i] = NULL; break; } } folio_mark_accessed(wc->w_target_folio); folio_put(wc->w_target_folio); } - ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages); + ocfs2_unlock_and_free_folios(wc->w_folios, wc->w_num_folios); } static void ocfs2_free_unwritten_list(struct inode *inode, @@ -826,7 +839,7 @@ static void ocfs2_free_write_ctxt(struct inode *inode, struct ocfs2_write_ctxt *wc) { ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list); - ocfs2_unlock_pages(wc); + ocfs2_unlock_folios(wc); brelse(wc->w_di_bh); kfree(wc); } @@ -922,8 +935,8 @@ static void ocfs2_write_failure(struct inode *inode, if (wc->w_target_folio) ocfs2_zero_new_buffers(wc->w_target_folio, from, to); - for(i = 0; i < wc->w_num_pages; i++) { - tmppage = wc->w_pages[i]; + for (i = 0; i < wc->w_num_folios; i++) { + tmppage = &wc->w_folios[i]->page; if (tmppage && page_has_buffers(tmppage)) { if (ocfs2_should_order_data(inode)) @@ -935,12 +948,11 @@ static void ocfs2_write_failure(struct inode *inode, } } -static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno, - struct ocfs2_write_ctxt *wc, - struct page *page, u32 cpos, - loff_t user_pos, unsigned user_len, - int new) +static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno, + struct ocfs2_write_ctxt *wc, struct folio *folio, u32 cpos, + loff_t user_pos, unsigned user_len, int new) { + struct page *page = &folio->page; int ret; unsigned int map_from = 0, map_to = 0; unsigned int cluster_start, cluster_end; @@ -1019,11 +1031,9 @@ out: /* * This function will only grab one clusters worth of pages. 
*/ -static int ocfs2_grab_pages_for_write(struct address_space *mapping, - struct ocfs2_write_ctxt *wc, - u32 cpos, loff_t user_pos, - unsigned user_len, int new, - struct folio *mmap_folio) +static int ocfs2_grab_folios_for_write(struct address_space *mapping, + struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos, + unsigned user_len, int new, struct folio *mmap_folio) { int ret = 0, i; unsigned long start, target_index, end_index, index; @@ -1040,7 +1050,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, * last page of the write. */ if (new) { - wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb); + wc->w_num_folios = ocfs2_pages_per_cluster(inode->i_sb); start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos); /* * We need the index *past* the last page we could possibly @@ -1050,15 +1060,15 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, last_byte = max(user_pos + user_len, i_size_read(inode)); BUG_ON(last_byte < 1); end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1; - if ((start + wc->w_num_pages) > end_index) - wc->w_num_pages = end_index - start; + if ((start + wc->w_num_folios) > end_index) + wc->w_num_folios = end_index - start; } else { - wc->w_num_pages = 1; + wc->w_num_folios = 1; start = target_index; } end_index = (user_pos + user_len - 1) >> PAGE_SHIFT; - for(i = 0; i < wc->w_num_pages; i++) { + for(i = 0; i < wc->w_num_folios; i++) { index = start + i; if (index >= target_index && index <= end_index && @@ -1079,26 +1089,27 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping, } folio_get(mmap_folio); - wc->w_pages[i] = &mmap_folio->page; + wc->w_folios[i] = mmap_folio; wc->w_target_locked = true; } else if (index >= target_index && index <= end_index && wc->w_type == OCFS2_WRITE_DIRECT) { /* Direct write has no mapping page. */ - wc->w_pages[i] = NULL; + wc->w_folios[i] = NULL; continue; } else { - wc->w_pages[i] = find_or_create_page(mapping, index, - GFP_NOFS); - if (!wc->w_pages[i]) { - ret = -ENOMEM; + wc->w_folios[i] = __filemap_get_folio(mapping, index, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, + GFP_NOFS); + if (IS_ERR(wc->w_folios[i])) { + ret = PTR_ERR(wc->w_folios[i]); mlog_errno(ret); goto out; } } - wait_for_stable_page(wc->w_pages[i]); + folio_wait_stable(wc->w_folios[i]); if (index == target_index) - wc->w_target_folio = page_folio(wc->w_pages[i]); + wc->w_target_folio = wc->w_folios[i]; } out: if (ret) @@ -1182,19 +1193,18 @@ static int ocfs2_write_cluster(struct address_space *mapping, if (!should_zero) p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1); - for(i = 0; i < wc->w_num_pages; i++) { + for (i = 0; i < wc->w_num_folios; i++) { int tmpret; /* This is the direct io target page. */ - if (wc->w_pages[i] == NULL) { + if (wc->w_folios[i] == NULL) { p_blkno += (1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits)); continue; } - tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc, - wc->w_pages[i], cpos, - user_pos, user_len, - should_zero); + tmpret = ocfs2_prepare_folio_for_write(inode, &p_blkno, wc, + wc->w_folios[i], cpos, user_pos, user_len, + should_zero); if (tmpret) { mlog_errno(tmpret); if (ret == 0) @@ -1493,12 +1503,12 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, goto out; } /* - * If we don't set w_num_pages then this folio won't get unlocked + * If we don't set w_num_folios then this folio won't get unlocked * and freed on cleanup of the write context. 
*/ wc->w_target_folio = folio; - wc->w_pages[0] = &folio->page; - wc->w_num_pages = 1; + wc->w_folios[0] = folio; + wc->w_num_folios = 1; ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh, OCFS2_JOURNAL_ACCESS_WRITE); @@ -1791,18 +1801,18 @@ try_again: } /* - * Fill our page array first. That way we've grabbed enough so + * Fill our folio array first. That way we've grabbed enough so * that we can zero and flush if we error after adding the * extent. */ - ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len, - cluster_of_pages, mmap_folio); + ret = ocfs2_grab_folios_for_write(mapping, wc, wc->w_cpos, pos, len, + cluster_of_pages, mmap_folio); if (ret) { /* - * ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock - * the target page. In this case, we exit with no error and no target - * page. This will trigger the caller, page_mkwrite(), to re-try - * the operation. + * ocfs2_grab_folios_for_write() returns -EAGAIN if it + * could not lock the target folio. In this case, we exit + * with no error and no target folio. This will trigger + * the caller, page_mkwrite(), to re-try the operation. */ if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) { BUG_ON(wc->w_target_folio); @@ -1997,8 +2007,8 @@ int ocfs2_write_end_nolock(struct address_space *mapping, if (wc->w_target_folio) flush_dcache_folio(wc->w_target_folio); - for(i = 0; i < wc->w_num_pages; i++) { - tmppage = wc->w_pages[i]; + for (i = 0; i < wc->w_num_folios; i++) { + tmppage = &wc->w_folios[i]->page; /* This is the direct io target page. */ if (tmppage == NULL) @@ -2059,7 +2069,7 @@ out: * this lock and will ask for the page lock when flushing the data. * put it here to preserve the unlock order. */ - ocfs2_unlock_pages(wc); + ocfs2_unlock_folios(wc); if (handle) ocfs2_commit_trans(osb, handle); diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 1cb46072ad84..dcf4b0dc82c4 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -17,6 +17,7 @@ int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, struct inode *inode, unsigned int from, unsigned int to, int new); +void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios); void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages); int walk_page_buffers( handle_t *handle, From ba470f0396c10eedb2ac4bc7348907bb14e25e15 Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:38 +0000 Subject: [PATCH 440/504] ocfs2: convert ocfs2_write_failure() to use a folio Remove the folio->page conversion and just use the folio. 
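Condensed from the hunk below, the per-folio loop now tests folio_buffers() instead of page_has_buffers(); block_commit_write() still wants a page, so that one &folio->page conversion stays for now:

        for (i = 0; i < wc->w_num_folios; i++) {
                struct folio *folio = wc->w_folios[i];

                if (folio && folio_buffers(folio)) {
                        if (ocfs2_should_order_data(inode))
                                ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
                                                           user_pos, user_len);

                        block_commit_write(&folio->page, from, to);
                }
        }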
Link: https://lkml.kernel.org/r/20241205171653.3179945-11-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/aops.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index f088183fbae2..2b3a3f5566d5 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -930,20 +930,19 @@ static void ocfs2_write_failure(struct inode *inode, int i; unsigned from = user_pos & (PAGE_SIZE - 1), to = user_pos + user_len; - struct page *tmppage; if (wc->w_target_folio) ocfs2_zero_new_buffers(wc->w_target_folio, from, to); for (i = 0; i < wc->w_num_folios; i++) { - tmppage = &wc->w_folios[i]->page; + struct folio *folio = wc->w_folios[i]; - if (tmppage && page_has_buffers(tmppage)) { + if (folio && folio_buffers(folio)) { if (ocfs2_should_order_data(inode)) ocfs2_jbd2_inode_add_write(wc->w_handle, inode, user_pos, user_len); - block_commit_write(tmppage, from, to); + block_commit_write(&folio->page, from, to); } } } From 578a32e70e0d009fd7d1a345aa2aeabafca34541 Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:39 +0000 Subject: [PATCH 441/504] ocfs2: use a folio in ocfs2_write_end_nolock() Remove an access to page->index. Remove some PAGE_SIZE assumptions. Link: https://lkml.kernel.org/r/20241205171653.3179945-12-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/aops.c | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 2b3a3f5566d5..47c48f4e707c 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -1952,17 +1952,16 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos, le16_to_cpu(di->i_dyn_features)); } -int ocfs2_write_end_nolock(struct address_space *mapping, - loff_t pos, unsigned len, unsigned copied, void *fsdata) +int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos, + unsigned len, unsigned copied, void *fsdata) { int i, ret; - unsigned from, to, start = pos & (PAGE_SIZE - 1); + size_t from, to, start = pos & (PAGE_SIZE - 1); struct inode *inode = mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_write_ctxt *wc = fsdata; struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data; handle_t *handle = wc->w_handle; - struct page *tmppage; BUG_ON(!list_empty(&wc->w_unwritten_list)); @@ -1993,32 +1992,32 @@ int ocfs2_write_end_nolock(struct address_space *mapping, start+len); else { /* - * When page is fully beyond new isize (data copy - * failed), do not bother zeroing the page. Invalidate + * When folio is fully beyond new isize (data copy + * failed), do not bother zeroing the folio. 
Invalidate * it instead so that writeback does not get confused * put page & buffer dirty bits into inconsistent * state. */ - block_invalidate_folio(wc->w_target_folio, - 0, PAGE_SIZE); + block_invalidate_folio(wc->w_target_folio, 0, + folio_size(wc->w_target_folio)); } } if (wc->w_target_folio) flush_dcache_folio(wc->w_target_folio); for (i = 0; i < wc->w_num_folios; i++) { - tmppage = &wc->w_folios[i]->page; + struct folio *folio = wc->w_folios[i]; - /* This is the direct io target page. */ - if (tmppage == NULL) + /* This is the direct io target folio */ + if (folio == NULL) continue; - if (tmppage == &wc->w_target_folio->page) { + if (folio == wc->w_target_folio) { from = wc->w_target_from; to = wc->w_target_to; - BUG_ON(from > PAGE_SIZE || - to > PAGE_SIZE || + BUG_ON(from > folio_size(folio) || + to > folio_size(folio) || to < from); } else { /* @@ -2027,19 +2026,17 @@ int ocfs2_write_end_nolock(struct address_space *mapping, * to flush their entire range. */ from = 0; - to = PAGE_SIZE; + to = folio_size(folio); } - if (page_has_buffers(tmppage)) { + if (folio_buffers(folio)) { if (handle && ocfs2_should_order_data(inode)) { - loff_t start_byte = - ((loff_t)tmppage->index << PAGE_SHIFT) + - from; + loff_t start_byte = folio_pos(folio) + from; loff_t length = to - from; ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length); } - block_commit_write(tmppage, from, to); + block_commit_write(&folio->page, from, to); } } From c4a8d016073b66080c16d7f62bc90db409a6eb98 Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:40 +0000 Subject: [PATCH 442/504] ocfs2: use a folio in ocfs2_prepare_page_for_write() Update to the new APIs. Removes a few page->folio conversions. Link: https://lkml.kernel.org/r/20241205171653.3179945-13-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/aops.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 47c48f4e707c..b23decd51882 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -963,10 +963,10 @@ static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno, /* treat the write as new if the a hole/lseek spanned across * the page boundary. */ - new = new | ((i_size_read(inode) <= page_offset(page)) && - (page_offset(page) <= user_pos)); + new = new | ((i_size_read(inode) <= folio_pos(folio)) && + (folio_pos(folio) <= user_pos)); - if (page == &wc->w_target_folio->page) { + if (folio == wc->w_target_folio) { map_from = user_pos & (PAGE_SIZE - 1); map_to = map_from + user_len; @@ -990,7 +990,7 @@ static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno, } } else { /* - * If we haven't allocated the new page yet, we + * If we haven't allocated the new folio yet, we * shouldn't be writing it out without copying user * data. This is likely a math error from the caller. */ @@ -1008,20 +1008,20 @@ static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno, } /* - * Parts of newly allocated pages need to be zero'd. + * Parts of newly allocated folios need to be zero'd. 
* * Above, we have also rewritten 'to' and 'from' - as far as * the rest of the function is concerned, the entire cluster - * range inside of a page needs to be written. + * range inside of a folio needs to be written. * - * We can skip this if the page is up to date - it's already + * We can skip this if the folio is uptodate - it's already * been zero'd from being read in as a hole. */ - if (new && !PageUptodate(page)) + if (new && !folio_test_uptodate(folio)) ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb), cpos, user_data_from, user_data_to); - flush_dcache_page(page); + flush_dcache_folio(folio); out: return ret; From 40ff693e7732ca4fe2087748f9e8e3dc05d74103 Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:41 +0000 Subject: [PATCH 443/504] ocfs2: use a folio in ocfs2_map_and_dirty_page() Convert the incoming page to a folio and use it throughout the function. Removes a couple of calls to compound_head(). Link: https://lkml.kernel.org/r/20241205171653.3179945-14-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/alloc.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index b3fa953e5637..f65e8f1201fc 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -6812,8 +6812,9 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, unsigned int from, unsigned int to, struct page *page, int zero, u64 *phys) { + struct folio *folio = page_folio(page); int ret, partial = 0; - loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from; + loff_t start_byte = folio_pos(folio) + from; loff_t length = to - from; ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0); @@ -6821,14 +6822,14 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, mlog_errno(ret); if (zero) - zero_user_segment(page, from, to); + folio_zero_segment(folio, from, to); /* * Need to set the buffers we zero'd into uptodate * here if they aren't - ocfs2_map_page_blocks() * might've skipped some */ - ret = walk_page_buffers(handle, page_buffers(page), + ret = walk_page_buffers(handle, folio_buffers(folio), from, to, &partial, ocfs2_zero_func); if (ret < 0) @@ -6841,9 +6842,9 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, } if (!partial) - SetPageUptodate(page); + folio_mark_uptodate(folio); - flush_dcache_page(page); + flush_dcache_folio(folio); } static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, From 7b154615f845b169f592bca3ef5468284e08030e Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:42 +0000 Subject: [PATCH 444/504] ocfs2: convert ocfs2_map_page_blocks() to ocfs2_map_folio_blocks() All callers now have a folio, so pass it in instead of converting folio->page->folio. 
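The new prototype and a typical call site, as taken from the hunks below:

        int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno,
                        struct inode *inode, unsigned int from,
                        unsigned int to, int new);

        /* e.g. in ocfs2_map_and_dirty_page(): */
        ret = ocfs2_map_folio_blocks(folio, phys, inode, from, to, 0);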
Link: https://lkml.kernel.org/r/20241205171653.3179945-15-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/alloc.c | 2 +- fs/ocfs2/aops.c | 18 ++++++++---------- fs/ocfs2/aops.h | 2 +- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index f65e8f1201fc..fbadfe53a93f 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -6817,7 +6817,7 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, loff_t start_byte = folio_pos(folio) + from; loff_t length = to - from; - ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0); + ret = ocfs2_map_folio_blocks(folio, phys, inode, from, to, 0); if (ret) mlog_errno(ret); diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index b23decd51882..68e93af85d1d 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -560,7 +560,7 @@ static void ocfs2_clear_page_regions(struct page *page, /* * Nonsparse file systems fully allocate before we get to the write * code. This prevents ocfs2_write() from tagging the write as an - * allocating one, which means ocfs2_map_page_blocks() might try to + * allocating one, which means ocfs2_map_folio_blocks() might try to * read-in the blocks at the tail of our file. Avoid reading them by * testing i_size against each block offset. */ @@ -585,11 +585,10 @@ static int ocfs2_should_read_blk(struct inode *inode, struct folio *folio, * * This will also skip zeroing, which is handled externally. 
*/ -int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, +int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno, struct inode *inode, unsigned int from, unsigned int to, int new) { - struct folio *folio = page_folio(page); int ret = 0; struct buffer_head *head, *bh, *wait[2], **wait_bh = wait; unsigned int block_end, block_start; @@ -971,12 +970,11 @@ static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno, map_to = map_from + user_len; if (new) - ret = ocfs2_map_page_blocks(page, p_blkno, inode, - cluster_start, cluster_end, - new); + ret = ocfs2_map_folio_blocks(folio, p_blkno, inode, + cluster_start, cluster_end, new); else - ret = ocfs2_map_page_blocks(page, p_blkno, inode, - map_from, map_to, new); + ret = ocfs2_map_folio_blocks(folio, p_blkno, inode, + map_from, map_to, new); if (ret) { mlog_errno(ret); goto out; @@ -999,8 +997,8 @@ static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno, map_from = cluster_start; map_to = cluster_end; - ret = ocfs2_map_page_blocks(page, p_blkno, inode, - cluster_start, cluster_end, new); + ret = ocfs2_map_folio_blocks(folio, p_blkno, inode, + cluster_start, cluster_end, new); if (ret) { mlog_errno(ret); goto out; diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index dcf4b0dc82c4..17ca359c6051 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -13,7 +13,7 @@ handle_t *ocfs2_start_walk_page_trans(struct inode *inode, unsigned from, unsigned to); -int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno, +int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno, struct inode *inode, unsigned int from, unsigned int to, int new); From 3afd4f026c2d95333e58486fc46be06d6e4dd743 Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:43 +0000 Subject: [PATCH 445/504] ocfs2: convert ocfs2_clear_page_regions() to ocfs2_clear_folio_regions() Pass in and use the folio instead of its page. 
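The mapping change, condensed from the hunk below with the from/to handling elided: kmap_local_folio(folio, 0) maps the start of the folio, and the returned address is dropped with kunmap_local(), replacing the kmap_atomic()/kunmap_atomic() pair:

        kaddr = kmap_local_folio(folio, 0);
        ...
        memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
        kunmap_local(kaddr);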
Link: https://lkml.kernel.org/r/20241205171653.3179945-16-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/aops.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 68e93af85d1d..ea8dc82cfe98 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -534,7 +534,7 @@ static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb, * * from == to == 0 is code for "zero the entire cluster region" */ -static void ocfs2_clear_page_regions(struct page *page, +static void ocfs2_clear_folio_regions(struct folio *folio, struct ocfs2_super *osb, u32 cpos, unsigned from, unsigned to) { @@ -543,7 +543,7 @@ static void ocfs2_clear_page_regions(struct page *page, ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end); - kaddr = kmap_atomic(page); + kaddr = kmap_local_folio(folio, 0); if (from || to) { if (from > cluster_start) @@ -554,7 +554,7 @@ static void ocfs2_clear_page_regions(struct page *page, memset(kaddr + cluster_start, 0, cluster_end - cluster_start); } - kunmap_atomic(kaddr); + kunmap_local(kaddr); } /* @@ -950,7 +950,6 @@ static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno, struct ocfs2_write_ctxt *wc, struct folio *folio, u32 cpos, loff_t user_pos, unsigned user_len, int new) { - struct page *page = &folio->page; int ret; unsigned int map_from = 0, map_to = 0; unsigned int cluster_start, cluster_end; @@ -1016,7 +1015,7 @@ static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno, * been zero'd from being read in as a hole. */ if (new && !folio_test_uptodate(folio)) - ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb), + ocfs2_clear_folio_regions(folio, OCFS2_SB(inode->i_sb), cpos, user_data_from, user_data_to); flush_dcache_folio(folio); From 60eec2776eb006fee596267aae8c569bcc502eb4 Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:44 +0000 Subject: [PATCH 446/504] ocfs2: use an array of folios instead of an array of pages The ocfs2_zero_cluster_folios() / ocfs2_grab_folios() / ocfs2_grab_eof_folios() family of functions pass around an array of pages. Convert them to pass around an array of folios. This removes the last caller of ocfs2_unlock_and_free_pages(), so delete it. 
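The grab loop moves from find_or_create_page() to __filemap_get_folio(), which signals failure with an ERR_PTR() instead of NULL and may hand back a large folio covering several indices. A minimal sketch of the resulting loop shape, with a hypothetical helper name and the unlock/put cleanup left to the caller:

  #include <linux/pagemap.h>
  #include <linux/err.h>

  /*
   * Sketch only, not the ocfs2 code: grab and lock the folios backing
   * page indices [index, end), storing them in @folios.  On error the
   * caller is expected to unlock and put the *num folios already taken.
   */
  static int grab_folio_range(struct address_space *mapping, pgoff_t index,
                              pgoff_t end, struct folio **folios, int *num)
  {
          int n = 0;

          do {
                  /* failure is reported as an ERR_PTR(), not NULL */
                  folios[n] = __filemap_get_folio(mapping, index,
                                  FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                  GFP_NOFS);
                  if (IS_ERR(folios[n])) {
                          *num = n;
                          return PTR_ERR(folios[n]);
                  }
                  /* a large folio spans several indices; skip past all of them */
                  index = folio_next_index(folios[n]);
                  n++;
          } while (index < end);

          *num = n;
          return 0;
  }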
Link: https://lkml.kernel.org/r/20241205171653.3179945-17-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/alloc.c | 98 ++++++++++++++++++++++++------------------------ fs/ocfs2/alloc.h | 2 - fs/ocfs2/aops.c | 13 ------- fs/ocfs2/aops.h | 1 - 4 files changed, 49 insertions(+), 65 deletions(-) diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index fbadfe53a93f..0f6ddb534a44 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -6847,87 +6847,87 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, flush_dcache_folio(folio); } -static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start, - loff_t end, struct page **pages, - int numpages, u64 phys, handle_t *handle) +static void ocfs2_zero_cluster_folios(struct inode *inode, loff_t start, + loff_t end, struct folio **folios, int numfolios, + u64 phys, handle_t *handle) { int i; - struct page *page; unsigned int from, to = PAGE_SIZE; struct super_block *sb = inode->i_sb; BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb))); - if (numpages == 0) + if (numfolios == 0) goto out; to = PAGE_SIZE; - for(i = 0; i < numpages; i++) { - page = pages[i]; + for (i = 0; i < numfolios; i++) { + struct folio *folio = folios[i]; from = start & (PAGE_SIZE - 1); - if ((end >> PAGE_SHIFT) == page->index) + if ((end >> PAGE_SHIFT) == folio->index) to = end & (PAGE_SIZE - 1); BUG_ON(from > PAGE_SIZE); BUG_ON(to > PAGE_SIZE); - ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1, + ocfs2_map_and_dirty_page(inode, handle, from, to, &folio->page, 1, &phys); - start = (page->index + 1) << PAGE_SHIFT; + start = (folio->index + 1) << PAGE_SHIFT; } out: - if (pages) - ocfs2_unlock_and_free_pages(pages, numpages); + if (folios) + ocfs2_unlock_and_free_folios(folios, numfolios); } -int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, - struct page **pages, int *num) +static int ocfs2_grab_folios(struct inode *inode, loff_t start, loff_t end, + struct folio **folios, int *num) { - int numpages, ret = 0; + int numfolios, ret = 0; struct address_space *mapping = inode->i_mapping; unsigned long index; loff_t last_page_bytes; BUG_ON(start > end); - numpages = 0; + numfolios = 0; last_page_bytes = PAGE_ALIGN(end); index = start >> PAGE_SHIFT; do { - pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS); - if (!pages[numpages]) { - ret = -ENOMEM; + folios[numfolios] = __filemap_get_folio(mapping, index, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); + if (IS_ERR(folios[numfolios])) { + ret = PTR_ERR(folios[numfolios]); mlog_errno(ret); goto out; } - numpages++; - index++; + index = folio_next_index(folios[numfolios]); + numfolios++; } while (index < (last_page_bytes >> PAGE_SHIFT)); out: if (ret != 0) { - if (pages) - ocfs2_unlock_and_free_pages(pages, numpages); - numpages = 0; + if (folios) + ocfs2_unlock_and_free_folios(folios, numfolios); + numfolios = 0; } - *num = numpages; + *num = numfolios; return ret; } -static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end, - struct page **pages, int *num) +static int ocfs2_grab_eof_folios(struct inode *inode, loff_t start, loff_t end, + struct folio **folios, int *num) { 
struct super_block *sb = inode->i_sb; BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits != (end - 1) >> OCFS2_SB(sb)->s_clustersize_bits); - return ocfs2_grab_pages(inode, start, end, pages, num); + return ocfs2_grab_folios(inode, start, end, folios, num); } /* @@ -6941,8 +6941,8 @@ static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end, int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, u64 range_start, u64 range_end) { - int ret = 0, numpages; - struct page **pages = NULL; + int ret = 0, numfolios; + struct folio **folios = NULL; u64 phys; unsigned int ext_flags; struct super_block *sb = inode->i_sb; @@ -6955,17 +6955,17 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, return 0; /* - * Avoid zeroing pages fully beyond current i_size. It is pointless as - * underlying blocks of those pages should be already zeroed out and + * Avoid zeroing folios fully beyond current i_size. It is pointless as + * underlying blocks of those folios should be already zeroed out and * page writeback will skip them anyway. */ range_end = min_t(u64, range_end, i_size_read(inode)); if (range_start >= range_end) return 0; - pages = kcalloc(ocfs2_pages_per_cluster(sb), - sizeof(struct page *), GFP_NOFS); - if (pages == NULL) { + folios = kcalloc(ocfs2_pages_per_cluster(sb), + sizeof(struct folio *), GFP_NOFS); + if (folios == NULL) { ret = -ENOMEM; mlog_errno(ret); goto out; @@ -6986,18 +6986,18 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN) goto out; - ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages, - &numpages); + ret = ocfs2_grab_eof_folios(inode, range_start, range_end, folios, + &numfolios); if (ret) { mlog_errno(ret); goto out; } - ocfs2_zero_cluster_pages(inode, range_start, range_end, pages, - numpages, phys, handle); + ocfs2_zero_cluster_folios(inode, range_start, range_end, folios, + numfolios, phys, handle); /* - * Initiate writeout of the pages we zero'd here. We don't + * Initiate writeout of the folios we zero'd here. We don't * wait on them - the truncate_inode_pages() call later will * do that for us. */ @@ -7007,7 +7007,7 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle, mlog_errno(ret); out: - kfree(pages); + kfree(folios); return ret; } @@ -7060,7 +7060,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di) int ocfs2_convert_inline_data_to_extents(struct inode *inode, struct buffer_head *di_bh) { - int ret, has_data, num_pages = 0; + int ret, has_data, num_folios = 0; int need_free = 0; u32 bit_off, num; handle_t *handle; @@ -7069,7 +7069,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; struct ocfs2_alloc_context *data_ac = NULL; - struct page *page = NULL; + struct folio *folio = NULL; struct ocfs2_extent_tree et; int did_quota = 0; @@ -7124,8 +7124,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, */ block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); - ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page, - &num_pages); + ret = ocfs2_grab_eof_folios(inode, 0, page_end, &folio, + &num_folios); if (ret) { mlog_errno(ret); need_free = 1; @@ -7136,14 +7136,14 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, * This should populate the 1st page for us and mark * it up to date. 
*/ - ret = ocfs2_read_inline_data(inode, page, di_bh); + ret = ocfs2_read_inline_data(inode, &folio->page, di_bh); if (ret) { mlog_errno(ret); need_free = 1; goto out_unlock; } - ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0, + ocfs2_map_and_dirty_page(inode, handle, 0, page_end, &folio->page, 0, &phys); } @@ -7175,8 +7175,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, } out_unlock: - if (page) - ocfs2_unlock_and_free_pages(&page, num_pages); + if (folio) + ocfs2_unlock_and_free_folios(&folio, num_folios); out_commit: if (ret < 0 && did_quota) diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 4af7abaa6e40..6a2aca1a062e 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -254,8 +254,6 @@ static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec) return !rec->e_leaf_clusters; } -int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end, - struct page **pages, int *num); void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, unsigned int from, unsigned int to, struct page *page, int zero, u64 *phys); diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index ea8dc82cfe98..61fecfe7dce6 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -783,19 +783,6 @@ void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios) } } -void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages) -{ - int i; - - for(i = 0; i < num_pages; i++) { - if (pages[i]) { - unlock_page(pages[i]); - mark_page_accessed(pages[i]); - put_page(pages[i]); - } - } -} - static void ocfs2_unlock_folios(struct ocfs2_write_ctxt *wc) { int i; diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 17ca359c6051..cf8d202d9a8b 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -18,7 +18,6 @@ int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno, unsigned int to, int new); void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios); -void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages); int walk_page_buffers( handle_t *handle, struct buffer_head *head, From 1ca241dab69bb0474e368fc6589971aa32210e19 Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:45 +0000 Subject: [PATCH 447/504] ocfs2: convert ocfs2_duplicate_clusters_by_page() to use a folio Retrieve folios from the page cache, not pages, and use a folio throughout this function. Removes seven calls to compound_head(). 
Link: https://lkml.kernel.org/r/20241205171653.3179945-18-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/refcounttree.c | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 73caf991ede5..1d7325d4411f 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2902,7 +2902,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, int ret = 0, partial; struct super_block *sb = inode->i_sb; u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); - struct page *page; pgoff_t page_index; unsigned int from, to; loff_t offset, end, map_end; @@ -2921,6 +2920,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, end = i_size_read(inode); while (offset < end) { + struct folio *folio; page_index = offset >> PAGE_SHIFT; map_end = ((loff_t)page_index + 1) << PAGE_SHIFT; if (map_end > end) @@ -2933,9 +2933,10 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, to = map_end & (PAGE_SIZE - 1); retry: - page = find_or_create_page(mapping, page_index, GFP_NOFS); - if (!page) { - ret = -ENOMEM; + folio = __filemap_get_folio(mapping, page_index, + FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); + if (IS_ERR(folio)) { + ret = PTR_ERR(folio); mlog_errno(ret); break; } @@ -2945,9 +2946,9 @@ retry: * page, so write it back. */ if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) { - if (PageDirty(page)) { - unlock_page(page); - put_page(page); + if (folio_test_dirty(folio)) { + folio_unlock(folio); + folio_put(folio); ret = filemap_write_and_wait_range(mapping, offset, map_end - 1); @@ -2955,9 +2956,7 @@ retry: } } - if (!PageUptodate(page)) { - struct folio *folio = page_folio(page); - + if (!folio_test_uptodate(folio)) { ret = block_read_full_folio(folio, ocfs2_get_block); if (ret) { mlog_errno(ret); @@ -2966,8 +2965,8 @@ retry: folio_lock(folio); } - if (page_has_buffers(page)) { - ret = walk_page_buffers(handle, page_buffers(page), + if (folio_buffers(folio)) { + ret = walk_page_buffers(handle, folio_buffers(folio), from, to, &partial, ocfs2_clear_cow_buffer); if (ret) { @@ -2978,12 +2977,11 @@ retry: ocfs2_map_and_dirty_page(inode, handle, from, to, - page, 0, &new_block); - mark_page_accessed(page); + &folio->page, 0, &new_block); + folio_mark_accessed(folio); unlock: - unlock_page(page); - put_page(page); - page = NULL; + folio_unlock(folio); + folio_put(folio); offset = map_end; if (ret) break; From d013af270886678806773223167d91fb6406be66 Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:46 +0000 Subject: [PATCH 448/504] ocfs2: convert ocfs2_map_and_dirty_page() to ocfs2_map_and_dirty_folio() All callers now have a folio, so skip the folio->page->folio conversion. 
Link: https://lkml.kernel.org/r/20241205171653.3179945-19-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/alloc.c | 17 ++++++++--------- fs/ocfs2/alloc.h | 6 +++--- fs/ocfs2/refcounttree.c | 5 ++--- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 0f6ddb534a44..88d43fb5bf7d 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -6808,11 +6808,10 @@ static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh) return 0; } -void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, - unsigned int from, unsigned int to, - struct page *page, int zero, u64 *phys) +void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle, + size_t from, size_t to, struct folio *folio, int zero, + u64 *phys) { - struct folio *folio = page_folio(page); int ret, partial = 0; loff_t start_byte = folio_pos(folio) + from; loff_t length = to - from; @@ -6871,8 +6870,8 @@ static void ocfs2_zero_cluster_folios(struct inode *inode, loff_t start, BUG_ON(from > PAGE_SIZE); BUG_ON(to > PAGE_SIZE); - ocfs2_map_and_dirty_page(inode, handle, from, to, &folio->page, 1, - &phys); + ocfs2_map_and_dirty_folio(inode, handle, from, to, folio, 1, + &phys); start = (folio->index + 1) << PAGE_SHIFT; } @@ -7120,7 +7119,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, /* * Save two copies, one for insert, and one that can - * be changed by ocfs2_map_and_dirty_page() below. + * be changed by ocfs2_map_and_dirty_folio() below. */ block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off); @@ -7143,8 +7142,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, goto out_unlock; } - ocfs2_map_and_dirty_page(inode, handle, 0, page_end, &folio->page, 0, - &phys); + ocfs2_map_and_dirty_folio(inode, handle, 0, page_end, folio, 0, + &phys); } spin_lock(&oi->ip_lock); diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 6a2aca1a062e..1c0c83362904 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h @@ -254,9 +254,9 @@ static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec) return !rec->e_leaf_clusters; } -void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle, - unsigned int from, unsigned int to, - struct page *page, int zero, u64 *phys); +void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle, + size_t from, size_t to, struct folio *folio, int zero, + u64 *phys); /* * Structures which describe a path through a btree, and functions to * manipulate them. 
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 1d7325d4411f..8f732742b26e 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c @@ -2975,9 +2975,8 @@ retry: } } - ocfs2_map_and_dirty_page(inode, - handle, from, to, - &folio->page, 0, &new_block); + ocfs2_map_and_dirty_folio(inode, handle, from, to, + folio, 0, &new_block); folio_mark_accessed(folio); unlock: folio_unlock(folio); From 018881fd60c53dc032c63f7ab19f3eb9353d68ff Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" <willy@infradead.org> Date: Thu, 5 Dec 2024 17:16:47 +0000 Subject: [PATCH 449/504] ocfs2: convert ocfs2_read_inline_data() to take a folio All callers now have a folio, so pass it in. We can use folio_fill_tail() instead of open-coding it. Saves a call to compound_head(). Link: https://lkml.kernel.org/r/20241205171653.3179945-20-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Cc: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/alloc.c | 2 +- fs/ocfs2/aops.c | 20 ++++++-------------- fs/ocfs2/aops.h | 2 +- 3 files changed, 8 insertions(+), 16 deletions(-) diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 88d43fb5bf7d..453be2d2c124 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -7135,7 +7135,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode, * This should populate the 1st page for us and mark * it up to date. */ - ret = ocfs2_read_inline_data(inode, &folio->page, di_bh); + ret = ocfs2_read_inline_data(inode, folio, di_bh); if (ret) { mlog_errno(ret); need_free = 1; diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 61fecfe7dce6..5bbeb6fbb1ac 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -215,10 +215,9 @@ bail: return err; } -int ocfs2_read_inline_data(struct inode *inode, struct page *page, +int ocfs2_read_inline_data(struct inode *inode, struct folio *folio, struct buffer_head *di_bh) { - void *kaddr; loff_t size; struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; @@ -230,7 +229,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page, size = i_size_read(inode); - if (size > PAGE_SIZE || + if (size > folio_size(folio) || size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) { ocfs2_error(inode->i_sb, "Inode %llu has with inline data has bad size: %Lu\n", @@ -239,15 +238,8 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page, return -EROFS; } - kaddr = kmap_atomic(page); - if (size) - memcpy(kaddr, di->id2.i_data.id_data, size); - /* Clear the remaining part of the page */ - memset(kaddr + size, 0, PAGE_SIZE - size); - flush_dcache_page(page); - kunmap_atomic(kaddr); - - SetPageUptodate(page); + folio_fill_tail(folio, 0, di->id2.i_data.id_data, size); + folio_mark_uptodate(folio); return 0; } @@ -266,7 +258,7 @@ static int ocfs2_readpage_inline(struct inode *inode, struct folio *folio) goto out; } - ret = ocfs2_read_inline_data(inode, &folio->page, di_bh); + ret = ocfs2_read_inline_data(inode, folio, di_bh); out: folio_unlock(folio); @@ -1506,7 +1498,7 @@ static int ocfs2_write_begin_inline(struct address_space *mapping, ocfs2_set_inode_data_inline(inode, di); if (!folio_test_uptodate(folio)) { - ret = ocfs2_read_inline_data(inode, &folio->page, wc->w_di_bh); + ret = 
ocfs2_read_inline_data(inode, folio, wc->w_di_bh); if (ret) { ocfs2_commit_trans(osb, handle); diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index cf8d202d9a8b..6f25066e5756 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -41,7 +41,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping, struct folio **foliop, void **fsdata, struct buffer_head *di_bh, struct folio *mmap_folio); -int ocfs2_read_inline_data(struct inode *inode, struct page *page, +int ocfs2_read_inline_data(struct inode *inode, struct folio *folio, struct buffer_head *di_bh); int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size); From 06a96f1de54622986b496e4556cd9a2364546345 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" <willy@infradead.org> Date: Thu, 5 Dec 2024 17:16:48 +0000 Subject: [PATCH 450/504] ocfs2: use a folio in ocfs2_fast_symlink_read_folio() Use folio_end_read() instead of SetPageUptodate() and unlock_page(). Use memcpy_to_folio() instead of open-coding a kmap_atomic() sequence. Link: https://lkml.kernel.org/r/20241205171653.3179945-21-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Cc: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/symlink.c | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c index f5cf2255dc09..ad8be3300b49 100644 --- a/fs/ocfs2/symlink.c +++ b/fs/ocfs2/symlink.c @@ -54,13 +54,11 @@ static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio) { - struct page *page = &folio->page; - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; struct buffer_head *bh = NULL; int status = ocfs2_read_inode_block(inode, &bh); struct ocfs2_dinode *fe; const char *link; - void *kaddr; size_t len; if (status < 0) { @@ -72,12 +70,9 @@ static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio) link = (char *) fe->id2.i_symlink; /* will be less than a page size */ len = strnlen(link, ocfs2_fast_symlink_chars(inode->i_sb)); - kaddr = kmap_atomic(page); - memcpy(kaddr, link, len + 1); - kunmap_atomic(kaddr); - SetPageUptodate(page); + memcpy_to_folio(folio, 0, link, len + 1); out: - unlock_page(page); + folio_end_read(folio, status == 0); brelse(bh); return status; } From 1870cb11915724f4c021ba0bcd575786f332da56 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" <willy@infradead.org> Date: Thu, 5 Dec 2024 17:16:49 +0000 Subject: [PATCH 451/504] ocfs2: remove ocfs2_start_walk_page_trans() prototype This function no longer exists. 
Link: https://lkml.kernel.org/r/20241205171653.3179945-22-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Cc: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/aops.h | 5 ----- 1 file changed, 5 deletions(-) diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index 6f25066e5756..114efc9111e4 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h @@ -8,11 +8,6 @@ #include <linux/fs.h> -handle_t *ocfs2_start_walk_page_trans(struct inode *inode, - struct page *page, - unsigned from, - unsigned to); - int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno, struct inode *inode, unsigned int from, unsigned int to, int new); From c24bb3bf217118f36d26a0d0e2202b7aea48ae57 Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:50 +0000 Subject: [PATCH 452/504] ocfs2: support large folios in ocfs2_zero_cluster_folios() Remove assumptions that a folio is one page in size. Link: https://lkml.kernel.org/r/20241205171653.3179945-23-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/alloc.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 453be2d2c124..5cf698785fae 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -6851,7 +6851,6 @@ static void ocfs2_zero_cluster_folios(struct inode *inode, loff_t start, u64 phys, handle_t *handle) { int i; - unsigned int from, to = PAGE_SIZE; struct super_block *sb = inode->i_sb; BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb))); @@ -6859,21 +6858,18 @@ static void ocfs2_zero_cluster_folios(struct inode *inode, loff_t start, if (numfolios == 0) goto out; - to = PAGE_SIZE; for (i = 0; i < numfolios; i++) { struct folio *folio = folios[i]; + size_t to = folio_size(folio); + size_t from = offset_in_folio(folio, start); - from = start & (PAGE_SIZE - 1); - if ((end >> PAGE_SHIFT) == folio->index) - to = end & (PAGE_SIZE - 1); - - BUG_ON(from > PAGE_SIZE); - BUG_ON(to > PAGE_SIZE); + if (to > end - folio_pos(folio)) + to = end - folio_pos(folio); ocfs2_map_and_dirty_folio(inode, handle, from, to, folio, 1, &phys); - start = (folio->index + 1) << PAGE_SHIFT; + start = folio_next_index(folio) << PAGE_SHIFT; } out: if (folios) From b581292dbebadda02aa335707f3cd817dbefc197 Mon Sep 17 00:00:00 2001 From: Mark Tinguely <mark.tinguely@oracle.com> Date: Thu, 5 Dec 2024 17:16:51 +0000 Subject: [PATCH 453/504] ocfs2: support large folios in ocfs2_write_zero_page() Remove assumptions that a folio is PAGE_SIZE. 
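The conversion swaps the open-coded '& (PAGE_SIZE - 1)' arithmetic for offset_in_folio() and folio_size(), which stay correct when the folio spans more than one page. A minimal sketch of the bounds computation, with a hypothetical helper name and assuming a locked folio that contains both offsets:

  #include <linux/mm.h>

  /*
   * Sketch only: zeroing bounds within @folio for the byte range
   * [abs_from, abs_to).  A folio-aligned abs_to wraps to 0 and is
   * taken to mean "up to the end of the folio".
   */
  static void folio_zero_bounds(struct folio *folio, loff_t abs_from,
                                loff_t abs_to, size_t *zero_from,
                                size_t *zero_to)
  {
          *zero_from = offset_in_folio(folio, abs_from);
          *zero_to = offset_in_folio(folio, abs_to);
          if (!*zero_to)
                  *zero_to = folio_size(folio);
  }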
Link: https://lkml.kernel.org/r/20241205171653.3179945-24-willy@infradead.org Signed-off-by: Mark Tinguely <mark.tinguely@oracle.com> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/file.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 957ced628eb1..e54f2c4b5a90 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -782,11 +782,11 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from, goto out_commit_trans; } - /* Get the offsets within the page that we want to zero */ - zero_from = abs_from & (PAGE_SIZE - 1); - zero_to = abs_to & (PAGE_SIZE - 1); + /* Get the offsets within the folio that we want to zero */ + zero_from = offset_in_folio(folio, abs_from); + zero_to = offset_in_folio(folio, abs_to); if (!zero_to) - zero_to = PAGE_SIZE; + zero_to = folio_size(folio); trace_ocfs2_write_zero_page( (unsigned long long)OCFS2_I(inode)->ip_blkno, From 0bda6453f8efd8fa7ea0c4434e5993977a70edfc Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" <willy@infradead.org> Date: Mon, 16 Dec 2024 16:12:50 +0000 Subject: [PATCH 454/504] iov_iter: remove setting of page->index Nothing actually checks page->index, so just remove it. Link: https://lkml.kernel.org/r/20241216161253.37687-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: David Howells <dhowells@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- lib/kunit_iov_iter.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c index 13e15687675a..497d86e039f6 100644 --- a/lib/kunit_iov_iter.c +++ b/lib/kunit_iov_iter.c @@ -63,9 +63,6 @@ static void *__init iov_kunit_create_buffer(struct kunit *test, KUNIT_ASSERT_EQ(test, got, npages); } - for (int i = 0; i < npages; i++) - pages[i]->index = i; - buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer); From 8ead706d6b121342c8a572e5934e0ca103a1c1b0 Mon Sep 17 00:00:00 2001 From: Martin Kepplinger <martink@posteo.de> Date: Wed, 18 Dec 2024 12:36:38 +0000 Subject: [PATCH 455/504] init: fix removal warning for deprecated initrd loading This won't be removed in 2021, no matter how hard we try. 
Link: https://lkml.kernel.org/r/20241218123638.34907-1-martink@posteo.de Signed-off-by: Martin Kepplinger <martink@posteo.de> Cc: Christoph Hellwig <hch@lst.de> Cc: Joel Granados <joel.granados@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- init/do_mounts_initrd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index 22c7f41ff642..f86ef92a6c46 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -89,7 +89,7 @@ static void __init handle_initrd(char *root_device_name) extern char *envp_init[]; int error; - pr_warn("using deprecated initrd support, will be removed in 2021.\n"); + pr_warn("using deprecated initrd support, will be removed soon.\n"); real_root_dev = new_encode_dev(ROOT_DEV); create_dev("/dev/root.old", Root_RAM0); From 5618ee438d5447b6e474f925ada0266fafe959f7 Mon Sep 17 00:00:00 2001 From: Ariel Otilibili <ariel.otilibili-anieli@eurecom.fr> Date: Thu, 19 Dec 2024 10:21:12 +0100 Subject: [PATCH 456/504] lib/inflate.c: remove dead code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a follow up from a discussion in Xen: The if-statement tests that `res` is non-zero; meaning the case zero is never reached. Link: https://lore.kernel.org/all/7587b503-b2ca-4476-8dc9-e9683d4ca5f0@suse.com/ Link: https://lkml.kernel.org/r/20241219092615.644642-2-ariel.otilibili-anieli@eurecom.fr Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Ariel Otilibili <ariel.otilibili-anieli@eurecom.fr> Suggested-by: Jan Beulich <jbeulich@suse.com> Cc: Andrew Cooper <andrew.cooper3@citrix.com> Cc: Anthony PERARD <anthony.perard@vates.tech> Cc: Michal Orzel <michal.orzel@amd.com> Cc: Julien Grall <julien@xen.org> Cc: Roger Pau Monné <roger.pau@citrix.com> Cc: Stefano Stabellini <sstabellini@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- lib/inflate.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/inflate.c b/lib/inflate.c index fbaf03c1748d..eab886baa1b4 100644 --- a/lib/inflate.c +++ b/lib/inflate.c @@ -1257,8 +1257,6 @@ static int INIT gunzip(void) /* Decompress */ if ((res = inflate())) { switch (res) { - case 0: - break; case 1: error("invalid compressed format (err=1)"); break; From b0ce6e7b72791402c599b1028d91dd233a7be8f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dominik=20Karol=20Pi=C4=85tkowski?= <dominik.karol.piatkowski@protonmail.com> Date: Fri, 20 Dec 2024 18:12:12 +0000 Subject: [PATCH 457/504] kasan: fix typo in kasan_poison_new_object documentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix presumed copy-paste typo of kasan_poison_new_object documentation referring to kasan_unpoison_new_object. No functional changes. 
Link: https://lkml.kernel.org/r/20241220181205.9663-1-dominik.karol.piatkowski@protonmail.com Fixes: 1ce9a0523938 ("kasan: rename and document kasan_(un)poison_object_data") ta") Signed-off-by: Dominik Karol Piątkowski <dominik.karol.piatkowski@protonmail.com> Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com> Reviewed-by: Alexander Potapenko <glider@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- include/linux/kasan.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 6bbfc8aa42e8..56465af31044 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -153,7 +153,7 @@ static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache, void __kasan_poison_new_object(struct kmem_cache *cache, void *object); /** - * kasan_unpoison_new_object - Repoison a new slab object. + * kasan_poison_new_object - Repoison a new slab object. * @cache: Cache the object belong to. * @object: Pointer to the object. * From fd3918865f1f1868c8625e37ecd5f3f5f07d2228 Mon Sep 17 00:00:00 2001 From: Yafang Shao <laoar.shao@gmail.com> Date: Thu, 19 Dec 2024 10:34:48 +0800 Subject: [PATCH 458/504] kernel: remove get_task_comm() and print task comm directly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "Remove get_task_comm() and print task comm directly", v2. Since task->comm is guaranteed to be NUL-terminated, we can print it directly without the need to copy it into a separate buffer. This simplifies the code and avoids unnecessary operations. This patch (of 5): Since task->comm is guaranteed to be NUL-terminated, we can print it directly without the need to copy it into a separate buffer. This simplifies the code and avoids unnecessary operations. Link: https://lkml.kernel.org/r/20241219023452.69907-1-laoar.shao@gmail.com Link: https://lkml.kernel.org/r/20241219023452.69907-2-laoar.shao@gmail.com Signed-off-by: Yafang Shao <laoar.shao@gmail.com> Cc: Serge Hallyn <serge@hallyn.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Darren Hart <dvhart@infradead.org> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: "André Almeida" <andrealmeid@igalia.com> Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com> Cc: Borislav Petkov (AMD) <bp@alien8.de> Cc: Kalle Valo <kvalo@kernel.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Petr Mladek <pmladek@suse.com> Cc: Danilo Krummrich <dakr@redhat.com> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: David Airlie <airlied@gmail.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: "H. 
Peter Anvin" <hpa@zytor.com> Cc: James Morris <jmorris@namei.org> Cc: Jani Nikula <jani.nikula@linux.intel.com> Cc: Jiri Slaby <jirislaby@kernel.org> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Cc: Karol Herbst <kherbst@redhat.com> Cc: Kees Cook <kees@kernel.org> Cc: Lyude Paul <lyude@redhat.com> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Paul Moore <paul@paul-moore.com> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Tvrtko Ursulin <tursulin@ursulin.net> Cc: Vineet Gupta <vgupta@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- kernel/capability.c | 8 ++------ kernel/futex/waitwake.c | 3 +-- 2 files changed, 3 insertions(+), 8 deletions(-) diff --git a/kernel/capability.c b/kernel/capability.c index dac4df77e376..e089d2628c29 100644 --- a/kernel/capability.c +++ b/kernel/capability.c @@ -38,10 +38,8 @@ __setup("no_file_caps", file_caps_disable); static void warn_legacy_capability_use(void) { - char name[sizeof(current->comm)]; - pr_info_once("warning: `%s' uses 32-bit capabilities (legacy support in use)\n", - get_task_comm(name, current)); + current->comm); } /* @@ -62,10 +60,8 @@ static void warn_legacy_capability_use(void) static void warn_deprecated_v2(void) { - char name[sizeof(current->comm)]; - pr_info_once("warning: `%s' uses deprecated v2 capabilities in a way that may be insecure\n", - get_task_comm(name, current)); + current->comm); } /* diff --git a/kernel/futex/waitwake.c b/kernel/futex/waitwake.c index 3a10375d9521..eb86a7ade06a 100644 --- a/kernel/futex/waitwake.c +++ b/kernel/futex/waitwake.c @@ -210,13 +210,12 @@ static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) { if (oparg < 0 || oparg > 31) { - char comm[sizeof(current->comm)]; /* * kill this print and return -EINVAL when userspace * is sane again */ pr_info_ratelimited("futex_wake_op: %s tries to shift op by %d; fix this program\n", - get_task_comm(comm, current), oparg); + current->comm, oparg); oparg &= 31; } oparg = 1 << oparg; From 6487763c56cea02706fe2b4bbad2833cf1f2831b Mon Sep 17 00:00:00 2001 From: Yafang Shao <laoar.shao@gmail.com> Date: Thu, 19 Dec 2024 10:34:49 +0800 Subject: [PATCH 459/504] arch: remove get_task_comm() and print task comm directly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since task->comm is guaranteed to be NUL-terminated, we can print it directly without the need to copy it into a separate buffer. This simplifies the code and avoids unnecessary operations. Link: https://lkml.kernel.org/r/20241219023452.69907-3-laoar.shao@gmail.com Signed-off-by: Yafang Shao <laoar.shao@gmail.com> Cc: Vineet Gupta <vgupta@kernel.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: "H. 
Peter Anvin" <hpa@zytor.com> Cc: "André Almeida" <andrealmeid@igalia.com> Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com> Cc: Danilo Krummrich <dakr@redhat.com> Cc: Darren Hart <dvhart@infradead.org> Cc: David Airlie <airlied@gmail.com> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: James Morris <jmorris@namei.org> Cc: Jani Nikula <jani.nikula@linux.intel.com> Cc: Jiri Slaby <jirislaby@kernel.org> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Cc: Kalle Valo <kvalo@kernel.org> Cc: Karol Herbst <kherbst@redhat.com> Cc: Kees Cook <kees@kernel.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Lyude Paul <lyude@redhat.com> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Paul Moore <paul@paul-moore.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Petr Mladek <pmladek@suse.com> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: "Serge E. Hallyn" <serge@hallyn.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Tvrtko Ursulin <tursulin@ursulin.net> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- arch/arc/kernel/unaligned.c | 5 ++--- arch/x86/kernel/vm86_32.c | 5 ++--- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c index d2f5ceaaed1b..3b2d8b1bd271 100644 --- a/arch/arc/kernel/unaligned.c +++ b/arch/arc/kernel/unaligned.c @@ -200,7 +200,6 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs, struct callee_regs *cregs) { struct disasm_state state; - char buf[TASK_COMM_LEN]; /* handle user mode only and only if enabled by sysadmin */ if (!user_mode(regs) || !unaligned_enabled) @@ -212,11 +211,11 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs, " performance significantly\n. To enable further" " logging of such instances, please \n" " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n", - get_task_comm(buf, current), task_pid_nr(current)); + current->comm, task_pid_nr(current)); } else { /* Add rate limiting if it gets down to it */ pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n", - get_task_comm(buf, current), task_pid_nr(current), + current->comm, task_pid_nr(current), address, regs->ret); } diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c index e9e803a4d44c..e6cc84143f3e 100644 --- a/arch/x86/kernel/vm86_32.c +++ b/arch/x86/kernel/vm86_32.c @@ -246,9 +246,8 @@ static long do_sys_vm86(struct vm86plus_struct __user *user_vm86, bool plus) /* VM86_SCREEN_BITMAP had numerous bugs and appears to have no users. */ if (v.flags & VM86_SCREEN_BITMAP) { - char comm[TASK_COMM_LEN]; - - pr_info_once("vm86: '%s' uses VM86_SCREEN_BITMAP, which is no longer supported\n", get_task_comm(comm, current)); + pr_info_once("vm86: '%s' uses VM86_SCREEN_BITMAP, which is no longer supported\n", + current->comm); return -EINVAL; } From 8fac1ab35715b6279586567a8dd2044c88937200 Mon Sep 17 00:00:00 2001 From: Yafang Shao <laoar.shao@gmail.com> Date: Thu, 19 Dec 2024 10:34:50 +0800 Subject: [PATCH 460/504] net: remove get_task_comm() and print task comm directly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since task->comm is guaranteed to be NUL-terminated, we can print it directly without the need to copy it into a separate buffer. This simplifies the code and avoids unnecessary operations. 
Link: https://lkml.kernel.org/r/20241219023452.69907-4-laoar.shao@gmail.com Signed-off-by: Yafang Shao <laoar.shao@gmail.com> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: "André Almeida" <andrealmeid@igalia.com> Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com> Cc: Borislav Petkov (AMD) <bp@alien8.de> Cc: Danilo Krummrich <dakr@redhat.com> Cc: Darren Hart <dvhart@infradead.org> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: David Airlie <airlied@gmail.com> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: James Morris <jmorris@namei.org> Cc: Jani Nikula <jani.nikula@linux.intel.com> Cc: Jiri Slaby <jirislaby@kernel.org> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Cc: Kalle Valo <kvalo@kernel.org> Cc: Karol Herbst <kherbst@redhat.com> Cc: Kees Cook <kees@kernel.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Lyude Paul <lyude@redhat.com> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Paul Moore <paul@paul-moore.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Petr Mladek <pmladek@suse.com> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: "Serge E. Hallyn" <serge@hallyn.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tvrtko Ursulin <tursulin@ursulin.net> Cc: Vineet Gupta <vgupta@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- net/wireless/wext-core.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c index 3bb04b05c5ce..bea70eb6f034 100644 --- a/net/wireless/wext-core.c +++ b/net/wireless/wext-core.c @@ -640,10 +640,8 @@ EXPORT_SYMBOL(wireless_send_event); #ifdef CONFIG_CFG80211_WEXT static void wireless_warn_cfg80211_wext(void) { - char name[sizeof(current->comm)]; - pr_warn_once("warning: `%s' uses wireless extensions which will stop working for Wi-Fi 7 hardware; use nl80211\n", - get_task_comm(name, current)); + current->comm); } #endif From ba4510352b90e070709a655679191e27beaec680 Mon Sep 17 00:00:00 2001 From: Yafang Shao <laoar.shao@gmail.com> Date: Thu, 19 Dec 2024 10:34:51 +0800 Subject: [PATCH 461/504] security: remove get_task_comm() and print task comm directly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since task->comm is guaranteed to be NUL-terminated, we can print it directly without the need to copy it into a separate buffer. This simplifies the code and avoids unnecessary operations. Link: https://lkml.kernel.org/r/20241219023452.69907-5-laoar.shao@gmail.com Signed-off-by: Yafang Shao <laoar.shao@gmail.com> Reviewed-by: Paul Moore <paul@paul-moore.com> Acked-by: Kees Cook <kees@kernel.org> Cc: James Morris <jmorris@namei.org> Cc: "Serge E. Hallyn" <serge@hallyn.com> Cc: "André Almeida" <andrealmeid@igalia.com> Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com> Cc: Borislav Petkov (AMD) <bp@alien8.de> Cc: Danilo Krummrich <dakr@redhat.com> Cc: Darren Hart <dvhart@infradead.org> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: David Airlie <airlied@gmail.com> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: "H. 
Peter Anvin" <hpa@zytor.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jani Nikula <jani.nikula@linux.intel.com> Cc: Jiri Slaby <jirislaby@kernel.org> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Cc: Kalle Valo <kvalo@kernel.org> Cc: Karol Herbst <kherbst@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Lyude Paul <lyude@redhat.com> Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Petr Mladek <pmladek@suse.com> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tvrtko Ursulin <tursulin@ursulin.net> Cc: Vineet Gupta <vgupta@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- security/yama/yama_lsm.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c index e1a5e13ea269..1a2d02fee09b 100644 --- a/security/yama/yama_lsm.c +++ b/security/yama/yama_lsm.c @@ -76,7 +76,6 @@ static void report_access(const char *access, struct task_struct *target, struct task_struct *agent) { struct access_report_info *info; - char agent_comm[sizeof(agent->comm)]; assert_spin_locked(&target->alloc_lock); /* for target->comm */ @@ -86,8 +85,7 @@ static void report_access(const char *access, struct task_struct *target, */ pr_notice_ratelimited( "ptrace %s of \"%s\"[%d] was attempted by \"%s\"[%d]\n", - access, target->comm, target->pid, - get_task_comm(agent_comm, agent), agent->pid); + access, target->comm, target->pid, agent->comm, agent->pid); return; } From cf08bd297e8365051e3bd4237dc0fa84d5fab46c Mon Sep 17 00:00:00 2001 From: Yafang Shao <laoar.shao@gmail.com> Date: Thu, 19 Dec 2024 10:34:52 +0800 Subject: [PATCH 462/504] drivers: remove get_task_comm() and print task comm directly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since task->comm is guaranteed to be NUL-terminated, we can print it directly without the need to copy it into a separate buffer. This simplifies the code and avoids unnecessary operations. Link: https://lkml.kernel.org/r/20241219023452.69907-6-laoar.shao@gmail.com Signed-off-by: Yafang Shao <laoar.shao@gmail.com> Reviewed-by: Jiri Slaby <jirislaby@kernel.org> (For tty) Reviewed-by: Lyude Paul <lyude@redhat.com> (For nouveau) Cc: Oded Gabbay <ogabbay@kernel.org> Cc: Jani Nikula <jani.nikula@linux.intel.com> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Cc: Tvrtko Ursulin <tursulin@ursulin.net> Cc: David Airlie <airlied@gmail.com> Cc: Simona Vetter <simona@ffwll.ch> Cc: Karol Herbst <kherbst@redhat.com> Cc: Lyude Paul <lyude@redhat.com> Cc: Danilo Krummrich <dakr@redhat.com> Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Jiri Slaby <jirislaby@kernel.org> Cc: "André Almeida" <andrealmeid@igalia.com> Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com> Cc: Borislav Petkov (AMD) <bp@alien8.de> Cc: Darren Hart <dvhart@infradead.org> Cc: Dave Hansen <dave.hansen@linux.intel.com> Cc: Davidlohr Bueso <dave@stgolabs.net> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: James Morris <jmorris@namei.org> Cc: Johannes Berg <johannes@sipsolutions.net> Cc: Kalle Valo <kvalo@kernel.org> Cc: Kees Cook <kees@kernel.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul Moore <paul@paul-moore.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Petr Mladek <pmladek@suse.com> Cc: "Serge E. 
Hallyn" <serge@hallyn.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vineet Gupta <vgupta@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- drivers/accel/habanalabs/common/context.c | 3 +-- drivers/accel/habanalabs/common/habanalabs_ioctl.c | 11 +++-------- drivers/gpu/drm/i915/display/intel_display_driver.c | 6 ++---- drivers/gpu/drm/nouveau/nouveau_chan.c | 4 +--- drivers/gpu/drm/nouveau/nouveau_drm.c | 5 ++--- drivers/tty/tty_io.c | 3 +-- 6 files changed, 10 insertions(+), 22 deletions(-) diff --git a/drivers/accel/habanalabs/common/context.c b/drivers/accel/habanalabs/common/context.c index b83141f58319..9f212b17611a 100644 --- a/drivers/accel/habanalabs/common/context.c +++ b/drivers/accel/habanalabs/common/context.c @@ -199,7 +199,6 @@ out_err: int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx) { - char task_comm[TASK_COMM_LEN]; int rc = 0, i; ctx->hdev = hdev; @@ -272,7 +271,7 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx) mutex_init(&ctx->ts_reg_lock); dev_dbg(hdev->dev, "create user context, comm=\"%s\", asid=%u\n", - get_task_comm(task_comm, current), ctx->asid); + current->comm, ctx->asid); } return 0; diff --git a/drivers/accel/habanalabs/common/habanalabs_ioctl.c b/drivers/accel/habanalabs/common/habanalabs_ioctl.c index 1dd6e23172ca..8729a0c57d78 100644 --- a/drivers/accel/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/accel/habanalabs/common/habanalabs_ioctl.c @@ -1279,13 +1279,10 @@ static long _hl_ioctl(struct hl_fpriv *hpriv, unsigned int cmd, unsigned long ar retcode = -EFAULT; out_err: - if (retcode) { - char task_comm[TASK_COMM_LEN]; - + if (retcode) dev_dbg_ratelimited(dev, "error in ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n", - task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr); - } + task_pid_nr(current), current->comm, cmd, nr); if (kdata != stack_kdata) kfree(kdata); @@ -1308,11 +1305,9 @@ long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg) if (nr == _IOC_NR(DRM_IOCTL_HL_INFO)) { ioctl = &hl_ioctls_control[nr - HL_COMMAND_START]; } else { - char task_comm[TASK_COMM_LEN]; - dev_dbg_ratelimited(hdev->dev_ctrl, "invalid ioctl: pid=%d, comm=\"%s\", cmd=%#010x, nr=%#04x\n", - task_pid_nr(current), get_task_comm(task_comm, current), cmd, nr); + task_pid_nr(current), current->comm, cmd, nr); return -ENOTTY; } diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 56b78cf6b854..62596424a9aa 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -391,7 +391,6 @@ void intel_display_driver_resume_access(struct drm_i915_private *i915) */ bool intel_display_driver_check_access(struct drm_i915_private *i915) { - char comm[TASK_COMM_LEN]; char current_task[TASK_COMM_LEN + 16]; char allowed_task[TASK_COMM_LEN + 16] = "none"; @@ -400,12 +399,11 @@ bool intel_display_driver_check_access(struct drm_i915_private *i915) return true; snprintf(current_task, sizeof(current_task), "%s[%d]", - get_task_comm(comm, current), - task_pid_vnr(current)); + current->comm, task_pid_vnr(current)); if (i915->display.access.allowed_task) snprintf(allowed_task, sizeof(allowed_task), "%s[%d]", - get_task_comm(comm, i915->display.access.allowed_task), + i915->display.access.allowed_task->comm, task_pid_vnr(i915->display.access.allowed_task)); drm_dbg_kms(&i915->drm, diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c 
b/drivers/gpu/drm/nouveau/nouveau_chan.c index 2cb2e5675807..cd659b9fd1d9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -279,7 +279,6 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm, const u64 plength = 0x10000; const u64 ioffset = plength; const u64 ilength = 0x02000; - char name[TASK_COMM_LEN]; int cid, ret; u64 size; @@ -338,8 +337,7 @@ nouveau_channel_ctor(struct nouveau_cli *cli, bool priv, u64 runm, chan->userd = &chan->user; } - get_task_comm(name, current); - snprintf(args.name, sizeof(args.name), "%s[%d]", name, task_pid_nr(current)); + snprintf(args.name, sizeof(args.name), "%s[%d]", current->comm, task_pid_nr(current)); ret = nvif_object_ctor(&device->object, "abi16ChanUser", 0, hosts[cid].oclass, &args, sizeof(args), &chan->user); diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 107f63f08bd9..ea7206484d22 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -1159,7 +1159,7 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) { struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_cli *cli; - char name[32], tmpname[TASK_COMM_LEN]; + char name[32]; int ret; /* need to bring up power immediately if opening device */ @@ -1169,10 +1169,9 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv) return ret; } - get_task_comm(tmpname, current); rcu_read_lock(); snprintf(name, sizeof(name), "%s[%d]", - tmpname, pid_nr(rcu_dereference(fpriv->pid))); + current->comm, pid_nr(rcu_dereference(fpriv->pid))); rcu_read_unlock(); if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL))) { diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index dcb1769c3625..50c0c23ae678 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -2622,14 +2622,13 @@ static int tty_tiocgicount(struct tty_struct *tty, void __user *arg) static int tty_set_serial(struct tty_struct *tty, struct serial_struct *ss) { - char comm[TASK_COMM_LEN]; int flags; flags = ss->flags & ASYNC_DEPRECATED; if (flags) pr_warn_ratelimited("%s: '%s' is using deprecated serial flags (with no effect): %.8x\n", - __func__, get_task_comm(comm, current), flags); + __func__, current->comm, flags); if (!tty->ops->set_serial) return -ENOTTY; From c9373960a43a515baa44938f3ac90d81a9e1e78a Mon Sep 17 00:00:00 2001 From: Wang Yaxin <wang.yaxin@zte.com.cn> Date: Fri, 20 Dec 2024 17:31:05 +0800 Subject: [PATCH 463/504] delayacct: add delay min to record delay peak Delay accounting can now calculate the average delay of processes, detect the overall system load, and also record the 'delay max' to identify potential abnormal delays. However, 'delay min' can help us identify another useful delay peak. By comparing the difference between 'delay max' and 'delay min', we can understand the optimization space for latency, providing a reference for the optimization of latency performance. 
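The accounting side only grows by one comparison per sample; a minimal sketch of the accumulation, with an illustrative structure (not the kernel's task_delay_info) and zero used to mean "no sample yet" for the minimum:

  #include <linux/types.h>

  /* Illustrative only: per-class delay accumulation with min/max. */
  struct delay_stat {
          u64 total;      /* summed delay, in ns */
          u32 count;      /* number of samples */
          u64 max;        /* largest single delay */
          u64 min;        /* smallest single delay; 0 = no sample yet */
  };

  static void delay_stat_add(struct delay_stat *s, u64 ns)
  {
          s->total += ns;
          s->count++;
          if (ns > s->max)
                  s->max = ns;
          if (s->min == 0 || ns < s->min)
                  s->min = ns;
  }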
Use case ========= bash-4.4# ./getdelays -d -t 242 print delayacct stats ON TGID 242 CPU count real total virtual total delay total delay average delay max delay min 39 156000000 156576579 2111069 0.054ms 0.212296ms 0.031307ms IO count delay total delay average delay max delay min 0 0 0.000ms 0.000000ms 0.000000ms SWAP count delay total delay average delay max delay min 0 0 0.000ms 0.000000ms 0.000000ms RECLAIM count delay total delay average delay max delay min 0 0 0.000ms 0.000000ms 0.000000ms THRASHING count delay total delay average delay max delay min 0 0 0.000ms 0.000000ms 0.000000ms COMPACT count delay total delay average delay max delay min 0 0 0.000ms 0.000000ms 0.000000ms WPCOPY count delay total delay average delay max delay min 156 11215873 0.072ms 0.207403ms 0.033913ms IRQ count delay total delay average delay max delay min 0 0 0.000ms 0.000000ms 0.000000ms Link: https://lkml.kernel.org/r/20241220173105906EOdsPhzjMLYNJJBqgz1ga@zte.com.cn Co-developed-by: Wang Yong <wang.yong12@zte.com.cn> Signed-off-by: Wang Yong <wang.yong12@zte.com.cn> Co-developed-by: xu xin <xu.xin16@zte.com.cn> Signed-off-by: xu xin <xu.xin16@zte.com.cn> Signed-off-by: Wang Yaxin <wang.yaxin@zte.com.cn> Co-developed-by: Kun Jiang <jiang.kun2@zte.com.cn> Signed-off-by: Kun Jiang <jiang.kun2@zte.com.cn> Cc: Balbir Singh <bsingharora@gmail.com> Cc: David Hildenbrand <david@redhat.com> Cc: Fan Yu <fan.yu9@zte.com.cn> Cc: Peilin He <he.peilin@zte.com.cn> Cc: tuqiang <tu.qiang35@zte.com.cn> Cc: ye xingchen <ye.xingchen@zte.com.cn> Cc: Yunkai Zhang <zhang.yunkai@zte.com.cn> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- Documentation/accounting/delay-accounting.rst | 32 +++++++------- include/linux/delayacct.h | 7 ++++ include/linux/sched.h | 3 ++ include/uapi/linux/taskstats.h | 8 ++++ kernel/delayacct.c | 32 ++++++++++---- kernel/sched/stats.h | 4 ++ tools/accounting/getdelays.c | 42 +++++++++++-------- 7 files changed, 88 insertions(+), 40 deletions(-) diff --git a/Documentation/accounting/delay-accounting.rst b/Documentation/accounting/delay-accounting.rst index 8a0277428ccf..210c194d4a7b 100644 --- a/Documentation/accounting/delay-accounting.rst +++ b/Documentation/accounting/delay-accounting.rst @@ -107,22 +107,22 @@ Get sum and peak of delays, since system boot, for all pids with tgid 242:: TGID 242 - CPU count real total virtual total delay total delay average delay max - 239 296000000 307724885 1127792 0.005ms 0.238382ms - IO count delay total delay average delay max - 0 0 0.000ms 0.000000ms - SWAP count delay total delay average delay max - 0 0 0.000ms 0.000000ms - RECLAIM count delay total delay average delay max - 0 0 0.000ms 0.000000ms - THRASHING count delay total delay average delay max - 0 0 0.000ms 0.000000ms - COMPACT count delay total delay average delay max - 0 0 0.000ms 0.000000ms - WPCOPY count delay total delay average delay max - 230 19100476 0.083ms 0.383822ms - IRQ count delay total delay average delay max - 0 0 0.000ms 0.000000ms + CPU count real total virtual total delay total delay average delay max delay min + 39 156000000 156576579 2111069 0.054ms 0.212296ms 0.031307ms + IO count delay total delay average delay max delay min + 0 0 0.000ms 0.000000ms 0.000000ms + SWAP count delay total delay average delay max delay min + 0 0 0.000ms 0.000000ms 0.000000ms + RECLAIM count delay total delay average delay max delay min + 0 0 0.000ms 0.000000ms 0.000000ms + THRASHING count delay total delay average delay max delay min + 0 0 0.000ms 0.000000ms 0.000000ms + COMPACT count 
delay total delay average delay max delay min + 0 0 0.000ms 0.000000ms 0.000000ms + WPCOPY count delay total delay average delay max delay min + 156 11215873 0.072ms 0.207403ms 0.033913ms + IRQ count delay total delay average delay max delay min + 0 0 0.000ms 0.000000ms 0.000000ms Get IO accounting for pid 1, it works only with -p:: diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h index 56fbfa2c2ac5..800dcc360db2 100644 --- a/include/linux/delayacct.h +++ b/include/linux/delayacct.h @@ -30,9 +30,11 @@ struct task_delay_info { */ u64 blkio_start; u64 blkio_delay_max; + u64 blkio_delay_min; u64 blkio_delay; /* wait for sync block io completion */ u64 swapin_start; u64 swapin_delay_max; + u64 swapin_delay_min; u64 swapin_delay; /* wait for swapin */ u32 blkio_count; /* total count of the number of sync block */ /* io operations performed */ @@ -40,21 +42,26 @@ struct task_delay_info { u64 freepages_start; u64 freepages_delay_max; + u64 freepages_delay_min; u64 freepages_delay; /* wait for memory reclaim */ u64 thrashing_start; u64 thrashing_delay_max; + u64 thrashing_delay_min; u64 thrashing_delay; /* wait for thrashing page */ u64 compact_start; u64 compact_delay_max; + u64 compact_delay_min; u64 compact_delay; /* wait for memory compact */ u64 wpcopy_start; u64 wpcopy_delay_max; + u64 wpcopy_delay_min; u64 wpcopy_delay; /* wait for write-protect copy */ u64 irq_delay_max; + u64 irq_delay_min; u64 irq_delay; /* wait for IRQ/SOFTIRQ */ u32 freepages_count; /* total count of memory reclaim */ diff --git a/include/linux/sched.h b/include/linux/sched.h index a0ae3923b41d..155012467b21 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -401,6 +401,9 @@ struct sched_info { /* Max time spent waiting on a runqueue: */ unsigned long long max_run_delay; + /* Min time spent waiting on a runqueue: */ + unsigned long long min_run_delay; + /* Timestamps: */ /* When did we last run on a CPU? 
*/ diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h index e0d1c6fc9f3b..934e20ef7f79 100644 --- a/include/uapi/linux/taskstats.h +++ b/include/uapi/linux/taskstats.h @@ -73,6 +73,7 @@ struct taskstats { __u64 cpu_count __attribute__((aligned(8))); __u64 cpu_delay_total; __u64 cpu_delay_max; + __u64 cpu_delay_min; /* Following four fields atomically updated using task->delays->lock */ @@ -82,11 +83,13 @@ struct taskstats { __u64 blkio_count; __u64 blkio_delay_total; __u64 blkio_delay_max; + __u64 blkio_delay_min; /* Delay waiting for page fault I/O (swap in only) */ __u64 swapin_count; __u64 swapin_delay_total; __u64 swapin_delay_max; + __u64 swapin_delay_min; /* cpu "wall-clock" running time * On some architectures, value will adjust for cpu time stolen @@ -170,11 +173,13 @@ struct taskstats { __u64 freepages_count; __u64 freepages_delay_total; __u64 freepages_delay_max; + __u64 freepages_delay_min; /* Delay waiting for thrashing page */ __u64 thrashing_count; __u64 thrashing_delay_total; __u64 thrashing_delay_max; + __u64 thrashing_delay_min; /* v10: 64-bit btime to avoid overflow */ __u64 ac_btime64; /* 64-bit begin time */ @@ -183,6 +188,7 @@ struct taskstats { __u64 compact_count; __u64 compact_delay_total; __u64 compact_delay_max; + __u64 compact_delay_min; /* v12 begin */ __u32 ac_tgid; /* thread group ID */ @@ -205,11 +211,13 @@ struct taskstats { __u64 wpcopy_count; __u64 wpcopy_delay_total; __u64 wpcopy_delay_max; + __u64 wpcopy_delay_min; /* v14: Delay waiting for IRQ/SOFTIRQ */ __u64 irq_count; __u64 irq_delay_total; __u64 irq_delay_max; + __u64 irq_delay_min; /* v15: add Delay max */ }; diff --git a/kernel/delayacct.c b/kernel/delayacct.c index 23212a0c88e4..b238eb8c6573 100644 --- a/kernel/delayacct.c +++ b/kernel/delayacct.c @@ -95,7 +95,7 @@ void __delayacct_tsk_init(struct task_struct *tsk) * Finish delay accounting for a statistic using its timestamps (@start), * accumulator (@total) and @count */ -static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count, u64 *max) +static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count, u64 *max, u64 *min) { s64 ns = local_clock() - *start; unsigned long flags; @@ -106,6 +106,8 @@ static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *cou (*count)++; if (ns > *max) *max = ns; + if (*min == 0 || ns < *min) + *min = ns; raw_spin_unlock_irqrestore(lock, flags); } } @@ -125,7 +127,8 @@ void __delayacct_blkio_end(struct task_struct *p) &p->delays->blkio_start, &p->delays->blkio_delay, &p->delays->blkio_count, - &p->delays->blkio_delay_max); + &p->delays->blkio_delay_max, + &p->delays->blkio_delay_min); } int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) @@ -157,6 +160,7 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) d->cpu_count += t1; d->cpu_delay_max = tsk->sched_info.max_run_delay; + d->cpu_delay_min = tsk->sched_info.min_run_delay; tmp = (s64)d->cpu_delay_total + t2; d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp; tmp = (s64)d->cpu_run_virtual_total + t3; @@ -170,24 +174,31 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */ raw_spin_lock_irqsave(&tsk->delays->lock, flags); d->blkio_delay_max = tsk->delays->blkio_delay_max; + d->blkio_delay_min = tsk->delays->blkio_delay_min; tmp = d->blkio_delay_total + tsk->delays->blkio_delay; d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 
0 : tmp; d->swapin_delay_max = tsk->delays->swapin_delay_max; + d->swapin_delay_min = tsk->delays->swapin_delay_min; tmp = d->swapin_delay_total + tsk->delays->swapin_delay; d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp; d->freepages_delay_max = tsk->delays->freepages_delay_max; + d->freepages_delay_min = tsk->delays->freepages_delay_min; tmp = d->freepages_delay_total + tsk->delays->freepages_delay; d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp; d->thrashing_delay_max = tsk->delays->thrashing_delay_max; + d->thrashing_delay_min = tsk->delays->thrashing_delay_min; tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay; d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp; d->compact_delay_max = tsk->delays->compact_delay_max; + d->compact_delay_min = tsk->delays->compact_delay_min; tmp = d->compact_delay_total + tsk->delays->compact_delay; d->compact_delay_total = (tmp < d->compact_delay_total) ? 0 : tmp; d->wpcopy_delay_max = tsk->delays->wpcopy_delay_max; + d->wpcopy_delay_min = tsk->delays->wpcopy_delay_min; tmp = d->wpcopy_delay_total + tsk->delays->wpcopy_delay; d->wpcopy_delay_total = (tmp < d->wpcopy_delay_total) ? 0 : tmp; d->irq_delay_max = tsk->delays->irq_delay_max; + d->irq_delay_min = tsk->delays->irq_delay_min; tmp = d->irq_delay_total + tsk->delays->irq_delay; d->irq_delay_total = (tmp < d->irq_delay_total) ? 0 : tmp; d->blkio_count += tsk->delays->blkio_count; @@ -224,7 +235,8 @@ void __delayacct_freepages_end(void) &current->delays->freepages_start, &current->delays->freepages_delay, &current->delays->freepages_count, - &current->delays->freepages_delay_max); + &current->delays->freepages_delay_max, + &current->delays->freepages_delay_min); } void __delayacct_thrashing_start(bool *in_thrashing) @@ -247,7 +259,8 @@ void __delayacct_thrashing_end(bool *in_thrashing) &current->delays->thrashing_start, &current->delays->thrashing_delay, &current->delays->thrashing_count, - &current->delays->thrashing_delay_max); + &current->delays->thrashing_delay_max, + &current->delays->thrashing_delay_min); } void __delayacct_swapin_start(void) @@ -261,7 +274,8 @@ void __delayacct_swapin_end(void) &current->delays->swapin_start, &current->delays->swapin_delay, &current->delays->swapin_count, - &current->delays->swapin_delay_max); + &current->delays->swapin_delay_max, + &current->delays->swapin_delay_min); } void __delayacct_compact_start(void) @@ -275,7 +289,8 @@ void __delayacct_compact_end(void) &current->delays->compact_start, &current->delays->compact_delay, &current->delays->compact_count, - &current->delays->compact_delay_max); + &current->delays->compact_delay_max, + &current->delays->compact_delay_min); } void __delayacct_wpcopy_start(void) @@ -289,7 +304,8 @@ void __delayacct_wpcopy_end(void) &current->delays->wpcopy_start, &current->delays->wpcopy_delay, &current->delays->wpcopy_count, - &current->delays->wpcopy_delay_max); + &current->delays->wpcopy_delay_max, + &current->delays->wpcopy_delay_min); } void __delayacct_irq(struct task_struct *task, u32 delta) @@ -301,6 +317,8 @@ void __delayacct_irq(struct task_struct *task, u32 delta) task->delays->irq_count++; if (delta > task->delays->irq_delay_max) task->delays->irq_delay_max = delta; + if (delta && (!task->delays->irq_delay_min || delta < task->delays->irq_delay_min)) + task->delays->irq_delay_min = delta; raw_spin_unlock_irqrestore(&task->delays->lock, flags); } diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h index ed72435aef51..693537b908a1 100644 --- a/kernel/sched/stats.h +++ b/kernel/sched/stats.h @@ -246,6 +246,8 @@ static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t) t->sched_info.run_delay += delta;
if (delta > t->sched_info.max_run_delay) t->sched_info.max_run_delay = delta; + if (delta && (!t->sched_info.min_run_delay || delta < t->sched_info.min_run_delay)) + t->sched_info.min_run_delay = delta; rq_sched_info_dequeue(rq, delta); } @@ -269,6 +271,8 @@ static void sched_info_arrive(struct rq *rq, struct task_struct *t) t->sched_info.pcount++; if (delta > t->sched_info.max_run_delay) t->sched_info.max_run_delay = delta; + if (delta && (!t->sched_info.min_run_delay || delta < t->sched_info.min_run_delay)) + t->sched_info.min_run_delay = delta; rq_sched_info_arrive(rq, delta); } diff --git a/tools/accounting/getdelays.c b/tools/accounting/getdelays.c index e570bcad185d..100ad3dc091a 100644 --- a/tools/accounting/getdelays.c +++ b/tools/accounting/getdelays.c @@ -192,7 +192,7 @@ static int get_family_id(int sd) } #define average_ms(t, c) (t / 1000000ULL / (c ? c : 1)) -#define delay_max_ms(t) (t / 1000000ULL) +#define delay_ms(t) (t / 1000000ULL) static void print_delayacct(struct taskstats *t) { @@ -213,48 +213,56 @@ static void print_delayacct(struct taskstats *t) "IRQ %15s%15s%15s%15s\n" " %15llu%15llu%15.3fms%13.6fms\n", "count", "real total", "virtual total", - "delay total", "delay average", "delay max", + "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->cpu_count, (unsigned long long)t->cpu_run_real_total, (unsigned long long)t->cpu_run_virtual_total, (unsigned long long)t->cpu_delay_total, average_ms((double)t->cpu_delay_total, t->cpu_count), - delay_max_ms((double)t->cpu_delay_max), - "count", "delay total", "delay average", "delay max", + delay_ms((double)t->cpu_delay_max), + delay_ms((double)t->cpu_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->blkio_count, (unsigned long long)t->blkio_delay_total, average_ms((double)t->blkio_delay_total, t->blkio_count), - delay_max_ms((double)t->blkio_delay_max), - "count", "delay total", "delay average", "delay max", + delay_ms((double)t->blkio_delay_max), + delay_ms((double)t->blkio_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->swapin_count, (unsigned long long)t->swapin_delay_total, average_ms((double)t->swapin_delay_total, t->swapin_count), - delay_max_ms((double)t->swapin_delay_max), - "count", "delay total", "delay average", "delay max", + delay_ms((double)t->swapin_delay_max), + delay_ms((double)t->swapin_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->freepages_count, (unsigned long long)t->freepages_delay_total, average_ms((double)t->freepages_delay_total, t->freepages_count), - delay_max_ms((double)t->freepages_delay_max), - "count", "delay total", "delay average", "delay max", + delay_ms((double)t->freepages_delay_max), + delay_ms((double)t->freepages_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->thrashing_count, (unsigned long long)t->thrashing_delay_total, average_ms((double)t->thrashing_delay_total, t->thrashing_count), - delay_max_ms((double)t->thrashing_delay_max), - "count", "delay total", "delay average", "delay max", + delay_ms((double)t->thrashing_delay_max), + delay_ms((double)t->thrashing_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->compact_count, (unsigned long long)t->compact_delay_total, average_ms((double)t->compact_delay_total, t->compact_count), - delay_max_ms((double)t->compact_delay_max), - 
"count", "delay total", "delay average", "delay max", + delay_ms((double)t->compact_delay_max), + delay_ms((double)t->compact_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->wpcopy_count, (unsigned long long)t->wpcopy_delay_total, average_ms((double)t->wpcopy_delay_total, t->wpcopy_count), - delay_max_ms((double)t->wpcopy_delay_max), - "count", "delay total", "delay average", "delay max", + delay_ms((double)t->wpcopy_delay_max), + delay_ms((double)t->wpcopy_delay_min), + "count", "delay total", "delay average", "delay max", "delay min", (unsigned long long)t->irq_count, (unsigned long long)t->irq_delay_total, average_ms((double)t->irq_delay_total, t->irq_count), - delay_max_ms((double)t->irq_delay_max)); + delay_ms((double)t->irq_delay_max), + delay_ms((double)t->irq_delay_min)); } static void task_context_switch_counts(struct taskstats *t) From fdad5df80e39eed00aa4ce93c84499395e8ad89e Mon Sep 17 00:00:00 2001 From: Phillip Lougher <phillip@squashfs.org.uk> Date: Sun, 29 Dec 2024 23:37:49 +0000 Subject: [PATCH 464/504] squashfs: make squashfs_cache_init() return ERR_PTR(-ENOMEM) Patch series "mm, swap: rework of swap allocator locks". This patchset reduces the amount of memory that Squashfs uses when CONFIG_FILE_DIRECT is configured, and updates various out of date information in the documentation and Kconfig. This patch (of 4): Make squashfs_cache_init() return an ERR_PTR(-ENOMEM) on failure rather than NULL. This tidies up some calling code, but, it also allows NULL to be returned as a valid result when a cache hasn't be allocated. Link: https://lkml.kernel.org/r/20241229233752.54481-1-phillip@squashfs.org.uk Link: https://lkml.kernel.org/r/20241229233752.54481-2-phillip@squashfs.org.uk Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/squashfs/cache.c | 10 +++++++--- fs/squashfs/super.c | 17 ++++++++++------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c index 5062326d0efb..4db0d2b0aab8 100644 --- a/fs/squashfs/cache.c +++ b/fs/squashfs/cache.c @@ -224,11 +224,15 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries, int block_size) { int i, j; - struct squashfs_cache *cache = kzalloc(sizeof(*cache), GFP_KERNEL); + struct squashfs_cache *cache; + if (entries == 0) + return NULL; + + cache = kzalloc(sizeof(*cache), GFP_KERNEL); if (cache == NULL) { ERROR("Failed to allocate %s cache\n", name); - return NULL; + return ERR_PTR(-ENOMEM); } cache->entry = kcalloc(entries, sizeof(*(cache->entry)), GFP_KERNEL); @@ -281,7 +285,7 @@ struct squashfs_cache *squashfs_cache_init(char *name, int entries, cleanup: squashfs_cache_delete(cache); - return NULL; + return ERR_PTR(-ENOMEM); } diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 269c6d61bc29..fedae8dbc5de 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -314,26 +314,29 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) sb->s_flags |= SB_RDONLY; sb->s_op = &squashfs_super_ops; - err = -ENOMEM; - msblk->block_cache = squashfs_cache_init("metadata", SQUASHFS_CACHED_BLKS, SQUASHFS_METADATA_SIZE); - if (msblk->block_cache == NULL) + if (IS_ERR(msblk->block_cache)) { + err = PTR_ERR(msblk->block_cache); goto failed_mount; + } /* Allocate read_page block */ msblk->read_page = squashfs_cache_init("data", msblk->max_thread_num, msblk->block_size); - if (msblk->read_page == NULL) { + if 
(IS_ERR(msblk->read_page)) { errorf(fc, "Failed to allocate read_page block"); + err = PTR_ERR(msblk->read_page); goto failed_mount; } if (msblk->devblksize == PAGE_SIZE) { struct inode *cache = new_inode(sb); - if (cache == NULL) + if (cache == NULL) { + err = -ENOMEM; goto failed_mount; + } set_nlink(cache, 1); cache->i_size = OFFSET_MAX; @@ -406,8 +409,8 @@ handle_fragments: msblk->fragment_cache = squashfs_cache_init("fragment", min(SQUASHFS_CACHED_FRAGMENTS, fragments), msblk->block_size); - if (msblk->fragment_cache == NULL) { - err = -ENOMEM; + if (IS_ERR(msblk->fragment_cache)) { + err = PTR_ERR(msblk->fragment_cache); goto failed_mount; } From 86ee94cf2e5e6757ec83f86332785fee60a58026 Mon Sep 17 00:00:00 2001 From: Phillip Lougher <phillip@squashfs.org.uk> Date: Sun, 29 Dec 2024 23:37:50 +0000 Subject: [PATCH 465/504] squashfs: don't allocate read_page cache if SQUASHFS_FILE_DIRECT configured If Squashfs has been configured to directly read datablocks into the page cache (SQUASHFS_FILE_DIRECT), then the read_page cache is unnecessary. This improvement is due to the following two commits, which added the ability to read datablocks into the page cache when pages were missing, enabling the fallback which used an intermediate buffer to be removed. commit f268eedddf359 ("squashfs: extend "page actor" to handle missing pages") commit 1bb1a07afad97 ("squashfs: don't use intermediate buffer if pages missing") This reduces the amount of memory used when mounting a filesystem by block_size * maximum number of threads. Link: https://lkml.kernel.org/r/20241229233752.54481-3-phillip@squashfs.org.uk Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/squashfs/squashfs.h | 6 ++++++ fs/squashfs/super.c | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index 5a756e6790b5..2c45b9b938e9 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h @@ -14,6 +14,12 @@ #define WARNING(s, args...) pr_warn("SQUASHFS: "s, ## args) +#ifdef CONFIG_SQUASHFS_FILE_CACHE +#define SQUASHFS_READ_PAGES msblk->max_thread_num +#else +#define SQUASHFS_READ_PAGES 0 +#endif + /* block.c */ extern int squashfs_read_data(struct super_block *, u64, int, u64 *, struct squashfs_page_actor *); diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index fedae8dbc5de..67c55fe32ce8 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c @@ -323,7 +323,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc) /* Allocate read_page block */ msblk->read_page = squashfs_cache_init("data", - msblk->max_thread_num, msblk->block_size); + SQUASHFS_READ_PAGES, msblk->block_size); if (IS_ERR(msblk->read_page)) { errorf(fc, "Failed to allocate read_page block"); err = PTR_ERR(msblk->read_page); From 3c7a549acd3ea474b79477ee413d196caff1fc23 Mon Sep 17 00:00:00 2001 From: Phillip Lougher <phillip@squashfs.org.uk> Date: Sun, 29 Dec 2024 23:37:51 +0000 Subject: [PATCH 466/504] Documentation: update the Squashfs filesystem documentation This patch updates the following which are out of date. - Zstd has been added to the compression algorithms supported. - The filesystem mailing list (for the kernel code) is changed to linux-fsdevel rather than the now very little used Sourceforge mailing list. - The Squashfs website has been changed to the Squashfs-tools github repository. - The fact that Squashfs-tools is likely packaged by the linux distribution is mentioned. 
Link: https://lkml.kernel.org/r/20241229233752.54481-4-phillip@squashfs.org.uk Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- Documentation/filesystems/squashfs.rst | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/Documentation/filesystems/squashfs.rst b/Documentation/filesystems/squashfs.rst index 4af8d6207509..45653b3228f9 100644 --- a/Documentation/filesystems/squashfs.rst +++ b/Documentation/filesystems/squashfs.rst @@ -6,7 +6,7 @@ Squashfs 4.0 Filesystem Squashfs is a compressed read-only filesystem for Linux. -It uses zlib, lz4, lzo, or xz compression to compress files, inodes and +It uses zlib, lz4, lzo, xz or zstd compression to compress files, inodes and directories. Inodes in the system are very small and all blocks are packed to minimise data overhead. Block sizes greater than 4K are supported up to a maximum of 1Mbytes (default block size 128K). @@ -16,8 +16,8 @@ use (i.e. in cases where a .tar.gz file may be used), and in constrained block device/memory systems (e.g. embedded systems) where low overhead is needed. -Mailing list: squashfs-devel@lists.sourceforge.net -Web site: www.squashfs.org +Mailing list (kernel code): linux-fsdevel@vger.kernel.org +Web site: github.com/plougher/squashfs-tools 1. Filesystem Features ---------------------- @@ -58,11 +58,9 @@ inodes have different sizes). As squashfs is a read-only filesystem, the mksquashfs program must be used to create populated squashfs filesystems. This and other squashfs utilities -can be obtained from http://www.squashfs.org. Usage instructions can be -obtained from this site also. - -The squashfs-tools development tree is now located on kernel.org - git://git.kernel.org/pub/scm/fs/squashfs/squashfs-tools.git +are very likely packaged by your linux distribution (called squashfs-tools). +The source code can be obtained from github.com/plougher/squashfs-tools. +Usage instructions can also be obtained from this site. 2.1 Mount options ----------------- From 921972552c649d658a4309191ee60dd4adf1afbc Mon Sep 17 00:00:00 2001 From: Phillip Lougher <phillip@squashfs.org.uk> Date: Sun, 29 Dec 2024 23:37:52 +0000 Subject: [PATCH 467/504] squashfs: update Kconfig information Update the compression algorithms supported, and the Squashfs website location. Link: https://lkml.kernel.org/r/20241229233752.54481-5-phillip@squashfs.org.uk Signed-off-by: Phillip Lougher <phillip@squashfs.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/squashfs/Kconfig | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig index 60fc98bdf421..b1091e70434a 100644 --- a/fs/squashfs/Kconfig +++ b/fs/squashfs/Kconfig @@ -5,8 +5,8 @@ config SQUASHFS help Saying Y here includes support for SquashFS 4.0 (a Compressed Read-Only File System). Squashfs is a highly compressed read-only - filesystem for Linux. It uses zlib, lzo or xz compression to - compress both files, inodes and directories. Inodes in the system + filesystem for Linux. It uses zlib, lz4, lzo, xz or zstd compression + to compress both files, inodes and directories. Inodes in the system are very small and all blocks are packed to minimise data overhead. Block sizes greater than 4K are supported up to a maximum of 1 Mbytes (default block size 128K). SquashFS 4.0 supports 64 bit filesystems @@ -16,7 +16,7 @@ config SQUASHFS Squashfs is intended for general read-only filesystem use, for archival use (i.e. 
in cases where a .tar.gz file may be used), and in embedded systems where low overhead is needed. Further information - and tools are available from http://squashfs.sourceforge.net. + and tools are available from github.com/plougher/squashfs-tools. If you want to compile this as a module ( = code which can be inserted in and removed from the running kernel whenever you want), From 2d628182792d4de8fb0c5db3807479c2ce6cd73d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" <willy@infradead.org> Date: Fri, 20 Dec 2024 22:46:24 +0000 Subject: [PATCH 468/504] squashfs: use a folio throughout squashfs_read_folio() Use modern folio APIs where they exist and convert back to struct page for the internal functions. Link: https://lkml.kernel.org/r/20241220224634.723899-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Phillip Lougher <phillip@squashfs.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/squashfs/file.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index 21aaa96856c1..bc6598c3a48f 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -445,21 +445,19 @@ static int squashfs_readpage_sparse(struct page *page, int expected) static int squashfs_read_folio(struct file *file, struct folio *folio) { - struct page *page = &folio->page; - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; - int index = page->index >> (msblk->block_log - PAGE_SHIFT); + int index = folio->index >> (msblk->block_log - PAGE_SHIFT); int file_end = i_size_read(inode) >> msblk->block_log; int expected = index == file_end ? (i_size_read(inode) & (msblk->block_size - 1)) : msblk->block_size; int res = 0; - void *pageaddr; TRACE("Entered squashfs_readpage, page index %lx, start block %llx\n", - page->index, squashfs_i(inode)->start); + folio->index, squashfs_i(inode)->start); - if (page->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >> + if (folio->index >= ((i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT)) goto out; @@ -472,23 +470,18 @@ static int squashfs_read_folio(struct file *file, struct folio *folio) goto out; if (res == 0) - res = squashfs_readpage_sparse(page, expected); + res = squashfs_readpage_sparse(&folio->page, expected); else - res = squashfs_readpage_block(page, block, res, expected); + res = squashfs_readpage_block(&folio->page, block, res, expected); } else - res = squashfs_readpage_fragment(page, expected); + res = squashfs_readpage_fragment(&folio->page, expected); if (!res) return 0; out: - pageaddr = kmap_atomic(page); - memset(pageaddr, 0, PAGE_SIZE); - kunmap_atomic(pageaddr); - flush_dcache_page(page); - if (res == 0) - SetPageUptodate(page); - unlock_page(page); + folio_zero_segment(folio, 0, folio_size(folio)); + folio_end_read(folio, res == 0); return res; } From 97bd9aefff3b4060c05c9c3459bef0477c097155 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" <willy@infradead.org> Date: Fri, 20 Dec 2024 22:46:25 +0000 Subject: [PATCH 469/504] squashfs: pass a folio to squashfs_readpage_fragment() Remove an access to page->mapping. 
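A brief aside on the pattern this and the following folio conversions share (an illustrative sketch with made-up helper names and the usual <linux/highmem.h>/<linux/pagemap.h> includes assumed; it is not the squashfs code itself): folio_end_read() folds the "mark uptodate on success" and "unlock" steps that the page-based code spelled out by hand, which is why the epilogue in squashfs_read_folio() above collapses to two calls.

/* Page-based epilogue, roughly what the old code did by hand. */
static void read_done_page(struct page *page, int err)
{
	void *addr = kmap_atomic(page);

	memset(addr, 0, PAGE_SIZE);
	kunmap_atomic(addr);
	flush_dcache_page(page);
	if (err == 0)
		SetPageUptodate(page);
	unlock_page(page);
}

/* Folio-based equivalent: zero the folio, then finish the read in one call. */
static void read_done_folio(struct folio *folio, int err)
{
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_end_read(folio, err == 0);
}
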
Link: https://lkml.kernel.org/r/20241220224634.723899-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Phillip Lougher <phillip@squashfs.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/squashfs/file.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index bc6598c3a48f..6bd16e12493b 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -417,9 +417,9 @@ skip_page: } /* Read datablock stored packed inside a fragment (tail-end packed block) */ -static int squashfs_readpage_fragment(struct page *page, int expected) +static int squashfs_readpage_fragment(struct folio *folio, int expected) { - struct inode *inode = page->mapping->host; + struct inode *inode = folio->mapping->host; struct squashfs_cache_entry *buffer = squashfs_get_fragment(inode->i_sb, squashfs_i(inode)->fragment_block, squashfs_i(inode)->fragment_size); @@ -430,7 +430,7 @@ static int squashfs_readpage_fragment(struct page *page, int expected) squashfs_i(inode)->fragment_block, squashfs_i(inode)->fragment_size); else - squashfs_copy_cache(page, buffer, expected, + squashfs_copy_cache(&folio->page, buffer, expected, squashfs_i(inode)->fragment_offset); squashfs_cache_put(buffer); @@ -474,7 +474,7 @@ static int squashfs_read_folio(struct file *file, struct folio *folio) else res = squashfs_readpage_block(&folio->page, block, res, expected); } else - res = squashfs_readpage_fragment(&folio->page, expected); + res = squashfs_readpage_fragment(folio, expected); if (!res) return 0; From bb856ab669ac440d55dde734f48e5ffeedf7a89d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" <willy@infradead.org> Date: Fri, 20 Dec 2024 22:46:26 +0000 Subject: [PATCH 470/504] squashfs: convert squashfs_readpage_block() to take a folio Remove a few accesses to page->mapping. 
Link: https://lkml.kernel.org/r/20241220224634.723899-3-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Phillip Lougher <phillip@squashfs.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/squashfs/file.c | 2 +- fs/squashfs/file_cache.c | 6 +++--- fs/squashfs/file_direct.c | 11 +++++------ fs/squashfs/squashfs.h | 2 +- 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index 6bd16e12493b..5b81e26b1226 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -472,7 +472,7 @@ static int squashfs_read_folio(struct file *file, struct folio *folio) if (res == 0) res = squashfs_readpage_sparse(&folio->page, expected); else - res = squashfs_readpage_block(&folio->page, block, res, expected); + res = squashfs_readpage_block(folio, block, res, expected); } else res = squashfs_readpage_fragment(folio, expected); diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c index 54c17b7c85fd..0360d22a77d4 100644 --- a/fs/squashfs/file_cache.c +++ b/fs/squashfs/file_cache.c @@ -18,9 +18,9 @@ #include "squashfs.h" /* Read separately compressed datablock and memcopy into page cache */ -int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expected) +int squashfs_readpage_block(struct folio *folio, u64 block, int bsize, int expected) { - struct inode *i = page->mapping->host; + struct inode *i = folio->mapping->host; struct squashfs_cache_entry *buffer = squashfs_get_datablock(i->i_sb, block, bsize); int res = buffer->error; @@ -29,7 +29,7 @@ int squashfs_readpage_block(struct page *page, u64 block, int bsize, int expecte ERROR("Unable to read page, block %llx, size %x\n", block, bsize); else - squashfs_copy_cache(page, buffer, expected, 0); + squashfs_copy_cache(&folio->page, buffer, expected, 0); squashfs_cache_put(buffer); return res; diff --git a/fs/squashfs/file_direct.c b/fs/squashfs/file_direct.c index d19d4db74af8..2c3e809d6891 100644 --- a/fs/squashfs/file_direct.c +++ b/fs/squashfs/file_direct.c @@ -19,12 +19,11 @@ #include "page_actor.h" /* Read separately compressed datablock directly into page cache */ -int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, - int expected) - +int squashfs_readpage_block(struct folio *folio, u64 block, int bsize, + int expected) { - struct folio *folio = page_folio(target_page); - struct inode *inode = target_page->mapping->host; + struct page *target_page = &folio->page; + struct inode *inode = folio->mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; loff_t file_end = (i_size_read(inode) - 1) >> PAGE_SHIFT; int mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; @@ -48,7 +47,7 @@ int squashfs_readpage_block(struct page *target_page, u64 block, int bsize, /* Try to grab all the pages covered by the Squashfs block */ for (i = 0, index = start_index; index <= end_index; index++) { page[i] = (index == folio->index) ? 
target_page : - grab_cache_page_nowait(target_page->mapping, index); + grab_cache_page_nowait(folio->mapping, index); if (page[i] == NULL) continue; diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index 2c45b9b938e9..9922a9460ce6 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h @@ -78,7 +78,7 @@ void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int, int); /* file_xxx.c */ -extern int squashfs_readpage_block(struct page *, u64, int, int); +int squashfs_readpage_block(struct folio *, u64 block, int bsize, int expected); /* id.c */ extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); From a1ca5eb924e7c4ce48a5499b718c6a5095a9ef0a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" <willy@infradead.org> Date: Fri, 20 Dec 2024 22:46:27 +0000 Subject: [PATCH 471/504] squashfs; convert squashfs_copy_cache() to take a folio Remove accesses to page->index and page->mapping. Also use folio APIs where available. This code still assumes order 0 folios. Link: https://lkml.kernel.org/r/20241220224634.723899-4-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Phillip Lougher <phillip@squashfs.org.uk> Cc: Dan Carpenter <dan.carpenter@linaro.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/squashfs/file.c | 46 ++++++++++++++++++++++------------------ fs/squashfs/file_cache.c | 2 +- fs/squashfs/squashfs.h | 4 ++-- 3 files changed, 28 insertions(+), 24 deletions(-) diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index 5b81e26b1226..1f27e8161319 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -378,13 +378,15 @@ void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, } /* Copy data into page cache */ -void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, - int bytes, int offset) +void squashfs_copy_cache(struct folio *folio, + struct squashfs_cache_entry *buffer, size_t bytes, + size_t offset) { - struct inode *inode = page->mapping->host; + struct address_space *mapping = folio->mapping; + struct inode *inode = mapping->host; struct squashfs_sb_info *msblk = inode->i_sb->s_fs_info; int i, mask = (1 << (msblk->block_log - PAGE_SHIFT)) - 1; - int start_index = page->index & ~mask, end_index = start_index | mask; + int start_index = folio->index & ~mask, end_index = start_index | mask; /* * Loop copying datablock into pages. As the datablock likely covers @@ -394,25 +396,27 @@ void squashfs_copy_cache(struct page *page, struct squashfs_cache_entry *buffer, */ for (i = start_index; i <= end_index && bytes > 0; i++, bytes -= PAGE_SIZE, offset += PAGE_SIZE) { - struct page *push_page; - int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0; + struct folio *push_folio; + size_t avail = buffer ? min(bytes, PAGE_SIZE) : 0; - TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail); + TRACE("bytes %zu, i %d, available_bytes %zu\n", bytes, i, avail); - push_page = (i == page->index) ? page : - grab_cache_page_nowait(page->mapping, i); + push_folio = (i == folio->index) ? 
folio : + __filemap_get_folio(mapping, i, + FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, + mapping_gfp_mask(mapping)); - if (!push_page) + if (!push_folio) continue; - if (PageUptodate(push_page)) - goto skip_page; + if (folio_test_uptodate(push_folio)) + goto skip_folio; - squashfs_fill_page(push_page, buffer, offset, avail); -skip_page: - unlock_page(push_page); - if (i != page->index) - put_page(push_page); + squashfs_fill_page(&push_folio->page, buffer, offset, avail); +skip_folio: + folio_unlock(push_folio); + if (i != folio->index) + folio_put(push_folio); } } @@ -430,16 +434,16 @@ static int squashfs_readpage_fragment(struct folio *folio, int expected) squashfs_i(inode)->fragment_block, squashfs_i(inode)->fragment_size); else - squashfs_copy_cache(&folio->page, buffer, expected, + squashfs_copy_cache(folio, buffer, expected, squashfs_i(inode)->fragment_offset); squashfs_cache_put(buffer); return res; } -static int squashfs_readpage_sparse(struct page *page, int expected) +static int squashfs_readpage_sparse(struct folio *folio, int expected) { - squashfs_copy_cache(page, NULL, expected, 0); + squashfs_copy_cache(folio, NULL, expected, 0); return 0; } @@ -470,7 +474,7 @@ static int squashfs_read_folio(struct file *file, struct folio *folio) goto out; if (res == 0) - res = squashfs_readpage_sparse(&folio->page, expected); + res = squashfs_readpage_sparse(folio, expected); else res = squashfs_readpage_block(folio, block, res, expected); } else diff --git a/fs/squashfs/file_cache.c b/fs/squashfs/file_cache.c index 0360d22a77d4..40e59a43d098 100644 --- a/fs/squashfs/file_cache.c +++ b/fs/squashfs/file_cache.c @@ -29,7 +29,7 @@ int squashfs_readpage_block(struct folio *folio, u64 block, int bsize, int expec ERROR("Unable to read page, block %llx, size %x\n", block, bsize); else - squashfs_copy_cache(&folio->page, buffer, expected, 0); + squashfs_copy_cache(folio, buffer, expected, 0); squashfs_cache_put(buffer); return res; diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index 9922a9460ce6..8c6fbef022f4 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h @@ -74,8 +74,8 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *, /* file.c */ void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int); -void squashfs_copy_cache(struct page *, struct squashfs_cache_entry *, int, - int); +void squashfs_copy_cache(struct folio *, struct squashfs_cache_entry *, + size_t bytes, size_t offset); /* file_xxx.c */ int squashfs_readpage_block(struct folio *, u64 block, int bsize, int expected); From 9dd04ae925275e42b20f5713a505b8dc68529dd7 Mon Sep 17 00:00:00 2001 From: Dan Carpenter <dan.carpenter@linaro.org> Date: Wed, 8 Jan 2025 12:16:30 +0300 Subject: [PATCH 472/504] squashfs: fix a NULL vs IS_ERR() bug __filemap_get_folio() never returns NULL, it returns error pointers. This incorrect check would lead to an Oops on the following line when we pass "push_folio" to folio_test_uptodate(). 
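For reference, a small sketch of the calling convention the fix depends on (a generic, hypothetical helper rather than the squashfs code): __filemap_get_folio() reports failure as an ERR_PTR(), never as NULL, so its result must be tested with IS_ERR().

/* Grab a locked folio without blocking; sketch only. */
static struct folio *grab_folio_or_null(struct address_space *mapping,
					pgoff_t index)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index,
			FGP_LOCK | FGP_CREAT | FGP_NOFS | FGP_NOWAIT,
			mapping_gfp_mask(mapping));
	/*
	 * Failure is e.g. ERR_PTR(-ENOMEM); a NULL check would let the
	 * error pointer through and oops on the first dereference.
	 */
	if (IS_ERR(folio))
		return NULL;

	return folio;
}
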
Link: https://lkml.kernel.org/r/7b7f44d6-9153-4d7c-b65b-2d78febe6c7a@stanley.mountain Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org> Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org> Cc: Phillip Lougher <phillip@squashfs.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/squashfs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index 1f27e8161319..74076c4823c3 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -406,7 +406,7 @@ void squashfs_copy_cache(struct folio *folio, FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, mapping_gfp_mask(mapping)); - if (!push_folio) + if (IS_ERR(push_folio)) continue; if (folio_test_uptodate(push_folio)) From 9e4ff5149f6d39c724c5fba788925f0ae790a7dc Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" <willy@infradead.org> Date: Fri, 20 Dec 2024 22:46:28 +0000 Subject: [PATCH 473/504] squashfs: convert squashfs_fill_page() to take a folio squashfs_fill_page is only used in this file, so make it static. Use kmap_local instead of kmap_atomic, and return a bool so that the caller can use folio_end_read() which saves an atomic operation over calling folio_mark_uptodate() followed by folio_unlock(). Link: https://lkml.kernel.org/r/20241220224634.723899-5-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Tested-by: Ryan Roberts <ryan.roberts@arm.com> Cc: Phillip Lougher <phillip@squashfs.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/squashfs/file.c | 21 ++++++++++++--------- fs/squashfs/squashfs.h | 1 - 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index 74076c4823c3..eef1dd462592 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -362,19 +362,21 @@ static int read_blocklist(struct inode *inode, int index, u64 *block) return squashfs_block_size(size); } -void squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail) +static bool squashfs_fill_page(struct folio *folio, + struct squashfs_cache_entry *buffer, size_t offset, + size_t avail) { - int copied; + size_t copied; void *pageaddr; - pageaddr = kmap_atomic(page); + pageaddr = kmap_local_folio(folio, 0); copied = squashfs_copy_data(pageaddr, buffer, offset, avail); memset(pageaddr + copied, 0, PAGE_SIZE - copied); - kunmap_atomic(pageaddr); + kunmap_local(pageaddr); - flush_dcache_page(page); - if (copied == avail) - SetPageUptodate(page); + flush_dcache_folio(folio); + + return copied == avail; } /* Copy data into page cache */ @@ -398,6 +400,7 @@ void squashfs_copy_cache(struct folio *folio, bytes -= PAGE_SIZE, offset += PAGE_SIZE) { struct folio *push_folio; size_t avail = buffer ? 
min(bytes, PAGE_SIZE) : 0; + bool uptodate = true; TRACE("bytes %zu, i %d, available_bytes %zu\n", bytes, i, avail); @@ -412,9 +415,9 @@ void squashfs_copy_cache(struct folio *folio, if (folio_test_uptodate(push_folio)) goto skip_folio; - squashfs_fill_page(&push_folio->page, buffer, offset, avail); + uptodate = squashfs_fill_page(push_folio, buffer, offset, avail); skip_folio: - folio_unlock(push_folio); + folio_end_read(push_folio, uptodate); if (i != folio->index) folio_put(push_folio); } diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index 8c6fbef022f4..218868b20f16 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h @@ -73,7 +73,6 @@ extern __le64 *squashfs_read_fragment_index_table(struct super_block *, u64, u64, unsigned int); /* file.c */ -void squashfs_fill_page(struct page *, struct squashfs_cache_entry *, int, int); void squashfs_copy_cache(struct folio *, struct squashfs_cache_entry *, size_t bytes, size_t offset); From 82dda04771d917ed8a0543cf316a2bcf79ddff74 Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" <willy@infradead.org> Date: Fri, 10 Jan 2025 16:32:58 +0000 Subject: [PATCH 474/504] squashfs: fix "convert squashfs_fill_page() to take a folio" I got the polarity of "uptodate" wrong. Rename it. Thanks to Ryan for testing; please fold into above named patch, and he'd like you to add Link: https://lkml.kernel.org/r/20250110163300.3346321-2-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org> Tested-by: Ryan Roberts <ryan.roberts@arm.com> Cc: Phillip Lougher <phillip@squashfs.org.uk> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/squashfs/file.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index eef1dd462592..5ca2baa16dc2 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c @@ -400,7 +400,7 @@ void squashfs_copy_cache(struct folio *folio, bytes -= PAGE_SIZE, offset += PAGE_SIZE) { struct folio *push_folio; size_t avail = buffer ? min(bytes, PAGE_SIZE) : 0; - bool uptodate = true; + bool updated = false; TRACE("bytes %zu, i %d, available_bytes %zu\n", bytes, i, avail); @@ -415,9 +415,9 @@ void squashfs_copy_cache(struct folio *folio, if (folio_test_uptodate(push_folio)) goto skip_folio; - uptodate = squashfs_fill_page(push_folio, buffer, offset, avail); + updated = squashfs_fill_page(push_folio, buffer, offset, avail); skip_folio: - folio_end_read(push_folio, uptodate); + folio_end_read(push_folio, updated); if (i != folio->index) folio_put(push_folio); } From b0a8bf882adf9c708ec237c06c1ef6c6946db58d Mon Sep 17 00:00:00 2001 From: Tio Zhang <tiozhang@didiglobal.com> Date: Tue, 24 Dec 2024 17:53:44 +0800 Subject: [PATCH 475/504] kthread: correct comments before kthread_queue_work() s/kthread_worker_create/kthread_create_worker/ to avoid confusion when reading comments before kthread_queue_work(). Link: https://lkml.kernel.org/r/20241224095344.GA7587@didi-ThinkCentre-M930t-N000 Signed-off-by: Tio Zhang <tiozhang@didiglobal.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- kernel/kthread.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/kthread.c b/kernel/kthread.c index a5ac612b1609..2fd0daa6b3b6 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -1015,7 +1015,7 @@ static void kthread_insert_work(struct kthread_worker *worker, * @work: kthread_work to queue * * Queue @work to work processor @task for async execution. @task - * must have been created with kthread_worker_create(). 
Returns %true + * must have been created with kthread_create_worker(). Returns %true * if @work was successfully queued, %false if it was already pending. * * Reinitialize the work if it needs to be used by another worker. From 9a430d00f10e2e839c228bdd4ed54f361d36f9d2 Mon Sep 17 00:00:00 2001 From: "Rob Herring (Arm)" <robh@kernel.org> Date: Tue, 31 Dec 2024 09:54:14 -0600 Subject: [PATCH 476/504] MAINTAINERS: fix list entries with display names get_maintainers.pl doesn't expect list entries to have a display name. Entries with a display name are omitted and print just the description: (open list:PIN CONTROLLER - FREESCALE) These cases are pretty much aliases to a few people, not lists which are archived and can be subscribed to. Change these cases to be reviewers instead. Link: https://lkml.kernel.org/r/20241231155415.186244-1-robh@kernel.org Signed-off-by: Rob Herring (Arm) <robh@kernel.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Joe Perches <joe@perches.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- MAINTAINERS | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 30cbc3d44cd5..953feba08959 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2829,7 +2829,7 @@ ARM/NXP S32G ARCHITECTURE R: Chester Lin <chester62515@gmail.com> R: Matthias Brugger <mbrugger@suse.com> R: Ghennadi Procopciuc <ghennadi.procopciuc@oss.nxp.com> -L: NXP S32 Linux Team <s32@nxp.com> +R: NXP S32 Linux Team <s32@nxp.com> L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm64/boot/dts/freescale/s32g*.dts* @@ -16604,8 +16604,8 @@ F: arch/nios2/ NITRO ENCLAVES (NE) M: Alexandru Ciobotaru <alcioa@amazon.com> +R: The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com> L: linux-kernel@vger.kernel.org -L: The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com> S: Supported W: https://aws.amazon.com/ec2/nitro/nitro-enclaves/ F: Documentation/virt/ne_overview.rst @@ -16616,8 +16616,8 @@ F: samples/nitro_enclaves/ NITRO SECURE MODULE (NSM) M: Alexander Graf <graf@amazon.com> +R: The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com> L: linux-kernel@vger.kernel.org -L: The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com> S: Supported W: https://aws.amazon.com/ec2/nitro/nitro-enclaves/ F: drivers/misc/nsm.c @@ -18429,8 +18429,8 @@ M: Fabio Estevam <festevam@gmail.com> M: Shawn Guo <shawnguo@kernel.org> M: Jacky Bai <ping.bai@nxp.com> R: Pengutronix Kernel Team <kernel@pengutronix.de> +R: NXP S32 Linux Team <s32@nxp.com> L: linux-gpio@vger.kernel.org -L: NXP S32 Linux Team <s32@nxp.com> S: Maintained F: Documentation/devicetree/bindings/pinctrl/fsl,* F: Documentation/devicetree/bindings/pinctrl/nxp,s32* @@ -19565,7 +19565,7 @@ F: drivers/ras/amd/fmpm.c RASPBERRY PI PISP BACK END M: Jacopo Mondi <jacopo.mondi@ideasonboard.com> -L: Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com> +R: Raspberry Pi Kernel Maintenance <kernel-list@raspberrypi.com> L: linux-media@vger.kernel.org S: Maintained F: Documentation/devicetree/bindings/media/raspberrypi,pispbe.yaml From 210c73092f7f983e4b738dbcd2483ceb1a2c8d9b Mon Sep 17 00:00:00 2001 From: Kuan-Wei Chiu <visitorckw@gmail.com> Date: Tue, 7 Jan 2025 01:01:03 +0800 Subject: [PATCH 477/504] lib/sort: clarify comparison function requirements in sort_r() Patch series "lib: clarify comparison function requirements", v2. 
Add a detailed explanation in the sort_r/list_sort kernel doc comment specifying that the comparison function must satisfy antisymmetry and transitivity. These properties are essential for the sorting algorithm to produce correct results. Issues have arisen in the past [1][2][3][4] where comparison functions violated the transitivity property, causing sorting algorithms to fail to correctly order elements. While these requirements may seem straightforward, they are commonly misunderstood or overlooked, leading to bugs. Highlighting these properties in the documentation will help prevent such mistakes in the future. Link: https://lore.kernel.org/lkml/20240701205639.117194-1-visitorckw@gmail.com [1] Link: https://lore.kernel.org/lkml/20241203202228.1274403-1-visitorckw@gmail.com [2] Link: https://lore.kernel.org/lkml/20241209134226.1939163-1-visitorckw@gmail.com [3] Link: https://lore.kernel.org/lkml/20241209145728.1975311-1-visitorckw@gmail.com [4] This patch (of 2): Add a detailed explanation in the sort_r() kernel doc comment specifying that the comparison function must satisfy antisymmetry and transitivity. These properties are essential for the sorting algorithm to produce correct results. Issues have arisen in the past [1][2][3][4] where comparison functions violated the transitivity property, causing sorting algorithms to fail to correctly order elements. While these requirements may seem straightforward, they are commonly misunderstood or overlooked, leading to bugs. Highlighting these properties in the documentation will help prevent such mistakes in the future. Link: https://lkml.kernel.org/r/20250106170104.3137845-1-visitorckw@gmail.com Link: https://lore.kernel.org/lkml/20240701205639.117194-1-visitorckw@gmail.com [1] Link: https://lore.kernel.org/lkml/20241203202228.1274403-1-visitorckw@gmail.com [2] Link: https://lore.kernel.org/lkml/20241209134226.1939163-1-visitorckw@gmail.com [3] Link: https://lore.kernel.org/lkml/20241209145728.1975311-1-visitorckw@gmail.com [4] Link: https://lkml.kernel.org/r/20250106170104.3137845-2-visitorckw@gmail.com Signed-off-by: Kuan-Wei Chiu <visitorckw@gmail.com> Cc: Ching-Chun (Jim) Huang <jserv@ccns.ncku.edu.tw> Cc: <chuang@cs.nycu.edu.tw> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- lib/sort.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/sort.c b/lib/sort.c index 048b7a6ef967..8e73dc55476b 100644 --- a/lib/sort.c +++ b/lib/sort.c @@ -200,6 +200,13 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size) * copy (e.g. fix up pointers or auxiliary data), but the built-in swap * avoids a slow retpoline and so is significantly faster. * + * The comparison function must adhere to specific mathematical + * properties to ensure correct and stable sorting: + * - Antisymmetry: cmp_func(a, b) must return the opposite sign of + * cmp_func(b, a). + * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then + * cmp_func(a, c) <= 0. + * * Sorting time is O(n log n) both on average and worst-case. 
While * quicksort is slightly faster on average, it suffers from exploitable * O(n*n) worst-case behavior and extra memory requirements that make From 6612ac8c625c0edef7346a4f99f05961ae2b8c19 Mon Sep 17 00:00:00 2001 From: Kuan-Wei Chiu <visitorckw@gmail.com> Date: Tue, 7 Jan 2025 01:01:04 +0800 Subject: [PATCH 478/504] lib/list_sort: clarify comparison function requirements in list_sort() Add a detailed explanation in the list_sort() kernel doc comment specifying that the comparison function must satisfy antisymmetry and transitivity. These properties are essential for the sorting algorithm to produce correct results. Issues have arisen in the past [1][2][3][4] where comparison functions violated the transitivity property, causing sorting algorithms to fail to correctly order elements. While these requirements may seem straightforward, they are commonly misunderstood or overlooked, leading to bugs. Highlighting these properties in the documentation will help prevent such mistakes in the future. Link: https://lore.kernel.org/lkml/20240701205639.117194-1-visitorckw@gmail.com [1] Link: https://lore.kernel.org/lkml/20241203202228.1274403-1-visitorckw@gmail.com [2] Link: https://lore.kernel.org/lkml/20241209134226.1939163-1-visitorckw@gmail.com [3] Link: https://lore.kernel.org/lkml/20241209145728.1975311-1-visitorckw@gmail.com [4] Link: https://lkml.kernel.org/r/20250106170104.3137845-3-visitorckw@gmail.com Signed-off-by: Kuan-Wei Chiu <visitorckw@gmail.com> Cc: Ching-Chun (Jim) Huang <jserv@ccns.ncku.edu.tw> Cc: <chuang@cs.nycu.edu.tw> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- lib/list_sort.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/lib/list_sort.c b/lib/list_sort.c index 8d3f623536fe..a310ecb7ccc0 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c @@ -108,6 +108,13 @@ static void merge_final(void *priv, list_cmp_func_t cmp, struct list_head *head, * and list_sort is a stable sort, so it is not necessary to distinguish * the @a < @b and @a == @b cases. * + * The comparison function must adhere to specific mathematical properties + * to ensure correct and stable sorting: + * - Antisymmetry: cmp(@a, @b) must return the opposite sign of + * cmp(@b, @a). + * - Transitivity: if cmp(@a, @b) <= 0 and cmp(@b, @c) <= 0, then + * cmp(@a, @c) <= 0. + * * This is compatible with two styles of @cmp function: * - The traditional style which returns <0 / =0 / >0, or * - Returning a boolean 0/1. From 33a65436f0eb70bcca009168ac24918c799ee4f5 Mon Sep 17 00:00:00 2001 From: Julian Sun <sunjunchao2870@gmail.com> Date: Mon, 6 Jan 2025 10:34:31 +0800 Subject: [PATCH 479/504] ocfs2: check el->l_next_free_rec in ocfs2_get_clusters_nocache Recently syzbot reported a use-after-free issue[1]. The root cause of the problem is that the journal inode recorded in this file system image is corrupted. The value of "di->id2.i_list.l_next_free_rec" is 8193, which is greater than the value of "di->id2.i_list.l_count" (19). To solve this problem, an additional check should be added within ocfs2_get_clusters_nocache(). If the check fails, an error will be returned and the file system will be set to read-only. 
[1]: https://lore.kernel.org/all/67577778.050a0220.a30f1.01bc.GAE@google.com/T/ Link: https://lkml.kernel.org/r/20250106023432.1320904-1-sunjunchao2870@gmail.com Signed-off-by: Julian Sun <sunjunchao2870@gmail.com> Reported-by: syzbot+2313dda4dc4885c93578@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=2313dda4dc4885c93578 Tested-by: syzbot+2313dda4dc4885c93578@syzkaller.appspotmail.com Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Joel Becker <jlbec@evilplan.org> Cc: Jun Piao <piaojun@huawei.com> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Mark Fasheh <mark@fasheh.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/extent_map.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index f7672472fa82..930150ed5db1 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -435,6 +435,16 @@ static int ocfs2_get_clusters_nocache(struct inode *inode, } } + if (le16_to_cpu(el->l_next_free_rec) > le16_to_cpu(el->l_count)) { + ocfs2_error(inode->i_sb, + "Inode %lu has an invalid extent (next_free_rec %u, count %u)\n", + inode->i_ino, + le16_to_cpu(el->l_next_free_rec), + le16_to_cpu(el->l_count)); + ret = -EROFS; + goto out; + } + i = ocfs2_search_extent_list(el, v_cluster); if (i == -1) { /* From 4d0f946d6b80dabb7f78b4cb5c7c3b2bd3063b16 Mon Sep 17 00:00:00 2001 From: Julian Sun <sunjunchao2870@gmail.com> Date: Mon, 6 Jan 2025 10:34:32 +0800 Subject: [PATCH 480/504] ocfs2: correct l_next_free_rec in online check Correct the value of l_next_free_rec to l_count during the online check, as done in the check_el() function in ocfs2_tools. Link: https://lkml.kernel.org/r/20250106023432.1320904-2-sunjunchao2870@gmail.com Signed-off-by: Julian Sun <sunjunchao2870@gmail.com> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Mark Fasheh <mark@fasheh.com> Cc: Joel Becker <jlbec@evilplan.org> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Jun Piao <piaojun@huawei.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/inode.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c index cd3173062ae3..12e5d1f73325 100644 --- a/fs/ocfs2/inode.c +++ b/fs/ocfs2/inode.c @@ -200,6 +200,20 @@ bail: return inode; } +static int ocfs2_dinode_has_extents(struct ocfs2_dinode *di) +{ + /* inodes flagged with other stuff in id2 */ + if (di->i_flags & (OCFS2_SUPER_BLOCK_FL | OCFS2_LOCAL_ALLOC_FL | + OCFS2_CHAIN_FL | OCFS2_DEALLOC_FL)) + return 0; + /* i_flags doesn't indicate when id2 is a fast symlink */ + if (S_ISLNK(di->i_mode) && di->i_size && di->i_clusters == 0) + return 0; + if (di->i_dyn_features & OCFS2_INLINE_DATA_FL) + return 0; + + return 1; +} /* * here's how inodes get read from disk: @@ -1547,6 +1561,16 @@ static int ocfs2_filecheck_repair_inode_block(struct super_block *sb, le32_to_cpu(di->i_fs_generation)); } + if (ocfs2_dinode_has_extents(di) && + le16_to_cpu(di->id2.i_list.l_next_free_rec) > le16_to_cpu(di->id2.i_list.l_count)) { + di->id2.i_list.l_next_free_rec = di->id2.i_list.l_count; + changed = 1; + mlog(ML_ERROR, + "Filecheck: reset dinode #%llu: l_next_free_rec to %u\n", + (unsigned long long)bh->b_blocknr, + le16_to_cpu(di->id2.i_list.l_next_free_rec)); + } + if (changed || ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check)) { ocfs2_compute_meta_ecc(sb, bh->b_data, &di->i_check); mark_buffer_dirty(bh); From 
853ad3138cc8a2c02ad1b4170df420fa40310cc5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Linus=20L=C3=BCssing?= <linus.luessing@c0d3.blue> Date: Wed, 8 Jan 2025 04:58:39 +0100 Subject: [PATCH 481/504] =?UTF-8?q?mailmap:=20update=20entry=20for=20Linus?= =?UTF-8?q?=20L=C3=BCssing?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Mapping another old, obsolete work email address to my primary one. Link: https://lkml.kernel.org/r/20250108035840.25194-1-linus.luessing@c0d3.blue Signed-off-by: Linus Lüssing <linus.luessing@c0d3.blue> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- .mailmap | 1 + 1 file changed, 1 insertion(+) diff --git a/.mailmap b/.mailmap index 8c6d0b5cc00b..fb9de36fc087 100644 --- a/.mailmap +++ b/.mailmap @@ -408,6 +408,7 @@ Liam Mark <quic_lmark@quicinc.com> <lmark@codeaurora.org> Linas Vepstas <linas@austin.ibm.com> Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@ascom.ch> Linus Lüssing <linus.luessing@c0d3.blue> <linus.luessing@web.de> +Linus Lüssing <linus.luessing@c0d3.blue> <ll@simonwunderlich.de> <linux-hardening@vger.kernel.org> <kernel-hardening@lists.openwall.com> Li Yang <leoyang.li@nxp.com> <leoli@freescale.com> Li Yang <leoyang.li@nxp.com> <leo@zh-kernel.org> From ae4ddc24a21066e56a403f949f023e1a0d5d4ef5 Mon Sep 17 00:00:00 2001 From: Su Yue <glass.su@suse.com> Date: Wed, 8 Jan 2025 10:41:19 +0800 Subject: [PATCH 482/504] ocfs2: check tl->count of truncate log inode in ocfs2_get_truncate_log_info syz reported: (syz-executor404,5313,0):ocfs2_truncate_log_append:5874 ERROR: bug expression: tl_count > ocfs2_truncate_recs_per_inode(osb->sb) || tl_count == 0 (syz-executor404,5313,0):ocfs2_truncate_log_append:5874 ERROR: Truncate record count on #77 invalid wanted 39, actual 2087 ------------[ cut here ]------------ kernel BUG at fs/ocfs2/alloc.c:5874! 
Oops: invalid opcode: 0000 [#1] PREEMPT SMP KASAN NOPTI CPU: 0 UID: 0 PID: 5313 Comm: syz-executor404 Not tainted 6.12.0-rc5-syzkaller-00299-g11066801dd4b #0 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2~bpo12+1 04/01/2014 RIP: 0010:ocfs2_truncate_log_append+0x9a8/0x9c0 fs/ocfs2/alloc.c:5868 RSP: 0018:ffffc9000cf16f40 EFLAGS: 00010292 RAX: b4b54f1d10640800 RBX: 0000000000000027 RCX: b4b54f1d10640800 RDX: 0000000000000000 RSI: 0000000080000000 RDI: 0000000000000000 RBP: ffffc9000cf17070 R08: ffffffff8174a14c R09: 1ffff11003f8519a R10: dffffc0000000000 R11: ffffed1003f8519b R12: 1ffff110085f5f58 R13: ffffff3800000000 R14: 000000000000004d R15: ffff8880438f0008 FS: 00005555722df380(0000) GS:ffff88801fc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000000002000f000 CR3: 000000004010e000 CR4: 0000000000352ef0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: <TASK> ocfs2_remove_btree_range+0x1303/0x1860 fs/ocfs2/alloc.c:5789 ocfs2_remove_inode_range+0xff3/0x29f0 fs/ocfs2/file.c:1907 ocfs2_reflink_remap_extent fs/ocfs2/refcounttree.c:4537 [inline] ocfs2_reflink_remap_blocks+0xcd4/0x1f30 fs/ocfs2/refcounttree.c:4684 ocfs2_remap_file_range+0x5fa/0x8d0 fs/ocfs2/file.c:2736 vfs_copy_file_range+0xc07/0x1510 fs/read_write.c:1615 __do_sys_copy_file_range fs/read_write.c:1705 [inline] __se_sys_copy_file_range+0x3f2/0x5d0 fs/read_write.c:1668 do_syscall_x64 arch/x86/entry/common.c:52 [inline] do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83 entry_SYSCALL_64_after_hwframe+0x77/0x7f RIP: 0033:0x7fd327167af9 Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 61 17 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007ffe6b8e22e8 EFLAGS: 00000246 ORIG_RAX: 0000000000000146 RAX: ffffffffffffffda RBX: 00007fd3271b005e RCX: 00007fd327167af9 RDX: 0000000000000006 RSI: 0000000000000000 RDI: 0000000000000004 RBP: 00007fd3271de610 R08: 000000000000d8c2 R09: 0000000000000000 R10: 0000000020000640 R11: 0000000000000246 R12: 0000000000000001 R13: 00007ffe6b8e24b8 R14: 0000000000000001 R15: 0000000000000001 </TASK> The fuzz image has a truncate log inode whose tl_count is bigger than ocfs2_truncate_recs_per_inode() so it triggers the BUG in ocfs2_truncate_log_append(). As what the check in ocfs2_truncate_log_append() does, just do same check into ocfs2_get_truncate_log_info when truncate log inode is reading in so we can bail out earlier. 
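For context, the limit being checked derives from how many truncate records physically fit inside the truncate log inode's block. A rough sketch of that relationship, assuming the usual on-disk layout (the real calculation is ocfs2_truncate_recs_per_inode() in fs/ocfs2/ocfs2.h and may differ in detail):

/*
 * Illustrative sketch only: truncate records occupy the space left in the
 * inode block after the ocfs2_truncate_log header, so a valid tl_count can
 * never exceed this value (nor be zero).
 */
static inline int truncate_recs_per_inode_sketch(struct super_block *sb)
{
        int space = sb->s_blocksize -
                    offsetof(struct ocfs2_dinode, id2.i_dealloc.tl_recs);

        return space / sizeof(struct ocfs2_truncate_rec);
}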
Link: https://lkml.kernel.org/r/20250108024119.60313-1-glass.su@suse.com Signed-off-by: Su Yue <glass.su@suse.com> Reported-by: Liebes Wang <wanghaichi0403@gmail.com> Link: https://lore.kernel.org/ocfs2-devel/CADCV8souQhdP0RdQF1U7KTWtuHDfpn+3LnTt-EEuMmB-pMRrgQ@mail.gmail.com/T/#u Reported-by: syzbot+a66542ca5ebb4233b563@syzkaller.appspotmail.com Tested-by: syzbot+a66542ca5ebb4233b563@syzkaller.appspotmail.com Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Mark Fasheh <mark@fasheh.com> Cc: Joel Becker <jlbec@evilplan.org> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Jun Piao <piaojun@huawei.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/alloc.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 5cf698785fae..4414743b638e 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c @@ -6154,6 +6154,9 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, int status; struct inode *inode = NULL; struct buffer_head *bh = NULL; + struct ocfs2_dinode *di; + struct ocfs2_truncate_log *tl; + unsigned int tl_count; inode = ocfs2_get_system_file_inode(osb, TRUNCATE_LOG_SYSTEM_INODE, @@ -6171,6 +6174,18 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb, goto bail; } + di = (struct ocfs2_dinode *)bh->b_data; + tl = &di->id2.i_dealloc; + tl_count = le16_to_cpu(tl->tl_count); + if (unlikely(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) || + tl_count == 0)) { + status = -EFSCORRUPTED; + iput(inode); + brelse(bh); + mlog_errno(status); + goto bail; + } + *tl_inode = inode; *tl_bh = bh; bail: From ccfa6252098379213cde6fa4c194c8fc437fb2f2 Mon Sep 17 00:00:00 2001 From: Su Yue <glass.su@suse.com> Date: Mon, 6 Jan 2025 22:06:53 +0800 Subject: [PATCH 483/504] ocfs2: mark dquot as inactive if failed to start trans while releasing dquot While running fstests generic/329, the kernel workqueue quota_release_workfn is dead looping in calling ocfs2_release_dquot(). The ocfs2 state is already readonly but ocfs2_release_dquot wants to start a transaction but fails and returns. ===================================================================== [ 2918.123602 ][ T275 ] On-disk corruption discovered. Please run fsck.ocfs2 once the filesystem is unmounted. [ 2918.124034 ][ T275 ] (kworker/u135:1,275,11):ocfs2_release_dquot:765 ERROR: status = -30 [ 2918.124452 ][ T275 ] (kworker/u135:1,275,11):ocfs2_release_dquot:795 ERROR: status = -30 [ 2918.124883 ][ T275 ] (kworker/u135:1,275,11):ocfs2_start_trans:357 ERROR: status = -30 [ 2918.125276 ][ T275 ] OCFS2: abort (device dm-0): ocfs2_start_trans: Detected aborted journal [ 2918.125710 ][ T275 ] On-disk corruption discovered. Please run fsck.ocfs2 once the filesystem is unmounted. ===================================================================== ocfs2_release_dquot() is much like dquot_release(), which is called by ext4 to handle similar situation. So here fix it by marking the dquot as inactive like what dquot_release() does. 
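A hypothetical illustration of the failure mode described above (not the actual quota code): a release worker keeps rescheduling itself until the object leaves its "active" state, so an error path that can never succeed, such as a filesystem that has gone read-only, has to clear that state itself or the worker cycles forever:

#define OBJ_ACTIVE_B    0

struct obj {
        unsigned long flags;                    /* OBJ_ACTIVE_B lives here */
        struct delayed_work release_work;
};

int obj_try_release(struct obj *o);             /* hypothetical helper; nonzero while release cannot complete */

static void obj_release_workfn(struct work_struct *work)
{
        struct obj *o = container_of(to_delayed_work(work),
                                     struct obj, release_work);

        if (!test_bit(OBJ_ACTIVE_B, &o->flags))
                return;                         /* nothing left to do */

        if (obj_try_release(o))
                schedule_delayed_work(&o->release_work, HZ);    /* retry later */
        else
                clear_bit(OBJ_ACTIVE_B, &o->flags);             /* done: stop the cycle */
}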
Link: https://lkml.kernel.org/r/20250106140653.92292-1-glass.su@suse.com Fixes: 9e33d69f553a ("ocfs2: Implementation of local and global quota file handling") Signed-off-by: Su Yue <glass.su@suse.com> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Mark Fasheh <mark@fasheh.com> Cc: Joel Becker <jlbec@evilplan.org> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Jun Piao <piaojun@huawei.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/quota_global.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c index 3404e7a30c33..15d9acd456ec 100644 --- a/fs/ocfs2/quota_global.c +++ b/fs/ocfs2/quota_global.c @@ -761,6 +761,11 @@ static int ocfs2_release_dquot(struct dquot *dquot) handle = ocfs2_start_trans(osb, ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_id.type)); if (IS_ERR(handle)) { + /* + * Mark dquot as inactive to avoid endless cycle in + * quota_release_workfn(). + */ + clear_bit(DQ_ACTIVE_B, &dquot->dq_flags); status = PTR_ERR(handle); mlog_errno(status); goto out_ilock; From 72c43293f50276b2bc52892a84643c3e407f3e84 Mon Sep 17 00:00:00 2001 From: Su Yue <glass.su@suse.com> Date: Mon, 6 Jan 2025 22:06:34 +0800 Subject: [PATCH 484/504] ocfs2: remove parameter parent_fe_bh from __ocfs2_mknod_locked The parameter is not used in __ocfs2_mknod_locked(). So remove it. No functional change. Link: https://lkml.kernel.org/r/20250106140634.92241-1-glass.su@suse.com Signed-off-by: Su Yue <glass.su@suse.com> Reviewed-by: Joseph Qi <joseph.qi@linux.alibaba.com> Cc: Mark Fasheh <mark@fasheh.com> Cc: Joel Becker <jlbec@evilplan.org> Cc: Junxiao Bi <junxiao.bi@oracle.com> Cc: Changwei Ge <gechangwei@live.cn> Cc: Jun Piao <piaojun@huawei.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/ocfs2/namei.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 5550f8afa438..0ec63a1a94b8 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -508,7 +508,6 @@ static int __ocfs2_mknod_locked(struct inode *dir, struct inode *inode, dev_t dev, struct buffer_head **new_fe_bh, - struct buffer_head *parent_fe_bh, handle_t *handle, struct ocfs2_alloc_context *inode_ac, u64 fe_blkno, u64 suballoc_loc, u16 suballoc_bit) @@ -641,8 +640,8 @@ static int ocfs2_mknod_locked(struct ocfs2_super *osb, } return __ocfs2_mknod_locked(dir, inode, dev, new_fe_bh, - parent_fe_bh, handle, inode_ac, - fe_blkno, suballoc_loc, suballoc_bit); + handle, inode_ac, fe_blkno, + suballoc_loc, suballoc_bit); } static int ocfs2_mkdir(struct mnt_idmap *idmap, @@ -2576,7 +2575,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir, clear_nlink(inode); /* do the real work now. */ status = __ocfs2_mknod_locked(dir, inode, - 0, &new_di_bh, parent_di_bh, handle, + 0, &new_di_bh, handle, inode_ac, di_blkno, suballoc_loc, suballoc_bit); if (status < 0) { From e067f16c05f526e9a775be42c577fecf86e4a89a Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi <konishi.ryusuke@gmail.com> Date: Wed, 8 Jan 2025 05:00:46 +0900 Subject: [PATCH 485/504] nilfs2: do not force clear folio if buffer is referenced Patch series "nilfs2: protect busy buffer heads from being force-cleared". This series fixes the buffer head state inconsistency issues reported by syzbot that occurs when the filesystem is corrupted and falls back to read-only, and the associated buffer head use-after-free issue. 
This patch (of 2): Syzbot has reported that after nilfs2 detects filesystem corruption and falls back to read-only, inconsistencies in the buffer state may occur. One of the inconsistencies is that when nilfs2 calls mark_buffer_dirty() to set a data or metadata buffer as dirty, the buffer turns out not to be in the uptodate state: WARNING: CPU: 0 PID: 6049 at fs/buffer.c:1177 mark_buffer_dirty+0x2e5/0x520 fs/buffer.c:1177 ... Call Trace: <TASK> nilfs_palloc_commit_alloc_entry+0x4b/0x160 fs/nilfs2/alloc.c:598 nilfs_ifile_create_inode+0x1dd/0x3a0 fs/nilfs2/ifile.c:73 nilfs_new_inode+0x254/0x830 fs/nilfs2/inode.c:344 nilfs_mkdir+0x10d/0x340 fs/nilfs2/namei.c:218 vfs_mkdir+0x2f9/0x4f0 fs/namei.c:4257 do_mkdirat+0x264/0x3a0 fs/namei.c:4280 __do_sys_mkdirat fs/namei.c:4295 [inline] __se_sys_mkdirat fs/namei.c:4293 [inline] __x64_sys_mkdirat+0x87/0xa0 fs/namei.c:4293 do_syscall_x64 arch/x86/entry/common.c:52 [inline] do_syscall_64+0xf3/0x230 arch/x86/entry/common.c:83 entry_SYSCALL_64_after_hwframe+0x77/0x7f The other is when nilfs_btree_propagate(), which propagates the dirty state to the ancestor nodes of a b-tree that point to a dirty buffer, detects that the origin buffer is not dirty, even though it should be: WARNING: CPU: 0 PID: 5245 at fs/nilfs2/btree.c:2089 nilfs_btree_propagate+0xc79/0xdf0 fs/nilfs2/btree.c:2089 ... Call Trace: <TASK> nilfs_bmap_propagate+0x75/0x120 fs/nilfs2/bmap.c:345 nilfs_collect_file_data+0x4d/0xd0 fs/nilfs2/segment.c:587 nilfs_segctor_apply_buffers+0x184/0x340 fs/nilfs2/segment.c:1006 nilfs_segctor_scan_file+0x28c/0xa50 fs/nilfs2/segment.c:1045 nilfs_segctor_collect_blocks fs/nilfs2/segment.c:1216 [inline] nilfs_segctor_collect fs/nilfs2/segment.c:1540 [inline] nilfs_segctor_do_construct+0x1c28/0x6b90 fs/nilfs2/segment.c:2115 nilfs_segctor_construct+0x181/0x6b0 fs/nilfs2/segment.c:2479 nilfs_segctor_thread_construct fs/nilfs2/segment.c:2587 [inline] nilfs_segctor_thread+0x69e/0xe80 fs/nilfs2/segment.c:2701 kthread+0x2f0/0x390 kernel/kthread.c:389 ret_from_fork+0x4b/0x80 arch/x86/kernel/process.c:147 ret_from_fork_asm+0x1a/0x30 arch/x86/entry/entry_64.S:244 </TASK> Both of these issues are caused by the callbacks that handle page/folio write requests forcibly clearing various states, including the working state of the buffers they hold, at unexpected times when they detect the read-only fallback. Fix these issues by checking whether the buffer is referenced before clearing the page/folio state, and skipping the clear if it is.
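In essence, the change below refuses to clear a folio's buffer state while any attached buffer head is still in use. A simplified sketch of that test, with a hypothetical helper name (the actual patch additionally retries after invalidate_bh_lrus(), since the per-CPU buffer head LRUs can hold extra references):

/*
 * Simplified sketch: true if any buffer head attached to the folio still
 * has an elevated reference count or is locked, in which case its state
 * must not be force-cleared.
 */
static bool nilfs_folio_buffers_busy_sketch(struct folio *folio)
{
        struct buffer_head *head = folio_buffers(folio), *bh;

        if (!head)
                return false;
        bh = head;
        do {
                if (atomic_read(&bh->b_count) || buffer_locked(bh))
                        return true;
        } while (bh = bh->b_this_page, bh != head);

        return false;
}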
Link: https://lkml.kernel.org/r/20250107200202.6432-1-konishi.ryusuke@gmail.com Link: https://lkml.kernel.org/r/20250107200202.6432-2-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> Reported-by: syzbot+b2b14916b77acf8626d7@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=b2b14916b77acf8626d7 Reported-by: syzbot+d98fd19acd08b36ff422@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?extid=d98fd19acd08b36ff422 Fixes: 8c26c4e2694a ("nilfs2: fix issue with flush kernel thread after remount in RO mode because of driver's internal error or metadata corruption") Tested-by: syzbot+b2b14916b77acf8626d7@syzkaller.appspotmail.com Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/nilfs2/page.c | 31 +++++++++++++++++++++++++++---- 1 file changed, 27 insertions(+), 4 deletions(-) diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 9de2a494a069..899686d2e5f7 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -392,6 +392,11 @@ void nilfs_clear_dirty_pages(struct address_space *mapping) /** * nilfs_clear_folio_dirty - discard dirty folio * @folio: dirty folio that will be discarded + * + * nilfs_clear_folio_dirty() clears working states including dirty state for + * the folio and its buffers. If the folio has buffers, clear only if it is + * confirmed that none of the buffer heads are busy (none have valid + * references and none are locked). */ void nilfs_clear_folio_dirty(struct folio *folio) { @@ -399,10 +404,6 @@ void nilfs_clear_folio_dirty(struct folio *folio) BUG_ON(!folio_test_locked(folio)); - folio_clear_uptodate(folio); - folio_clear_mappedtodisk(folio); - folio_clear_checked(folio); - head = folio_buffers(folio); if (head) { const unsigned long clear_bits = @@ -410,6 +411,25 @@ void nilfs_clear_folio_dirty(struct folio *folio) BIT(BH_Async_Write) | BIT(BH_NILFS_Volatile) | BIT(BH_NILFS_Checked) | BIT(BH_NILFS_Redirected) | BIT(BH_Delay)); + bool busy, invalidated = false; + +recheck_buffers: + busy = false; + bh = head; + do { + if (atomic_read(&bh->b_count) | buffer_locked(bh)) { + busy = true; + break; + } + } while (bh = bh->b_this_page, bh != head); + + if (busy) { + if (invalidated) + return; + invalidate_bh_lrus(); + invalidated = true; + goto recheck_buffers; + } bh = head; do { @@ -419,6 +439,9 @@ void nilfs_clear_folio_dirty(struct folio *folio) } while (bh = bh->b_this_page, bh != head); } + folio_clear_uptodate(folio); + folio_clear_mappedtodisk(folio); + folio_clear_checked(folio); __nilfs_clear_folio_dirty(folio); } From 0b4816d267da028e0be812c93d856e0e4c1554c6 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi <konishi.ryusuke@gmail.com> Date: Wed, 8 Jan 2025 05:00:47 +0900 Subject: [PATCH 486/504] nilfs2: protect access to buffers with no active references nilfs_lookup_dirty_data_buffers(), which iterates through the buffers attached to dirty data folios/pages, accesses the attached buffers without locking the folios/pages. For data cache, nilfs_clear_folio_dirty() may be called asynchronously when the file system degenerates to read only, so nilfs_lookup_dirty_data_buffers() still has the potential to cause use after free issues when buffers lose the protection of their dirty state midway due to this asynchronous clearing and are unintentionally freed by try_to_free_buffers(). Eliminate this race issue by adjusting the lock section in this function. 
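Schematically, the corrected lock scope keeps the folio locked across the whole buffer walk; a sketch with a hypothetical helper (the real change simply moves folio_unlock() later in nilfs_lookup_dirty_data_buffers()):

static void collect_dirty_buffers_sketch(struct folio *folio,
                                         struct list_head *listp)
{
        struct buffer_head *bh, *head;

        folio_lock(folio);
        head = folio_buffers(folio);
        if (head) {
                bh = head;
                do {
                        if (buffer_dirty(bh)) {
                                get_bh(bh);     /* pin before handing off */
                                list_add_tail(&bh->b_assoc_buffers, listp);
                        }
                } while (bh = bh->b_this_page, bh != head);
        }
        folio_unlock(folio);    /* only after the walk is complete */
}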
Link: https://lkml.kernel.org/r/20250107200202.6432-3-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> Fixes: 8c26c4e2694a ("nilfs2: fix issue with flush kernel thread after remount in RO mode because of driver's internal error or metadata corruption") Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/nilfs2/segment.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 587251830897..58a598b548fa 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -734,7 +734,6 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, if (!head) head = create_empty_buffers(folio, i_blocksize(inode), 0); - folio_unlock(folio); bh = head; do { @@ -744,11 +743,14 @@ static size_t nilfs_lookup_dirty_data_buffers(struct inode *inode, list_add_tail(&bh->b_assoc_buffers, listp); ndirties++; if (unlikely(ndirties >= nlimit)) { + folio_unlock(folio); folio_batch_release(&fbatch); cond_resched(); return ndirties; } } while (bh = bh->b_this_page, bh != head); + + folio_unlock(folio); } folio_batch_release(&fbatch); cond_resched(); From 1b910e350f19a7b1256ddaa7d81b9fec69286d54 Mon Sep 17 00:00:00 2001 From: David Reaver <me@davidreaver.com> Date: Wed, 8 Jan 2025 11:24:54 -0800 Subject: [PATCH 487/504] checkpatch: remove migrated RCU APIs from deprecated_apis The deprecated_apis map was created in [1] so checkpatch would flag deprecated RCU APIs. These deprecated APIs have since been removed from the kernel. This patch removes them from this map so checkpatch doesn't waste time looking for them, and so readers of checkpatch looking for deprecated APIs don't waste time searching for them. Link: https://lore.kernel.org/all/20181111192904.3199-13-paulmck@linux.ibm.com/ [1] Link: https://lkml.kernel.org/r/20250108192456.47871-1-me@davidreaver.com Signed-off-by: David Reaver <me@davidreaver.com> Reviewed-by: Paul E. 
McKenney <paulmck@kernel.org> Reviewed-by: Kuan-Wei Chiu <visitorckw@gmail.com> Acked-by: Joe Perches <joe@perches.com> Cc: Andy Whitcroft <apw@canonical.com> Cc: Dwaipayan Ray <dwaipayanray1@gmail.com> Cc: Krister Johansen <kjlx@templeofstupid.com> Cc: Lukas Bulwahn <lukas.bulwahn@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- scripts/checkpatch.pl | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 2bdc3d169af5..c625da28cdae 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -834,16 +834,6 @@ foreach my $entry (@mode_permission_funcs) { $mode_perms_search = "(?:${mode_perms_search})"; our %deprecated_apis = ( - "synchronize_rcu_bh" => "synchronize_rcu", - "synchronize_rcu_bh_expedited" => "synchronize_rcu_expedited", - "call_rcu_bh" => "call_rcu", - "rcu_barrier_bh" => "rcu_barrier", - "synchronize_sched" => "synchronize_rcu", - "synchronize_sched_expedited" => "synchronize_rcu_expedited", - "call_rcu_sched" => "call_rcu", - "rcu_barrier_sched" => "rcu_barrier", - "get_state_synchronize_sched" => "get_state_synchronize_rcu", - "cond_synchronize_sched" => "cond_synchronize_rcu", "kmap" => "kmap_local_page", "kunmap" => "kunmap_local", "kmap_atomic" => "kmap_local_page", From 4983b261bf0438f351270e2d1f2ac38ec010e9ea Mon Sep 17 00:00:00 2001 From: Oxana Kharitonova <oxana@cloudflare.com> Date: Fri, 10 Jan 2025 16:03:28 +0000 Subject: [PATCH 488/504] hung_task: add task->flags, blocked by coredump to log Resending this patch as I haven't received feedback on my initial submission https://lore.kernel.org/all/20241204182953.10854-1-oxana@cloudflare.com/ For processes which are terminated abnormally, the kernel can provide a coredump if enabled. When the coredump is performed, the process and all its threads are put into the D state (TASK_UNINTERRUPTIBLE | TASK_FREEZABLE). On the other hand, we have the kernel thread khungtaskd, which monitors processes in the D state. If a task is stuck in the D state for more than kernel.hung_task_timeout_secs, a hung_task alert appears in the kernel log. The higher the memory usage of a process, the longer it takes to create the coredump, and the longer its tasks stay in the D state. We have hung_task alerts for processes with memory usage above 10Gb, although our kernel.hung_task_timeout_secs is 10 sec whereas the default is 120 sec. Adding information to the log that the task is blocked by a coredump will help with monitoring. Another approach might be to completely filter out alerts for such tasks, but in that case we would lose transparency about what is putting pressure on some system resources, e.g. we saw an increase in I/O when a coredump occurs due to its writing to disk. Additionally, it would be helpful to have task_struct->flags in the log from the function sched_show_task(). Currently it prints task_struct->thread_info->flags, which seems misleading as the line starts with "task:xxxx".
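For context, a condensed sketch of how the detector decides, loosely based on check_hung_task() in kernel/hung_task.c (simplified, not the exact code): khungtaskd reports a TASK_UNINTERRUPTIBLE task only if it has not been scheduled at all since the previous check, which is exactly what a long-running coredump produces, and with this patch the report also says why:

static void check_hung_task_sketch(struct task_struct *t, unsigned long timeout)
{
        unsigned long switch_count = t->nvcsw + t->nivcsw;

        if (switch_count != t->last_switch_count) {
                t->last_switch_count = switch_count;    /* still making progress */
                t->last_switch_time = jiffies;
                return;
        }
        if (time_is_after_jiffies(t->last_switch_time + timeout * HZ))
                return;                 /* not stuck for a full interval yet */

        /* stuck for the whole interval: report it, and with this patch also
         * note when the stall comes from an in-progress coredump */
        if (t->flags & PF_POSTCOREDUMP)
                pr_err(" Blocked by coredump.\n");
        sched_show_task(t);
}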
Link: https://lkml.kernel.org/r/20250110160328.64947-1-oxana@cloudflare.com Signed-off-by: Oxana Kharitonova <oxana@cloudflare.com> Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Ben Segall <bsegall@google.com> Cc: Christian Brauner <brauner@kernel.org> Cc: Dietmar Eggemann <dietmar.eggemann@arm.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jan Kara <jack@suse.cz> Cc: Juri Lelli <juri.lelli@redhat.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Valentin Schneider <vschneid@redhat.com> Cc: Vincent Guittot <vincent.guittot@linaro.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- kernel/hung_task.c | 2 ++ kernel/sched/core.c | 4 ++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/kernel/hung_task.c b/kernel/hung_task.c index c18717189f32..953169893a95 100644 --- a/kernel/hung_task.c +++ b/kernel/hung_task.c @@ -147,6 +147,8 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout) print_tainted(), init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); + if (t->flags & PF_POSTCOREDUMP) + pr_err(" Blocked by coredump.\n"); pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\"" " disables this message.\n"); sched_show_task(t); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 3e5a6bf587f9..77b6af12e146 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7701,9 +7701,9 @@ void sched_show_task(struct task_struct *p) if (pid_alive(p)) ppid = task_pid_nr(rcu_dereference(p->real_parent)); rcu_read_unlock(); - pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d flags:0x%08lx\n", + pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%08lx flags:0x%08lx\n", free, task_pid_nr(p), task_tgid_nr(p), - ppid, read_task_thread_flags(p)); + ppid, p->flags, read_task_thread_flags(p)); print_worker_info(KERN_INFO, p); print_stop_info(KERN_INFO, p); From 19eb95f591c1bf8b6d558bf6e3d095df1df42e30 Mon Sep 17 00:00:00 2001 From: Andrew Morton <akpm@linux-foundation.org> Date: Fri, 10 Jan 2025 19:51:53 -0800 Subject: [PATCH 489/504] hung_task-add-task-flags-blocked-by-coredump-to-log-fix fix printk control string In file included from ./include/asm-generic/bug.h:22, from ./arch/x86/include/asm/bug.h:99, from ./include/linux/bug.h:5, from ./arch/x86/include/asm/paravirt.h:19, from ./arch/x86/include/asm/irqflags.h:80, from ./include/linux/irqflags.h:18, from ./include/linux/spinlock.h:59, from ./include/linux/wait.h:9, from ./include/linux/wait_bit.h:8, from ./include/linux/fs.h:6, from ./include/linux/highmem.h:5, from kernel/sched/core.c:10: kernel/sched/core.c: In function 'sched_show_task': ./include/linux/kern_levels.h:5:25: error: format '%lx' expects argument of type 'long unsigned int', but argument 6 has type 'unsigned int' [-Werror=format=] 5 | #define KERN_SOH "\001" /* ASCII Start Of Header */ | ^~~~~~ ./include/linux/printk.h:473:25: note: in definition of macro 'printk_index_wrap' 473 | _p_func(_fmt, ##__VA_ARGS__); \ | ^~~~ ./include/linux/printk.h:586:9: note: in expansion of macro 'printk' 586 | printk(KERN_CONT fmt, ##__VA_ARGS__) | ^~~~~~ ./include/linux/kern_levels.h:24:25: note: in expansion of macro 'KERN_SOH' 24 | #define KERN_CONT KERN_SOH "c" | ^~~~~~~~ ./include/linux/printk.h:586:16: note: in expansion of macro 'KERN_CONT' 586 | printk(KERN_CONT fmt, ##__VA_ARGS__) | ^~~~~~~~~ kernel/sched/core.c:7704:9: note: in expansion of macro 'pr_cont' 7704 | pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d 
ppid:%-6d task_flags:0x%08lx flags:0x%08lx\n", | ^~~~~~~ cc1: all warnings being treated as errors Cc: Al Viro <viro@zeniv.linux.org.uk> Cc: Ben Segall <bsegall@google.com> Cc: Christian Brauner <brauner@kernel.org> Cc: Dietmar Eggemann <dietmar.eggemann@arm.com> Cc: Ingo Molnar <mingo@redhat.com> Cc: Jan Kara <jack@suse.cz> Cc: Juri Lelli <juri.lelli@redhat.com> Cc: Mel Gorman <mgorman@suse.de> Cc: Oxana Kharitonova <oxana@cloudflare.com> Cc: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Valentin Schneider <vschneid@redhat.com> Cc: Vincent Guittot <vincent.guittot@linaro.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- kernel/sched/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 77b6af12e146..109b5df48e8b 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7701,7 +7701,7 @@ void sched_show_task(struct task_struct *p) if (pid_alive(p)) ppid = task_pid_nr(rcu_dereference(p->real_parent)); rcu_read_unlock(); - pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%08lx flags:0x%08lx\n", + pr_cont(" stack:%-5lu pid:%-5d tgid:%-5d ppid:%-6d task_flags:0x%04x flags:0x%08lx\n", free, task_pid_nr(p), task_tgid_nr(p), ppid, p->flags, read_task_thread_flags(p)); From f80ce9cda3e183ab3c0f0b9c1ede4f3fbf01ba79 Mon Sep 17 00:00:00 2001 From: Dan Carpenter <dan.carpenter@linaro.org> Date: Fri, 10 Jan 2025 10:12:17 +0300 Subject: [PATCH 490/504] checkpatch: don't warn about extra parentheses in staging/ This "Unnecessary parentheses" warning is disabled for drivers/staging unless the --strict option is used. Really, we don't want it at all even if the --strict option is used. Link: https://lkml.kernel.org/r/c7278d21-d96c-4c1e-b3bf-f82b8decc5df@stanley.mountain Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org> Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Andy Whitcroft <apw@canonical.com> Cc: Dwaipayan Ray <dwaipayanray1@gmail.com> Cc: Joe Perches <joe@perches.com> Cc: Lukas Bulwahn <lukas.bulwahn@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- scripts/checkpatch.pl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index c625da28cdae..9d469c20871f 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -5503,9 +5503,9 @@ sub process { } } -# check for unnecessary parentheses around comparisons in if uses -# when !drivers/staging or command-line uses --strict - if (($realfile !~ m@^(?:drivers/staging/)@ || $check_orig) && +# check for unnecessary parentheses around comparisons +# except in drivers/staging + if (($realfile !~ m@^(?:drivers/staging/)@) && $perl_version_ok && defined($stat) && $stat =~ /(^.\s*if\s*($balanced_parens))/) { my $if_stat = $1; From c7e55a09d204a79c57110af2d22ed7bdb4003b0b Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi <konishi.ryusuke@gmail.com> Date: Fri, 10 Jan 2025 10:01:44 +0900 Subject: [PATCH 491/504] nilfs2: correct return value kernel-doc descriptions for ioctl functions Patch series "nilfs2: fix kernel-doc comments for function return values", v2. This series fixes the inadequacies in the return value descriptions in nilfs2's kernel-doc comments (mainly incorrect formatting), as well as the lack of return value descriptions themselves, and fixes most of the remaining warnings that are output when the kernel-doc script is run with the "-Wall" option. 
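For reference, the comment shape the series converges on looks like the following, shown on a hypothetical function (example_lookup and struct example_rec are made up; the format matches the converted comments below):

/**
 * example_lookup - look up a record by key
 * @key: key to search for
 * @recp: place to store the found record
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-ENOENT - No record associated with @key exists.
 * * %-ENOMEM - Insufficient memory available.
 */
static int example_lookup(__u64 key, struct example_rec *recp);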
This patch (of 7): In the kernel-doc comments for functions, there are many cases where the format of the return value description is inaccurate, such as "Return Value: ...", which causes many warnings to be output when the kernel-doc script is executed with the "-Wall" option. This fixes such incorrectly formatted return value descriptions for ioctl functions. Link: https://lkml.kernel.org/r/20250110010530.21872-1-konishi.ryusuke@gmail.com Link: https://lkml.kernel.org/r/20250110010530.21872-2-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> Cc: "Brian G ." <gissf1@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/nilfs2/ioctl.c | 231 +++++++++++++++++++--------------------------- 1 file changed, 95 insertions(+), 136 deletions(-) diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index fa77f78df681..e877c97974a4 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -33,17 +33,14 @@ * @dofunc: concrete function of get/set metadata info * * Description: nilfs_ioctl_wrap_copy() gets/sets metadata info by means of - * calling dofunc() function on the basis of @argv argument. + * calling dofunc() function on the basis of @argv argument. If successful, + * the requested metadata information is copied to userspace memory. * - * Return Value: On success, 0 is returned and requested metadata info - * is copied into userspace. On error, one of the following - * negative error codes is returned. - * - * %-EINVAL - Invalid arguments from userspace. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during execution of requested operation. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during execution of requested operation. + * * %-EINVAL - Invalid arguments from userspace. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_wrap_copy(struct the_nilfs *nilfs, struct nilfs_argv *argv, int dir, @@ -190,13 +187,10 @@ static int nilfs_ioctl_getversion(struct inode *inode, void __user *argp) * given checkpoint between checkpoint and snapshot state. This ioctl * is used in chcp and mkcp utilities. * - * Return Value: On success, 0 is returned and mode of a checkpoint is - * changed. On error, one of the following negative error codes - * is returned. - * - * %-EPERM - Operation not permitted. - * - * %-EFAULT - Failure during checkpoint mode changing. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * %-EFAULT - Failure during checkpoint mode changing. + * %-EPERM - Operation not permitted. */ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -244,13 +238,10 @@ out: * checkpoint from NILFS2 file system. This ioctl is used in rmcp * utility. * - * Return Value: On success, 0 is returned and a checkpoint is - * removed. On error, one of the following negative error codes - * is returned. - * - * %-EPERM - Operation not permitted. - * - * %-EFAULT - Failure during checkpoint removing. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * %-EFAULT - Failure during checkpoint removing. + * %-EPERM - Operation not permitted. */ static int nilfs_ioctl_delete_checkpoint(struct inode *inode, struct file *filp, @@ -296,7 +287,7 @@ out: * requested checkpoints. The NILFS_IOCTL_GET_CPINFO ioctl is used in * lscp utility and by nilfs_cleanerd daemon. 
* - * Return value: count of nilfs_cpinfo structures in output buffer. + * Return: Count of nilfs_cpinfo structures in output buffer. */ static ssize_t nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, @@ -320,17 +311,14 @@ nilfs_ioctl_do_get_cpinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, * * Description: nilfs_ioctl_get_cpstat() returns information about checkpoints. * The NILFS_IOCTL_GET_CPSTAT ioctl is used by lscp, rmcp utilities - * and by nilfs_cleanerd daemon. + * and by nilfs_cleanerd daemon. The checkpoint statistics are copied to + * the userspace memory pointed to by @argp. * - * Return Value: On success, 0 is returned, and checkpoints information is - * copied into userspace pointer @argp. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during getting checkpoints statistics. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during getting checkpoints statistics. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -363,7 +351,8 @@ static int nilfs_ioctl_get_cpstat(struct inode *inode, struct file *filp, * info about requested segments. The NILFS_IOCTL_GET_SUINFO ioctl is used * in lssu, nilfs_resize utilities and by nilfs_cleanerd daemon. * - * Return value: count of nilfs_suinfo structures in output buffer. + * Return: Count of nilfs_suinfo structures in output buffer on success, + * or a negative error code on failure. */ static ssize_t nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, @@ -387,17 +376,14 @@ nilfs_ioctl_do_get_suinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, * * Description: nilfs_ioctl_get_sustat() returns segment usage statistics. * The NILFS_IOCTL_GET_SUSTAT ioctl is used in lssu, nilfs_resize utilities - * and by nilfs_cleanerd daemon. + * and by nilfs_cleanerd daemon. The requested segment usage information is + * copied to the userspace memory pointed to by @argp. * - * Return Value: On success, 0 is returned, and segment usage information is - * copied into userspace pointer @argp. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during getting segment usage statistics. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during getting segment usage statistics. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -430,7 +416,8 @@ static int nilfs_ioctl_get_sustat(struct inode *inode, struct file *filp, * on virtual block addresses. The NILFS_IOCTL_GET_VINFO ioctl is used * by nilfs_cleanerd daemon. * - * Return value: count of nilfs_vinfo structures in output buffer. + * Return: Count of nilfs_vinfo structures in output buffer on success, or + * a negative error code on failure. */ static ssize_t nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, @@ -457,7 +444,8 @@ nilfs_ioctl_do_get_vinfo(struct the_nilfs *nilfs, __u64 *posp, int flags, * about descriptors of disk block numbers. 
The NILFS_IOCTL_GET_BDESCS ioctl * is used by nilfs_cleanerd daemon. * - * Return value: count of nilfs_bdescs structures in output buffer. + * Return: Count of nilfs_bdescs structures in output buffer on success, or + * a negative error code on failure. */ static ssize_t nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, @@ -494,19 +482,15 @@ nilfs_ioctl_do_get_bdescs(struct the_nilfs *nilfs, __u64 *posp, int flags, * * Description: nilfs_ioctl_do_get_bdescs() function returns information * about descriptors of disk block numbers. The NILFS_IOCTL_GET_BDESCS ioctl - * is used by nilfs_cleanerd daemon. + * is used by nilfs_cleanerd daemon. If successful, disk block descriptors + * are copied to userspace pointer @argp. * - * Return Value: On success, 0 is returned, and disk block descriptors are - * copied into userspace pointer @argp. On error, one of the following - * negative error codes is returned. - * - * %-EINVAL - Invalid arguments from userspace. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during getting disk block descriptors. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during getting disk block descriptors. + * * %-EINVAL - Invalid arguments from userspace. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -540,16 +524,12 @@ static int nilfs_ioctl_get_bdescs(struct inode *inode, struct file *filp, * Description: nilfs_ioctl_move_inode_block() function registers data/node * buffer in the GC pagecache and submit read request. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - Requested block doesn't exist. - * - * %-EEXIST - Blocks conflict is detected. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EEXIST - Block conflict detected. + * * %-EIO - I/O error. + * * %-ENOENT - Requested block doesn't exist. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_move_inode_block(struct inode *inode, struct nilfs_vdesc *vdesc, @@ -604,8 +584,8 @@ static int nilfs_ioctl_move_inode_block(struct inode *inode, * blocks that garbage collector specified with the array of nilfs_vdesc * structures and stores them into page caches of GC inodes. * - * Return Value: Number of processed nilfs_vdesc structures or - * error code, otherwise. + * Return: Number of processed nilfs_vdesc structures on success, or + * a negative error code on failure. */ static int nilfs_ioctl_move_blocks(struct super_block *sb, struct nilfs_argv *argv, void *buf) @@ -682,14 +662,11 @@ static int nilfs_ioctl_move_blocks(struct super_block *sb, * in the period from p_start to p_end, excluding p_end itself. The checkpoints * which have been already deleted are ignored. * - * Return Value: Number of processed nilfs_period structures or - * error code, otherwise. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - invalid checkpoints. + * Return: Number of processed nilfs_period structures on success, or one of + * the following negative error codes on failure: + * * %-EINVAL - invalid checkpoints. + * * %-EIO - I/O error. 
+ * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) @@ -717,14 +694,11 @@ static int nilfs_ioctl_delete_checkpoints(struct the_nilfs *nilfs, * Description: nilfs_ioctl_free_vblocknrs() function frees * the virtual block numbers specified by @buf and @argv->v_nmembs. * - * Return Value: Number of processed virtual block numbers or - * error code, otherwise. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - The virtual block number have not been allocated. + * Return: Number of processed virtual block numbers on success, or one of the + * following negative error codes on failure: + * * %-EIO - I/O error. + * * %-ENOENT - Unallocated virtual block number. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) @@ -746,14 +720,11 @@ static int nilfs_ioctl_free_vblocknrs(struct the_nilfs *nilfs, * Description: nilfs_ioctl_mark_blocks_dirty() function marks * metadata file or data blocks as dirty. * - * Return Value: Number of processed block descriptors or - * error code, otherwise. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error - * - * %-ENOENT - the specified block does not exist (hole block) + * Return: Number of processed block descriptors on success, or one of the + * following negative error codes on failure: + * * %-EIO - I/O error. + * * %-ENOENT - Non-existent block (hole block). + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_mark_blocks_dirty(struct the_nilfs *nilfs, struct nilfs_argv *argv, void *buf) @@ -852,7 +823,7 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs, * from userspace. The NILFS_IOCTL_CLEAN_SEGMENTS ioctl is used by * nilfs_cleanerd daemon. * - * Return Value: On success, 0 is returned or error code, otherwise. + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -976,20 +947,14 @@ out: * and metadata are written out to the device when it successfully * returned. * - * Return Value: On success, 0 is retured. On errors, one of the following - * negative error code is returned. - * - * %-EROFS - Read only filesystem. - * - * %-EIO - I/O error - * - * %-ENOSPC - No space left on device (only in a panic state). - * - * %-ERESTARTSYS - Interrupted. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EFAULT - Failure during execution of requested operation. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during execution of requested operation. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (only in a panic state). + * * %-ERESTARTSYS - Interrupted. + * * %-EROFS - Read only filesystem. */ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) @@ -1023,7 +988,7 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp, * @filp: file object * @argp: pointer on argument from userspace * - * Return Value: On success, 0 is returned or error code, otherwise. + * Return: 0 on success, or a negative error code on failure. 
*/ static int nilfs_ioctl_resize(struct inode *inode, struct file *filp, void __user *argp) @@ -1059,7 +1024,7 @@ out: * checks the arguments from userspace and calls nilfs_sufile_trim_fs, which * performs the actual trim operation. * - * Return Value: On success, 0 is returned or negative error code, otherwise. + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp) { @@ -1101,7 +1066,7 @@ static int nilfs_ioctl_trim_fs(struct inode *inode, void __user *argp) * of segments in bytes and upper limit of segments in bytes. * The NILFS_IOCTL_SET_ALLOC_RANGE is used by nilfs_resize utility. * - * Return Value: On success, 0 is returned or error code, otherwise. + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_ioctl_set_alloc_range(struct inode *inode, void __user *argp) { @@ -1152,17 +1117,15 @@ out: * @dofunc: concrete function of getting metadata info * * Description: nilfs_ioctl_get_info() gets metadata info by means of - * calling dofunc() function. + * calling dofunc() function. The requested metadata information is copied + * to userspace memory @argp. * - * Return Value: On success, 0 is returned and requested metadata info - * is copied into userspace. On error, one of the following - * negative error codes is returned. - * - * %-EINVAL - Invalid arguments from userspace. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EFAULT - Failure during execution of requested operation. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EFAULT - Failure during execution of requested operation. + * * %-EINVAL - Invalid arguments from userspace. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp, @@ -1202,18 +1165,14 @@ static int nilfs_ioctl_get_info(struct inode *inode, struct file *filp, * encapsulated in nilfs_argv and updates the segment usage info * according to the flags in nilfs_suinfo_update. * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-EPERM - Not enough permissions - * - * %-EFAULT - Error copying input data - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - Invalid values in input (segment number, flags or nblocks) + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EEXIST - Block conflict detected. + * * %-EFAULT - Error copying input data. + * * %-EINVAL - Invalid values in input (segment number, flags or nblocks). + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. + * * %-EPERM - Not enough permissions. */ static int nilfs_ioctl_set_suinfo(struct inode *inode, struct file *filp, unsigned int cmd, void __user *argp) From 40a631332fec57a0deb93051a4ae6869eb3f46c9 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi <konishi.ryusuke@gmail.com> Date: Fri, 10 Jan 2025 10:01:45 +0900 Subject: [PATCH 492/504] nilfs2: correct return value kernel-doc descriptions for bmap functions Similar to the previous patch to fix the ioctl return value descriptions, this fixes the format of the return value descriptions for bmap (and btree)-related functions, which was causing the kernel-doc script to emit a number of warnings. 
Link: https://lkml.kernel.org/r/20250110010530.21872-3-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> Cc: "Brian G ." <gissf1@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/nilfs2/bmap.c | 120 ++++++++++++++++++++-------------------------- fs/nilfs2/btree.c | 7 ++- 2 files changed, 54 insertions(+), 73 deletions(-) diff --git a/fs/nilfs2/bmap.c b/fs/nilfs2/bmap.c index c9e8d9a7d820..ccc1a7aa52d2 100644 --- a/fs/nilfs2/bmap.c +++ b/fs/nilfs2/bmap.c @@ -47,17 +47,14 @@ static int nilfs_bmap_convert_error(struct nilfs_bmap *bmap, * @ptrp: place to store the value associated to @key * * Description: nilfs_bmap_lookup_at_level() finds a record whose key - * matches @key in the block at @level of the bmap. + * matches @key in the block at @level of the bmap. The record associated + * with @key is stored in the place pointed to by @ptrp. * - * Return Value: On success, 0 is returned and the record associated with @key - * is stored in the place pointed by @ptrp. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - A record associated with @key does not exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - A record associated with @key does not exist. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_lookup_at_level(struct nilfs_bmap *bmap, __u64 key, int level, __u64 *ptrp) @@ -138,14 +135,11 @@ static int nilfs_bmap_do_insert(struct nilfs_bmap *bmap, __u64 key, __u64 ptr) * Description: nilfs_bmap_insert() inserts the new key-record pair specified * by @key and @rec into @bmap. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EEXIST - A record associated with @key already exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EEXIST - A record associated with @key already exists. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_insert(struct nilfs_bmap *bmap, __u64 key, unsigned long rec) { @@ -193,14 +187,11 @@ static int nilfs_bmap_do_delete(struct nilfs_bmap *bmap, __u64 key) * Description: nilfs_bmap_seek_key() seeks a valid key on @bmap * starting from @start, and stores it to @keyp if found. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - No valid entry was found + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - No valid entry was found. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_seek_key(struct nilfs_bmap *bmap, __u64 start, __u64 *keyp) { @@ -236,14 +227,11 @@ int nilfs_bmap_last_key(struct nilfs_bmap *bmap, __u64 *keyp) * Description: nilfs_bmap_delete() deletes the key-record pair specified by * @key from @bmap. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. 
- * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - A record associated with @key does not exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - A record associated with @key does not exist. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_delete(struct nilfs_bmap *bmap, __u64 key) { @@ -290,12 +278,10 @@ static int nilfs_bmap_do_truncate(struct nilfs_bmap *bmap, __u64 key) * Description: nilfs_bmap_truncate() removes key-record pairs whose keys are * greater than or equal to @key from @bmap. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_truncate(struct nilfs_bmap *bmap, __u64 key) { @@ -330,12 +316,10 @@ void nilfs_bmap_clear(struct nilfs_bmap *bmap) * Description: nilfs_bmap_propagate() marks the buffers that directly or * indirectly refer to the block specified by @bh dirty. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_propagate(struct nilfs_bmap *bmap, struct buffer_head *bh) { @@ -362,22 +346,22 @@ void nilfs_bmap_lookup_dirty_buffers(struct nilfs_bmap *bmap, /** * nilfs_bmap_assign - assign a new block number to a block - * @bmap: bmap - * @bh: pointer to buffer head + * @bmap: bmap + * @bh: place to store a pointer to the buffer head to which a block + * address is assigned (in/out) * @blocknr: block number - * @binfo: block information + * @binfo: block information * * Description: nilfs_bmap_assign() assigns the block number @blocknr to the - * buffer specified by @bh. + * buffer specified by @bh. The block information is stored in the memory + * pointed to by @binfo, and the buffer head may be replaced as a block + * address is assigned, in which case a pointer to the new buffer head is + * stored in the memory pointed to by @bh. * - * Return Value: On success, 0 is returned and the buffer head of a newly - * create buffer and the block information associated with the buffer are - * stored in the place pointed by @bh and @binfo, respectively. On error, one - * of the following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_assign(struct nilfs_bmap *bmap, struct buffer_head **bh, @@ -402,12 +386,10 @@ int nilfs_bmap_assign(struct nilfs_bmap *bmap, * Description: nilfs_bmap_mark() marks the block specified by @key and @level * as dirty. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. 
- * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level) { @@ -430,7 +412,7 @@ int nilfs_bmap_mark(struct nilfs_bmap *bmap, __u64 key, int level) * Description: nilfs_test_and_clear() is the atomic operation to test and * clear the dirty state of @bmap. * - * Return Value: 1 is returned if @bmap is dirty, or 0 if clear. + * Return: 1 if @bmap is dirty, or 0 if clear. */ int nilfs_bmap_test_and_clear_dirty(struct nilfs_bmap *bmap) { @@ -490,10 +472,10 @@ static struct lock_class_key nilfs_bmap_mdt_lock_key; * * Description: nilfs_bmap_read() initializes the bmap @bmap. * - * Return Value: On success, 0 is returned. On error, the following negative - * error code is returned. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (corrupted bmap). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_bmap_read(struct nilfs_bmap *bmap, struct nilfs_inode *raw_inode) { diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index ef5061bb56da..0d8f7fb15c2e 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -334,7 +334,7 @@ static int nilfs_btree_node_lookup(const struct nilfs_btree_node *node, * @inode: host inode of btree * @blocknr: block number * - * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. + * Return: 0 if normal, 1 if the node is broken. */ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, size_t size, struct inode *inode, @@ -366,7 +366,7 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, * @node: btree root node to be examined * @inode: host inode of btree * - * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. + * Return: 0 if normal, 1 if the root node is broken. */ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, struct inode *inode) @@ -652,8 +652,7 @@ static int nilfs_btree_do_lookup_last(const struct nilfs_bmap *btree, * @minlevel: start level * @nextkey: place to store the next valid key * - * Return Value: If a next key was found, 0 is returned. Otherwise, - * -ENOENT is returned. + * Return: 0 if the next key was found, %-ENOENT if not found. */ static int nilfs_btree_get_next_key(const struct nilfs_bmap *btree, const struct nilfs_btree_path *path, From 6a8c5cc3fb20da639d61a967742fcae5bad576fc Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi <konishi.ryusuke@gmail.com> Date: Fri, 10 Jan 2025 10:01:46 +0900 Subject: [PATCH 493/504] nilfs2: correct return value kernel-doc descriptions for sufile Similar to the previous changes to fix return value descriptions, this fixes the format of the return value descriptions of functions for sufile-related functions, eliminating a dozen warnings emitted by the kernel-doc script. Link: https://lkml.kernel.org/r/20250110010530.21872-4-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> Cc: "Brian G ." 
<gissf1@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/nilfs2/sufile.c | 102 +++++++++++++++++---------------------------- fs/nilfs2/sufile.h | 16 +++---- 2 files changed, 45 insertions(+), 73 deletions(-) diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index d3ecc813d633..623d417a54eb 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c @@ -155,17 +155,13 @@ unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile) * of successfully modified segments from the head is stored in the * place @ndone points to. * - * Return Value: On success, zero is returned. On error, one of the - * following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - Given segment usage is in hole block (may be returned if - * @create is zero) - * - * %-EINVAL - Invalid segment usage number + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Invalid segment usage number + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Given segment usage is in hole block (may be returned if + * @create is zero) + * * %-ENOMEM - Insufficient memory available. */ int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs, int create, size_t *ndone, @@ -272,10 +268,7 @@ int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create, * @start: minimum segment number of allocatable region (inclusive) * @end: maximum segment number of allocatable region (inclusive) * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-ERANGE - invalid segment region + * Return: 0 on success, or %-ERANGE if segment range is invalid. */ int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end) { @@ -300,17 +293,14 @@ int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end) * @sufile: inode of segment usage file * @segnump: pointer to segment number * - * Description: nilfs_sufile_alloc() allocates a clean segment. + * Description: nilfs_sufile_alloc() allocates a clean segment, and stores + * its segment number in the place pointed to by @segnump. * - * Return Value: On success, 0 is returned and the segment number of the - * allocated segment is stored in the place pointed by @segnump. On error, one - * of the following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOSPC - No clean segment left. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No clean segment left. */ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump) { @@ -610,16 +600,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, * @sufile: inode of segment usage file * @sustat: pointer to a structure of segment usage statistics * - * Description: nilfs_sufile_get_stat() returns information about segment - * usage. + * Description: nilfs_sufile_get_stat() retrieves segment usage statistics + * and stores them in the location pointed to by @sustat. * - * Return Value: On success, 0 is returned, and segment usage information is - * stored in the place pointed by @sustat. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. 
- * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat) { @@ -683,16 +670,12 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum, * @start: start segment number (inclusive) * @end: end segment number (inclusive) * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - Invalid number of segments specified - * - * %-EBUSY - Dirty or active segments are present in the range + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EBUSY - Dirty or active segments are present in the range. + * * %-EINVAL - Invalid number of segments specified. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_sufile_truncate_range(struct inode *sufile, __u64 start, __u64 end) @@ -787,16 +770,12 @@ out: * @sufile: inode of segment usage file * @newnsegs: new number of segments * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOSPC - Enough free space is not left for shrinking - * - * %-EBUSY - Dirty or active segments exist in the region to be truncated + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EBUSY - Dirty or active segments exist in the region to be truncated. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - Enough free space is not left for shrinking. */ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs) { @@ -939,14 +918,11 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf, * segment usage accordingly. Only the fields indicated by the sup_flags * are updated. * - * Return Value: On success, 0 is returned. On error, one of the - * following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - Invalid values in input (segment number, flags or nblocks) + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Invalid values in input (segment number, flags or nblocks). + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf, unsigned int supsz, size_t nsup) @@ -1073,7 +1049,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf, * and start+len is rounded down. For each clean segment blkdev_issue_discard * function is invoked. * - * Return Value: On success, 0 is returned or negative error code, otherwise. + * Return: 0 on success, or a negative error code on failure. 
*/ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range) { diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h index 8e8a1a5a0402..127ab01a47ea 100644 --- a/fs/nilfs2/sufile.h +++ b/fs/nilfs2/sufile.h @@ -95,8 +95,7 @@ static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv, * @nsegs: size of @segnumv array * @ndone: place to store the number of cancelled segments * - * Return Value: On success, 0 is returned. On error, a negative error codes - * is returned. + * Return: 0 on success, or a negative error code on failure. */ static inline int nilfs_sufile_cancel_freev(struct inode *sufile, __u64 *segnumv, size_t nsegs, @@ -114,14 +113,11 @@ static inline int nilfs_sufile_cancel_freev(struct inode *sufile, * Description: nilfs_sufile_set_error() marks the segment specified by * @segnum as erroneous. The error segment will never be used again. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - Invalid segment usage number. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Invalid segment usage number. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ static inline int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum) { From 415519b186abf0b78573cd1e6e1441edc3504870 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi <konishi.ryusuke@gmail.com> Date: Fri, 10 Jan 2025 10:01:47 +0900 Subject: [PATCH 494/504] nilfs2: correct return value kernel-doc descriptions for metadata files Similar to the previous changes to fix return value descriptions, this fixes the format of the return value descriptions for metadata file functions other than sufile. Link: https://lkml.kernel.org/r/20250110010530.21872-5-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> Cc: "Brian G ." <gissf1@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/nilfs2/cpfile.c | 51 ++++++++++++++++----------------------- fs/nilfs2/dat.c | 40 +++++++++++++------------------ fs/nilfs2/ifile.c | 32 +++++++++++-------------- fs/nilfs2/mdt.c | 59 +++++++++++++++++++++------------------------- 4 files changed, 77 insertions(+), 105 deletions(-) diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index c20207d7a989..a1c0c09f61e2 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c @@ -191,14 +191,11 @@ static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile, * @cnop: place to store the next checkpoint number * @bhp: place to store a pointer to buffer_head struct * - * Return Value: On success, it returns 0. On error, the following negative - * error code is returned. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error - * - * %-ENOENT - no block exists in the range. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - no block exists in the range. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile, __u64 start_cno, __u64 end_cno, @@ -447,14 +444,11 @@ error: * the period from @start to @end, excluding @end itself. The checkpoints * which have been already deleted are ignored. * - * Return Value: On success, 0 is returned. 
On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-EINVAL - invalid checkpoints. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Invalid checkpoints. + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile, __u64 start, @@ -1058,14 +1052,11 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno) * Description: nilfs_change_cpmode() changes the mode of the checkpoint * specified by @cno. The mode @mode is NILFS_CHECKPOINT or NILFS_SNAPSHOT. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - No such checkpoint. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - No such checkpoint. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode) { @@ -1097,14 +1088,12 @@ int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode) * @cpstat: pointer to a structure of checkpoint statistics * * Description: nilfs_cpfile_get_stat() returns information about checkpoints. + * The checkpoint statistics are stored in the location pointed to by @cpstat. * - * Return Value: On success, 0 is returned, and checkpoints information is - * stored in the place pointed by @cpstat. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat) { diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index e220dcb08aa6..c57445e62298 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c @@ -302,14 +302,11 @@ int nilfs_dat_mark_dirty(struct inode *dat, __u64 vblocknr) * Description: nilfs_dat_freev() frees the virtual block numbers specified by * @vblocknrs and @nitems. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - The virtual block number have not been allocated. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - The virtual block number have not been allocated. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems) { @@ -325,12 +322,10 @@ int nilfs_dat_freev(struct inode *dat, __u64 *vblocknrs, size_t nitems) * Description: nilfs_dat_move() changes the block number associated with * @vblocknr to @blocknr. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. 
+ * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. */ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) { @@ -390,17 +385,14 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr) * @blocknrp: pointer to a block number * * Description: nilfs_dat_translate() maps the virtual block number @vblocknr - * to the corresponding block number. + * to the corresponding block number. The block number associated with + * @vblocknr is stored in the place pointed to by @blocknrp. * - * Return Value: On success, 0 is returned and the block number associated - * with @vblocknr is stored in the place pointed by @blocknrp. On error, one - * of the following negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - A block number associated with @vblocknr does not exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - A block number associated with @vblocknr does not exist. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp) { diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index e7339eb3c08a..bae9d7d9a424 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c @@ -38,17 +38,16 @@ static inline struct nilfs_ifile_info *NILFS_IFILE_I(struct inode *ifile) * @out_ino: pointer to a variable to store inode number * @out_bh: buffer_head contains newly allocated disk inode * - * Return Value: On success, 0 is returned and the newly allocated inode - * number is stored in the place pointed by @ino, and buffer_head pointer - * that contains newly allocated disk inode structure is stored in the - * place pointed by @out_bh - * On error, one of the following negative error codes is returned. + * nilfs_ifile_create_inode() allocates a new inode in the ifile metadata + * file and stores the inode number in the variable pointed to by @out_ino, + * as well as storing the ifile's buffer with the disk inode in the location + * pointed to by @out_bh. * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOSPC - No inode left. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No inode left. */ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino, struct buffer_head **out_bh) @@ -83,14 +82,11 @@ int nilfs_ifile_create_inode(struct inode *ifile, ino_t *out_ino, * @ifile: ifile inode * @ino: inode number * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error codes is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - The inode number @ino have not been allocated. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Inode number unallocated. + * * %-ENOMEM - Insufficient memory available. 
*/ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino) { diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 965b5ad1c0df..3f7510beebbb 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -226,20 +226,21 @@ static int nilfs_mdt_read_block(struct inode *inode, unsigned long block, * @out_bh: output of a pointer to the buffer_head * * nilfs_mdt_get_block() looks up the specified buffer and tries to create - * a new buffer if @create is not zero. On success, the returned buffer is - * assured to be either existing or formatted using a buffer lock on success. - * @out_bh is substituted only when zero is returned. + * a new buffer if @create is not zero. If (and only if) this function + * succeeds, it stores a pointer to the retrieved buffer head in the location + * pointed to by @out_bh. * - * Return Value: On success, it returns 0. On error, the following negative - * error code is returned. + * The retrieved buffer may be either an existing one or a newly allocated one. + * For a newly created buffer, if the callback function argument @init_block + * is non-NULL, the callback will be called with the buffer locked to format + * the block. * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error - * - * %-ENOENT - the specified block does not exist (hole block) - * - * %-EROFS - Read only filesystem (for create mode) + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - The specified block does not exist (hole block). + * * %-ENOMEM - Insufficient memory available. + * * %-EROFS - Read only filesystem (for create mode). */ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create, void (*init_block)(struct inode *, @@ -275,14 +276,11 @@ int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create, * @out_bh, and block offset to @blkoff, respectively. @out_bh and * @blkoff are substituted only when zero is returned. * - * Return Value: On success, it returns 0. On error, the following negative - * error code is returned. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error - * - * %-ENOENT - no block was found in the range + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - No block was found in the range. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_mdt_find_block(struct inode *inode, unsigned long start, unsigned long end, unsigned long *blkoff, @@ -321,12 +319,11 @@ out: * @inode: inode of the meta data file * @block: block offset * - * Return Value: On success, zero is returned. - * On error, one of the following negative error code is returned. - * - * %-ENOMEM - Insufficient memory available. - * - * %-EIO - I/O error + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Non-existent block. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block) { @@ -349,12 +346,10 @@ int nilfs_mdt_delete_block(struct inode *inode, unsigned long block) * nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and * tries to release the page including the buffer from a page cache. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error code is returned. 
- * - * %-EBUSY - page has an active buffer. - * - * %-ENOENT - page cache has no page addressed by the offset. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EBUSY - Page has an active buffer. + * * %-ENOENT - Page cache has no page addressed by the offset. */ int nilfs_mdt_forget_block(struct inode *inode, unsigned long block) { From 56fa6b235784772aeabc109617950467727a03c6 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi <konishi.ryusuke@gmail.com> Date: Fri, 10 Jan 2025 10:01:48 +0900 Subject: [PATCH 495/504] nilfs2: correct return value kernel-doc descriptions for the rest Similar to the previous changes to fix return value descriptions, this fixes the format of the return value descriptions of functions for the rest. Link: https://lkml.kernel.org/r/20250110010530.21872-6-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> Cc: "Brian G ." <gissf1@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/nilfs2/gcinode.c | 24 +++++++++------------- fs/nilfs2/inode.c | 12 +++-------- fs/nilfs2/recovery.c | 32 +++++++++++------------------ fs/nilfs2/segbuf.c | 12 ++--------- fs/nilfs2/segment.c | 48 ++++++++++++++++--------------------------- fs/nilfs2/the_nilfs.c | 12 +++++------ 6 files changed, 50 insertions(+), 90 deletions(-) diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c index 2dbb15767df1..561c220799c7 100644 --- a/fs/nilfs2/gcinode.c +++ b/fs/nilfs2/gcinode.c @@ -46,14 +46,11 @@ * specified by @pbn to the GC pagecache with the key @blkoff. * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer. * - * Return Value: On success, 0 is returned. On Error, one of the following - * negative error code is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. - * - * %-ENOENT - The block specified with @pbn does not exist. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - The block specified with @pbn does not exist. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, sector_t pbn, __u64 vbn, @@ -114,12 +111,11 @@ int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff, * specified by @vbn to the GC pagecache. @pbn can be supplied by the * caller to avoid translation of the disk block address. * - * Return Value: On success, 0 is returned. On Error, one of the following - * negative error code is returned. - * - * %-EIO - I/O error. - * - * %-ENOMEM - Insufficient amount of memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Invalid virtual block address. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn, __u64 vbn, struct buffer_head **out_bh) diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 23f3a75edd50..228bfffa5d22 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -598,10 +598,7 @@ struct inode *nilfs_iget_for_gc(struct super_block *sb, unsigned long ino, * or does nothing if the inode already has it. This function allocates * an additional inode to maintain page cache of B-tree nodes one-on-one. * - * Return Value: On success, 0 is returned. On errors, one of the following - * negative error code is returned. 
- * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or %-ENOMEM if memory is insufficient. */ int nilfs_attach_btree_node_cache(struct inode *inode) { @@ -660,11 +657,8 @@ void nilfs_detach_btree_node_cache(struct inode *inode) * in one inode and the one for b-tree node pages is set up in the * other inode, which is attached to the former inode. * - * Return Value: On success, a pointer to the inode for data pages is - * returned. On errors, one of the following negative error code is returned - * in a pointer type. - * - * %-ENOMEM - Insufficient memory available. + * Return: a pointer to the inode for data pages on success, or %-ENOMEM + * if memory is insufficient. */ struct inode *nilfs_iget_for_shadow(struct inode *inode) { diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index e43405bf521e..4063f084e75d 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -754,18 +754,13 @@ static void nilfs_abort_roll_forward(struct the_nilfs *nilfs) * @sb: super block instance * @ri: pointer to a nilfs_recovery_info struct to store search results. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error code is returned. - * - * %-EINVAL - Inconsistent filesystem state. - * - * %-EIO - I/O error - * - * %-ENOSPC - No space left on device (only in a panic state). - * - * %-ERESTARTSYS - Interrupted. - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Inconsistent filesystem state. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (only in a panic state). + * * %-ERESTARTSYS - Interrupted. */ int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs, struct super_block *sb, @@ -830,14 +825,11 @@ failed: * segment pointed by the superblock. It sets up struct the_nilfs through * this search. It fills nilfs_recovery_info (ri) required for recovery. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error code is returned. - * - * %-EINVAL - No valid segment found - * - * %-EIO - I/O error - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - No valid segment found. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ int nilfs_search_super_root(struct the_nilfs *nilfs, struct nilfs_recovery_info *ri) diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index e08cab03366b..a8bdf3d318ea 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -406,12 +406,7 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf, * @segbuf: buffer storing a log to be written * @nilfs: nilfs object * - * Return Value: On Success, 0 is returned. On Error, one of the following - * negative error code is returned. - * - * %-EIO - I/O error - * - * %-ENOMEM - Insufficient memory available. + * Return: Always 0. */ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, struct the_nilfs *nilfs) @@ -452,10 +447,7 @@ static int nilfs_segbuf_write(struct nilfs_segment_buffer *segbuf, * nilfs_segbuf_wait - wait for completion of requested BIOs * @segbuf: segment buffer * - * Return Value: On Success, 0 is returned. On Error, one of the following - * negative error code is returned. - * - * %-EIO - I/O error + * Return: 0 on success, or %-EIO if I/O error is detected. 
*/ static int nilfs_segbuf_wait(struct nilfs_segment_buffer *segbuf) { diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 58a598b548fa..05c24b971464 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -191,12 +191,10 @@ static int nilfs_prepare_segment_lock(struct super_block *sb, * When @vacancy_check flag is set, this function will check the amount of * free space, and will wait for the GC to reclaim disk space if low capacity. * - * Return Value: On success, 0 is returned. On error, one of the following - * negative error code is returned. - * - * %-ENOMEM - Insufficient memory available. - * - * %-ENOSPC - No space left on device + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (if checking free space). */ int nilfs_transaction_begin(struct super_block *sb, struct nilfs_transaction_info *ti, @@ -2314,18 +2312,13 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err, bool force) * nilfs_construct_segment - construct a logical segment * @sb: super block * - * Return Value: On success, 0 is returned. On errors, one of the following - * negative error code is returned. - * - * %-EROFS - Read only filesystem. - * - * %-EIO - I/O error - * - * %-ENOSPC - No space left on device (only in a panic state). - * - * %-ERESTARTSYS - Interrupted. - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (only in a panic state). + * * %-ERESTARTSYS - Interrupted. + * * %-EROFS - Read only filesystem. */ int nilfs_construct_segment(struct super_block *sb) { @@ -2349,18 +2342,13 @@ int nilfs_construct_segment(struct super_block *sb) * @start: start byte offset * @end: end byte offset (inclusive) * - * Return Value: On success, 0 is returned. On errors, one of the following - * negative error code is returned. - * - * %-EROFS - Read only filesystem. - * - * %-EIO - I/O error - * - * %-ENOSPC - No space left on device (only in a panic state). - * - * %-ERESTARTSYS - Interrupted. - * - * %-ENOMEM - Insufficient memory available. + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - No space left on device (only in a panic state). + * * %-ERESTARTSYS - Interrupted. + * * %-EROFS - Read only filesystem. */ int nilfs_construct_dsync_segment(struct super_block *sb, struct inode *inode, loff_t start, loff_t end) diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index ac03fd3c330c..1bcaf85506d3 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -49,8 +49,8 @@ void nilfs_set_last_segment(struct the_nilfs *nilfs, * alloc_nilfs - allocate a nilfs object * @sb: super block instance * - * Return Value: On success, pointer to the_nilfs is returned. - * On error, NULL is returned. + * Return: a pointer to the allocated nilfs object on success, or NULL on + * failure. */ struct the_nilfs *alloc_nilfs(struct super_block *sb) { @@ -200,8 +200,7 @@ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, * exponent information written in @sbp and stores it in @blocksize, * or aborts with an error message if it's too large. 
* - * Return Value: On success, 0 is returned. If the block size is too - * large, -EINVAL is returned. + * Return: 0 on success, or %-EINVAL if the block size is too large. */ static int nilfs_get_blocksize(struct super_block *sb, struct nilfs_super_block *sbp, int *blocksize) @@ -538,7 +537,7 @@ static int nilfs_valid_sb(struct nilfs_super_block *sbp) * area, or if the parameters themselves are not normal, it is * determined to be invalid. * - * Return Value: true if invalid, false if valid. + * Return: true if invalid, false if valid. */ static bool nilfs_sb2_bad_offset(struct nilfs_super_block *sbp, u64 offset) { @@ -684,8 +683,7 @@ static int nilfs_load_super_block(struct the_nilfs *nilfs, * reading the super block, getting disk layout information, initializing * shared fields in the_nilfs). * - * Return Value: On success, 0 is returned. On error, a negative error - * code is returned. + * Return: 0 on success, or a negative error code on failure. */ int init_nilfs(struct the_nilfs *nilfs, struct super_block *sb) { From a125b56a21cc11e698a91e92bba2785504366d11 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi <konishi.ryusuke@gmail.com> Date: Fri, 10 Jan 2025 10:01:49 +0900 Subject: [PATCH 496/504] nilfs2: add missing return value kernel-doc descriptions There are a number of kernel-doc comments for functions that are missing return values, which also causes a number of warnings when the kernel-doc script is run with the "-Wall" option. Fix this issue by adding proper return value descriptions, and improve code maintainability. Link: https://lkml.kernel.org/r/20250110010530.21872-7-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> Cc: "Brian G ." <gissf1@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/nilfs2/alloc.c | 67 +++++++++++++++++++++++++++++++++++++++++-- fs/nilfs2/alloc.h | 2 ++ fs/nilfs2/cpfile.c | 2 ++ fs/nilfs2/dat.c | 2 ++ fs/nilfs2/ifile.c | 2 ++ fs/nilfs2/inode.c | 4 +++ fs/nilfs2/mdt.c | 4 +++ fs/nilfs2/page.c | 8 +++--- fs/nilfs2/recovery.c | 30 +++++++++++++++++++ fs/nilfs2/segment.c | 8 ++++++ fs/nilfs2/sufile.c | 8 ++++++ fs/nilfs2/sufile.h | 6 ++++ fs/nilfs2/super.c | 10 ++++++- fs/nilfs2/the_nilfs.c | 14 +++++++++ 14 files changed, 160 insertions(+), 7 deletions(-) diff --git a/fs/nilfs2/alloc.c b/fs/nilfs2/alloc.c index ba3e1f591f36..6b506995818d 100644 --- a/fs/nilfs2/alloc.c +++ b/fs/nilfs2/alloc.c @@ -21,6 +21,8 @@ * nilfs_palloc_groups_per_desc_block - get the number of groups that a group * descriptor block can maintain * @inode: inode of metadata file using this allocator + * + * Return: Number of groups that a group descriptor block can maintain. */ static inline unsigned long nilfs_palloc_groups_per_desc_block(const struct inode *inode) @@ -32,6 +34,8 @@ nilfs_palloc_groups_per_desc_block(const struct inode *inode) /** * nilfs_palloc_groups_count - get maximum number of groups * @inode: inode of metadata file using this allocator + * + * Return: Maximum number of groups. */ static inline unsigned long nilfs_palloc_groups_count(const struct inode *inode) @@ -43,6 +47,8 @@ nilfs_palloc_groups_count(const struct inode *inode) * nilfs_palloc_init_blockgroup - initialize private variables for allocator * @inode: inode of metadata file using this allocator * @entry_size: size of the persistent object + * + * Return: 0 on success, or a negative error code on failure. 
*/ int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size) { @@ -78,6 +84,9 @@ int nilfs_palloc_init_blockgroup(struct inode *inode, unsigned int entry_size) * @inode: inode of metadata file using this allocator * @nr: serial number of the entry (e.g. inode number) * @offset: pointer to store offset number in the group + * + * Return: Number of the group that contains the entry with the index + * specified by @nr. */ static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr, unsigned long *offset) @@ -93,8 +102,8 @@ static unsigned long nilfs_palloc_group(const struct inode *inode, __u64 nr, * @inode: inode of metadata file using this allocator * @group: group number * - * nilfs_palloc_desc_blkoff() returns block offset of the descriptor - * block which contains a descriptor of the specified group. + * Return: Index number in the metadata file of the descriptor block of + * the group specified by @group. */ static unsigned long nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group) @@ -111,6 +120,9 @@ nilfs_palloc_desc_blkoff(const struct inode *inode, unsigned long group) * * nilfs_palloc_bitmap_blkoff() returns block offset of the bitmap * block used to allocate/deallocate entries in the specified group. + * + * Return: Index number in the metadata file of the bitmap block of + * the group specified by @group. */ static unsigned long nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group) @@ -125,6 +137,8 @@ nilfs_palloc_bitmap_blkoff(const struct inode *inode, unsigned long group) * nilfs_palloc_group_desc_nfrees - get the number of free entries in a group * @desc: pointer to descriptor structure for the group * @lock: spin lock protecting @desc + * + * Return: Number of free entries written in the group descriptor @desc. */ static unsigned long nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc, @@ -143,6 +157,9 @@ nilfs_palloc_group_desc_nfrees(const struct nilfs_palloc_group_desc *desc, * @desc: pointer to descriptor structure for the group * @lock: spin lock protecting @desc * @n: delta to be added + * + * Return: Number of free entries after adjusting the group descriptor + * @desc. */ static u32 nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc, @@ -161,6 +178,9 @@ nilfs_palloc_group_desc_add_entries(struct nilfs_palloc_group_desc *desc, * nilfs_palloc_entry_blkoff - get block offset of an entry block * @inode: inode of metadata file using this allocator * @nr: serial number of the entry (e.g. inode number) + * + * Return: Index number in the metadata file of the block containing + * the entry specified by @nr. */ static unsigned long nilfs_palloc_entry_blkoff(const struct inode *inode, __u64 nr) @@ -238,6 +258,12 @@ static int nilfs_palloc_get_block(struct inode *inode, unsigned long blkoff, * @blkoff: block offset * @prev: nilfs_bh_assoc struct of the last used buffer * @lock: spin lock protecting @prev + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOENT - Non-existent block. + * * %-ENOMEM - Insufficient memory available. 
*/ static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff, struct nilfs_bh_assoc *prev, @@ -258,6 +284,8 @@ static int nilfs_palloc_delete_block(struct inode *inode, unsigned long blkoff, * @group: group number * @create: create flag * @bhp: pointer to store the resultant buffer head + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_palloc_get_desc_block(struct inode *inode, unsigned long group, @@ -277,6 +305,8 @@ static int nilfs_palloc_get_desc_block(struct inode *inode, * @group: group number * @create: create flag * @bhp: pointer to store the resultant buffer head + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_palloc_get_bitmap_block(struct inode *inode, unsigned long group, @@ -294,6 +324,8 @@ static int nilfs_palloc_get_bitmap_block(struct inode *inode, * nilfs_palloc_delete_bitmap_block - delete a bitmap block * @inode: inode of metadata file using this allocator * @group: group number + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_palloc_delete_bitmap_block(struct inode *inode, unsigned long group) @@ -312,6 +344,8 @@ static int nilfs_palloc_delete_bitmap_block(struct inode *inode, * @nr: serial number of the entry (e.g. inode number) * @create: create flag * @bhp: pointer to store the resultant buffer head + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr, int create, struct buffer_head **bhp) @@ -328,6 +362,8 @@ int nilfs_palloc_get_entry_block(struct inode *inode, __u64 nr, * nilfs_palloc_delete_entry_block - delete an entry block * @inode: inode of metadata file using this allocator * @nr: serial number of the entry + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_palloc_delete_entry_block(struct inode *inode, __u64 nr) { @@ -397,6 +433,9 @@ size_t nilfs_palloc_entry_offset(const struct inode *inode, __u64 nr, * @bsize: size in bits * @lock: spin lock protecting @bitmap * @wrap: whether to wrap around + * + * Return: Offset number within the group of the found free entry, or + * %-ENOSPC if not found. */ static int nilfs_palloc_find_available_slot(unsigned char *bitmap, unsigned long target, @@ -438,6 +477,9 @@ static int nilfs_palloc_find_available_slot(unsigned char *bitmap, * @inode: inode of metadata file using this allocator * @curr: current group number * @max: maximum number of groups + * + * Return: Number of remaining descriptors (= groups) managed by the descriptor + * block. */ static unsigned long nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode, @@ -453,6 +495,8 @@ nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode, * nilfs_palloc_count_desc_blocks - count descriptor blocks number * @inode: inode of metadata file using this allocator * @desc_blocks: descriptor blocks number [out] + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_palloc_count_desc_blocks(struct inode *inode, unsigned long *desc_blocks) @@ -473,6 +517,8 @@ static int nilfs_palloc_count_desc_blocks(struct inode *inode, * MDT file growing * @inode: inode of metadata file using this allocator * @desc_blocks: known current descriptor blocks count + * + * Return: true if a group can be added in the metadata file, false if not. 
*/ static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode, unsigned long desc_blocks) @@ -487,6 +533,12 @@ static inline bool nilfs_palloc_mdt_file_can_grow(struct inode *inode, * @inode: inode of metadata file using this allocator * @nused: current number of used entries * @nmaxp: max number of entries [out] + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ERANGE - Number of entries in use is out of range. */ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp) { @@ -518,6 +570,13 @@ int nilfs_palloc_count_max_entries(struct inode *inode, u64 nused, u64 *nmaxp) * @inode: inode of metadata file using this allocator * @req: nilfs_palloc_req structure exchanged for the allocation * @wrap: whether to wrap around + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error (including metadata corruption). + * * %-ENOMEM - Insufficient memory available. + * * %-ENOSPC - Entries exhausted (No entries available for allocation). + * * %-EROFS - Read only filesystem */ int nilfs_palloc_prepare_alloc_entry(struct inode *inode, struct nilfs_palloc_req *req, bool wrap) @@ -710,6 +769,8 @@ void nilfs_palloc_abort_alloc_entry(struct inode *inode, * nilfs_palloc_prepare_free_entry - prepare to deallocate a persistent object * @inode: inode of metadata file using this allocator * @req: nilfs_palloc_req structure exchanged for the removal + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_palloc_prepare_free_entry(struct inode *inode, struct nilfs_palloc_req *req) @@ -754,6 +815,8 @@ void nilfs_palloc_abort_free_entry(struct inode *inode, * @inode: inode of metadata file using this allocator * @entry_nrs: array of entry numbers to be deallocated * @nitems: number of entries stored in @entry_nrs + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems) { diff --git a/fs/nilfs2/alloc.h b/fs/nilfs2/alloc.h index 3f115ab7e9a7..046d876ea3e0 100644 --- a/fs/nilfs2/alloc.h +++ b/fs/nilfs2/alloc.h @@ -21,6 +21,8 @@ * * The number of entries per group is defined by the number of bits * that a bitmap block can maintain. + * + * Return: Number of entries per group. */ static inline unsigned long nilfs_palloc_entries_per_group(const struct inode *inode) diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index a1c0c09f61e2..aaab0ae90261 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c @@ -1124,6 +1124,8 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat) * @cpsize: size of a checkpoint entry * @raw_inode: on-disk cpfile inode * @inodep: buffer to store the inode + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_cpfile_read(struct super_block *sb, size_t cpsize, struct nilfs_inode *raw_inode, struct inode **inodep) diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index c57445e62298..c5664035b3e3 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c @@ -481,6 +481,8 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned int visz, * @entry_size: size of a dat entry * @raw_inode: on-disk dat inode * @inodep: buffer to store the inode + * + * Return: 0 on success, or a negative error code on failure. 
*/ int nilfs_dat_read(struct super_block *sb, size_t entry_size, struct nilfs_inode *raw_inode, struct inode **inodep) diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index bae9d7d9a424..e1e050dfbbc2 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c @@ -146,6 +146,8 @@ int nilfs_ifile_get_inode_block(struct inode *ifile, ino_t ino, * @ifile: ifile inode * @nmaxinodes: current maximum of available inodes count [out] * @nfreeinodes: free inodes count [out] + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_ifile_count_free_inodes(struct inode *ifile, u64 *nmaxinodes, u64 *nfreeinodes) diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 228bfffa5d22..e8015d24a82c 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -68,6 +68,8 @@ void nilfs_inode_sub_blocks(struct inode *inode, int n) * * This function does not issue actual read request of the specified data * block. It is done by VFS. + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_get_block(struct inode *inode, sector_t blkoff, struct buffer_head *bh_result, int create) @@ -141,6 +143,8 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff, * address_space_operations. * @file: file struct of the file to be read * @folio: the folio to be read + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_read_folio(struct file *file, struct folio *folio) { diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 3f7510beebbb..2f850a18d6e7 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c @@ -519,6 +519,8 @@ void nilfs_mdt_set_entry_size(struct inode *inode, unsigned int entry_size, * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file * @inode: inode of the metadata file * @shadow: shadow mapping + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_mdt_setup_shadow_map(struct inode *inode, struct nilfs_shadow_map *shadow) @@ -540,6 +542,8 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode, /** * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map * @inode: inode of the metadata file + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_mdt_save_to_shadow_map(struct inode *inode) { diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 899686d2e5f7..806b056d2260 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c @@ -135,8 +135,7 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh) * nilfs_folio_buffers_clean - Check if a folio has dirty buffers or not. * @folio: Folio to be checked. * - * nilfs_folio_buffers_clean() returns false if the folio has dirty buffers. - * Otherwise, it returns true. + * Return: false if the folio has dirty buffers, true otherwise. */ bool nilfs_folio_buffers_clean(struct folio *folio) { @@ -500,8 +499,9 @@ void __nilfs_clear_folio_dirty(struct folio *folio) * This function searches an extent of buffers marked "delayed" which * starts from a block offset equal to or larger than @start_blk. If * such an extent was found, this will store the start offset in - * @blkoff and return its length in blocks. Otherwise, zero is - * returned. + * @blkoff and return its length in blocks. + * + * Return: Length in blocks of found extent, 0 otherwise. 
*/ unsigned long nilfs_find_uncommitted_extent(struct inode *inode, sector_t start_blk, diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c index 4063f084e75d..22aecf6e2344 100644 --- a/fs/nilfs2/recovery.c +++ b/fs/nilfs2/recovery.c @@ -88,6 +88,8 @@ static int nilfs_warn_segment_error(struct super_block *sb, int err) * @check_bytes: number of bytes to be checked * @start: DBN of start block * @nblock: number of blocks to be checked + * + * Return: 0 on success, or %-EIO if an I/O error occurs. */ static int nilfs_compute_checksum(struct the_nilfs *nilfs, struct buffer_head *bhs, u32 *sum, @@ -126,6 +128,11 @@ static int nilfs_compute_checksum(struct the_nilfs *nilfs, * @sr_block: disk block number of the super root block * @pbh: address of a buffer_head pointer to return super root buffer * @check: CRC check flag + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Super root block corrupted. + * * %-EIO - I/O error. */ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block, struct buffer_head **pbh, int check) @@ -176,6 +183,8 @@ int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block, * @nilfs: nilfs object * @start_blocknr: start block number of the log * @sum: pointer to return segment summary structure + * + * Return: Buffer head pointer, or NULL if an I/O error occurs. */ static struct buffer_head * nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr, @@ -195,6 +204,13 @@ nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr, * @seg_seq: sequence number of segment * @bh_sum: buffer head of summary block * @sum: segment summary struct + * + * Return: 0 on success, or one of the following internal codes on failure: + * * %NILFS_SEG_FAIL_MAGIC - Magic number mismatch. + * * %NILFS_SEG_FAIL_SEQ - Sequence number mismatch. + * * %NIFLS_SEG_FAIL_CONSISTENCY - Block count out of range. + * * %NILFS_SEG_FAIL_IO - I/O error. + * * %NILFS_SEG_FAIL_CHECKSUM_FULL - Full log checksum verification failed. */ static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq, struct buffer_head *bh_sum, @@ -238,6 +254,9 @@ out: * @pbh: the current buffer head on summary blocks [in, out] * @offset: the current byte offset on summary blocks [in, out] * @bytes: byte size of the item to be read + * + * Return: Kernel space address of current segment summary entry, or + * NULL if an I/O error occurs. */ static void *nilfs_read_summary_info(struct the_nilfs *nilfs, struct buffer_head **pbh, @@ -300,6 +319,11 @@ static void nilfs_skip_summary_info(struct the_nilfs *nilfs, * @start_blocknr: start block number of the log * @sum: log summary information * @head: list head to add nilfs_recovery_block struct + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. */ static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr, struct nilfs_segment_summary *sum, @@ -571,6 +595,12 @@ static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs, * @sb: super block instance * @root: NILFS root instance * @ri: pointer to a nilfs_recovery_info + * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - Log format error. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. 
*/ static int nilfs_do_roll_forward(struct the_nilfs *nilfs, struct super_block *sb, diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 05c24b971464..94f45281253c 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -250,6 +250,8 @@ int nilfs_transaction_begin(struct super_block *sb, * nilfs_transaction_commit() sets a timer to start the segment * constructor. If a sync flag is set, it starts construction * directly. + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_transaction_commit(struct super_block *sb) { @@ -405,6 +407,8 @@ static void *nilfs_segctor_map_segsum_entry(struct nilfs_sc_info *sci, /** * nilfs_segctor_reset_segment_buffer - reset the current segment buffer * @sci: nilfs_sc_info + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_segctor_reset_segment_buffer(struct nilfs_sc_info *sci) { @@ -1315,6 +1319,8 @@ static int nilfs_segctor_collect_blocks(struct nilfs_sc_info *sci, int mode) * nilfs_segctor_begin_construction - setup segment buffer to make a new log * @sci: nilfs_sc_info * @nilfs: nilfs object + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_segctor_begin_construction(struct nilfs_sc_info *sci, struct the_nilfs *nilfs) @@ -2454,6 +2460,8 @@ static void nilfs_segctor_notify(struct nilfs_sc_info *sci, int mode, int err) * nilfs_segctor_construct - form logs and write them to disk * @sci: segment constructor object * @mode: mode of log forming + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_segctor_construct(struct nilfs_sc_info *sci, int mode) { diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index 623d417a54eb..47d01255a8fb 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c @@ -133,6 +133,8 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh, /** * nilfs_sufile_get_ncleansegs - return the number of clean segments * @sufile: inode of segment usage file + * + * Return: Number of clean segments. */ unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile) { @@ -500,6 +502,8 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum, * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty * @sufile: inode of segment usage file * @segnum: segment number + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum) { @@ -559,6 +563,8 @@ out_sem: * @segnum: segment number * @nblocks: number of live blocks in the segment * @modtime: modification time (option) + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum, unsigned long nblocks, time64_t modtime) @@ -1195,6 +1201,8 @@ out_sem: * @susize: size of a segment usage entry * @raw_inode: on-disk sufile inode * @inodep: buffer to store the inode + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_sufile_read(struct super_block *sb, size_t susize, struct nilfs_inode *raw_inode, struct inode **inodep) diff --git a/fs/nilfs2/sufile.h b/fs/nilfs2/sufile.h index 127ab01a47ea..cd6f28ab3521 100644 --- a/fs/nilfs2/sufile.h +++ b/fs/nilfs2/sufile.h @@ -58,6 +58,8 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range); * nilfs_sufile_scrap - make a segment garbage * @sufile: inode of segment usage file * @segnum: segment number to be freed + * + * Return: 0 on success, or a negative error code on failure. 
*/ static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum) { @@ -68,6 +70,8 @@ static inline int nilfs_sufile_scrap(struct inode *sufile, __u64 segnum) * nilfs_sufile_free - free segment * @sufile: inode of segment usage file * @segnum: segment number to be freed + * + * Return: 0 on success, or a negative error code on failure. */ static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum) { @@ -80,6 +84,8 @@ static inline int nilfs_sufile_free(struct inode *sufile, __u64 segnum) * @segnumv: array of segment numbers * @nsegs: size of @segnumv array * @ndone: place to store the number of freed segments + * + * Return: 0 on success, or a negative error code on failure. */ static inline int nilfs_sufile_freev(struct inode *sufile, __u64 *segnumv, size_t nsegs, size_t *ndone) diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index eca79cca3803..badc2cbc895e 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c @@ -309,6 +309,8 @@ int nilfs_commit_super(struct super_block *sb, int flag) * This function restores state flags in the on-disk super block. * This will set "clean" flag (i.e. NILFS_VALID_FS) unless the * filesystem was not clean previously. + * + * Return: 0 on success, %-EIO if I/O error or superblock is corrupted. */ int nilfs_cleanup_super(struct super_block *sb) { @@ -339,6 +341,8 @@ int nilfs_cleanup_super(struct super_block *sb) * nilfs_move_2nd_super - relocate secondary super block * @sb: super block instance * @sb2off: new offset of the secondary super block (in bytes) + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_move_2nd_super(struct super_block *sb, loff_t sb2off) { @@ -420,6 +424,8 @@ out: * nilfs_resize_fs - resize the filesystem * @sb: super block instance * @newsize: new size of the filesystem (in bytes) + * + * Return: 0 on success, or a negative error code on failure. */ int nilfs_resize_fs(struct super_block *sb, __u64 newsize) { @@ -987,7 +993,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno, * nilfs_tree_is_busy() - try to shrink dentries of a checkpoint * @root_dentry: root dentry of the tree to be shrunk * - * This function returns true if the tree was in-use. + * Return: true if the tree was in-use, false otherwise. */ static bool nilfs_tree_is_busy(struct dentry *root_dentry) { @@ -1033,6 +1039,8 @@ int nilfs_checkpoint_is_mounted(struct super_block *sb, __u64 cno) * * This function is called exclusively by nilfs->ns_mount_mutex. * So, the recovery process is protected from other simultaneous mounts. + * + * Return: 0 on success, or a negative error code on failure. */ static int nilfs_fill_super(struct super_block *sb, struct fs_context *fc) diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c index 1bcaf85506d3..cb01ea81724d 100644 --- a/fs/nilfs2/the_nilfs.c +++ b/fs/nilfs2/the_nilfs.c @@ -165,6 +165,9 @@ static void nilfs_clear_recovery_info(struct nilfs_recovery_info *ri) * containing a super root from a given super block, and initializes * relevant information on the nilfs object preparatory for log * scanning and recovery. + * + * Return: 0 on success, or %-EINVAL if current segment number is out + * of range. */ static int nilfs_store_log_cursor(struct the_nilfs *nilfs, struct nilfs_super_block *sbp) @@ -225,6 +228,13 @@ static int nilfs_get_blocksize(struct super_block *sb, * load_nilfs() searches and load the latest super root, * attaches the last segment, and does recovery if needed. * The caller must call this exclusively for simultaneous mounts. 
+ * + * Return: 0 on success, or one of the following negative error codes on + * failure: + * * %-EINVAL - No valid segment found. + * * %-EIO - I/O error. + * * %-ENOMEM - Insufficient memory available. + * * %-EROFS - Read only device or RO compat mode (if recovery is required) */ int load_nilfs(struct the_nilfs *nilfs, struct super_block *sb) { @@ -394,6 +404,8 @@ static unsigned long long nilfs_max_size(unsigned int blkbits) * nilfs_nrsvsegs - calculate the number of reserved segments * @nilfs: nilfs object * @nsegs: total number of segments + * + * Return: Number of reserved segments. */ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs) { @@ -405,6 +417,8 @@ unsigned long nilfs_nrsvsegs(struct the_nilfs *nilfs, unsigned long nsegs) /** * nilfs_max_segment_count - calculate the maximum number of segments * @nilfs: nilfs object + * + * Return: Maximum number of segments */ static u64 nilfs_max_segment_count(struct the_nilfs *nilfs) { From 06cd834104865b899e8b433e6afba8cb8ddb249b Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi <konishi.ryusuke@gmail.com> Date: Fri, 10 Jan 2025 10:01:50 +0900 Subject: [PATCH 497/504] nilfs2: revise the return value description style for consistency. Also for comments that do not cause kernel-doc warnings (those that list multiple error codes), revise the return value description style to match Brian G.'s suggestion of "..., or one of the following negative error codes on failure:". Link: https://lkml.kernel.org/r/CAAq45aNh1qV8P6XgDhKeNstT=PvcPUaCXsAF-f9rvmzznsZL5A@mail.gmail.com Link: https://lkml.kernel.org/r/20250110010530.21872-8-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> Cc: "Brian G ." <gissf1@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/nilfs2/btnode.c | 3 ++- fs/nilfs2/cpfile.c | 16 ++++++++++------ fs/nilfs2/dat.c | 3 ++- fs/nilfs2/ifile.c | 3 ++- fs/nilfs2/ioctl.c | 3 ++- fs/nilfs2/segment.c | 6 ++++-- fs/nilfs2/sufile.c | 2 +- 7 files changed, 23 insertions(+), 13 deletions(-) diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 54a3fa0cf67e..568367129092 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c @@ -201,7 +201,8 @@ void nilfs_btnode_delete(struct buffer_head *bh) * Note that the current implementation does not support folio sizes larger * than the page size. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EIO - I/O error (metadata corruption). * * %-ENOMEM - Insufficient memory available. */ diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c index aaab0ae90261..bcc7d76269ac 100644 --- a/fs/nilfs2/cpfile.c +++ b/fs/nilfs2/cpfile.c @@ -236,7 +236,8 @@ static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile, * stores it to the inode file given by @ifile and the nilfs root object * given by @root. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINVAL - Invalid checkpoint. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption). @@ -304,7 +305,8 @@ out_sem: * In either case, the buffer of the block containing the checkpoint entry * and the cpfile inode are made dirty for inclusion in the write log. * - * Return: 0 on success, or the following negative error code on failure. 
+ * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption). * * %-EROFS - Read only filesystem @@ -373,7 +375,8 @@ out_sem: * cpfile with the data given by the arguments @root, @blkinc, @ctime, and * @minor. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption). */ @@ -712,7 +715,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop, * number to continue searching. * * Return: Count of checkpoint info items stored in the output buffer on - * success, or the following negative error code on failure. + * success, or one of the following negative error codes on failure: * * %-EINVAL - Invalid checkpoint mode. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption). @@ -737,7 +740,8 @@ ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode, * @cpfile: checkpoint file inode * @cno: checkpoint number to delete * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EBUSY - Checkpoint in use (snapshot specified). * * %-EIO - I/O error (including metadata corruption). * * %-ENOENT - No valid checkpoint found. @@ -1005,7 +1009,7 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno) * @cno: checkpoint number * * Return: 1 if the checkpoint specified by @cno is a snapshot, 0 if not, or - * the following negative error code on failure. + * one of the following negative error codes on failure: * * %-EIO - I/O error (including metadata corruption). * * %-ENOENT - No such checkpoint. * * %-ENOMEM - Insufficient memory available. diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c index c5664035b3e3..c664daba56ae 100644 --- a/fs/nilfs2/dat.c +++ b/fs/nilfs2/dat.c @@ -276,7 +276,8 @@ void nilfs_dat_abort_update(struct inode *dat, * @dat: DAT file inode * @vblocknr: virtual block number * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINVAL - Invalid DAT entry (internal code). * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c index e1e050dfbbc2..c4cd4a4dedd0 100644 --- a/fs/nilfs2/ifile.c +++ b/fs/nilfs2/ifile.c @@ -172,7 +172,8 @@ int nilfs_ifile_count_free_inodes(struct inode *ifile, * @cno: number of checkpoint entry to read * @inode_size: size of an inode * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINVAL - Invalid checkpoint. * * %-ENOMEM - Insufficient memory available. * * %-EIO - I/O error (including metadata corruption). 
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c index e877c97974a4..a66d62a51f77 100644 --- a/fs/nilfs2/ioctl.c +++ b/fs/nilfs2/ioctl.c @@ -1268,7 +1268,8 @@ static int nilfs_ioctl_get_fslabel(struct super_block *sb, void __user *argp) * @filp: file object * @argp: pointer to userspace memory that contains the volume name * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EFAULT - Error copying input data. * * %-EINVAL - Label length exceeds record size in superblock. * * %-EIO - I/O error. diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 94f45281253c..3a202e51b360 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1122,7 +1122,8 @@ static int nilfs_segctor_scan_file_dsync(struct nilfs_sc_info *sci, * a super root block containing this sufile change is complete, and it can * be canceled with nilfs_sufile_cancel_freev() until then. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINVAL - Invalid segment number. * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. @@ -2834,7 +2835,8 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci) * This allocates a log writer object, initializes it, and starts the * log writer. * - * Return: 0 on success, or the following negative error code on failure. + * Return: 0 on success, or one of the following negative error codes on + * failure: * * %-EINTR - Log writer thread creation failed due to interruption. * * %-ENOMEM - Insufficient memory available. */ diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c index 47d01255a8fb..330f269abedf 100644 --- a/fs/nilfs2/sufile.c +++ b/fs/nilfs2/sufile.c @@ -850,7 +850,7 @@ out: * @nsi: size of suinfo array * * Return: Count of segment usage info items stored in the output buffer on - * success, or the following negative error code on failure. + * success, or one of the following negative error codes on failure: * * %-EIO - I/O error (including metadata corruption). * * %-ENOMEM - Insufficient memory available. */ From b2fd0b54abdba66a640ade33805c05dc97d896e0 Mon Sep 17 00:00:00 2001 From: Tanya Agarwal <tanyaagarwal25699@gmail.com> Date: Sun, 12 Jan 2025 01:17:10 +0530 Subject: [PATCH 498/504] CREDITS: fix spelling mistakes Fix spelling errors identified using codespell tool. Link: https://lkml.kernel.org/r/20250111194709.51133-1-tanyaagarwal25699@gmail.com Signed-off-by: Tanya Agarwal <tanyaagarwal25699@gmail.com> Cc: Anup Sharma <anupnewsmail@gmail.com> Cc: Shuah Khan <skhan@linuxfoundation.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- CREDITS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CREDITS b/CREDITS index b1777b53c63a..c7f962465108 100644 --- a/CREDITS +++ b/CREDITS @@ -4327,7 +4327,7 @@ D: Freescale Highspeed USB device driver D: Freescale QE SoC support and Ethernet driver S: B-1206 Jingmao Guojigongyu S: 16 Baliqiao Nanjie, Beijing 101100 -S: People's Repulic of China +S: People's Republic of China N: Vlad Yasevich E: vyasevich@gmail.com From 5b048de571cc6d8f0d877c035bb7fc8ee3e1653e Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi <konishi.ryusuke@gmail.com> Date: Sat, 11 Jan 2025 23:26:35 +0900 Subject: [PATCH 499/504] nilfs2: handle errors that nilfs_prepare_chunk() may return Patch series "nilfs2: fix issues with rename operations". 
This series fixes BUG_ON check failures reported by syzbot around rename operations, and a minor behavioral issue where the mtime of a child directory changes when it is renamed instead of moved. This patch (of 2): The directory manipulation routines nilfs_set_link() and nilfs_delete_entry() rewrite the directory entry in the folio/page previously read by nilfs_find_entry(), so error handling is omitted on the assumption that nilfs_prepare_chunk(), which prepares the buffer for rewriting, will always succeed for these. And if an error is returned, it triggers the legacy BUG_ON() checks in each routine. This assumption is wrong, as proven by syzbot: the buffer layer called by nilfs_prepare_chunk() may call nilfs_get_block() if necessary, which may fail due to metadata corruption or other reasons. This has been there all along, but improved sanity checks and error handling may have made it more reproducible in fuzzing tests. Fix this issue by adding missing error paths in nilfs_set_link(), nilfs_delete_entry(), and their caller nilfs_rename(). Link: https://lkml.kernel.org/r/20250111143518.7901-1-konishi.ryusuke@gmail.com Link: https://lkml.kernel.org/r/20250111143518.7901-2-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> Reported-by: syzbot+32c3706ebf5d95046ea1@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=32c3706ebf5d95046ea1 Reported-by: syzbot+1097e95f134f37d9395c@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=1097e95f134f37d9395c Fixes: 2ba466d74ed7 ("nilfs2: directory entry operations") Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/nilfs2/dir.c | 13 ++++++++++--- fs/nilfs2/namei.c | 29 +++++++++++++++-------------- fs/nilfs2/nilfs.h | 4 ++-- 3 files changed, 27 insertions(+), 19 deletions(-) diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index 0a3aea6c416b..9b7f8e9655a2 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c @@ -400,7 +400,7 @@ int nilfs_inode_by_name(struct inode *dir, const struct qstr *qstr, ino_t *ino) return 0; } -void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, +int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, struct folio *folio, struct inode *inode) { size_t from = offset_in_folio(folio, de); @@ -410,11 +410,15 @@ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, folio_lock(folio); err = nilfs_prepare_chunk(folio, from, to); - BUG_ON(err); + if (unlikely(err)) { + folio_unlock(folio); + return err; + } de->inode = cpu_to_le64(inode->i_ino); de->file_type = fs_umode_to_ftype(inode->i_mode); nilfs_commit_chunk(folio, mapping, from, to); inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir)); + return 0; } /* @@ -543,7 +547,10 @@ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct folio *folio) from = (char *)pde - kaddr; folio_lock(folio); err = nilfs_prepare_chunk(folio, from, to); - BUG_ON(err); + if (unlikely(err)) { + folio_unlock(folio); + goto out; + } if (pde) pde->rec_len = nilfs_rec_len_to_disk(to - from); dir->inode = 0; diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 1d836a5540f3..e02fae6757f1 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -406,8 +406,10 @@ static int nilfs_rename(struct mnt_idmap *idmap, err = PTR_ERR(new_de); goto out_dir; } - nilfs_set_link(new_dir, new_de, new_folio, old_inode); + err = nilfs_set_link(new_dir, new_de, new_folio, old_inode); folio_release_kmap(new_folio, new_de); + if (unlikely(err)) + goto out_dir; nilfs_mark_inode_dirty(new_dir); 
inode_set_ctime_current(new_inode); if (dir_de) @@ -430,28 +432,27 @@ static int nilfs_rename(struct mnt_idmap *idmap, */ inode_set_ctime_current(old_inode); - nilfs_delete_entry(old_de, old_folio); - - if (dir_de) { - nilfs_set_link(old_inode, dir_de, dir_folio, new_dir); - folio_release_kmap(dir_folio, dir_de); - drop_nlink(old_dir); + err = nilfs_delete_entry(old_de, old_folio); + if (likely(!err)) { + if (dir_de) { + err = nilfs_set_link(old_inode, dir_de, dir_folio, + new_dir); + drop_nlink(old_dir); + } + nilfs_mark_inode_dirty(old_dir); } - folio_release_kmap(old_folio, old_de); - - nilfs_mark_inode_dirty(old_dir); nilfs_mark_inode_dirty(old_inode); - err = nilfs_transaction_commit(old_dir->i_sb); - return err; - out_dir: if (dir_de) folio_release_kmap(dir_folio, dir_de); out_old: folio_release_kmap(old_folio, old_de); out: - nilfs_transaction_abort(old_dir->i_sb); + if (likely(!err)) + err = nilfs_transaction_commit(old_dir->i_sb); + else + nilfs_transaction_abort(old_dir->i_sb); return err; } diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h index dff241c53fc5..cb6ed54accd7 100644 --- a/fs/nilfs2/nilfs.h +++ b/fs/nilfs2/nilfs.h @@ -261,8 +261,8 @@ struct nilfs_dir_entry *nilfs_find_entry(struct inode *, const struct qstr *, int nilfs_delete_entry(struct nilfs_dir_entry *, struct folio *); int nilfs_empty_dir(struct inode *); struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct folio **); -void nilfs_set_link(struct inode *, struct nilfs_dir_entry *, - struct folio *, struct inode *); +int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de, + struct folio *folio, struct inode *inode); /* file.c */ extern int nilfs_sync_file(struct file *, loff_t, loff_t, int); From 73d5e92a3f04d538f6c54df34f0f9d63da01cf86 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi <konishi.ryusuke@gmail.com> Date: Sat, 11 Jan 2025 23:26:36 +0900 Subject: [PATCH 500/504] nilfs2: do not update mtime of renamed directory that is not moved A minor issue with nilfs_rename, originating from an old ext2 implementation, is that the mtime is updated even if the rename target is a directory and it is renamed within the same directory, rather than moved to a different directory. In this case, the child directory being renamed does not change in any way, so changing its mtime is unnecessary according to the specification, and can unnecessarily confuse backup tools. In ext2, this issue was fixed by commit 39fe7557b4d6 ("ext2: Do not update mtime of a moved directory") and a few subsequent fixes, but it remained in nilfs2. Fix this issue by not calling nilfs_set_link(), which rewrites the inode number of the directory entry that refers to the parent directory, when the move target is a directory and the source and destination are the same directory. Here, the directory to be moved only needs to be read if the inode number of the parent directory is rewritten with nilfs_set_link, so also adjust the execution conditions of the preparation work to avoid unnecessary directory reads. 
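For illustration only (an editorial sketch, not part of the patch; the helper name is
hypothetical, though the condition mirrors the old_is_dir / old_dir / new_dir test in the
diff below): the ".." entry of the directory being renamed names its parent, so it only
has to be rewritten, and the child's timestamps only touched, when the parent actually
changes.

	/* Hypothetical helper showing the rule the fix encodes. */
	static bool rename_needs_dotdot_update(const struct inode *old_dir,
					       const struct inode *new_dir,
					       bool old_is_dir)
	{
		/* ".." points at the parent; it changes only on a cross-directory move */
		return old_is_dir && old_dir != new_dir;
	}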
Link: https://lkml.kernel.org/r/20250111143518.7901-3-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- fs/nilfs2/namei.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index e02fae6757f1..953fbd5f0851 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c @@ -370,6 +370,7 @@ static int nilfs_rename(struct mnt_idmap *idmap, struct folio *old_folio; struct nilfs_dir_entry *old_de; struct nilfs_transaction_info ti; + bool old_is_dir = S_ISDIR(old_inode->i_mode); int err; if (flags & ~RENAME_NOREPLACE) @@ -385,7 +386,7 @@ static int nilfs_rename(struct mnt_idmap *idmap, goto out; } - if (S_ISDIR(old_inode->i_mode)) { + if (old_is_dir && old_dir != new_dir) { err = -EIO; dir_de = nilfs_dotdot(old_inode, &dir_folio); if (!dir_de) @@ -397,7 +398,7 @@ static int nilfs_rename(struct mnt_idmap *idmap, struct nilfs_dir_entry *new_de; err = -ENOTEMPTY; - if (dir_de && !nilfs_empty_dir(new_inode)) + if (old_is_dir && !nilfs_empty_dir(new_inode)) goto out_dir; new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, @@ -412,7 +413,7 @@ static int nilfs_rename(struct mnt_idmap *idmap, goto out_dir; nilfs_mark_inode_dirty(new_dir); inode_set_ctime_current(new_inode); - if (dir_de) + if (old_is_dir) drop_nlink(new_inode); drop_nlink(new_inode); nilfs_mark_inode_dirty(new_inode); @@ -420,7 +421,7 @@ static int nilfs_rename(struct mnt_idmap *idmap, err = nilfs_add_link(new_dentry, old_inode); if (err) goto out_dir; - if (dir_de) { + if (old_is_dir) { inc_nlink(new_dir); nilfs_mark_inode_dirty(new_dir); } @@ -434,9 +435,10 @@ static int nilfs_rename(struct mnt_idmap *idmap, err = nilfs_delete_entry(old_de, old_folio); if (likely(!err)) { - if (dir_de) { - err = nilfs_set_link(old_inode, dir_de, dir_folio, - new_dir); + if (old_is_dir) { + if (old_dir != new_dir) + err = nilfs_set_link(old_inode, dir_de, + dir_folio, new_dir); drop_nlink(old_dir); } nilfs_mark_inode_dirty(old_dir); From 4cd5702770874a477975d4ff8d8602c67cd88147 Mon Sep 17 00:00:00 2001 From: Randy Dunlap <rdunlap@infradead.org> Date: Fri, 10 Jan 2025 22:30:19 -0800 Subject: [PATCH 501/504] latencytop: use correct kernel-doc format for func params Use a ':' instead of a '-' after function parameters to eliminate kernel-doc warnings. 
kernel/latencytop.c:177: warning: Function parameter or struct member 'tsk' not described in '__account_scheduler_latency' ../kernel/latencytop.c:177: warning: Function parameter or struct member 'usecs' not described in '__account_scheduler_latency' ../kernel/latencytop.c:177: warning: Function parameter or struct member 'inter' not described in '__account_scheduler_latency' Link: https://lkml.kernel.org/r/20250111063019.910730-1-rdunlap@infradead.org Fixes: ad0b0fd554df ("sched, latencytop: incorporate review feedback from Andrew Morton") Signed-off-by: Randy Dunlap <rdunlap@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- kernel/latencytop.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/latencytop.c b/kernel/latencytop.c index 7a75eab9c179..77ee3ea8a573 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c @@ -158,9 +158,9 @@ account_global_scheduler_latency(struct task_struct *tsk, /** * __account_scheduler_latency - record an occurred latency - * @tsk - the task struct of the task hitting the latency - * @usecs - the duration of the latency in microseconds - * @inter - 1 if the sleep was interruptible, 0 if uninterruptible + * @tsk: the task struct of the task hitting the latency + * @usecs: the duration of the latency in microseconds + * @inter: 1 if the sleep was interruptible, 0 if uninterruptible * * This function is the main entry point for recording latency entries * as called by the scheduler. From 1bc3c5db9becd0171108c2dcfaf7ef5ef10c1e03 Mon Sep 17 00:00:00 2001 From: Randy Dunlap <rdunlap@infradead.org> Date: Fri, 10 Jan 2025 22:30:08 -0800 Subject: [PATCH 502/504] kthread: modify kernel-doc function name to match code kthread.c:1073: warning: expecting prototype for kthread_create_worker(). Prototype was for kthread_create_worker_on_node() instead Link: https://lkml.kernel.org/r/20250111063008.910712-1-rdunlap@infradead.org Fixes: b1e125dcbcea ("kthread: Unify kthread_create_on_cpu() and kthread_create_worker_on_cpu() automatic format") Signed-off-by: Randy Dunlap <rdunlap@infradead.org> Cc: Frederic Weisbecker <frederic@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- kernel/kthread.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/kthread.c b/kernel/kthread.c index 2fd0daa6b3b6..f847be88bf07 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -900,7 +900,7 @@ fail_task: } /** - * kthread_create_worker - create a kthread worker + * kthread_create_worker_on_node - create a kthread worker * @flags: flags modifying the default behavior of the worker * @namefmt: printf-style name for the kthread worker (task). * From 8d155b8aed9f2bd78adf32d68aa6894c1af9e2ad Mon Sep 17 00:00:00 2001 From: Randy Dunlap <rdunlap@infradead.org> Date: Fri, 10 Jan 2025 22:29:44 -0800 Subject: [PATCH 503/504] gcov: clang: use correct function param names Fix the function parameter names to match the function so that the kernel-doc warnings disappear. 
clang.c:273: warning: Function parameter or struct member 'dst' not described in 'gcov_info_add' clang.c:273: warning: Function parameter or struct member 'src' not described in 'gcov_info_add' clang.c:273: warning: Excess function parameter 'dest' description in 'gcov_info_add' clang.c:273: warning: Excess function parameter 'source' description in 'gcov_info_add' Link: https://lkml.kernel.org/r/20250111062944.910638-1-rdunlap@infradead.org Signed-off-by: Randy Dunlap <rdunlap@infradead.org> Cc: Peter Oberparleiter <oberpar@linux.ibm.com> Cc: Nathan Chancellor <nathan@kernel.org> Cc: Nick Desaulniers <ndesaulniers@google.com> Cc: Bill Wendling <morbo@google.com> Cc: Justin Stitt <justinstitt@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- kernel/gcov/clang.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/gcov/clang.c b/kernel/gcov/clang.c index 7670a811a565..8b888a6193cc 100644 --- a/kernel/gcov/clang.c +++ b/kernel/gcov/clang.c @@ -264,10 +264,10 @@ int gcov_info_is_compatible(struct gcov_info *info1, struct gcov_info *info2) /** * gcov_info_add - add up profiling data - * @dest: profiling data set to which data is added - * @source: profiling data set which is added + * @dst: profiling data set to which data is added + * @src: profiling data set which is added * - * Adds profiling counts of @source to @dest. + * Adds profiling counts of @src to @dst. */ void gcov_info_add(struct gcov_info *dst, struct gcov_info *src) { From dfbe2aeb5ed491cb9f0a2de5a7ad9a9d4d477a8f Mon Sep 17 00:00:00 2001 From: Randy Dunlap <rdunlap@infradead.org> Date: Fri, 10 Jan 2025 22:29:05 -0800 Subject: [PATCH 504/504] ipc/util.c: complete the kernel-doc function descriptions Move the function descriptive comments so that they conform to kernel-doc format, eliminating the kernel-doc warnings. util.c:618: warning: missing initial short description on line: * ipc_obtain_object_idr util.c:640: warning: missing initial short description on line: * ipc_obtain_object_check Link: https://lkml.kernel.org/r/20250111062905.910576-1-rdunlap@infradead.org Signed-off-by: Randy Dunlap <rdunlap@infradead.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> --- ipc/util.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/ipc/util.c b/ipc/util.c index 05cb9de66735..cae60f11d9c2 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -615,12 +615,11 @@ void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out) } /** - * ipc_obtain_object_idr + * ipc_obtain_object_idr - Look for an id in the ipc ids idr and + * return associated ipc object. * @ids: ipc identifier set * @id: ipc id to look for * - * Look for an id in the ipc ids idr and return associated ipc object. - * * Call inside the RCU critical section. * The ipc object is *not* locked on exit. */ @@ -637,13 +636,11 @@ struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id) } /** - * ipc_obtain_object_check + * ipc_obtain_object_check - Similar to ipc_obtain_object_idr() but + * also checks the ipc object sequence number. * @ids: ipc identifier set * @id: ipc id to look for * - * Similar to ipc_obtain_object_idr() but also checks the ipc object - * sequence number. - * * Call inside the RCU critical section. * The ipc object is *not* locked on exit. */
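For reference, the kernel-doc conventions that the documentation fixes above converge on,
shown on a hypothetical function (an editorial example; the name and parameters are
invented and appear in none of the patched files): the short description follows the
function name on the first line, each parameter is introduced as "@name:" with a colon
rather than " -", and returns with multiple error codes are listed under a "Return:"
section.

	/**
	 * example_read_block - read a single metadata block
	 * @inode: target inode
	 * @blkoff: block offset to read
	 * @out_bh: place to store the buffer head on success
	 *
	 * Return: 0 on success, or one of the following negative error codes on
	 * failure:
	 * * %-EIO	- I/O error (including metadata corruption).
	 * * %-ENOMEM	- Insufficient memory available.
	 */
	int example_read_block(struct inode *inode, unsigned long blkoff,
			       struct buffer_head **out_bh);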