commit 5eea5820c7

Merge tag 'mm-stable-2023-09-04-14-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull more MM updates from Andrew Morton:

 - Stefan Roesch has added ksm statistics to /proc/pid/smaps

 - Also a number of singleton patches, mainly cleanups and leftovers

* tag 'mm-stable-2023-09-04-14-00' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
  mm/kmemleak: move up cond_resched() call in page scanning loop
  mm: page_alloc: remove stale CMA guard code
  MAINTAINERS: add rmap.h to mm entry
  rmap: remove anon_vma_link() nommu stub
  proc/ksm: add ksm stats to /proc/pid/smaps
  mm/hwpoison: rename hwp_walk* to hwpoison_walk*
  mm: memory-failure: add PageOffline() check
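The user-visible effect of the smaps part of this pull is a new per-VMA
"KSM:" field in /proc/pid/smaps (documented in the first hunk below). As a
minimal sketch, assuming a kernel with this series applied, the following
standalone program sums that field across the calling process's mappings;
it is illustrative only and not part of the commit:

    /* Illustrative sketch (not from this commit): sum the new "KSM:"
     * field across all VMAs of the current process. On kernels without
     * this series, no "KSM:" lines exist and the total stays zero. */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/self/smaps", "r");
            char line[256];
            unsigned long kb, total_ksm_kb = 0;

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            while (fgets(line, sizeof(line), f)) {
                    /* sscanf skips the variable-width padding */
                    if (sscanf(line, "KSM: %lu kB", &kb) == 1)
                            total_ksm_kb += kb;
            }
            fclose(f);
            printf("KSM-backed memory: %lu kB\n", total_ksm_kb);
            return 0;
    }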
--- a/Documentation/filesystems/proc.rst
+++ b/Documentation/filesystems/proc.rst
@@ -461,6 +461,7 @@ Memory Area, or VMA) there is a series of lines such as the following::
 Private_Dirty:         0 kB
 Referenced:          892 kB
 Anonymous:             0 kB
+KSM:                   0 kB
 LazyFree:              0 kB
 AnonHugePages:         0 kB
 ShmemPmdMapped:        0 kB
@@ -501,6 +502,9 @@ accessed.
 a mapping associated with a file may contain anonymous pages: when MAP_PRIVATE
 and a page is modified, the file page is replaced by a private anonymous copy.
 
+"KSM" reports how many of the pages are KSM pages. Note that KSM-placed zeropages
+are not included, only actual KSM pages.
+
 "LazyFree" shows the amount of memory which is marked by madvise(MADV_FREE).
 The memory isn't freed immediately with madvise(). It's freed in memory
 pressure if the memory is clean. Please note that the printed value might
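To make the "LazyFree" paragraph above concrete, here is a hedged userspace
sketch (not part of the patch): it marks an anonymous buffer with
madvise(MADV_FREE), after which the pages are reported under "LazyFree:" in
smaps until they are reclaimed under memory pressure or written to again:

    /* Illustrative only: mark an anonymous mapping lazy-free. The pages
     * are accounted under "LazyFree:" in /proc/<pid>/smaps and are freed
     * lazily, under memory pressure, if they are still clean. */
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 16 * 4096;
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (buf == MAP_FAILED)
                    return 1;
            memset(buf, 0xaa, len);             /* fault the pages in */
            if (madvise(buf, len, MADV_FREE))   /* mark them lazy-free */
                    return 1;
            /* Reading is still safe; a write cancels the lazy-free
             * state for the touched pages. */
            munmap(buf, len);
            return 0;
    }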
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13742,6 +13742,7 @@ F: include/linux/memory_hotplug.h
 F:	include/linux/mm.h
 F:	include/linux/mmzone.h
 F:	include/linux/pagewalk.h
+F:	include/linux/rmap.h
 F:	include/trace/events/ksm.h
 F:	mm/
 F:	tools/mm/
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -4,6 +4,7 @@
 #include <linux/hugetlb.h>
 #include <linux/huge_mm.h>
 #include <linux/mount.h>
+#include <linux/ksm.h>
 #include <linux/seq_file.h>
 #include <linux/highmem.h>
 #include <linux/ptrace.h>
@@ -396,6 +397,7 @@ struct mem_size_stats {
 	unsigned long swap;
 	unsigned long shared_hugetlb;
 	unsigned long private_hugetlb;
+	unsigned long ksm;
 	u64 pss;
 	u64 pss_anon;
 	u64 pss_file;
@@ -452,6 +454,9 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
 			mss->lazyfree += size;
 	}
 
+	if (PageKsm(page))
+		mss->ksm += size;
+
 	mss->resident += size;
 	/* Accumulate the size in pages that have been accessed. */
 	if (young || page_is_young(page) || PageReferenced(page))
@@ -825,6 +830,7 @@ static void __show_smap(struct seq_file *m, const struct mem_size_stats *mss,
 	SEQ_PUT_DEC(" kB\nPrivate_Dirty:  ", mss->private_dirty);
 	SEQ_PUT_DEC(" kB\nReferenced:     ", mss->referenced);
 	SEQ_PUT_DEC(" kB\nAnonymous:      ", mss->anonymous);
+	SEQ_PUT_DEC(" kB\nKSM:            ", mss->ksm);
 	SEQ_PUT_DEC(" kB\nLazyFree:       ", mss->lazyfree);
 	SEQ_PUT_DEC(" kB\nAnonHugePages:  ", mss->anonymous_thp);
 	SEQ_PUT_DEC(" kB\nShmemPmdMapped: ", mss->shmem_thp);
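For context on when mss->ksm becomes non-zero: pages placed by KSM satisfy
PageKsm(), which smaps_account() above now attributes to the new counter. A
hedged sketch (not part of the patch) of a process that would produce such
pages, assuming KSM is enabled via /sys/kernel/mm/ksm/run:

    /* Illustrative only: register identical anonymous pages with KSM.
     * With KSM running (echo 1 > /sys/kernel/mm/ksm/run), ksmd may merge
     * these pages; merged pages then show up in the "KSM:" field of
     * /proc/<pid>/smaps added by this series. */
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
            size_t len = 64 * 4096;
            char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (buf == MAP_FAILED)
                    return 1;
            memset(buf, 0x5a, len);                  /* identical contents */
            if (madvise(buf, len, MADV_MERGEABLE))   /* hand range to KSM */
                    return 1;
            sleep(30);      /* give ksmd time to scan and merge */
            return 0;
    }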
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -479,7 +479,6 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
 
 #define anon_vma_init()		do {} while (0)
 #define anon_vma_prepare(vma)	(0)
-#define anon_vma_link(vma)	do {} while (0)
 
 static inline int folio_referenced(struct folio *folio, int is_locked,
 				  struct mem_cgroup *memcg,
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -1584,6 +1584,9 @@ static void kmemleak_scan(void)
 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 			struct page *page = pfn_to_online_page(pfn);
 
+			if (!(pfn & 63))
+				cond_resched();
+
 			if (!page)
 				continue;
 
@@ -1594,8 +1597,6 @@ static void kmemleak_scan(void)
 			if (page_count(page) == 0)
 				continue;
 			scan_block(page, page + 1, NULL);
-			if (!(pfn & 63))
-				cond_resched();
 		}
 	}
 	put_online_mems();
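The point of the kmemleak change is latency: the old placement only reached
cond_resched() for pages that survived every early continue, so a long run
of skipped PFNs never yielded the CPU. Checking pfn & 63 at the top of the
loop bounds that. A loose userspace analogue of the pattern, yielding every
64 iterations regardless of early exits (illustrative only):

    /* Userspace analogue of the pattern above (illustrative only):
     * yield periodically inside a long loop so that iterations which
     * bail out early still give the scheduler a chance to run. */
    #include <sched.h>

    static void scan_range(unsigned long start, unsigned long end)
    {
            for (unsigned long pfn = start; pfn < end; pfn++) {
                    if (!(pfn & 63))        /* every 64 iterations */
                            sched_yield();
                    /* ... per-item work, possibly with early continue ... */
            }
    }

    int main(void)
    {
            scan_range(0, 1UL << 20);
            return 0;
    }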
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -717,7 +717,7 @@ static void collect_procs(struct page *page, struct list_head *tokill,
 		collect_procs_file(page, tokill, force_early);
 }
 
-struct hwp_walk {
+struct hwpoison_walk {
 	struct to_kill tk;
 	unsigned long pfn;
 	int flags;
@@ -752,7 +752,7 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift,
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
-				      struct hwp_walk *hwp)
+				      struct hwpoison_walk *hwp)
 {
 	pmd_t pmd = *pmdp;
 	unsigned long pfn;
@@ -770,7 +770,7 @@ static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
 }
 #else
 static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
-				      struct hwp_walk *hwp)
+				      struct hwpoison_walk *hwp)
 {
 	return 0;
 }
@@ -779,7 +779,7 @@ static int check_hwpoisoned_pmd_entry(pmd_t *pmdp, unsigned long addr,
 static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr,
 			      unsigned long end, struct mm_walk *walk)
 {
-	struct hwp_walk *hwp = walk->private;
+	struct hwpoison_walk *hwp = walk->private;
 	int ret = 0;
 	pte_t *ptep, *mapped_pte;
 	spinlock_t *ptl;
@@ -813,7 +813,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
 			    unsigned long addr, unsigned long end,
 			    struct mm_walk *walk)
 {
-	struct hwp_walk *hwp = walk->private;
+	struct hwpoison_walk *hwp = walk->private;
 	pte_t pte = huge_ptep_get(ptep);
 	struct hstate *h = hstate_vma(walk->vma);
 
@@ -824,7 +824,7 @@ static int hwpoison_hugetlb_range(pte_t *ptep, unsigned long hmask,
 #define hwpoison_hugetlb_range	NULL
 #endif
 
-static const struct mm_walk_ops hwp_walk_ops = {
+static const struct mm_walk_ops hwpoison_walk_ops = {
 	.pmd_entry = hwpoison_pte_range,
 	.hugetlb_entry = hwpoison_hugetlb_range,
 	.walk_lock = PGWALK_RDLOCK,
@@ -847,7 +847,7 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
 				  int flags)
 {
 	int ret;
-	struct hwp_walk priv = {
+	struct hwpoison_walk priv = {
 		.pfn = pfn,
 	};
 	priv.tk.tsk = p;
@@ -856,7 +856,7 @@ static int kill_accessing_process(struct task_struct *p, unsigned long pfn,
 		return -EFAULT;
 
 	mmap_read_lock(p->mm);
-	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwp_walk_ops,
+	ret = walk_page_range(p->mm, 0, TASK_SIZE, &hwpoison_walk_ops,
 			      (void *)&priv);
 	if (ret == 1 && priv.tk.addr)
 		kill_proc(&priv.tk, pfn, flags);
@@ -1562,7 +1562,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	 * Here we are interested only in user-mapped pages, so skip any
 	 * other types of pages.
 	 */
-	if (PageReserved(p) || PageSlab(p) || PageTable(p))
+	if (PageReserved(p) || PageSlab(p) || PageTable(p) || PageOffline(p))
 		return true;
 	if (!(PageLRU(hpage) || PageHuge(p)))
 		return true;
@@ -2533,7 +2533,8 @@ int unpoison_memory(unsigned long pfn)
 		goto unlock_mutex;
 	}
 
-	if (folio_test_slab(folio) || PageTable(&folio->page) || folio_test_reserved(folio))
+	if (folio_test_slab(folio) || PageTable(&folio->page) ||
+	    folio_test_reserved(folio) || PageOffline(&folio->page))
 		goto unlock_mutex;
 
 	/*
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2641,12 +2641,6 @@ struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone,
 	do {
 		page = NULL;
 		spin_lock_irqsave(&zone->lock, flags);
-		/*
-		 * order-0 request can reach here when the pcplist is skipped
-		 * due to non-CMA allocation context. HIGHATOMIC area is
-		 * reserved for high-order atomic allocation, so order-0
-		 * request should skip it.
-		 */
 		if (alloc_flags & ALLOC_HIGHATOMIC)
 			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
 		if (!page) {
@@ -2780,17 +2774,10 @@ struct page *rmqueue(struct zone *preferred_zone,
 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
 
 	if (likely(pcp_allowed_order(order))) {
-		/*
-		 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
-		 * we need to skip it when CMA area isn't allowed.
-		 */
-		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
-				migratetype != MIGRATE_MOVABLE) {
-			page = rmqueue_pcplist(preferred_zone, zone, order,
-					migratetype, alloc_flags);
-			if (likely(page))
-				goto out;
-		}
+		page = rmqueue_pcplist(preferred_zone, zone, order,
+				       migratetype, alloc_flags);
+		if (likely(page))
+			goto out;
 	}
 
 	page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,