Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
(synced 2024-12-29 09:16:33 +00:00)
mm/ksm: fix ksm_zero_pages accounting

We normally do ksm_zero_pages++ in ksmd when a page is merged with the zero
page, but ksm_zero_pages-- is done from the page-table side, where there is
no protection around the access to ksm_zero_pages. So in rare cases we can
read a bogus value of ksm_zero_pages, such as -1, which is confusing to
users.

Fix it by changing ksm_zero_pages to atomic_long_t, and do the same for
mm->ksm_zero_pages.

Link: https://lkml.kernel.org/r/20240528-b4-ksm-counters-v3-2-34bb358fdc13@linux.dev
Fixes: e2942062e0 ("ksm: count all zero pages placed by KSM")
Fixes: 6080d19f07 ("ksm: add ksm zero pages for each process")
Signed-off-by: Chengming Zhou <chengming.zhou@linux.dev>
Acked-by: David Hildenbrand <david@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ran Xiaokai <ran.xiaokai@zte.com.cn>
Cc: Stefan Roesch <shr@devkernel.io>
Cc: xu xin <xu.xin16@zte.com.cn>
Cc: Yang Yang <yang.yang29@zte.com.cn>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
parent
730cdc2c72
commit
c2dc78b86e
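To see why an unprotected counter can end up at a value like -1, here is a
minimal userspace sketch (an analogue for illustration only, not kernel code;
it assumes C11 <stdatomic.h> and POSIX threads): one thread increments a
plain long the way ksmd did, another decrements it the way the page-table
side did, and the same traffic is mirrored onto an atomic long. The plain
counter usually ends up wrong; the atomic one stays exact.

/*
 * Userspace analogue of the race described above. Plain ++/-- on a long
 * from two threads are unsynchronized read-modify-write sequences and can
 * lose updates; atomic_fetch_add/sub apply each change exactly once.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ITERS 1000000

static long plain_counter;          /* unprotected, like the old ksm_zero_pages */
static atomic_long atomic_counter;  /* protected, like the fixed ksm_zero_pages */

static void *inc_thread(void *arg)
{
	for (int i = 0; i < ITERS; i++) {
		plain_counter++;                      /* racy */
		atomic_fetch_add(&atomic_counter, 1); /* safe */
	}
	return NULL;
}

static void *dec_thread(void *arg)
{
	for (int i = 0; i < ITERS; i++) {
		plain_counter--;                      /* racy */
		atomic_fetch_sub(&atomic_counter, 1); /* safe */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, inc_thread, NULL);
	pthread_create(&b, NULL, dec_thread, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* Both should be 0; the plain counter is usually not. */
	printf("plain:  %ld\n", plain_counter);
	printf("atomic: %ld\n", atomic_load(&atomic_counter));
	return 0;
}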
fs/proc/base.c

@@ -3214,7 +3214,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
 	mm = get_task_mm(task);
 	if (mm) {
 		seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
-		seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
+		seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
 		seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
 		seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
 		mmput(mm);
include/linux/ksm.h

@@ -33,16 +33,27 @@ void __ksm_exit(struct mm_struct *mm);
  */
 #define is_ksm_zero_pte(pte)	(is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
 
-extern unsigned long ksm_zero_pages;
+extern atomic_long_t ksm_zero_pages;
+
+static inline void ksm_map_zero_page(struct mm_struct *mm)
+{
+	atomic_long_inc(&ksm_zero_pages);
+	atomic_long_inc(&mm->ksm_zero_pages);
+}
 
 static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
 {
 	if (is_ksm_zero_pte(pte)) {
-		ksm_zero_pages--;
-		mm->ksm_zero_pages--;
+		atomic_long_dec(&ksm_zero_pages);
+		atomic_long_dec(&mm->ksm_zero_pages);
 	}
 }
 
+static inline long mm_ksm_zero_pages(struct mm_struct *mm)
+{
+	return atomic_long_read(&mm->ksm_zero_pages);
+}
+
 static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
 {
 	if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
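The helper trio added above keeps the global and per-mm counters in lockstep:
every map bumps both, every unmap drops both, and readers go through a single
accessor. The following userspace sketch mirrors that pattern (hypothetical
demo_* names, C11 atomics standing in for the kernel's atomic_long_t):

#include <stdatomic.h>

static atomic_long demo_global_zero_pages;   /* plays the role of ksm_zero_pages */

struct demo_mm {
	atomic_long ksm_zero_pages;           /* plays the role of mm->ksm_zero_pages */
};

/* Pair the global and per-"mm" updates so the two counts cannot drift. */
static inline void demo_map_zero_page(struct demo_mm *mm)
{
	atomic_fetch_add(&demo_global_zero_pages, 1);
	atomic_fetch_add(&mm->ksm_zero_pages, 1);
}

static inline void demo_unmap_zero_page(struct demo_mm *mm)
{
	atomic_fetch_sub(&demo_global_zero_pages, 1);
	atomic_fetch_sub(&mm->ksm_zero_pages, 1);
}

/* Single read-side accessor, like mm_ksm_zero_pages(). */
static inline long demo_mm_zero_pages(struct demo_mm *mm)
{
	return atomic_load(&mm->ksm_zero_pages);
}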
include/linux/mm_types.h

@@ -985,7 +985,7 @@ struct mm_struct {
		 * Represent how many empty pages are merged with kernel zero
		 * pages when enabling KSM use_zero_pages.
		 */
-		unsigned long ksm_zero_pages;
+		atomic_long_t ksm_zero_pages;
 #endif /* CONFIG_KSM */
 #ifdef CONFIG_LRU_GEN_WALKS_MMU
 		struct {
mm/ksm.c (11 changed lines)
@@ -296,7 +296,7 @@ static bool ksm_use_zero_pages __read_mostly;
 static bool ksm_smart_scan = true;
 
 /* The number of zero pages which is placed by KSM */
-unsigned long ksm_zero_pages;
+atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
 
 /* The number of pages that have been skipped due to "smart scanning" */
 static unsigned long ksm_pages_skipped;
@@ -1429,8 +1429,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
		 * the dirty bit in zero page's PTE is set.
		 */
		newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
-		ksm_zero_pages++;
-		mm->ksm_zero_pages++;
+		ksm_map_zero_page(mm);
		/*
		 * We're replacing an anonymous page with a zero page, which is
		 * not anonymous. We need to do proper accounting otherwise we
@@ -3374,7 +3373,7 @@ static void wait_while_offlining(void)
 #ifdef CONFIG_PROC_FS
 long ksm_process_profit(struct mm_struct *mm)
 {
-	return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
+	return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
 		mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
 }
 #endif /* CONFIG_PROC_FS */
@@ -3663,7 +3662,7 @@ KSM_ATTR_RO(pages_skipped);
 static ssize_t ksm_zero_pages_show(struct kobject *kobj,
				struct kobj_attribute *attr, char *buf)
 {
-	return sysfs_emit(buf, "%ld\n", ksm_zero_pages);
+	return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
 }
 KSM_ATTR_RO(ksm_zero_pages);
 
@@ -3672,7 +3671,7 @@ static ssize_t general_profit_show(struct kobject *kobj,
 {
	long general_profit;
 
-	general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE -
+	general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
		ksm_rmap_items * sizeof(struct ksm_rmap_item);
 
	return sysfs_emit(buf, "%ld\n", general_profit);
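To make the profit arithmetic above concrete, here is a small standalone
calculation with made-up numbers; the PAGE_SIZE and struct ksm_rmap_item
sizes are assumptions for the example only, not values taken from this
commit:

#include <stdio.h>

int main(void)
{
	long page_size = 4096;        /* assumed PAGE_SIZE */
	long rmap_item_size = 64;     /* assumed sizeof(struct ksm_rmap_item) */
	long pages_sharing = 10000;   /* hypothetical ksm_pages_sharing */
	long zero_pages = 2500;       /* hypothetical ksm_zero_pages */
	long rmap_items = 50000;      /* hypothetical ksm_rmap_items */

	/* Same shape as general_profit_show():
	 * (10000 + 2500) * 4096 - 50000 * 64 = 51200000 - 3200000 = 48000000 bytes */
	long general_profit = (pages_sharing + zero_pages) * page_size -
			      rmap_items * rmap_item_size;

	printf("general_profit: %ld bytes\n", general_profit);
	return 0;
}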