mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-06 05:06:29 +00:00
57e9cc50f4
Direct reclaim stats are useful for identifying a potential source for application latency, as well as spotting issues with kswapd. However, khugepaged currently distorts the picture: as a kernel thread it doesn't impose allocation latencies on userspace, and it explicitly opts out of kswapd reclaim. Its activity showing up in the direct reclaim stats is misleading. Counting it as kswapd reclaim could also cause confusion when trying to understand actual kswapd behavior. Break out khugepaged from the direct reclaim counters into new pgsteal_khugepaged, pgdemote_khugepaged, pgscan_khugepaged counters. Test with a huge executable (CONFIG_READ_ONLY_THP_FOR_FS): pgsteal_kswapd 1342185 pgsteal_direct 0 pgsteal_khugepaged 3623 pgscan_kswapd 1345025 pgscan_direct 0 pgscan_khugepaged 3623 Link: https://lkml.kernel.org/r/20221026180133.377671-1-hannes@cmpxchg.org Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Reported-by: Eric Bergen <ebergen@meta.com> Cc: Matthew Wilcox (Oracle) <willy@infradead.org> Cc: Yang Shi <shy828301@gmail.com> Cc: Yosry Ahmed <yosryahmed@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
69 lines
1.8 KiB
C
69 lines
1.8 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _LINUX_KHUGEPAGED_H
|
|
#define _LINUX_KHUGEPAGED_H
|
|
|
|
#include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */
|
|
|
|
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
|
extern struct attribute_group khugepaged_attr_group;
|
|
|
|
extern int khugepaged_init(void);
|
|
extern void khugepaged_destroy(void);
|
|
extern int start_stop_khugepaged(void);
|
|
extern void __khugepaged_enter(struct mm_struct *mm);
|
|
extern void __khugepaged_exit(struct mm_struct *mm);
|
|
extern void khugepaged_enter_vma(struct vm_area_struct *vma,
|
|
unsigned long vm_flags);
|
|
extern void khugepaged_min_free_kbytes_update(void);
|
|
extern bool current_is_khugepaged(void);
|
|
#ifdef CONFIG_SHMEM
extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
				   bool install_pmd);
#else
/*
 * Without shmem support there are no pte-mapped file THPs to collapse,
 * so report success as a no-op and spare callers any special casing.
 */
static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
					  unsigned long addr, bool install_pmd)
{
	return 0;
}
#endif

static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
|
|
{
|
|
if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
|
|
__khugepaged_enter(mm);
|
|
}
|
|
|
|
static inline void khugepaged_exit(struct mm_struct *mm)
|
|
{
|
|
if (test_bit(MMF_VM_HUGEPAGE, &mm->flags))
|
|
__khugepaged_exit(mm);
|
|
}
|
|
#else /* CONFIG_TRANSPARENT_HUGEPAGE */

/* THP disabled: khugepaged does not exist, so fork tracks nothing. */
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
}

/* THP disabled: nothing to tear down when an mm goes away. */
static inline void khugepaged_exit(struct mm_struct *mm)
{
}

/* THP disabled: VMAs are never handed to khugepaged. */
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
					unsigned long vm_flags)
{
}

/*
 * THP disabled: there are no pte-mapped THPs to collapse; return
 * success so callers need no special casing.
 */
static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
					  unsigned long addr, bool install_pmd)
{
	return 0;
}

/* THP disabled: min_free_kbytes changes need no khugepaged update. */
static inline void khugepaged_min_free_kbytes_update(void)
{
}

/* THP disabled: there is no khugepaged thread, so current never is it. */
static inline bool current_is_khugepaged(void)
{
	return false;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
|
|
|
|
#endif /* _LINUX_KHUGEPAGED_H */
|