mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-12-28 16:56:26 +00:00
c6f53ed8f2
Patch series "mm, memcg: cg2 memory{.swap,}.peak write handlers", v7. This patch (of 2): Other mechanisms for querying the peak memory usage of either a process or v1 memory cgroup allow for resetting the high watermark. Restore parity with those mechanisms, but with a less racy API. For example: - Any write to memory.max_usage_in_bytes in a cgroup v1 mount resets the high watermark. - writing "5" to the clear_refs pseudo-file in a process's proc directory resets the peak RSS. This change is an evolution of a previous patch, which mostly copied the cgroup v1 behavior, however, there were concerns about races/ownership issues with a global reset, so instead this change makes the reset file-descriptor-local. Writing any non-empty string to the memory.peak and memory.swap.peak pseudo-files resets the high watermark to the current usage for subsequent reads through that same FD. Notably, following Johannes's suggestion, this implementation moves the O(FDs that have written) behavior onto the FD write(2) path. Instead, on the page-allocation path, we simply add one additional watermark to conditionally bump per-hierarchy level in the page-counter. Additionally, this takes Longman's suggestion of nesting the page-charging-path checks for the two watermarks to reduce the number of common-case comparisons. This behavior is particularly useful for work scheduling systems that need to track memory usage of worker processes/cgroups per-work-item. Since memory can't be squeezed like CPU can (the OOM-killer has opinions), these systems need to track the peak memory usage to compute system/container fullness when binpacking workitems. Most notably, Vimeo's use-case involves a system that's doing global binpacking across many Kubernetes pods/containers, and while we can use PSI for some local decisions about overload, we strive to avoid packing workloads too tightly in the first place. To facilitate this, we track the peak memory usage.
However, since we run with long-lived workers (to amortize startup costs) we need a way to track the high watermark while a work-item is executing. Polling runs the risk of missing short spikes that last for timescales below the polling interval, and peak memory tracking at the cgroup level is otherwise perfect for this use-case. As this data is used to ensure that binpacked work ends up with sufficient headroom, this use-case mostly avoids the inaccuracies surrounding reclaimable memory. Link: https://lkml.kernel.org/r/20240730231304.761942-1-davidf@vimeo.com Link: https://lkml.kernel.org/r/20240729143743.34236-1-davidf@vimeo.com Link: https://lkml.kernel.org/r/20240729143743.34236-2-davidf@vimeo.com Signed-off-by: David Finkel <davidf@vimeo.com> Suggested-by: Johannes Weiner <hannes@cmpxchg.org> Suggested-by: Waiman Long <longman@redhat.com> Acked-by: Johannes Weiner <hannes@cmpxchg.org> Reviewed-by: Michal Koutný <mkoutny@suse.com> Acked-by: Tejun Heo <tj@kernel.org> Reviewed-by: Roman Gushchin <roman.gushchin@linux.dev> Cc: Jonathan Corbet <corbet@lwn.net> Cc: Michal Hocko <mhocko@kernel.org> Cc: Muchun Song <muchun.song@linux.dev> Cc: Shakeel Butt <shakeel.butt@linux.dev> Cc: Shuah Khan <shuah@kernel.org> Cc: Zefan Li <lizefan.x@bytedance.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
110 lines
3.2 KiB
C
110 lines
3.2 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _LINUX_PAGE_COUNTER_H
|
|
#define _LINUX_PAGE_COUNTER_H
|
|
|
|
#include <linux/atomic.h>
|
|
#include <linux/cache.h>
|
|
#include <linux/limits.h>
|
|
#include <asm/page.h>
|
|
|
|
/*
 * Hierarchical page counter used for memory accounting (the comment below
 * references memcg->memory.usage, so memcg is at least one user).  Field
 * order matters: 'usage' is isolated on its own cacheline, and the
 * read-mostly configuration fields sit together behind _pad2_.
 */
struct page_counter {
	/*
	 * Make sure 'usage' does not share cacheline with any other field. The
	 * memcg->memory.usage is a hot member of struct mem_cgroup.
	 */
	atomic_long_t usage;		/* current number of pages charged */
	CACHELINE_PADDING(_pad1_);

	/* effective memory.min and memory.min usage tracking */
	unsigned long emin;
	atomic_long_t min_usage;
	atomic_long_t children_min_usage;

	/* effective memory.low and memory.low usage tracking */
	unsigned long elow;
	atomic_long_t low_usage;
	atomic_long_t children_low_usage;

	/*
	 * All-time high-water mark of 'usage'; both watermarks are reset to
	 * the current usage by page_counter_reset_watermark().
	 */
	unsigned long watermark;
	/* Latest cg2 reset watermark */
	unsigned long local_watermark;
	/*
	 * failcnt: presumably bumped when a charge fails against 'max' —
	 * maintained in mm/page_counter.c, not visible here; confirm there.
	 */
	unsigned long failcnt;

	/* Keep all the read-mostly fields in a separate cacheline. */
	CACHELINE_PADDING(_pad2_);

	bool protection_support;	/* see page_counter_init() */
	unsigned long min;
	unsigned long low;
	unsigned long high;
	unsigned long max;
	struct page_counter *parent;	/* NULL for the hierarchy root */
} ____cacheline_internodealigned_in_smp;
|
|
|
|
#if BITS_PER_LONG == 32
#define PAGE_COUNTER_MAX LONG_MAX
#else
/*
 * On 64-bit, cap the counter in pages such that the equivalent byte count
 * (pages * PAGE_SIZE) presumably still fits in a long — TODO confirm
 * against the users that convert limits between bytes and pages.
 */
#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE)
#endif
|
|
|
|
/*
|
|
* Protection is supported only for the first counter (with id 0).
|
|
*/
|
|
static inline void page_counter_init(struct page_counter *counter,
|
|
struct page_counter *parent,
|
|
bool protection_support)
|
|
{
|
|
counter->usage = (atomic_long_t)ATOMIC_LONG_INIT(0);
|
|
counter->max = PAGE_COUNTER_MAX;
|
|
counter->parent = parent;
|
|
counter->protection_support = protection_support;
|
|
}
|
|
|
|
/*
 * page_counter_read - return the current number of pages charged to @counter.
 *
 * Lockless read of the atomic usage; safe to call concurrently with
 * charge/uncharge.
 */
static inline unsigned long page_counter_read(struct page_counter *counter)
{
	return atomic_long_read(&counter->usage);
}
|
|
|
|
/*
 * Charge/uncharge and protection-setting primitives, implemented out of
 * line (presumably in mm/page_counter.c).  try_charge reports the level
 * that rejected the charge through @fail on failure.
 */
void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
bool page_counter_try_charge(struct page_counter *counter,
			     unsigned long nr_pages,
			     struct page_counter **fail);
void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_min(struct page_counter *counter, unsigned long nr_pages);
void page_counter_set_low(struct page_counter *counter, unsigned long nr_pages);
|
|
/*
 * page_counter_set_high - update the high threshold of @counter.
 *
 * WRITE_ONCE suggests 'high' is read locklessly elsewhere (via READ_ONCE);
 * the readers are not visible in this header — confirm at the call sites.
 */
static inline void page_counter_set_high(struct page_counter *counter,
					 unsigned long nr_pages)
{
	WRITE_ONCE(counter->high, nr_pages);
}
|
|
|
|
/* Set the hard limit; returns 0 on success or a negative errno-style code. */
int page_counter_set_max(struct page_counter *counter, unsigned long nr_pages);
/* Parse a user-supplied limit string ("max" maps to the maximum value). */
int page_counter_memparse(const char *buf, const char *max,
			  unsigned long *nr_pages);
|
|
|
|
/*
 * page_counter_reset_watermark - reset both high-water marks to the
 * current usage.
 *
 * Used by the cgroup2 memory.peak/memory.swap.peak write handlers; the
 * store order below is deliberate, so keep it.
 */
static inline void page_counter_reset_watermark(struct page_counter *counter)
{
	unsigned long usage = page_counter_read(counter);

	/*
	 * Update local_watermark first, so it's always <= watermark
	 * (modulo CPU/compiler re-ordering)
	 */
	counter->local_watermark = usage;
	counter->watermark = usage;
}
|
|
|
|
#ifdef CONFIG_MEMCG
/*
 * Propagate effective min/low protection (emin/elow) down the hierarchy
 * rooted at @root; implemented out of line when memcg is enabled.
 */
void page_counter_calculate_protection(struct page_counter *root,
				       struct page_counter *counter,
				       bool recursive_protection);
#else
/* Without CONFIG_MEMCG, protection calculation is a no-op. */
static inline void page_counter_calculate_protection(struct page_counter *root,
						     struct page_counter *counter,
						     bool recursive_protection) {}
#endif
|
|
|
|
#endif /* _LINUX_PAGE_COUNTER_H */
|