mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-01 10:45:49 +00:00
mm/oom_kill: count global and memory cgroup oom kills
Show count of oom killer invocations in /proc/vmstat and count of processes killed in memory cgroup in knob "memory.events" (in memory.oom_control for v1 cgroup). Also describe difference between "oom" and "oom_kill" in memory cgroup documentation. Currently oom in memory cgroup kills tasks iff shortage has happened inside page fault. These counters help in monitoring oom kills - for now the only way is grepping for magic words in kernel log. [akpm@linux-foundation.org: fix for mem_cgroup_count_vm_event() rename] [akpm@linux-foundation.org: fix comment, per Konstantin] Link: http://lkml.kernel.org/r/149570810989.203600.9492483715840752937.stgit@buzz Signed-off-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru> Cc: Michal Hocko <mhocko@kernel.org> Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp> Cc: Roman Guschin <guroan@gmail.com> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
2262185c5b
commit
8e675f7af5
@ -852,13 +852,25 @@ PAGE_SIZE multiple when read back.
|
|||||||
|
|
||||||
The number of times the cgroup's memory usage was
|
The number of times the cgroup's memory usage was
|
||||||
about to go over the max boundary. If direct reclaim
|
about to go over the max boundary. If direct reclaim
|
||||||
fails to bring it down, the OOM killer is invoked.
|
fails to bring it down, the cgroup goes to OOM state.
|
||||||
|
|
||||||
oom
|
oom
|
||||||
|
|
||||||
The number of times the OOM killer has been invoked in
|
The number of times the cgroup's memory usage
|
||||||
the cgroup. This may not exactly match the number of
|
reached the limit and allocation was about to fail.
|
||||||
processes killed but should generally be close.
|
|
||||||
|
Depending on context result could be invocation of OOM
|
||||||
|
killer and retrying allocation or failing allocation.
|
||||||
|
|
||||||
|
Failed allocation in its turn could be returned into
|
||||||
|
userspace as -ENOMEM or silently ignored in cases like
|
||||||
|
disk readahead. For now OOM in memory cgroup kills
|
||||||
|
tasks iff shortage has happened inside page fault.
|
||||||
|
|
||||||
|
oom_kill
|
||||||
|
|
||||||
|
The number of processes belonging to this cgroup
|
||||||
|
killed by any kind of OOM killer.
|
||||||
|
|
||||||
memory.stat
|
memory.stat
|
||||||
|
|
||||||
|
@ -582,8 +582,11 @@ static inline void count_memcg_event_mm(struct mm_struct *mm,
|
|||||||
|
|
||||||
rcu_read_lock();
|
rcu_read_lock();
|
||||||
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
|
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
|
||||||
if (likely(memcg))
|
if (likely(memcg)) {
|
||||||
this_cpu_inc(memcg->stat->events[idx]);
|
this_cpu_inc(memcg->stat->events[idx]);
|
||||||
|
if (idx == OOM_KILL)
|
||||||
|
cgroup_file_notify(&memcg->events_file);
|
||||||
|
}
|
||||||
rcu_read_unlock();
|
rcu_read_unlock();
|
||||||
}
|
}
|
||||||
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
|
||||||
|
@ -41,6 +41,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
|
|||||||
KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
|
KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
|
||||||
PAGEOUTRUN, PGROTATED,
|
PAGEOUTRUN, PGROTATED,
|
||||||
DROP_PAGECACHE, DROP_SLAB,
|
DROP_PAGECACHE, DROP_SLAB,
|
||||||
|
OOM_KILL,
|
||||||
#ifdef CONFIG_NUMA_BALANCING
|
#ifdef CONFIG_NUMA_BALANCING
|
||||||
NUMA_PTE_UPDATES,
|
NUMA_PTE_UPDATES,
|
||||||
NUMA_HUGE_PTE_UPDATES,
|
NUMA_HUGE_PTE_UPDATES,
|
||||||
|
@ -3573,6 +3573,7 @@ static int mem_cgroup_oom_control_read(struct seq_file *sf, void *v)
|
|||||||
|
|
||||||
seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
|
seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable);
|
||||||
seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
|
seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom);
|
||||||
|
seq_printf(sf, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -5164,6 +5165,7 @@ static int memory_events_show(struct seq_file *m, void *v)
|
|||||||
seq_printf(m, "high %lu\n", memcg_sum_events(memcg, MEMCG_HIGH));
|
seq_printf(m, "high %lu\n", memcg_sum_events(memcg, MEMCG_HIGH));
|
||||||
seq_printf(m, "max %lu\n", memcg_sum_events(memcg, MEMCG_MAX));
|
seq_printf(m, "max %lu\n", memcg_sum_events(memcg, MEMCG_MAX));
|
||||||
seq_printf(m, "oom %lu\n", memcg_sum_events(memcg, MEMCG_OOM));
|
seq_printf(m, "oom %lu\n", memcg_sum_events(memcg, MEMCG_OOM));
|
||||||
|
seq_printf(m, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL));
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
@ -876,6 +876,11 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
|
|||||||
/* Get a reference to safely compare mm after task_unlock(victim) */
|
/* Get a reference to safely compare mm after task_unlock(victim) */
|
||||||
mm = victim->mm;
|
mm = victim->mm;
|
||||||
mmgrab(mm);
|
mmgrab(mm);
|
||||||
|
|
||||||
|
/* Raise event before sending signal: task reaper must see this */
|
||||||
|
count_vm_event(OOM_KILL);
|
||||||
|
count_memcg_event_mm(mm, OOM_KILL);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We should send SIGKILL before setting TIF_MEMDIE in order to prevent
|
* We should send SIGKILL before setting TIF_MEMDIE in order to prevent
|
||||||
* the OOM victim from depleting the memory reserves from the user
|
* the OOM victim from depleting the memory reserves from the user
|
||||||
|
@ -1018,6 +1018,7 @@ const char * const vmstat_text[] = {
|
|||||||
|
|
||||||
"drop_pagecache",
|
"drop_pagecache",
|
||||||
"drop_slab",
|
"drop_slab",
|
||||||
|
"oom_kill",
|
||||||
|
|
||||||
#ifdef CONFIG_NUMA_BALANCING
|
#ifdef CONFIG_NUMA_BALANCING
|
||||||
"numa_pte_updates",
|
"numa_pte_updates",
|
||||||
|
Loading…
Reference in New Issue
Block a user