From 95a616d89ccd2d2af0bd26c13c50143b301d82e8 Mon Sep 17 00:00:00 2001 From: everestkc Date: Sun, 15 Sep 2024 02:29:21 -0600 Subject: [PATCH 01/13] cgroup/cpuset: Fix spelling errors in file kernel/cgroup/cpuset.c Corrected the spelling errors repoted by codespell as follows: temparary ==> temporary Proprogate ==> Propagate constrainted ==> constrained Signed-off-by: Everest K.C. Acked-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/cgroup/cpuset.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index a4dd285cdf39..bb1ea7878126 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1339,7 +1339,7 @@ static inline bool is_local_partition(struct cpuset *cs) * remote_partition_enable - Enable current cpuset as a remote partition root * @cs: the cpuset to update * @new_prs: new partition_root_state - * @tmp: temparary masks + * @tmp: temporary masks * Return: 0 if successful, errcode if error * * Enable the current cpuset to become a remote partition root taking CPUs @@ -1377,7 +1377,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs, update_unbound_workqueue_cpumask(isolcpus_updated); /* - * Proprogate changes in top_cpuset's effective_cpus down the hierarchy. + * Propagate changes in top_cpuset's effective_cpus down the hierarchy. */ cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus); update_sibling_cpumasks(&top_cpuset, NULL, tmp); @@ -1387,7 +1387,7 @@ static int remote_partition_enable(struct cpuset *cs, int new_prs, /* * remote_partition_disable - Remove current cpuset from remote partition list * @cs: the cpuset to update - * @tmp: temparary masks + * @tmp: temporary masks * * The effective_cpus is also updated. * @@ -1413,7 +1413,7 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp) update_unbound_workqueue_cpumask(isolcpus_updated); /* - * Proprogate changes in top_cpuset's effective_cpus down the hierarchy. + * Propagate changes in top_cpuset's effective_cpus down the hierarchy. */ cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus); update_sibling_cpumasks(&top_cpuset, NULL, tmp); @@ -1423,7 +1423,7 @@ static void remote_partition_disable(struct cpuset *cs, struct tmpmasks *tmp) * remote_cpus_update - cpus_exclusive change of remote partition * @cs: the cpuset to be updated * @newmask: the new effective_xcpus mask - * @tmp: temparary masks + * @tmp: temporary masks * * top_cpuset and subpartitions_cpus will be updated or partition can be * invalidated. @@ -1465,7 +1465,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask, update_unbound_workqueue_cpumask(isolcpus_updated); /* - * Proprogate changes in top_cpuset's effective_cpus down the hierarchy. + * Propagate changes in top_cpuset's effective_cpus down the hierarchy. */ cpuset_update_tasks_cpumask(&top_cpuset, tmp->new_cpus); update_sibling_cpumasks(&top_cpuset, NULL, tmp); @@ -1480,7 +1480,7 @@ static void remote_cpus_update(struct cpuset *cs, struct cpumask *newmask, * @cs: the cpuset to be updated * @newmask: the new effective_xcpus mask * @delmask: temporary mask for deletion (not in tmp) - * @tmp: temparary masks + * @tmp: temporary masks * * This should be called before the given cs has updated its cpus_allowed * and/or effective_xcpus. 
@@ -2206,7 +2206,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, return -EINVAL; /* - * When exclusive_cpus isn't explicitly set, it is constrainted + * When exclusive_cpus isn't explicitly set, it is constrained * by cpus_allowed and parent's effective_xcpus. Otherwise, * trialcs->effective_xcpus is used as a temporary cpumask * for checking validity of the partition root. From aefa398d93d5db7c555be78a605ff015357f127d Mon Sep 17 00:00:00 2001 From: Joshua Hahn Date: Wed, 2 Oct 2024 11:47:16 -0700 Subject: [PATCH 02/13] cgroup/rstat: Tracking cgroup-level niced CPU time Cgroup-level CPU statistics currently include time spent on user/system processes, but do not include niced CPU time (despite already being tracked). This patch exposes niced CPU time to the userspace, allowing users to get a better understanding of their hardware limits and can facilitate more informed workload distribution. A new field 'ntime' is added to struct cgroup_base_stat as opposed to struct task_cputime to minimize footprint. Signed-off-by: Joshua Hahn Signed-off-by: Tejun Heo --- include/linux/cgroup-defs.h | 1 + kernel/cgroup/rstat.c | 19 ++++++++++++++----- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 47ae4c4d924c..0a80ef9191a6 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -327,6 +327,7 @@ struct cgroup_base_stat { #ifdef CONFIG_SCHED_CORE u64 forceidle_sum; #endif + u64 ntime; }; /* diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index a06b45272411..5877974ece92 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -444,6 +444,7 @@ static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat, #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum += src_bstat->forceidle_sum; #endif + dst_bstat->ntime += src_bstat->ntime; } static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat, @@ -455,6 +456,7 @@ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat, #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum -= src_bstat->forceidle_sum; #endif + dst_bstat->ntime -= src_bstat->ntime; } static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu) @@ -534,8 +536,10 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp, rstatc = cgroup_base_stat_cputime_account_begin(cgrp, &flags); switch (index) { - case CPUTIME_USER: case CPUTIME_NICE: + rstatc->bstat.ntime += delta_exec; + fallthrough; + case CPUTIME_USER: rstatc->bstat.cputime.utime += delta_exec; break; case CPUTIME_SYSTEM: @@ -591,6 +595,7 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat) #ifdef CONFIG_SCHED_CORE bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE]; #endif + bstat->ntime += cpustat[CPUTIME_NICE]; } } @@ -608,13 +613,14 @@ static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat void cgroup_base_stat_cputime_show(struct seq_file *seq) { struct cgroup *cgrp = seq_css(seq)->cgroup; - u64 usage, utime, stime; + u64 usage, utime, stime, ntime; if (cgroup_parent(cgrp)) { cgroup_rstat_flush_hold(cgrp); usage = cgrp->bstat.cputime.sum_exec_runtime; cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime, &utime, &stime); + ntime = cgrp->bstat.ntime; cgroup_rstat_flush_release(cgrp); } else { /* cgrp->bstat of root is not actually used, reuse it */ @@ -622,16 +628,19 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) usage = cgrp->bstat.cputime.sum_exec_runtime; utime = cgrp->bstat.cputime.utime; stime = 
cgrp->bstat.cputime.stime; + ntime = cgrp->bstat.ntime; } do_div(usage, NSEC_PER_USEC); do_div(utime, NSEC_PER_USEC); do_div(stime, NSEC_PER_USEC); + do_div(ntime, NSEC_PER_USEC); seq_printf(seq, "usage_usec %llu\n" - "user_usec %llu\n" - "system_usec %llu\n", - usage, utime, stime); + "user_usec %llu\n" + "system_usec %llu\n" + "nice_usec %llu\n", + usage, utime, stime, ntime); cgroup_force_idle_show(seq, &cgrp->bstat); } From 2e82c0d4562a4b8292af83577b70af888a93d16d Mon Sep 17 00:00:00 2001 From: Joshua Hahn Date: Wed, 2 Oct 2024 11:47:17 -0700 Subject: [PATCH 03/13] cgroup/rstat: Selftests for niced CPU statistics Creates a cgroup with a single nice CPU hog process running. fork() is called to generate the nice process because un-nicing is not possible (see man nice(3)). If fork() was not used to generate the CPU hog, we would run the rest of the cgroup selftest suite as a nice process. Signed-off-by: Joshua Hahn Signed-off-by: Tejun Heo --- tools/testing/selftests/cgroup/test_cpu.c | 75 +++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/tools/testing/selftests/cgroup/test_cpu.c b/tools/testing/selftests/cgroup/test_cpu.c index dad2ed82f3ef..201ce14cb422 100644 --- a/tools/testing/selftests/cgroup/test_cpu.c +++ b/tools/testing/selftests/cgroup/test_cpu.c @@ -8,6 +8,7 @@ #include #include #include +#include #include "../kselftest.h" #include "cgroup_util.h" @@ -229,6 +230,79 @@ static int test_cpucg_stats(const char *root) return ret; } +/* + * Creates a nice process that consumes CPU and checks that the elapsed + * usertime in the cgroup is close to the expected time. + */ +static int test_cpucg_nice(const char *root) +{ + int ret = KSFT_FAIL; + int status; + long user_usec, nice_usec; + long usage_seconds = 2; + long expected_nice_usec = usage_seconds * USEC_PER_SEC; + char *cpucg; + pid_t pid; + + cpucg = cg_name(root, "cpucg_test"); + if (!cpucg) + goto cleanup; + + if (cg_create(cpucg)) + goto cleanup; + + user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec"); + nice_usec = cg_read_key_long(cpucg, "cpu.stat", "nice_usec"); + if (nice_usec == -1) + ret = KSFT_SKIP; + if (user_usec != 0 || nice_usec != 0) + goto cleanup; + + /* + * We fork here to create a new process that can be niced without + * polluting the nice value of other selftests + */ + pid = fork(); + if (pid < 0) { + goto cleanup; + } else if (pid == 0) { + struct cpu_hog_func_param param = { + .nprocs = 1, + .ts = { + .tv_sec = usage_seconds, + .tv_nsec = 0, + }, + .clock_type = CPU_HOG_CLOCK_PROCESS, + }; + char buf[64]; + snprintf(buf, sizeof(buf), "%d", getpid()); + if (cg_write(cpucg, "cgroup.procs", buf)) + goto cleanup; + + /* Try to keep niced CPU usage as constrained to hog_cpu as possible */ + nice(1); + hog_cpus_timed(cpucg, param); + exit(0); + } else { + waitpid(pid, &status, 0); + if (!WIFEXITED(status)) + goto cleanup; + + user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec"); + nice_usec = cg_read_key_long(cpucg, "cpu.stat", "nice_usec"); + if (!values_close(nice_usec, expected_nice_usec, 1)) + goto cleanup; + + ret = KSFT_PASS; + } + +cleanup: + cg_destroy(cpucg); + free(cpucg); + + return ret; +} + static int run_cpucg_weight_test( const char *root, @@ -686,6 +760,7 @@ struct cpucg_test { } tests[] = { T(test_cpucg_subtree_control), T(test_cpucg_stats), + T(test_cpucg_nice), T(test_cpucg_weight_overprovisioned), T(test_cpucg_weight_underprovisioned), T(test_cpucg_nested_weight_overprovisioned), From 11312c86f9d7d1bffe0587185934a7070ce9ec33 Mon Sep 17 00:00:00 2001 From: 
Xiu Jianfeng Date: Fri, 11 Oct 2024 06:11:53 +0000 Subject: [PATCH 04/13] selftests/cgroup: Fix compile error in test_cpu.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When compiling the cgroup selftests with the following command: make -C tools/testing/selftests/cgroup/ the compiler complains as below: test_cpu.c: In function ‘test_cpucg_nice’: test_cpu.c:284:39: error: incompatible type for argument 2 of ‘hog_cpus_timed’ 284 | hog_cpus_timed(cpucg, param); | ^~~~~ | | | struct cpu_hog_func_param test_cpu.c:132:53: note: expected ‘void *’ but argument is of type ‘struct cpu_hog_func_param’ 132 | static int hog_cpus_timed(const char *cgroup, void *arg) | ~~~~~~^~~ Fix it by passing the address of param to hog_cpus_timed(). Fixes: 2e82c0d4562a ("cgroup/rstat: Selftests for niced CPU statistics") Signed-off-by: Xiu Jianfeng Signed-off-by: Tejun Heo --- tools/testing/selftests/cgroup/test_cpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/cgroup/test_cpu.c b/tools/testing/selftests/cgroup/test_cpu.c index 201ce14cb422..a2b50af8e9ee 100644 --- a/tools/testing/selftests/cgroup/test_cpu.c +++ b/tools/testing/selftests/cgroup/test_cpu.c @@ -281,7 +281,7 @@ static int test_cpucg_nice(const char *root) /* Try to keep niced CPU usage as constrained to hog_cpu as possible */ nice(1); - hog_cpus_timed(cpucg, param); + hog_cpus_timed(cpucg, &param); exit(0); } else { waitpid(pid, &status, 0); From feb301c60970bd2a1310a53ce2d6e4375397a51b Mon Sep 17 00:00:00 2001 From: Chen Ridong Date: Fri, 18 Oct 2024 08:15:19 +0000 Subject: [PATCH 05/13] Revert "cgroup: Fix memory leak caused by missing cgroup_bpf_offline" This reverts commit 04f8ef5643bcd8bcde25dfdebef998aea480b2ba. Only cgroup v2 can be attached by BPF programs. Revert this commit, and cgroup_bpf_inherit and cgroup_bpf_offline won't be called in cgroup v1. The memory leak issue will be fixed by the next patch. Fixes: 04f8ef5643bc ("cgroup: Fix memory leak caused by missing cgroup_bpf_offline") Link: https://lore.kernel.org/cgroups/aka2hk5jsel5zomucpwlxsej6iwnfw4qu5jkrmjhyfhesjlfdw@46zxhg5bdnr7/ Signed-off-by: Chen Ridong Signed-off-by: Tejun Heo --- kernel/cgroup/cgroup.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 5886b95c6eae..5c6d32054e07 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -2314,10 +2314,8 @@ static void cgroup_kill_sb(struct super_block *sb) * And don't kill the default root. */ if (list_empty(&root->cgrp.self.children) && root != &cgrp_dfl_root && - !percpu_ref_is_dying(&root->cgrp.self.refcnt)) { - cgroup_bpf_offline(&root->cgrp); + !percpu_ref_is_dying(&root->cgrp.self.refcnt)) percpu_ref_kill(&root->cgrp.self.refcnt); - } cgroup_put(&root->cgrp); kernfs_kill_sb(sb); } From 2190df6c91373fdec6db9fc07e427084f232f57e Mon Sep 17 00:00:00 2001 From: Chen Ridong Date: Fri, 18 Oct 2024 08:15:20 +0000 Subject: [PATCH 06/13] cgroup/bpf: only cgroup v2 can be attached by bpf programs Only cgroup v2 can be attached by BPF programs, so this patch ensures that cgroup_bpf_inherit and cgroup_bpf_offline are only called for cgroup v2. This fixes the memory leak mentioned by commit 04f8ef5643bc ("cgroup: Fix memory leak caused by missing cgroup_bpf_offline"), which has been reverted.
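As background for the v2-only guards below, here is a minimal userspace sketch (an editor's illustration, not part of the patch series) of how one might verify that a cgroup mount is the v2 (unified) hierarchy, the only hierarchy that supports BPF attachment; the mount path is an assumption.

/* Illustrative only: detect whether a cgroup mount point is cgroup v2. */
#include <stdio.h>
#include <sys/vfs.h>
#include <linux/magic.h>

static int is_cgroup2_mount(const char *path)
{
	struct statfs fs;

	if (statfs(path, &fs) < 0)
		return -1;	/* errno set by statfs() */
	/* CGROUP2_SUPER_MAGIC identifies the unified (v2) hierarchy */
	return fs.f_type == CGROUP2_SUPER_MAGIC;
}

int main(void)
{
	/* "/sys/fs/cgroup" is the usual, but assumed, v2 mount point */
	printf("cgroup v2: %d\n", is_cgroup2_mount("/sys/fs/cgroup"));
	return 0;
}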
Fixes: 2b0d3d3e4fcf ("percpu_ref: reduce memory footprint of percpu_ref in fast path") Fixes: 4bfc0bb2c60e ("bpf: decouple the lifetime of cgroup_bpf from cgroup itself") Link: https://lore.kernel.org/cgroups/aka2hk5jsel5zomucpwlxsej6iwnfw4qu5jkrmjhyfhesjlfdw@46zxhg5bdnr7/ Signed-off-by: Chen Ridong Signed-off-by: Tejun Heo --- kernel/cgroup/cgroup.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 5c6d32054e07..0758bf0c9468 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -2140,8 +2140,10 @@ int cgroup_setup_root(struct cgroup_root *root, u16 ss_mask) if (ret) goto exit_stats; - ret = cgroup_bpf_inherit(root_cgrp); - WARN_ON_ONCE(ret); + if (root == &cgrp_dfl_root) { + ret = cgroup_bpf_inherit(root_cgrp); + WARN_ON_ONCE(ret); + } trace_cgroup_setup_root(root); @@ -5708,9 +5710,11 @@ static struct cgroup *cgroup_create(struct cgroup *parent, const char *name, if (ret) goto out_kernfs_remove; - ret = cgroup_bpf_inherit(cgrp); - if (ret) - goto out_psi_free; + if (cgrp->root == &cgrp_dfl_root) { + ret = cgroup_bpf_inherit(cgrp); + if (ret) + goto out_psi_free; + } /* * New cgroup inherits effective freeze counter, and @@ -6024,7 +6028,8 @@ static int cgroup_destroy_locked(struct cgroup *cgrp) cgroup1_check_for_release(parent); - cgroup_bpf_offline(cgrp); + if (cgrp->root == &cgrp_dfl_root) + cgroup_bpf_offline(cgrp); /* put the base reference */ percpu_ref_kill(&cgrp->self.refcnt); From ee1251fc0c4e799a48025318f262739919deb977 Mon Sep 17 00:00:00 2001 From: Chen Ridong Date: Tue, 22 Oct 2024 11:49:45 +0000 Subject: [PATCH 07/13] cgroup/freezer: Reduce redundant traversal for cgroup_freeze MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Whether a cgroup is frozen is determined solely by whether it is set to be frozen and whether its parent is frozen. Currently, when a cgroup is frozen or unfrozen, the entire subtree is iterated to freeze or unfreeze its descendants. However, this is unnecessary for a cgroup whose effective frozen status does not change. This patch aims to skip a subtree when its parent sees no change in its effective freeze state. For example, in a subtree like a-b-c-d-e-f-g, when a is frozen, the entire tree is frozen. If we then freeze b and c again, it is unnecessary to iterate d, e, f and g. The same applies if we unfreeze b or c. Reviewed-by: Michal Koutný Signed-off-by: Chen Ridong Signed-off-by: Tejun Heo --- include/linux/cgroup-defs.h | 2 +- kernel/cgroup/freezer.c | 30 ++++++++++++++---------------- 2 files changed, 15 insertions(+), 17 deletions(-) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 0a80ef9191a6..1b20d2d8ef7c 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -398,7 +398,7 @@ struct cgroup_freezer_state { bool freeze; /* Should the cgroup actually be frozen?
*/ - int e_freeze; + bool e_freeze; /* Fields below are protected by css_set_lock */ diff --git a/kernel/cgroup/freezer.c b/kernel/cgroup/freezer.c index 617861a54793..188d5f2aeb5a 100644 --- a/kernel/cgroup/freezer.c +++ b/kernel/cgroup/freezer.c @@ -260,8 +260,10 @@ void cgroup_freezer_migrate_task(struct task_struct *task, void cgroup_freeze(struct cgroup *cgrp, bool freeze) { struct cgroup_subsys_state *css; + struct cgroup *parent; struct cgroup *dsct; bool applied = false; + bool old_e; lockdep_assert_held(&cgroup_mutex); @@ -282,22 +284,18 @@ void cgroup_freeze(struct cgroup *cgrp, bool freeze) if (cgroup_is_dead(dsct)) continue; - if (freeze) { - dsct->freezer.e_freeze++; - /* - * Already frozen because of ancestor's settings? - */ - if (dsct->freezer.e_freeze > 1) - continue; - } else { - dsct->freezer.e_freeze--; - /* - * Still frozen because of ancestor's settings? - */ - if (dsct->freezer.e_freeze > 0) - continue; - - WARN_ON_ONCE(dsct->freezer.e_freeze < 0); + /* + * e_freeze is affected by parent's e_freeze and dst's freeze. + * If old e_freeze eq new e_freeze, no change, its children + * will not be affected. So do nothing and skip the subtree + */ + old_e = dsct->freezer.e_freeze; + parent = cgroup_parent(dsct); + dsct->freezer.e_freeze = (dsct->freezer.freeze || + parent->freezer.e_freeze); + if (dsct->freezer.e_freeze == old_e) { + css = css_rightmost_descendant(css); + continue; } /* From 16e83007cdc871fc2db80489962f6e4b781f1e3c Mon Sep 17 00:00:00 2001 From: Chen Ridong Date: Tue, 22 Oct 2024 11:49:46 +0000 Subject: [PATCH 08/13] cgroup/freezer: Add cgroup CGRP_FROZEN flag update helper MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a helper to update the cgroup CGRP_FROZEN flag. Both cgroup_propagate_frozen() and cgroup_update_frozen() update the CGRP_FROZEN flag; factoring the update into a helper makes the code more concise. Reviewed-by: Michal Koutný Signed-off-by: Chen Ridong Signed-off-by: Tejun Heo --- kernel/cgroup/freezer.c | 67 ++++++++++++++++++++--------------------- 1 file changed, 32 insertions(+), 35 deletions(-) diff --git a/kernel/cgroup/freezer.c b/kernel/cgroup/freezer.c index 188d5f2aeb5a..bf1690a167dd 100644 --- a/kernel/cgroup/freezer.c +++ b/kernel/cgroup/freezer.c @@ -8,6 +8,28 @@ #include +/* + * Update CGRP_FROZEN of cgroup.flag + * Return true if flags is updated; false if flags has no change + */ +static bool cgroup_update_frozen_flag(struct cgroup *cgrp, bool frozen) +{ + lockdep_assert_held(&css_set_lock); + + /* Already there? */ + if (test_bit(CGRP_FROZEN, &cgrp->flags) == frozen) + return false; + + if (frozen) + set_bit(CGRP_FROZEN, &cgrp->flags); + else + clear_bit(CGRP_FROZEN, &cgrp->flags); + + cgroup_file_notify(&cgrp->events_file); + TRACE_CGROUP_PATH(notify_frozen, cgrp, frozen); + return true; +} + /* * Propagate the cgroup frozen state upwards by the cgroup tree.
*/ @@ -24,24 +46,16 @@ static void cgroup_propagate_frozen(struct cgroup *cgrp, bool frozen) while ((cgrp = cgroup_parent(cgrp))) { if (frozen) { cgrp->freezer.nr_frozen_descendants += desc; - if (!test_bit(CGRP_FROZEN, &cgrp->flags) && - test_bit(CGRP_FREEZE, &cgrp->flags) && - cgrp->freezer.nr_frozen_descendants == - cgrp->nr_descendants) { - set_bit(CGRP_FROZEN, &cgrp->flags); - cgroup_file_notify(&cgrp->events_file); - TRACE_CGROUP_PATH(notify_frozen, cgrp, 1); - desc++; - } + if (!test_bit(CGRP_FREEZE, &cgrp->flags) || + (cgrp->freezer.nr_frozen_descendants != + cgrp->nr_descendants)) + continue; } else { cgrp->freezer.nr_frozen_descendants -= desc; - if (test_bit(CGRP_FROZEN, &cgrp->flags)) { - clear_bit(CGRP_FROZEN, &cgrp->flags); - cgroup_file_notify(&cgrp->events_file); - TRACE_CGROUP_PATH(notify_frozen, cgrp, 0); - desc++; - } } + + if (cgroup_update_frozen_flag(cgrp, frozen)) + desc++; } } @@ -53,8 +67,6 @@ void cgroup_update_frozen(struct cgroup *cgrp) { bool frozen; - lockdep_assert_held(&css_set_lock); - /* * If the cgroup has to be frozen (CGRP_FREEZE bit set), * and all tasks are frozen and/or stopped, let's consider @@ -63,24 +75,9 @@ void cgroup_update_frozen(struct cgroup *cgrp) frozen = test_bit(CGRP_FREEZE, &cgrp->flags) && cgrp->freezer.nr_frozen_tasks == __cgroup_task_count(cgrp); - if (frozen) { - /* Already there? */ - if (test_bit(CGRP_FROZEN, &cgrp->flags)) - return; - - set_bit(CGRP_FROZEN, &cgrp->flags); - } else { - /* Already there? */ - if (!test_bit(CGRP_FROZEN, &cgrp->flags)) - return; - - clear_bit(CGRP_FROZEN, &cgrp->flags); - } - cgroup_file_notify(&cgrp->events_file); - TRACE_CGROUP_PATH(notify_frozen, cgrp, frozen); - - /* Update the state of ancestor cgroups. */ - cgroup_propagate_frozen(cgrp, frozen); + /* If flags is updated, update the state of ancestor cgroups. */ + if (cgroup_update_frozen_flag(cgrp, frozen)) + cgroup_propagate_frozen(cgrp, frozen); } /* From 30026390b3a90cb0d5dd1abb5a105adf86746325 Mon Sep 17 00:00:00 2001 From: Zefan Li Date: Tue, 5 Nov 2024 11:02:52 +0800 Subject: [PATCH 09/13] MAINTAINERS: remove Zefan Li Not active for a long time, so remove myself from MAINTAINERS. Cc: Zefan Li Signed-off-by: Zefan Li Signed-off-by: Muchun Song Signed-off-by: Tejun Heo --- CREDITS | 3 +++ MAINTAINERS | 2 -- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CREDITS b/CREDITS index d439f5a1bc00..007f8aa8a527 100644 --- a/CREDITS +++ b/CREDITS @@ -574,6 +574,9 @@ N: Zach Brown E: zab@zabbo.net D: maestro pci sound +N: Zefan Li +D: Contribution to control group stuff + N: David Brownell D: Kernel engineer, mentor, and friend. 
Maintained USB EHCI and D: gadget layers, SPI subsystem, GPIO subsystem, and more than a few diff --git a/MAINTAINERS b/MAINTAINERS index c27f3190737f..2c097bb9352b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5775,7 +5775,6 @@ F: kernel/context_tracking.c CONTROL GROUP (CGROUP) M: Tejun Heo -M: Zefan Li M: Johannes Weiner M: Michal Koutný L: cgroups@vger.kernel.org @@ -5804,7 +5803,6 @@ F: include/linux/blk-cgroup.h CONTROL GROUP - CPUSET M: Waiman Long -M: Zefan Li L: cgroups@vger.kernel.org S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git From bcd7012afd7bcd45fcd7a0e2f48e57b273702317 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Sat, 9 Nov 2024 21:50:21 -0500 Subject: [PATCH 10/13] cgroup/cpuset: Revert "Allow suppression of sched domain rebuild in update_cpumasks_hier()" Revert commit 3ae0b773211e ("cgroup/cpuset: Allow suppression of sched domain rebuild in update_cpumasks_hier()") to allow for an alternative way to suppress unnecessary rebuild_sched_domains_locked() calls in update_cpumasks_hier() and elsewhere in a following commit. Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/cgroup/cpuset.c | 39 ++++++++++++++------------------------- 1 file changed, 14 insertions(+), 25 deletions(-) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index bb1ea7878126..1ddbdc8ea10f 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1922,12 +1922,6 @@ static void compute_partition_effective_cpumask(struct cpuset *cs, rcu_read_unlock(); } -/* - * update_cpumasks_hier() flags - */ -#define HIER_CHECKALL 0x01 /* Check all cpusets with no skipping */ -#define HIER_NO_SD_REBUILD 0x02 /* Don't rebuild sched domains */ - /* * update_cpumasks_hier - Update effective cpumasks and tasks in the subtree * @cs: the cpuset to consider @@ -1942,7 +1936,7 @@ static void compute_partition_effective_cpumask(struct cpuset *cs, * Called with cpuset_mutex held */ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, - int flags) + bool force) { struct cpuset *cp; struct cgroup_subsys_state *pos_css; @@ -2007,10 +2001,10 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, * Skip the whole subtree if * 1) the cpumask remains the same, * 2) has no partition root state, - * 3) HIER_CHECKALL flag not set, and + * 3) force flag not set, and * 4) for v2 load balance state same as its parent. */ - if (!cp->partition_root_state && !(flags & HIER_CHECKALL) && + if (!cp->partition_root_state && !force && cpumask_equal(tmp->new_cpus, cp->effective_cpus) && (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) { @@ -2112,8 +2106,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, } rcu_read_unlock(); - if (need_rebuild_sched_domains && !(flags & HIER_NO_SD_REBUILD) && - !force_sd_rebuild) + if (need_rebuild_sched_domains && !force_sd_rebuild) rebuild_sched_domains_locked(); } @@ -2141,9 +2134,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, * directly. * * The update_cpumasks_hier() function may sleep. So we have to - * release the RCU read lock before calling it. HIER_NO_SD_REBUILD - * flag is used to suppress rebuild of sched domains as the callers - * will take care of that. + * release the RCU read lock before calling it. 
*/ rcu_read_lock(); cpuset_for_each_child(sibling, pos_css, parent) { @@ -2159,7 +2150,7 @@ static void update_sibling_cpumasks(struct cpuset *parent, struct cpuset *cs, continue; rcu_read_unlock(); - update_cpumasks_hier(sibling, tmp, HIER_NO_SD_REBUILD); + update_cpumasks_hier(sibling, tmp, false); rcu_read_lock(); css_put(&sibling->css); } @@ -2179,7 +2170,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, struct tmpmasks tmp; struct cpuset *parent = parent_cs(cs); bool invalidate = false; - int hier_flags = 0; + bool force = false; int old_prs = cs->partition_root_state; /* top_cpuset.cpus_allowed tracks cpu_online_mask; it's read-only */ @@ -2240,8 +2231,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, * Check all the descendants in update_cpumasks_hier() if * effective_xcpus is to be changed. */ - if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus)) - hier_flags = HIER_CHECKALL; + force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); retval = validate_change(cs, trialcs); @@ -2309,7 +2299,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, spin_unlock_irq(&callback_lock); /* effective_cpus/effective_xcpus will be updated here */ - update_cpumasks_hier(cs, &tmp, hier_flags); + update_cpumasks_hier(cs, &tmp, force); /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */ if (cs->partition_root_state) @@ -2334,7 +2324,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, struct tmpmasks tmp; struct cpuset *parent = parent_cs(cs); bool invalidate = false; - int hier_flags = 0; + bool force = false; int old_prs = cs->partition_root_state; if (!*buf) { @@ -2357,8 +2347,7 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, * Check all the descendants in update_cpumasks_hier() if * effective_xcpus is to be changed. */ - if (!cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus)) - hier_flags = HIER_CHECKALL; + force = !cpumask_equal(cs->effective_xcpus, trialcs->effective_xcpus); retval = validate_change(cs, trialcs); if (retval) @@ -2411,8 +2400,8 @@ static int update_exclusive_cpumask(struct cpuset *cs, struct cpuset *trialcs, * of the subtree when it is a valid partition root or effective_xcpus * is updated. */ - if (is_partition_valid(cs) || hier_flags) - update_cpumasks_hier(cs, &tmp, hier_flags); + if (is_partition_valid(cs) || force) + update_cpumasks_hier(cs, &tmp, force); /* Update CS_SCHED_LOAD_BALANCE and/or sched_domains, if necessary */ if (cs->partition_root_state) @@ -2853,7 +2842,7 @@ static int update_prstate(struct cpuset *cs, int new_prs) update_unbound_workqueue_cpumask(new_xcpus_state); /* Force update if switching back to member */ - update_cpumasks_hier(cs, &tmpmask, !new_prs ? HIER_CHECKALL : 0); + update_cpumasks_hier(cs, &tmpmask, !new_prs); /* Update sched domains and load balance flag */ update_partition_sd_lb(cs, old_prs); From a040c351283e3ac75422621ea205b1d8d687e108 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Sat, 9 Nov 2024 21:50:22 -0500 Subject: [PATCH 11/13] cgroup/cpuset: Enforce at most one rebuild_sched_domains_locked() call per operation Since commit ff0ce721ec21 ("cgroup/cpuset: Eliminate unncessary sched domains rebuilds in hotplug"), there is only one rebuild_sched_domains_locked() call per hotplug operation. However, writing to the various cpuset control files may still casue more than one rebuild_sched_domains_locked() call to happen in some cases. 
Juri had found that two rebuild_sched_domains_locked() calls in update_prstate(), one from update_cpumasks_hier() and another one from update_partition_sd_lb() could cause cpuset partition to be created with null total_bw for DL tasks. IOW, DL tasks may not be scheduled correctly in such a partition. A sample command sequence that can reproduce null total_bw is as follows. # echo Y >/sys/kernel/debug/sched/verbose # echo +cpuset >/sys/fs/cgroup/cgroup.subtree_control # mkdir /sys/fs/cgroup/test # echo 0-7 > /sys/fs/cgroup/test/cpuset.cpus # echo 6-7 > /sys/fs/cgroup/test/cpuset.cpus.exclusive # echo root >/sys/fs/cgroup/test/cpuset.cpus.partition Fix this double rebuild_sched_domains_locked() calls problem by replacing existing calls with cpuset_force_rebuild() except the rebuild_sched_domains_cpuslocked() call at the end of cpuset_handle_hotplug(). Checking of the force_sd_rebuild flag is now done at the end of cpuset_write_resmask() and update_prstate() to determine if rebuild_sched_domains_locked() should be called or not. The cpuset v1 code can still call rebuild_sched_domains_locked() directly as double rebuild_sched_domains_locked() calls is not possible. Reported-by: Juri Lelli Closes: https://lore.kernel.org/lkml/ZyuUcJDPBln1BK1Y@jlelli-thinkpadt14gen4.remote.csb/ Signed-off-by: Waiman Long Tested-by: Juri Lelli Signed-off-by: Tejun Heo --- kernel/cgroup/cpuset.c | 49 ++++++++++++++++++++++++++++-------------- 1 file changed, 33 insertions(+), 16 deletions(-) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 1ddbdc8ea10f..24f2a0c5eba4 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -84,9 +84,19 @@ static bool have_boot_isolcpus; static struct list_head remote_children; /* - * A flag to force sched domain rebuild at the end of an operation while - * inhibiting it in the intermediate stages when set. Currently it is only - * set in hotplug code. + * A flag to force sched domain rebuild at the end of an operation. + * It can be set in + * - update_partition_sd_lb() + * - remote_partition_check() + * - update_cpumasks_hier() + * - cpuset_update_flag() + * - cpuset_hotplug_update_tasks() + * - cpuset_handle_hotplug() + * + * Protected by cpuset_mutex (with cpus_read_lock held) or cpus_write_lock. + * + * Note that update_relax_domain_level() in cpuset-v1.c can still call + * rebuild_sched_domains_locked() directly without using this flag. 
*/ static bool force_sd_rebuild; @@ -990,6 +1000,7 @@ void rebuild_sched_domains_locked(void) lockdep_assert_cpus_held(); lockdep_assert_held(&cpuset_mutex); + force_sd_rebuild = false; /* * If we have raced with CPU hotplug, return early to avoid @@ -1164,8 +1175,8 @@ static void update_partition_sd_lb(struct cpuset *cs, int old_prs) clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); } - if (rebuild_domains && !force_sd_rebuild) - rebuild_sched_domains_locked(); + if (rebuild_domains) + cpuset_force_rebuild(); } /* @@ -1512,8 +1523,8 @@ static void remote_partition_check(struct cpuset *cs, struct cpumask *newmask, remote_partition_disable(child, tmp); disable_cnt++; } - if (disable_cnt && !force_sd_rebuild) - rebuild_sched_domains_locked(); + if (disable_cnt) + cpuset_force_rebuild(); } /* @@ -2106,8 +2117,8 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, } rcu_read_unlock(); - if (need_rebuild_sched_domains && !force_sd_rebuild) - rebuild_sched_domains_locked(); + if (need_rebuild_sched_domains) + cpuset_force_rebuild(); } /** @@ -2726,9 +2737,13 @@ int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, cs->flags = trialcs->flags; spin_unlock_irq(&callback_lock); - if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed && - !force_sd_rebuild) - rebuild_sched_domains_locked(); + if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) { + if (!IS_ENABLED(CONFIG_CPUSETS_V1) || + cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) + cpuset_force_rebuild(); + else + rebuild_sched_domains_locked(); + } if (spread_flag_changed) cpuset1_update_tasks_flags(cs); @@ -2848,6 +2863,8 @@ static int update_prstate(struct cpuset *cs, int new_prs) update_partition_sd_lb(cs, old_prs); notify_partition_change(cs, old_prs); + if (force_sd_rebuild) + rebuild_sched_domains_locked(); free_cpumasks(NULL, &tmpmask); return 0; } @@ -3141,6 +3158,8 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of, } free_cpuset(trialcs); + if (force_sd_rebuild) + rebuild_sched_domains_locked(); out_unlock: mutex_unlock(&cpuset_mutex); cpus_read_unlock(); @@ -3885,11 +3904,9 @@ static void cpuset_handle_hotplug(void) rcu_read_unlock(); } - /* rebuild sched domains if cpus_allowed has changed */ - if (force_sd_rebuild) { - force_sd_rebuild = false; + /* rebuild sched domains if necessary */ + if (force_sd_rebuild) rebuild_sched_domains_cpuslocked(); - } free_cpumasks(NULL, ptmp); } From c4c9cebe2fb9cdc73e55513de7af7a4f50260e88 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Sat, 9 Nov 2024 21:50:23 -0500 Subject: [PATCH 12/13] cgroup/cpuset: Further optimize code if CONFIG_CPUSETS_V1 not set Currently the cpuset code uses group_subsys_on_dfl() to check if we are running with cgroup v2. If CONFIG_CPUSETS_V1 isn't set, there is really no need to do this check and we can optimize out some of the unneeded v1 specific code paths. Introduce a new cpuset_v2() and use it to replace the cgroup_subsys_on_dfl() check to further optimize the code. 
Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/cgroup/cpuset.c | 39 +++++++++++++++++++-------------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 24f2a0c5eba4..142303abb055 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -293,6 +293,12 @@ static inline void dec_attach_in_progress(struct cpuset *cs) mutex_unlock(&cpuset_mutex); } +static inline bool cpuset_v2(void) +{ + return !IS_ENABLED(CONFIG_CPUSETS_V1) || + cgroup_subsys_on_dfl(cpuset_cgrp_subsys); +} + /* * Cgroup v2 behavior is used on the "cpus" and "mems" control files when * on default hierarchy or when the cpuset_v2_mode flag is set by mounting @@ -303,7 +309,7 @@ static inline void dec_attach_in_progress(struct cpuset *cs) */ static inline bool is_in_v2_mode(void) { - return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || + return cpuset_v2() || (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE); } @@ -738,7 +744,7 @@ static int generate_sched_domains(cpumask_var_t **domains, int nslot; /* next empty doms[] struct cpumask slot */ struct cgroup_subsys_state *pos_css; bool root_load_balance = is_sched_load_balance(&top_cpuset); - bool cgrpv2 = cgroup_subsys_on_dfl(cpuset_cgrp_subsys); + bool cgrpv2 = cpuset_v2(); int nslot_update; doms = NULL; @@ -1198,7 +1204,7 @@ static void reset_partition_data(struct cpuset *cs) { struct cpuset *parent = parent_cs(cs); - if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) + if (!cpuset_v2()) return; lockdep_assert_held(&callback_lock); @@ -2017,7 +2023,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, */ if (!cp->partition_root_state && !force && cpumask_equal(tmp->new_cpus, cp->effective_cpus) && - (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || + (!cpuset_v2() || (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) { pos_css = css_rightmost_descendant(pos_css); continue; @@ -2091,8 +2097,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, * from parent if current cpuset isn't a valid partition root * and their load balance states differ. 
*/ - if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && - !is_partition_valid(cp) && + if (cpuset_v2() && !is_partition_valid(cp) && (is_sched_load_balance(parent) != is_sched_load_balance(cp))) { if (is_sched_load_balance(parent)) set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags); @@ -2108,8 +2113,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, */ if (!cpumask_empty(cp->cpus_allowed) && is_sched_load_balance(cp) && - (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || - is_partition_valid(cp))) + (!cpuset_v2() || is_partition_valid(cp))) need_rebuild_sched_domains = true; rcu_read_lock(); @@ -2246,7 +2250,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, retval = validate_change(cs, trialcs); - if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { + if ((retval == -EINVAL) && cpuset_v2()) { struct cgroup_subsys_state *css; struct cpuset *cp; @@ -2738,8 +2742,7 @@ int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs, spin_unlock_irq(&callback_lock); if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) { - if (!IS_ENABLED(CONFIG_CPUSETS_V1) || - cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) + if (cpuset_v2()) cpuset_force_rebuild(); else rebuild_sched_domains_locked(); @@ -2925,8 +2928,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset) * migration permission derives from hierarchy ownership in * cgroup_procs_write_permission()). */ - if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) || - (cpus_updated || mems_updated)) { + if (!cpuset_v2() || (cpus_updated || mems_updated)) { ret = security_task_setscheduler(task); if (ret) goto out_unlock; @@ -3040,8 +3042,7 @@ static void cpuset_attach(struct cgroup_taskset *tset) * in effective cpus and mems. In that case, we can optimize out * by skipping the task iteration and update. */ - if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && - !cpus_updated && !mems_updated) { + if (cpuset_v2() && !cpus_updated && !mems_updated) { cpuset_attach_nodemask_to = cs->effective_mems; goto out; } @@ -3391,7 +3392,7 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css) INIT_LIST_HEAD(&cs->remote_sibling); /* Set CS_MEMORY_MIGRATE for default hierarchy */ - if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) + if (cpuset_v2()) __set_bit(CS_MEMORY_MIGRATE, &cs->flags); return &cs->css; @@ -3418,8 +3419,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) /* * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated */ - if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && - !is_sched_load_balance(parent)) + if (cpuset_v2() && !is_sched_load_balance(parent)) clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); cpuset_inc(); @@ -3489,8 +3489,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css) if (is_partition_valid(cs)) update_prstate(cs, 0); - if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && - is_sched_load_balance(cs)) + if (!cpuset_v2() && is_sched_load_balance(cs)) cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); cpuset_dec(); From fbfbf86685b3270dc27d1c5d6108532334aaf329 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Thu, 14 Nov 2024 13:19:15 -0500 Subject: [PATCH 13/13] cgroup/cpuset: Disable cpuset_cpumask_can_shrink() test if not load balancing With some recent proposed changes [1] in the deadline server code, it has caused a test failure in test_cpuset_prs.sh when a change is being made to an isolated partition. This is due to failing the cpuset_cpumask_can_shrink() check for SCHED_DEADLINE tasks at validate_change(). 
This is actually a false positive as the failed test case involves an isolated partition with load balancing disabled. The deadline check is not meaningful in this case and the users should know what they are doing. Fix this by doing the cpuset_cpumask_can_shrink() check only when load balancing is enabled. Also change its arguments to use effective_cpus for the current cpuset and user_xcpus() as an approximation for the target effective_cpus, as the real effective_cpus hasn't been fully computed yet at this early stage. As the check isn't comprehensive, there may be false positives or negatives. We may have to revise the code to do a more thorough check in the future if this becomes a concern. [1] https://lore.kernel.org/lkml/82be06c1-6d6d-4651-86c9-bcc828cbcb80@redhat.com/T/#t Signed-off-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/cgroup/cpuset.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 142303abb055..f321ed515f3a 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -581,12 +581,24 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) /* * We can't shrink if we won't have enough room for SCHED_DEADLINE - * tasks. + * tasks. This check is not done when scheduling is disabled as the + * users should know what they are doing. + * + * For v1, effective_cpus == cpus_allowed & user_xcpus() returns + * cpus_allowed. + * + * For v2, is_cpu_exclusive() & is_sched_load_balance() are true only + * for non-isolated partition root. At this point, the target + * effective_cpus isn't computed yet. user_xcpus() is the best + * approximation. + * + * TBD: May need to precompute the real effective_cpus here in case + * incorrect scheduling of SCHED_DEADLINE tasks in a partition + * becomes an issue. */ ret = -EBUSY; - if (is_cpu_exclusive(cur) && - !cpuset_cpumask_can_shrink(cur->cpus_allowed, - trial->cpus_allowed) + if (is_cpu_exclusive(cur) && is_sched_load_balance(cur) && + !cpuset_cpumask_can_shrink(cur->effective_cpus, user_xcpus(trial))) goto out; /*