Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git, synced 2024-12-29 17:25:38 +00:00
cgroup/cpuset: Further optimize code if CONFIG_CPUSETS_V1 not set
Currently the cpuset code uses cgroup_subsys_on_dfl() to check if we are running with cgroup v2. If CONFIG_CPUSETS_V1 isn't set, there is really no need to do this check and we can optimize out some of the unneeded v1 specific code paths. Introduce a new cpuset_v2() helper and use it to replace the cgroup_subsys_on_dfl() check to further optimize the code.
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
This commit is contained in:
parent a040c35128
commit c4c9cebe2f
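The optimization relies on IS_ENABLED() evaluating to a compile-time constant: with CONFIG_CPUSETS_V1 unset, cpuset_v2() reduces to true, and the compiler can drop the v1-only branches guarded by it. Below is a minimal userspace sketch of that pattern, not kernel code; the DEMO_* macro and the helper names (running_on_default_hierarchy, demo_v2) are illustrative stand-ins that merely mirror the shape of the cpuset_v2() helper added in the diff.

/*
 * Minimal userspace sketch (illustrative, not from cpuset.c): when the
 * "config" macro is 0 at build time, demo_v2() is a compile-time
 * constant true, so any else-branch guarded by it can be eliminated.
 */
#include <stdbool.h>
#include <stdio.h>

/* Crude stand-in for the kernel's IS_ENABLED(); the real macro is more
 * elaborate but has the same constant-folding effect for this demo. */
#define DEMO_IS_ENABLED(opt)	((opt) + 0)

#ifndef DEMO_CONFIG_V1
#define DEMO_CONFIG_V1 0	/* build with -DDEMO_CONFIG_V1=1 to "enable v1" */
#endif

static bool running_on_default_hierarchy(void)
{
	return true;		/* pretend we are always on cgroup v2 */
}

/* Mirrors the shape of the new cpuset_v2() helper in the diff below. */
static inline bool demo_v2(void)
{
	return !DEMO_IS_ENABLED(DEMO_CONFIG_V1) ||
	       running_on_default_hierarchy();
}

int main(void)
{
	if (demo_v2())
		printf("v2 path (constant-folded when DEMO_CONFIG_V1=0)\n");
	else
		printf("v1 fallback path\n");
	return 0;
}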
@@ -293,6 +293,12 @@ static inline void dec_attach_in_progress(struct cpuset *cs)
 	mutex_unlock(&cpuset_mutex);
 }
 
+static inline bool cpuset_v2(void)
+{
+	return !IS_ENABLED(CONFIG_CPUSETS_V1) ||
+		cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
+}
+
 /*
  * Cgroup v2 behavior is used on the "cpus" and "mems" control files when
  * on default hierarchy or when the cpuset_v2_mode flag is set by mounting
@@ -303,7 +309,7 @@ static inline void dec_attach_in_progress(struct cpuset *cs)
  */
 static inline bool is_in_v2_mode(void)
 {
-	return cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
+	return cpuset_v2() ||
 	      (cpuset_cgrp_subsys.root->flags & CGRP_ROOT_CPUSET_V2_MODE);
 }
 
@@ -738,7 +744,7 @@ static int generate_sched_domains(cpumask_var_t **domains,
 	int nslot;		/* next empty doms[] struct cpumask slot */
 	struct cgroup_subsys_state *pos_css;
 	bool root_load_balance = is_sched_load_balance(&top_cpuset);
-	bool cgrpv2 = cgroup_subsys_on_dfl(cpuset_cgrp_subsys);
+	bool cgrpv2 = cpuset_v2();
 	int nslot_update;
 
 	doms = NULL;
@@ -1198,7 +1204,7 @@ static void reset_partition_data(struct cpuset *cs)
 {
 	struct cpuset *parent = parent_cs(cs);
 
-	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
+	if (!cpuset_v2())
 		return;
 
 	lockdep_assert_held(&callback_lock);
@@ -2017,7 +2023,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 		 */
 		if (!cp->partition_root_state && !force &&
 		    cpumask_equal(tmp->new_cpus, cp->effective_cpus) &&
-		    (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
+		    (!cpuset_v2() ||
 		    (is_sched_load_balance(parent) == is_sched_load_balance(cp)))) {
 			pos_css = css_rightmost_descendant(pos_css);
 			continue;
@@ -2091,8 +2097,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 		 * from parent if current cpuset isn't a valid partition root
 		 * and their load balance states differ.
 		 */
-		if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
-		    !is_partition_valid(cp) &&
+		if (cpuset_v2() && !is_partition_valid(cp) &&
 		    (is_sched_load_balance(parent) != is_sched_load_balance(cp))) {
 			if (is_sched_load_balance(parent))
 				set_bit(CS_SCHED_LOAD_BALANCE, &cp->flags);
@@ -2108,8 +2113,7 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp,
 		 */
 		if (!cpumask_empty(cp->cpus_allowed) &&
 		    is_sched_load_balance(cp) &&
-		   (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
-		    is_partition_valid(cp)))
+		   (!cpuset_v2() || is_partition_valid(cp)))
 			need_rebuild_sched_domains = true;
 
 		rcu_read_lock();
@@ -2246,7 +2250,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
 
 	retval = validate_change(cs, trialcs);
 
-	if ((retval == -EINVAL) && cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
+	if ((retval == -EINVAL) && cpuset_v2()) {
 		struct cgroup_subsys_state *css;
 		struct cpuset *cp;
 
@@ -2738,8 +2742,7 @@ int cpuset_update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
 	spin_unlock_irq(&callback_lock);
 
 	if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) {
-		if (!IS_ENABLED(CONFIG_CPUSETS_V1) ||
-		    cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
+		if (cpuset_v2())
 			cpuset_force_rebuild();
 		else
 			rebuild_sched_domains_locked();
@@ -2925,8 +2928,7 @@ static int cpuset_can_attach(struct cgroup_taskset *tset)
 		 * migration permission derives from hierarchy ownership in
 		 * cgroup_procs_write_permission()).
 		 */
-		if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) ||
-		    (cpus_updated || mems_updated)) {
+		if (!cpuset_v2() || (cpus_updated || mems_updated)) {
 			ret = security_task_setscheduler(task);
 			if (ret)
 				goto out_unlock;
@@ -3040,8 +3042,7 @@ static void cpuset_attach(struct cgroup_taskset *tset)
 	 * in effective cpus and mems. In that case, we can optimize out
 	 * by skipping the task iteration and update.
 	 */
-	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
-	    !cpus_updated && !mems_updated) {
+	if (cpuset_v2() && !cpus_updated && !mems_updated) {
 		cpuset_attach_nodemask_to = cs->effective_mems;
 		goto out;
 	}
@@ -3391,7 +3392,7 @@ cpuset_css_alloc(struct cgroup_subsys_state *parent_css)
 	INIT_LIST_HEAD(&cs->remote_sibling);
 
 	/* Set CS_MEMORY_MIGRATE for default hierarchy */
-	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys))
+	if (cpuset_v2())
 		__set_bit(CS_MEMORY_MIGRATE, &cs->flags);
 
 	return &cs->css;
@@ -3418,8 +3419,7 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
 	/*
 	 * For v2, clear CS_SCHED_LOAD_BALANCE if parent is isolated
	 */
-	if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
-	    !is_sched_load_balance(parent))
+	if (cpuset_v2() && !is_sched_load_balance(parent))
 		clear_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 
 	cpuset_inc();
@@ -3489,8 +3489,7 @@ static void cpuset_css_offline(struct cgroup_subsys_state *css)
 	if (is_partition_valid(cs))
 		update_prstate(cs, 0);
 
-	if (!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
-	    is_sched_load_balance(cs))
+	if (!cpuset_v2() && is_sched_load_balance(cs))
 		cpuset_update_flag(CS_SCHED_LOAD_BALANCE, cs, 0);
 
 	cpuset_dec();