diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cd2ac1ba53d2..400337f3b572 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1028,26 +1028,29 @@ static void hybrid_update_cpu_capacity_scaling(void)
 	}
 }
 
-static void __hybrid_init_cpu_capacity_scaling(void)
+static void __hybrid_refresh_cpu_capacity_scaling(void)
 {
 	hybrid_max_perf_cpu = NULL;
 	hybrid_update_cpu_capacity_scaling();
 }
 
+static void hybrid_refresh_cpu_capacity_scaling(void)
+{
+	guard(mutex)(&hybrid_capacity_lock);
+
+	__hybrid_refresh_cpu_capacity_scaling();
+}
+
 static void hybrid_init_cpu_capacity_scaling(bool refresh)
 {
-	bool disable_itmt = false;
-
-	mutex_lock(&hybrid_capacity_lock);
-
 	/*
 	 * If hybrid_max_perf_cpu is set at this point, the hybrid CPU capacity
 	 * scaling has been enabled already and the driver is just changing the
 	 * operation mode.
 	 */
 	if (refresh) {
-		__hybrid_init_cpu_capacity_scaling();
-		goto unlock;
+		hybrid_refresh_cpu_capacity_scaling();
+		return;
 	}
 
 	/*
@@ -1056,19 +1059,13 @@ static void hybrid_init_cpu_capacity_scaling(bool refresh)
 	 * do not do that when SMT is in use.
 	 */
 	if (hwp_is_hybrid && !sched_smt_active() && arch_enable_hybrid_capacity_scale()) {
-		__hybrid_init_cpu_capacity_scaling();
-		disable_itmt = true;
-	}
-
-unlock:
-	mutex_unlock(&hybrid_capacity_lock);
-
-	/*
-	 * Disabling ITMT causes sched domains to be rebuilt to disable asym
-	 * packing and enable asym capacity.
-	 */
-	if (disable_itmt)
+		hybrid_refresh_cpu_capacity_scaling();
+		/*
+		 * Disabling ITMT causes sched domains to be rebuilt to disable asym
+		 * packing and enable asym capacity.
+		 */
 		sched_clear_itmt_support();
+	}
 }
 
 static bool hybrid_clear_max_perf_cpu(void)
@@ -1404,7 +1401,7 @@ static void intel_pstate_update_limits_for_all(void)
 	mutex_lock(&hybrid_capacity_lock);
 
 	if (hybrid_max_perf_cpu)
-		__hybrid_init_cpu_capacity_scaling();
+		__hybrid_refresh_cpu_capacity_scaling();
 
 	mutex_unlock(&hybrid_capacity_lock);
 }
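
Not part of the patch: below is a minimal userspace sketch of the scope-based locking idea behind guard(mutex) from <linux/cleanup.h>, which the first hunk switches to. The kernel macro is built on the compiler's cleanup attribute, so the mutex is released automatically on every exit from the scope; that is what lets the patch drop the goto unlock path and the disable_itmt bookkeeping. The names here (GUARD_MUTEX, unlock_cleanup, demo_lock, scoped_guard_example) are invented for illustration and are not kernel APIs.

/*
 * Illustrative sketch only, not kernel code: mimic guard(mutex) with a
 * pthread mutex and GCC/Clang's cleanup attribute.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Cleanup handler: runs automatically when the guarded variable leaves scope. */
static void unlock_cleanup(pthread_mutex_t **lockp)
{
	pthread_mutex_unlock(*lockp);
	printf("lock released on scope exit\n");
}

/* Rough analogue of guard(mutex)(&lock): lock now, unlock when the scope ends. */
#define GUARD_MUTEX(lock) \
	pthread_mutex_t *guard_var_ __attribute__((cleanup(unlock_cleanup))) = \
		(pthread_mutex_lock(lock), (lock))

static void scoped_guard_example(bool refresh)
{
	GUARD_MUTEX(&demo_lock);

	if (refresh) {
		printf("refresh path: early return, unlock still runs\n");
		return;	/* no goto/unlock bookkeeping needed */
	}

	printf("normal path\n");
}

int main(void)
{
	scoped_guard_example(true);
	scoped_guard_example(false);
	return 0;
}

With this pattern, both the early return and the normal fall-through release demo_lock, mirroring how hybrid_refresh_cpu_capacity_scaling() in the patch can simply return while holding the guard.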