Merge branches 'pm-sleep', 'pm-em' and 'pm-cpuidle' into linux-next

* pm-sleep:
  PM: wakeup: implement devm_device_init_wakeup() helper
  PM: sleep: sysfs: don't include 'pm_wakeup.h' directly
  PM: sleep: autosleep: don't include 'pm_wakeup.h' directly
  PM: sleep: Update stale comment in device_resume()

* pm-em:
  PM: EM: Move sched domains rebuild function from schedutil to EM

* pm-cpuidle:
  intel_idle: add Clearwater Forest SoC support
commit 2a37653f13
Rafael J. Wysocki, 2025-01-13 18:11:46 +01:00
9 changed files with 45 additions and 31 deletions

@@ -914,7 +914,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
 		goto Complete;
 
 	if (dev->power.direct_complete) {
-		/* Match the pm_runtime_disable() in __device_suspend(). */
+		/* Match the pm_runtime_disable() in device_suspend(). */
 		pm_runtime_enable(dev);
 		goto Complete;
 	}

@@ -6,7 +6,6 @@
 #include <linux/export.h>
 #include <linux/pm_qos.h>
 #include <linux/pm_runtime.h>
-#include <linux/pm_wakeup.h>
 #include <linux/atomic.h>
 #include <linux/jiffies.h>
 #include "power.h"

@@ -1538,7 +1538,7 @@ static int cpufreq_online(unsigned int cpu)
 		/*
 		 * Register with the energy model before
-		 * sugov_eas_rebuild_sd() is called, which will result
+		 * em_rebuild_sched_domains() is called, which will result
 		 * in rebuilding of the sched domains, which should only be done
 		 * once the energy model is properly initialized for the policy
 		 * first.
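
A hedged sketch of how a driver ends up registered with the energy model at
this point: the foo_* names are made up, and the assumption that the driver's
->register_em() callback (invoked from cpufreq_online() around the comment
above) is the hook involved here is mine, not something stated by this series.
The relevant headers would be <linux/cpufreq.h>, <linux/cpu.h> and
<linux/pm_opp.h>.

static void foo_cpufreq_register_em(struct cpufreq_policy *policy)
{
	/* Build the EM from OPP data for the CPUs covered by this policy. */
	dev_pm_opp_of_register_em(get_cpu_device(policy->cpu),
				  policy->related_cpus);
}

static struct cpufreq_driver foo_cpufreq_driver = {
	/* ... usual ->init()/->verify()/->target_index() callbacks ... */
	.register_em	= foo_cpufreq_register_em,
};

With the model registered first, the em_rebuild_sched_domains() call triggered
later by the governor sees fully initialized per-policy data, which is the
ordering the comment above insists on.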

@@ -1651,6 +1651,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
 	X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &idle_cpu_snr),
 	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &idle_cpu_grr),
 	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &idle_cpu_srf),
+	X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &idle_cpu_srf),
 	{}
 };

@@ -179,6 +179,7 @@ int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
 int em_dev_update_chip_binning(struct device *dev);
 int em_update_performance_limits(struct em_perf_domain *pd,
 				 unsigned long freq_min_khz, unsigned long freq_max_khz);
+void em_rebuild_sched_domains(void);
 
 /**
  * em_pd_get_efficient_state() - Get an efficient performance state from the EM
@@ -404,6 +405,7 @@ int em_update_performance_limits(struct em_perf_domain *pd,
 {
 	return -EINVAL;
 }
+static inline void em_rebuild_sched_domains(void) {}
 #endif
 #endif
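
Because the !CONFIG_ENERGY_MODEL branch provides a no-op stub, callers need no
#ifdef of their own around the new helper. A minimal sketch, assuming a made-up
foo_governor_change() caller (only em_rebuild_sched_domains() is from this
series):

#include <linux/energy_model.h>

static void foo_governor_change(void)
{
	/* Real rebuild with CONFIG_ENERGY_MODEL=y, compiled-out no-op otherwise. */
	em_rebuild_sched_domains();
}

This stub is what lets the schedutil hunk further down drop its local
#ifdef CONFIG_ENERGY_MODEL wrapper around the rebuild.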

@@ -240,4 +240,21 @@ static inline int device_init_wakeup(struct device *dev, bool enable)
 	return 0;
 }
 
+static void device_disable_wakeup(void *dev)
+{
+	device_init_wakeup(dev, false);
+}
+
+/**
+ * devm_device_init_wakeup - Resource managed device wakeup initialization.
+ * @dev: Device to handle.
+ *
+ * This function is the devm managed version of device_init_wakeup(dev, true).
+ */
+static inline int devm_device_init_wakeup(struct device *dev)
+{
+	device_init_wakeup(dev, true);
+	return devm_add_action_or_reset(dev, device_disable_wakeup, dev);
+}
+
 #endif /* _LINUX_PM_WAKEUP_H */
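
A hedged usage sketch for the new helper: the foo_probe() driver below is
hypothetical, only devm_device_init_wakeup() comes from this commit.

#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Wakeup is disabled automatically when the device is unbound. */
	ret = devm_device_init_wakeup(&pdev->dev);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "failed to init wakeup\n");

	return 0;
}

The devm action registered by the helper calls device_init_wakeup(dev, false)
on driver detach, so neither probe error paths nor the remove callback need
explicit wakeup cleanup.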

@@ -9,7 +9,6 @@
 #include <linux/device.h>
 #include <linux/mutex.h>
-#include <linux/pm_wakeup.h>
 
 #include "power.h"

@@ -908,3 +908,20 @@ int em_update_performance_limits(struct em_perf_domain *pd,
 	return 0;
 }
 EXPORT_SYMBOL_GPL(em_update_performance_limits);
+
+static void rebuild_sd_workfn(struct work_struct *work)
+{
+	rebuild_sched_domains_energy();
+}
+
+void em_rebuild_sched_domains(void)
+{
+	static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
+
+	/*
+	 * When called from the cpufreq_register_driver() path, the
+	 * cpu_hotplug_lock is already held, so use a work item to
+	 * avoid nested locking in rebuild_sched_domains().
+	 */
+	schedule_work(&rebuild_sd_work);
+}

@@ -604,31 +604,6 @@ static const struct kobj_type sugov_tunables_ktype = {
 
 /********************** cpufreq governor interface *********************/
 
-#ifdef CONFIG_ENERGY_MODEL
-static void rebuild_sd_workfn(struct work_struct *work)
-{
-	rebuild_sched_domains_energy();
-}
-
-static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);
-
-/*
- * EAS shouldn't be attempted without sugov, so rebuild the sched_domains
- * on governor changes to make sure the scheduler knows about it.
- */
-static void sugov_eas_rebuild_sd(void)
-{
-	/*
-	 * When called from the cpufreq_register_driver() path, the
-	 * cpu_hotplug_lock is already held, so use a work item to
-	 * avoid nested locking in rebuild_sched_domains().
-	 */
-	schedule_work(&rebuild_sd_work);
-}
-#else
-static inline void sugov_eas_rebuild_sd(void) { };
-#endif
-
 struct cpufreq_governor schedutil_gov;
 
 static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
@@ -784,7 +759,11 @@ static int sugov_init(struct cpufreq_policy *policy)
 		goto fail;
 
 out:
-	sugov_eas_rebuild_sd();
+	/*
+	 * Schedutil is the preferred governor for EAS, so rebuild sched domains
+	 * on governor changes to make sure the scheduler knows about them.
+	 */
+	em_rebuild_sched_domains();
 	mutex_unlock(&global_tunables_lock);
 
 	return 0;
@@ -826,7 +805,7 @@ static void sugov_exit(struct cpufreq_policy *policy)
 	sugov_policy_free(sg_policy);
 	cpufreq_disable_fast_switch(policy);
 
-	sugov_eas_rebuild_sd();
+	em_rebuild_sched_domains();
 }
 
 static int sugov_start(struct cpufreq_policy *policy)