drivers/perf: arm_pmu: implement CPU_PM notifier
When a CPU is suspended (either through suspend-to-RAM or CPUidle), its PMU register contents can be lost, which means that counter register values programmed before power-down entry have to be reprogrammed on power-up to preserve the counter set-up (i.e. on power-up the registers take the cold or warm reset values, which can be architecturally UNKNOWN). To guarantee seamless profiling across a core power-down, this patch adds a CPU PM notifier to the ARM PMUs that, upon CPU PM entry/exit from low-power states, saves/restores the PMU register set-up (using the ARM perf API), so that the power-down/up cycle does not affect perf behaviour (apart from an unavoidable black-out period between the power-down/up CPU PM notifications).

Cc: Will Deacon <will.deacon@arm.com>
Cc: Sudeep Holla <sudeep.holla@arm.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Acked-by: Ashwin Chaugule <ashwin.chaugule@linaro.org>
Acked-by: Kevin Hilman <khilman@baylibre.com>
Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent 94085fe570
commit da4e4f18af
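For context, here is a minimal sketch (not part of the patch) of the generic CPU PM notifier pattern this change builds on: a driver fills in a struct notifier_block, registers it with cpu_pm_register_notifier(), and reacts to CPU_PM_ENTER/CPU_PM_EXIT/CPU_PM_ENTER_FAILED in the callback. The my_pmu_* names are illustrative only; the diff below wires the real notifier into struct arm_pmu.

#include <linux/cpu_pm.h>
#include <linux/notifier.h>

/* Illustrative callback: stop/save counters on low-power entry and
 * reprogram them on exit, since the registers may come back UNKNOWN. */
static int my_pmu_pm_notify(struct notifier_block *nb, unsigned long cmd,
                            void *v)
{
        switch (cmd) {
        case CPU_PM_ENTER:
                /* About to lose power: stop counters and save their state. */
                break;
        case CPU_PM_EXIT:
        case CPU_PM_ENTER_FAILED:
                /* Power restored (or entry aborted): restore and restart. */
                break;
        default:
                return NOTIFY_DONE;
        }
        return NOTIFY_OK;
}

static struct notifier_block my_pmu_pm_nb = {
        .notifier_call = my_pmu_pm_notify,
};

/* Registered once at driver init, e.g.:
 *     cpu_pm_register_notifier(&my_pmu_pm_nb);
 * and removed with cpu_pm_unregister_notifier() on teardown. */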
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -13,6 +13,7 @@
 #include <linux/bitmap.h>
 #include <linux/cpumask.h>
+#include <linux/cpu_pm.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/of_device.h>
@@ -710,6 +711,93 @@ static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
         return NOTIFY_OK;
 }
 
+#ifdef CONFIG_CPU_PM
+static void cpu_pm_pmu_setup(struct arm_pmu *armpmu, unsigned long cmd)
+{
+        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+        struct perf_event *event;
+        int idx;
+
+        for (idx = 0; idx < armpmu->num_events; idx++) {
+                /*
+                 * If the counter is not used skip it, there is no
+                 * need of stopping/restarting it.
+                 */
+                if (!test_bit(idx, hw_events->used_mask))
+                        continue;
+
+                event = hw_events->events[idx];
+
+                switch (cmd) {
+                case CPU_PM_ENTER:
+                        /*
+                         * Stop and update the counter
+                         */
+                        armpmu_stop(event, PERF_EF_UPDATE);
+                        break;
+                case CPU_PM_EXIT:
+                case CPU_PM_ENTER_FAILED:
+                        /* Restore and enable the counter */
+                        armpmu_start(event, PERF_EF_RELOAD);
+                        break;
+                default:
+                        break;
+                }
+        }
+}
+
+static int cpu_pm_pmu_notify(struct notifier_block *b, unsigned long cmd,
+                             void *v)
+{
+        struct arm_pmu *armpmu = container_of(b, struct arm_pmu, cpu_pm_nb);
+        struct pmu_hw_events *hw_events = this_cpu_ptr(armpmu->hw_events);
+        int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events);
+
+        if (!cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus))
+                return NOTIFY_DONE;
+
+        /*
+         * Always reset the PMU registers on power-up even if
+         * there are no events running.
+         */
+        if (cmd == CPU_PM_EXIT && armpmu->reset)
+                armpmu->reset(armpmu);
+
+        if (!enabled)
+                return NOTIFY_OK;
+
+        switch (cmd) {
+        case CPU_PM_ENTER:
+                armpmu->stop(armpmu);
+                cpu_pm_pmu_setup(armpmu, cmd);
+                break;
+        case CPU_PM_EXIT:
+                cpu_pm_pmu_setup(armpmu, cmd);
+        case CPU_PM_ENTER_FAILED:
+                armpmu->start(armpmu);
+                break;
+        default:
+                return NOTIFY_DONE;
+        }
+
+        return NOTIFY_OK;
+}
+
+static int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu)
+{
+        cpu_pmu->cpu_pm_nb.notifier_call = cpu_pm_pmu_notify;
+        return cpu_pm_register_notifier(&cpu_pmu->cpu_pm_nb);
+}
+
+static void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu)
+{
+        cpu_pm_unregister_notifier(&cpu_pmu->cpu_pm_nb);
+}
+#else
+static inline int cpu_pm_pmu_register(struct arm_pmu *cpu_pmu) { return 0; }
+static inline void cpu_pm_pmu_unregister(struct arm_pmu *cpu_pmu) { }
+#endif
+
 static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 {
         int err;
@@ -725,6 +813,10 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
         if (err)
                 goto out_hw_events;
 
+        err = cpu_pm_pmu_register(cpu_pmu);
+        if (err)
+                goto out_unregister;
+
         for_each_possible_cpu(cpu) {
                 struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
                 raw_spin_lock_init(&events->pmu_lock);
@@ -746,6 +838,8 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 
         return 0;
 
+out_unregister:
+        unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
 out_hw_events:
         free_percpu(cpu_hw_events);
         return err;
@@ -753,6 +847,7 @@ out_hw_events:
 
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
+        cpu_pm_pmu_unregister(cpu_pmu);
         unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
         free_percpu(cpu_pmu->hw_events);
 }
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -108,6 +108,7 @@ struct arm_pmu {
         struct platform_device *plat_device;
         struct pmu_hw_events __percpu *hw_events;
         struct notifier_block hotplug_nb;
+        struct notifier_block cpu_pm_nb;
 };
 
 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))