Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
arm64 regression fix for 6.2
- Fix 'perf' regression for non-standard CPU PMU hardware (i.e. Apple M1)

-----BEGIN PGP SIGNATURE-----

iQFEBAABCgAuFiEEPxTL6PPUbjXGY88ct6xw3ITBYzQFAmPvVyAQHHdpbGxAa2Vy
bmVsLm9yZwAKCRC3rHDchMFjNEwzCACuxJI7xVvcjLktrcmdajkNH+j8Owvrpfq+
8Uja4ykbNJr9BIsZFcI9b7Y2vH7k4+noYDozPKvBgKlSYJVyUUsK2QoJNLzPflc2
RJDPjaM8KrBBE5OTgR5Pvbda+QJ2x5GQGmI1IZv//KVnRUoLTOAje9th4Yza+/oV
5y4THZjHlCeHpsfaVWNiVPqoodQw7Su++kXLABgBZrnRuBwg1lHUQp60cLdTx3lE
M4xgvB9MD1+QDFOgtP97AzegT7F251QFnr3JuBj9gtARX8qv2v/REBG/DRsgcPAm
piJ8pXaVuNf1rkznRlhiCtI5hhP+OIyySugDxzisBDUXfZ8AJOsv
=4A3e
-----END PGP SIGNATURE-----

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 regression fix from Will Deacon:
 "Apologies for the _extremely_ late pull request here, but we had a
  'perf' (i.e. CPU PMU) regression on the Apple M1 reported on
  Wednesday [1] which was introduced by bd2756811766 ("perf: Rewrite
  core context handling") during the merge window.

  Mark and I looked into this and noticed an additional problem caused
  by the same patch, where the 'CHAIN' event (used to combine two
  adjacent 32-bit counters into a single 64-bit counter) was not being
  filtered correctly. Mark posted a series on Thursday [2] which
  addresses both of these regressions and I queued it the same day.

  The changes are small, self-contained and have been confirmed to fix
  the original regression.

  Summary:

   - Fix 'perf' regression for non-standard CPU PMU hardware (i.e.
     Apple M1)"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: perf: reject CHAIN events at creation time
  arm_pmu: fix event CPU filtering
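For context, a minimal userspace sketch (hypothetical, not part of this pull) of what "reject CHAIN events at creation time" means in practice. It assumes the architectural CHAIN event number 0x1E (ARMV8_PMUV3_PERFCTR_CHAIN) opened as a raw event; after these fixes, the open should fail with EINVAL rather than yield an arbitrarily rotated, meaningless counter:

	/*
	 * Hypothetical demo: try to open the ARMv8 PMUv3 CHAIN event
	 * directly. 0x1E is the architectural CHAIN event number
	 * (assumed encoding for this sketch).
	 */
	#include <errno.h>
	#include <stdio.h>
	#include <string.h>
	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_RAW;
		attr.size = sizeof(attr);
		attr.config = 0x1E;	/* ARMV8_PMUV3_PERFCTR_CHAIN (assumed) */
		attr.disabled = 1;

		/* Measure the current task on any CPU. */
		int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			printf("perf_event_open: %s (EINVAL expected after the fix)\n",
			       strerror(errno));
		else
			printf("unexpectedly opened CHAIN event, fd=%d\n", fd);
		return 0;
	}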
commit 0c2822b116
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
@@ -1023,12 +1023,6 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event,
 	return 0;
 }
 
-static bool armv8pmu_filter(struct pmu *pmu, int cpu)
-{
-	struct arm_pmu *armpmu = to_arm_pmu(pmu);
-	return !cpumask_test_cpu(smp_processor_id(), &armpmu->supported_cpus);
-}
-
 static void armv8pmu_reset(void *info)
 {
 	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
@@ -1069,6 +1063,14 @@ static int __armv8_pmuv3_map_event(struct perf_event *event,
 				       &armv8_pmuv3_perf_cache_map,
 				       ARMV8_PMU_EVTYPE_EVENT);
 
+	/*
+	 * CHAIN events only work when paired with an adjacent counter, and it
+	 * never makes sense for a user to open one in isolation, as they'll be
+	 * rotated arbitrarily.
+	 */
+	if (hw_event_id == ARMV8_PMUV3_PERFCTR_CHAIN)
+		return -EINVAL;
+
 	if (armv8pmu_event_is_64bit(event))
 		event->hw.flags |= ARMPMU_EVT_64BIT;
 
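The supported way to get 64-bit counting is to let the kernel pair the counters itself. A sketch, assuming the arm64 driver's convention that attr.config1 bit 0 requests a 64-bit counter (the armv8pmu_event_is_64bit() check in the hunk above):

	#include <string.h>
	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/*
	 * Request a 64-bit cycle counter; the kernel allocates the CHAIN
	 * pairing internally (config1 bit 0 is assumed here to be the
	 * arm64 PMU's "64-bit" flag).
	 */
	int open_64bit_cycles(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HARDWARE;
		attr.size = sizeof(attr);
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.config1 = 0x1;	/* 64-bit; CHAIN used under the hood */
		attr.disabled = 1;

		return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	}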
@@ -1258,7 +1260,6 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
 	cpu_pmu->stop			= armv8pmu_stop;
 	cpu_pmu->reset			= armv8pmu_reset;
 	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
-	cpu_pmu->filter			= armv8pmu_filter;
 
 	cpu_pmu->pmu.event_idx		= armv8pmu_user_event_idx;
 
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
@@ -550,13 +550,7 @@ static void armpmu_disable(struct pmu *pmu)
 static bool armpmu_filter(struct pmu *pmu, int cpu)
 {
 	struct arm_pmu *armpmu = to_arm_pmu(pmu);
-	bool ret;
-
-	ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus);
-	if (ret && armpmu->filter)
-		return armpmu->filter(pmu, cpu);
-
-	return ret;
+	return !cpumask_test_cpu(cpu, &armpmu->supported_cpus);
 }
 
 static ssize_t cpus_show(struct device *dev,
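A toy model (hypothetical, plain userspace C, not kernel code) of the polarity bug fixed above: the core treats a true return from the filter callback as "skip this PMU's events on this CPU", so returning the result of cpumask_test_cpu() directly filtered out exactly the CPUs that *do* support the PMU:

	#include <stdbool.h>
	#include <stdio.h>

	/* Pretend CPUs 0-3 are the ones this PMU supports. */
	static bool supported(int cpu)    { return cpu < 4; }

	static bool old_filter(int cpu)   { return supported(cpu); }  /* buggy: true = skip */
	static bool fixed_filter(int cpu) { return !supported(cpu); } /* correct polarity */

	int main(void)
	{
		for (int cpu = 0; cpu < 6; cpu++)
			printf("cpu%d: old=%s fixed=%s\n", cpu,
			       old_filter(cpu) ? "skip" : "count",
			       fixed_filter(cpu) ? "skip" : "count");
		return 0;
	}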
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
@@ -100,7 +100,6 @@ struct arm_pmu {
 	void		(*stop)(struct arm_pmu *);
 	void		(*reset)(void *);
 	int		(*map_event)(struct perf_event *event);
-	bool		(*filter)(struct pmu *pmu, int cpu);
 	int		num_events;
 	bool		secure_access; /* 32-bit ARM only */
 #define ARMV8_PMUV3_MAX_COMMON_EVENTS		0x40