perf/arm_pmuv3: Add PMUv3.9 per counter EL0 access control
Armv8.9/9.4 PMUv3.9 adds per counter EL0 access controls. Per counter
access is enabled with the UEN bit in PMUSERENR_EL1 register. Individual
counters are enabled/disabled in the PMUACR_EL1 register. When UEN is
set, the CR/ER bits control EL0 write access and must be set to disable
write access.

With the access controls, the clearing of unused counters can be
skipped.

KVM also configures PMUSERENR_EL1 in order to trap to EL2. UEN does not
need to be set for it since only PMUv3.5 is exposed to guests.

Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
Link: https://lore.kernel.org/r/20241002184326.1105499-1-robh@kernel.org
Signed-off-by: Will Deacon <will@kernel.org>
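For illustration, here is a minimal, compilable sketch (not part of the commit) of the selection logic the new PMUv3.9 path in armv8pmu_enable_user_access() implements: collect the counters whose events requested user access into a mask for PMUACR_EL1, then enable fine-grained access with UEN. The write_pmuacr()/update_pmuserenr() functions below are stand-ins for the real sysreg accessors, and MAX_HWEVENTS/user_read[] are simplified stand-ins for the driver's counter bookkeeping:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <inttypes.h>

#define ARMV8_PMU_USERENR_CR	(1 << 2)	/* cycle counter read at EL0 */
#define ARMV8_PMU_USERENR_ER	(1 << 3)	/* event counter read at EL0 */
#define ARMV8_PMU_USERENR_UEN	(1 << 4)	/* per counter access at EL0 */
#define MAX_HWEVENTS		33		/* 31 events + cycle + instr counters */

/* Stand-ins for the real sysreg accessors; they only log the writes. */
static void write_pmuacr(uint64_t val)
{
	printf("PMUACR_EL1    <- 0x%016" PRIx64 "\n", val);
}

static void update_pmuserenr(uint32_t val)
{
	printf("PMUSERENR_EL1 <- 0x%" PRIx32 "\n", val);
}

/*
 * Model of the PMUv3.9 path: only counters whose events asked for user
 * access get their bit set in PMUACR_EL1. Unused counters no longer need
 * to be zeroed, since EL0 cannot read them at all.
 */
static void enable_user_access(const bool user_read[MAX_HWEVENTS])
{
	uint64_t mask = 0;

	for (int i = 0; i < MAX_HWEVENTS; i++)
		if (user_read[i])
			mask |= UINT64_C(1) << i;

	write_pmuacr(mask);
	update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR |
			 ARMV8_PMU_USERENR_UEN);
}

int main(void)
{
	bool user_read[MAX_HWEVENTS] = { false };

	user_read[0]  = true;	/* event counter 0: user read requested */
	user_read[31] = true;	/* cycle counter (bit 31 = C in PMUACR_EL1) */
	enable_user_access(user_read);
	return 0;
}

Note that counter index 31 maps to the C (cycle counter) field and index 32 to F0 in PMUACR_EL1, matching ARMV8_PMU_CYCLE_IDX and ARMV8_PMU_INSTR_IDX in the driver, which is why the real code can set BIT(i) directly.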
This commit is contained in:
parent 759b5fc6cc
commit 0bbff9ed81
arch/arm/include/asm/arm_pmuv3.h

@@ -231,6 +231,7 @@ static inline void kvm_vcpu_pmu_resync_el0(void) {}
 #define ARMV8_PMU_DFR_VER_V3P1	0x4
 #define ARMV8_PMU_DFR_VER_V3P4	0x5
 #define ARMV8_PMU_DFR_VER_V3P5	0x6
+#define ARMV8_PMU_DFR_VER_V3P9	0x9
 #define ARMV8_PMU_DFR_VER_IMP_DEF	0xF
 
 static inline bool pmuv3_implemented(int pmuver)
@@ -249,6 +250,11 @@ static inline bool is_pmuv3p5(int pmuver)
 	return pmuver >= ARMV8_PMU_DFR_VER_V3P5;
 }
 
+static inline bool is_pmuv3p9(int pmuver)
+{
+	return pmuver >= ARMV8_PMU_DFR_VER_V3P9;
+}
+
 static inline u64 read_pmceid0(void)
 {
 	u64 val = read_sysreg(PMCEID0);
arch/arm64/include/asm/arm_pmuv3.h

@@ -152,6 +152,11 @@ static inline void write_pmuserenr(u32 val)
 	write_sysreg(val, pmuserenr_el0);
 }
 
+static inline void write_pmuacr(u64 val)
+{
+	write_sysreg_s(val, SYS_PMUACR_EL1);
+}
+
 static inline u64 read_pmceid0(void)
 {
 	return read_sysreg(pmceid0_el0);
@@ -178,4 +183,9 @@ static inline bool is_pmuv3p5(int pmuver)
 	return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;
 }
 
+static inline bool is_pmuv3p9(int pmuver)
+{
+	return pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P9;
+}
+
 #endif
arch/arm64/tools/sysreg

@@ -1238,6 +1238,7 @@ UnsignedEnum	11:8	PMUVer
 	0b0110	V3P5
 	0b0111	V3P7
 	0b1000	V3P8
+	0b1001	V3P9
 	0b1111	IMP_DEF
 EndEnum
 UnsignedEnum	7:4	TraceVer
@@ -2178,6 +2179,13 @@ Field	4	P
 Field	3:0	ALIGN
 EndSysreg
 
+Sysreg	PMUACR_EL1	3	0	9	14	4
+Res0	63:33
+Field	32	F0
+Field	31	C
+Field	30:0	P
+EndSysreg
+
 Sysreg	PMSELR_EL0	3	3	9	12	5
 Res0	63:5
 Field	4:0	SEL
drivers/perf/arm_pmuv3.c

@@ -770,18 +770,27 @@ static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
 	int i;
 	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
 
-	/* Clear any unused counters to avoid leaking their contents */
-	for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask,
-			    ARMPMU_MAX_HWEVENTS) {
-		if (i == ARMV8_PMU_CYCLE_IDX)
-			write_pmccntr(0);
-		else if (i == ARMV8_PMU_INSTR_IDX)
-			write_pmicntr(0);
-		else
-			armv8pmu_write_evcntr(i, 0);
+	if (is_pmuv3p9(cpu_pmu->pmuver)) {
+		u64 mask = 0;
+		for_each_set_bit(i, cpuc->used_mask, ARMPMU_MAX_HWEVENTS) {
+			if (armv8pmu_event_has_user_read(cpuc->events[i]))
+				mask |= BIT(i);
+		}
+		write_pmuacr(mask);
+	} else {
+		/* Clear any unused counters to avoid leaking their contents */
+		for_each_andnot_bit(i, cpu_pmu->cntr_mask, cpuc->used_mask,
+				    ARMPMU_MAX_HWEVENTS) {
+			if (i == ARMV8_PMU_CYCLE_IDX)
+				write_pmccntr(0);
+			else if (i == ARMV8_PMU_INSTR_IDX)
+				write_pmicntr(0);
+			else
+				armv8pmu_write_evcntr(i, 0);
+		}
 	}
 
-	update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR);
+	update_pmuserenr(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_UEN);
 }
 
 static void armv8pmu_enable_event(struct perf_event *event)
include/linux/perf/arm_pmuv3.h

@@ -257,6 +257,7 @@
 #define ARMV8_PMU_USERENR_SW	(1 << 1) /* PMSWINC can be written at EL0 */
 #define ARMV8_PMU_USERENR_CR	(1 << 2) /* Cycle counter can be read at EL0 */
 #define ARMV8_PMU_USERENR_ER	(1 << 3) /* Event counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_UEN	(1 << 4) /* Fine grained per counter access at EL0 */
 /* Mask for writable bits */
 #define ARMV8_PMU_USERENR_MASK	(ARMV8_PMU_USERENR_EN | ARMV8_PMU_USERENR_SW | \
 				 ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_ER)
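For completeness, a hypothetical EL0-side consumer (not part of this commit): with UEN set in PMUSERENR_EL1 and a counter's bit set in PMUACR_EL1, userspace can read that counter directly with an mrs instruction. The helper below assumes event counter 0 was granted access; reading a counter that was not granted access traps, and the process typically receives SIGILL.

#include <stdint.h>

/* Direct EL0 read of event counter 0 (AArch64 only). */
static inline uint64_t read_pmevcntr0(void)
{
	uint64_t val;

	__asm__ volatile("mrs %0, pmevcntr0_el0" : "=r" (val));
	return val;
}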