mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2025-01-17 05:45:20 +00:00
a8625217a0
SBI v2.0 introduced the PMU snapshot feature, which adds the following capabilities: 1. Read counter values directly from the shared memory instead of a CSR read. 2. Start multiple counters with initial values using one SBI call. These functionalities optimize the number of traps to the higher privilege mode. If the kernel is in VS mode while the hypervisor deploys a trap & emulate method, this would minimize all the hpmcounter CSR read traps. If the kernel is running in S-mode, the benefit is reduced to CSR latency vs DRAM/cache latency, as there is no trap involved while accessing the hpmcounter CSRs. In both modes, it does save on the number of ecalls when starting multiple counters together with initial values. This is a likely scenario if multiple counters overflow at the same time. Acked-by: Palmer Dabbelt <palmer@rivosinc.com> Reviewed-by: Anup Patel <anup@brainfault.org> Reviewed-by: Conor Dooley <conor.dooley@microchip.com> Reviewed-by: Andrew Jones <ajones@ventanamicro.com> Reviewed-by: Samuel Holland <samuel.holland@sifive.com> Signed-off-by: Atish Patra <atishp@rivosinc.com> Link: https://lore.kernel.org/r/20240420151741.962500-10-atishp@rivosinc.com Signed-off-by: Anup Patel <anup@brainfault.org>
97 lines
3.0 KiB
C
97 lines
3.0 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
/*
|
|
* Copyright (C) 2018 SiFive
|
|
* Copyright (C) 2018 Andes Technology Corporation
|
|
* Copyright (C) 2021 Western Digital Corporation or its affiliates.
|
|
*
|
|
*/
|
|
|
|
#ifndef _RISCV_PMU_H
|
|
#define _RISCV_PMU_H
|
|
|
|
#include <linux/perf_event.h>
|
|
#include <linux/ptrace.h>
|
|
#include <linux/interrupt.h>
|
|
|
|
#ifdef CONFIG_RISCV_PMU
|
|
|
|
/*
 * The RISCV_MAX_COUNTERS parameter should be specified.
 */

/* Upper bound on counter slots (hardware + firmware) tracked per CPU */
#define RISCV_MAX_COUNTERS	64
/* Error code for unsupported PMU operations */
#define RISCV_OP_UNSUPP		(-EOPNOTSUPP)
/* Platform-device names the SBI and legacy PMU drivers bind against */
#define RISCV_PMU_SBI_PDEV_NAME	"riscv-pmu-sbi"
#define RISCV_PMU_LEGACY_PDEV_NAME	"riscv-pmu-legacy"

/* ctr_stop() flag: also reset the counter value when stopping it */
#define RISCV_PMU_STOP_FLAG_RESET	1

/* attr.config1 bit: event requested on behalf of a guest (e.g. KVM) */
#define RISCV_PMU_CONFIG1_GUEST_EVENTS	0x1
|
|
/* Per-CPU bookkeeping for active perf events and the SBI snapshot memory */
struct cpu_hw_events {
	/* number of currently enabled events */
	int n_events;
	/* Counter overflow interrupt */
	int irq;
	/* currently enabled events, indexed by counter idx */
	struct perf_event *events[RISCV_MAX_COUNTERS];
	/* currently enabled hardware counters */
	DECLARE_BITMAP(used_hw_ctrs, RISCV_MAX_COUNTERS);
	/* currently enabled firmware counters */
	DECLARE_BITMAP(used_fw_ctrs, RISCV_MAX_COUNTERS);
	/* The virtual address of the shared memory where counter snapshot will be taken */
	void *snapshot_addr;
	/* The physical address of the shared memory where counter snapshot will be taken */
	phys_addr_t snapshot_addr_phys;
	/* Boolean flag to indicate setup is already done */
	bool snapshot_set_done;
	/* A shadow copy of the counter values to avoid clobbering during multiple SBI calls */
	u64 snapshot_cval_shcopy[RISCV_MAX_COUNTERS];
};
|
|
|
|
/*
 * A RISC-V PMU instance: embeds the core perf struct pmu plus the
 * driver-provided counter/event callbacks (filled in by the SBI or
 * legacy PMU driver).
 */
struct riscv_pmu {
	struct pmu pmu;
	char *name;

	/* Counter overflow interrupt handler */
	irqreturn_t (*handle_irq)(int irq_num, void *dev);

	/* Bitmask of counters provided by this PMU */
	unsigned long cmask;
	/* Read the current value of the event's counter */
	u64 (*ctr_read)(struct perf_event *event);
	/* Allocate a counter idx for the event (negative errno on failure, per kernel convention) */
	int (*ctr_get_idx)(struct perf_event *event);
	/* Width in bits of counter @idx */
	int (*ctr_get_width)(int idx);
	/* Release the counter idx held by the event */
	void (*ctr_clear_idx)(struct perf_event *event);
	/* Start the event's counter with the given initial value */
	void (*ctr_start)(struct perf_event *event, u64 init_val);
	/* Stop the event's counter; @flag may carry RISCV_PMU_STOP_FLAG_RESET */
	void (*ctr_stop)(struct perf_event *event, unsigned long flag);
	/* Translate the generic perf event into a hardware event config */
	int (*event_map)(struct perf_event *event, u64 *config);
	void (*event_init)(struct perf_event *event);
	/* Hooks invoked when the event is mapped/unmapped for @mm (presumably for user-space counter access; confirm in driver) */
	void (*event_mapped)(struct perf_event *event, struct mm_struct *mm);
	void (*event_unmapped)(struct perf_event *event, struct mm_struct *mm);
	/* CSR index backing the event's counter */
	uint8_t (*csr_index)(struct perf_event *event);

	/* Per-CPU event state (see struct cpu_hw_events) */
	struct cpu_hw_events __percpu *hw_events;
	struct hlist_node node;
	/* PM notifier registered by the driver */
	struct notifier_block riscv_pm_nb;
};
|
|
|
|
/* Convert an embedded struct pmu pointer back to its riscv_pmu container */
#define to_riscv_pmu(p) (container_of(p, struct riscv_pmu, pmu))

/* Core helpers shared by the RISC-V PMU drivers */
void riscv_pmu_start(struct perf_event *event, int flags);
void riscv_pmu_stop(struct perf_event *event, int flags);
/* Raw read of a counter CSR, selected by CSR number */
unsigned long riscv_pmu_ctr_read_csr(unsigned long csr);
/* Program the event's sample period */
int riscv_pmu_event_set_period(struct perf_event *event);
/* Mask matching the bit width of the event's counter */
uint64_t riscv_pmu_ctr_get_width_mask(struct perf_event *event);
/* Fold the current hardware count into the perf event's count */
u64 riscv_pmu_event_update(struct perf_event *event);

#ifdef CONFIG_RISCV_PMU_LEGACY
void riscv_pmu_legacy_skip_init(void);
#else
/* No-op stub when the legacy driver is not built in */
static inline void riscv_pmu_legacy_skip_init(void) {};
#endif
struct riscv_pmu *riscv_pmu_alloc(void);
#ifdef CONFIG_RISCV_PMU_SBI
/* Query hardware counter width and count (via SBI — confirm in driver) */
int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr);
#endif
|
|
|
|
#endif /* CONFIG_RISCV_PMU */
|
|
|
|
#endif /* _RISCV_PMU_H */
|