mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-12-29 17:25:38 +00:00
RISC-V Fixes for 6.10-rc7
* A fix for the CMODX example in therecently added icache flushing prctl(). * A fix to the perf driver to avoid corrupting event data on counter overflows when external overflow handlers are in use. * A fix to clear all hardware performance monitor events on boot, to avoid dangling events firmware or previously booted kernels from triggering spuriously. * A fix to the perf event probing logic to avoid erroneously reporting the presence of unimplemented counters. This also prevents some implemented counters from being reported. * A build fix for the vector sigreturn selftest on clang. * A fix to ftrace, which now requires the previously optional index argument to ftrace_graph_ret_addr(). * A fix to avoid deadlocking if kexec crash handling triggers in an interrupt context. -----BEGIN PGP SIGNATURE----- iQJHBAABCAAxFiEEKzw3R0RoQ7JKlDp6LhMZ81+7GIkFAmaIJBATHHBhbG1lckBk YWJiZWx0LmNvbQAKCRAuExnzX7sYiYk6D/981DUAWJ5JPsqve7PihWnhFXh7T/fm KZL7cNQN7/9QmqzJMD756oQCHZT2TeDTxwji4WUQo27uoS1SamsAxRWCPdW8GqDt GwBJeviyWDwjNMgrejWwgH3d9so+WZ4kNKfiUrY+j1vgQ8TkE4h5wMzUtOBTSgDI 5EhHT5B5yjiRcadPshXivZAyimc6mxKJKph5v8W3BGgtLQRHs5tYop4ZkP5Utmv3 yBie7orfMRx5fNxE6fgn0c/3r49i+KGTSCzkK+0689qPlQNt7MTj4kqDVp7xu2ll jl5GJNZrWSZR0cST9AG3VByqfeN2f9sbGYq5fAozkZy3idEYovtvGIU2xJVZRuIU ZhY+VTk0fwO8HlilTLMbyk7t99EJ4a7bXcUuD6ub3BthlKfc41PArhZgasL/dFPd VOSjy5hfGpJgmifSTpPXElf8jgBq6N4Kw9N+rBNkNiruEiwtWfsyqOckYAfNbULe Z8Nikl+3pfWlwzQrAb30X78s4ZyJyOX+XxP118lvx+UAbZofxg5qJJGo7U0Ru54r JPBCW8swlco6AXwvAj3yKcaL3qtKlc6f068QvcSaRELUvS2qfuJ7w4fjKdl/IT93 QggGUyuEVG3UC1Dj961plrACXmqISTAlW8HqkdPvUgLY9rSPuTLuCR54b3fGI+n/ 3wJF6gl5leEPMw== =/Gsf -----END PGP SIGNATURE----- Merge tag 'riscv-for-linus-6.10-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux Pull RISC-V fixes from Palmer Dabbelt: - A fix for the CMODX example in the recently added icache flushing prctl() - A fix to the perf driver to avoid corrupting event data on counter overflows when external overflow handlers are in use - A fix to clear all 
hardware performance monitor events on boot, to avoid dangling events from firmware or previously booted kernels from triggering spuriously - A fix to the perf event probing logic to avoid erroneously reporting the presence of unimplemented counters. This also prevents some implemented counters from being reported - A build fix for the vector sigreturn selftest on clang - A fix to ftrace, which now requires the previously optional index argument to ftrace_graph_ret_addr() - A fix to avoid deadlocking if kexec crash handling triggers in an interrupt context * tag 'riscv-for-linus-6.10-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux: riscv: kexec: Avoid deadlock in kexec crash path riscv: stacktrace: fix usage of ftrace_graph_ret_addr() riscv: selftests: Fix vsetivli args for clang perf: RISC-V: Check standard event availability drivers/perf: riscv: Reset the counter to hpmevent mapping while starting cpus drivers/perf: riscv: Do not update the event data if uptodate documentation: Fix riscv cmodx example
This commit is contained in:
commit
b673f2bda0
@ -62,10 +62,10 @@ cmodx.c::
|
||||
printf("Value before cmodx: %d\n", value);
|
||||
|
||||
// Call prctl before first fence.i is called inside modify_instruction
|
||||
prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX_ON, PR_RISCV_CTX_SW_FENCEI, PR_RISCV_SCOPE_PER_PROCESS);
|
||||
prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_ON, PR_RISCV_SCOPE_PER_PROCESS);
|
||||
modify_instruction();
|
||||
// Call prctl after final fence.i is called in process
|
||||
prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX_OFF, PR_RISCV_CTX_SW_FENCEI, PR_RISCV_SCOPE_PER_PROCESS);
|
||||
prctl(PR_RISCV_SET_ICACHE_FLUSH_CTX, PR_RISCV_CTX_SW_FENCEI_OFF, PR_RISCV_SCOPE_PER_PROCESS);
|
||||
|
||||
value = get_value();
|
||||
printf("Value after cmodx: %d\n", value);
|
||||
|
@ -121,20 +121,12 @@ static void machine_kexec_mask_interrupts(void)
|
||||
|
||||
for_each_irq_desc(i, desc) {
|
||||
struct irq_chip *chip;
|
||||
int ret;
|
||||
|
||||
chip = irq_desc_get_chip(desc);
|
||||
if (!chip)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* First try to remove the active state. If this
|
||||
* fails, try to EOI the interrupt.
|
||||
*/
|
||||
ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
|
||||
|
||||
if (ret && irqd_irq_inprogress(&desc->irq_data) &&
|
||||
chip->irq_eoi)
|
||||
if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
|
||||
chip->irq_eoi(&desc->irq_data);
|
||||
|
||||
if (chip->irq_mask)
|
||||
|
@ -32,6 +32,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
|
||||
bool (*fn)(void *, unsigned long), void *arg)
|
||||
{
|
||||
unsigned long fp, sp, pc;
|
||||
int graph_idx = 0;
|
||||
int level = 0;
|
||||
|
||||
if (regs) {
|
||||
@ -68,7 +69,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
|
||||
pc = regs->ra;
|
||||
} else {
|
||||
fp = frame->fp;
|
||||
pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
|
||||
pc = ftrace_graph_ret_addr(current, &graph_idx, frame->ra,
|
||||
&frame->ra);
|
||||
if (pc == (unsigned long)ret_from_exception) {
|
||||
if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
|
||||
|
@ -327,7 +327,7 @@ static long kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_att
|
||||
|
||||
event = perf_event_create_kernel_counter(attr, -1, current, kvm_riscv_pmu_overflow, pmc);
|
||||
if (IS_ERR(event)) {
|
||||
pr_err("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event));
|
||||
pr_debug("kvm pmu event creation failed for eidx %lx: %ld\n", eidx, PTR_ERR(event));
|
||||
return PTR_ERR(event);
|
||||
}
|
||||
|
||||
|
@ -167,7 +167,7 @@ u64 riscv_pmu_event_update(struct perf_event *event)
|
||||
unsigned long cmask;
|
||||
u64 oldval, delta;
|
||||
|
||||
if (!rvpmu->ctr_read)
|
||||
if (!rvpmu->ctr_read || (hwc->state & PERF_HES_UPTODATE))
|
||||
return 0;
|
||||
|
||||
cmask = riscv_pmu_ctr_get_width_mask(event);
|
||||
|
@ -20,6 +20,7 @@
|
||||
#include <linux/cpu_pm.h>
|
||||
#include <linux/sched/clock.h>
|
||||
#include <linux/soc/andes/irq.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
#include <asm/errata_list.h>
|
||||
#include <asm/sbi.h>
|
||||
@ -114,7 +115,7 @@ struct sbi_pmu_event_data {
|
||||
};
|
||||
};
|
||||
|
||||
static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
|
||||
static struct sbi_pmu_event_data pmu_hw_event_map[] = {
|
||||
[PERF_COUNT_HW_CPU_CYCLES] = {.hw_gen_event = {
|
||||
SBI_PMU_HW_CPU_CYCLES,
|
||||
SBI_PMU_EVENT_TYPE_HW, 0}},
|
||||
@ -148,7 +149,7 @@ static const struct sbi_pmu_event_data pmu_hw_event_map[] = {
|
||||
};
|
||||
|
||||
#define C(x) PERF_COUNT_HW_CACHE_##x
|
||||
static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
|
||||
static struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
|
||||
[PERF_COUNT_HW_CACHE_OP_MAX]
|
||||
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
|
||||
[C(L1D)] = {
|
||||
@ -293,6 +294,34 @@ static const struct sbi_pmu_event_data pmu_cache_event_map[PERF_COUNT_HW_CACHE_M
|
||||
},
|
||||
};
|
||||
|
||||
static void pmu_sbi_check_event(struct sbi_pmu_event_data *edata)
|
||||
{
|
||||
struct sbiret ret;
|
||||
|
||||
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH,
|
||||
0, cmask, 0, edata->event_idx, 0, 0);
|
||||
if (!ret.error) {
|
||||
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
|
||||
ret.value, 0x1, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
|
||||
} else if (ret.error == SBI_ERR_NOT_SUPPORTED) {
|
||||
/* This event cannot be monitored by any counter */
|
||||
edata->event_idx = -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
static void pmu_sbi_check_std_events(struct work_struct *work)
|
||||
{
|
||||
for (int i = 0; i < ARRAY_SIZE(pmu_hw_event_map); i++)
|
||||
pmu_sbi_check_event(&pmu_hw_event_map[i]);
|
||||
|
||||
for (int i = 0; i < ARRAY_SIZE(pmu_cache_event_map); i++)
|
||||
for (int j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++)
|
||||
for (int k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++)
|
||||
pmu_sbi_check_event(&pmu_cache_event_map[i][j][k]);
|
||||
}
|
||||
|
||||
static DECLARE_WORK(check_std_events_work, pmu_sbi_check_std_events);
|
||||
|
||||
static int pmu_sbi_ctr_get_width(int idx)
|
||||
{
|
||||
return pmu_ctr_list[idx].width;
|
||||
@ -478,6 +507,12 @@ static int pmu_sbi_event_map(struct perf_event *event, u64 *econfig)
|
||||
u64 raw_config_val;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Ensure we are finished checking standard hardware events for
|
||||
* validity before allowing userspace to configure any events.
|
||||
*/
|
||||
flush_work(&check_std_events_work);
|
||||
|
||||
switch (type) {
|
||||
case PERF_TYPE_HARDWARE:
|
||||
if (config >= PERF_COUNT_HW_MAX)
|
||||
@ -762,7 +797,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
|
||||
* which may include counters that are not enabled yet.
|
||||
*/
|
||||
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
|
||||
0, pmu->cmask, 0, 0, 0, 0);
|
||||
0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0);
|
||||
}
|
||||
|
||||
static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
|
||||
@ -1359,6 +1394,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
|
||||
if (ret)
|
||||
goto out_unregister;
|
||||
|
||||
/* Asynchronously check which standard events are available */
|
||||
schedule_work(&check_std_events_work);
|
||||
|
||||
return 0;
|
||||
|
||||
out_unregister:
|
||||
|
@ -51,7 +51,7 @@ static int vector_sigreturn(int data, void (*handler)(int, siginfo_t *, void *))
|
||||
|
||||
asm(".option push \n\
|
||||
.option arch, +v \n\
|
||||
vsetivli x0, 1, e32, ta, ma \n\
|
||||
vsetivli x0, 1, e32, m1, ta, ma \n\
|
||||
vmv.s.x v0, %1 \n\
|
||||
# Generate SIGSEGV \n\
|
||||
lw a0, 0(x0) \n\
|
||||
|
Loading…
Reference in New Issue
Block a user