2018-03-14 22:15:19 +01:00
|
|
|
// SPDX-License-Identifier: GPL-2.0
|
2018-03-20 14:17:04 +01:00
|
|
|
// Copyright 2017 Thomas Gleixner <tglx@linutronix.de>
|
|
|
|
|
2017-06-20 01:37:17 +02:00
|
|
|
#include <linux/irqdomain.h>
|
|
|
|
#include <linux/irq.h>
|
2017-08-18 09:11:56 +01:00
|
|
|
#include <linux/uaccess.h>
|
2017-06-20 01:37:17 +02:00
|
|
|
|
|
|
|
#include "internals.h"
|
|
|
|
|
|
|
|
/* debugfs directory holding the per-interrupt files (irq/irqs/<nr>) */
static struct dentry *irq_dir;
|
|
|
|
|
2024-05-29 09:16:28 +00:00
|
|
|
void irq_debug_show_bits(struct seq_file *m, int ind, unsigned int state,
|
|
|
|
const struct irq_bit_descr *sd, int size)
|
2017-06-20 01:37:17 +02:00
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < size; i++, sd++) {
|
|
|
|
if (state & sd->mask)
|
|
|
|
seq_printf(m, "%*s%s\n", ind + 12, "", sd->name);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_SMP
/*
 * Dump the CPU affinity related masks of an interrupt: the requested
 * affinity, the effective affinity (if tracked by the architecture) and
 * a pending affinity change (if the architecture defers affinity updates).
 */
static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);
	const struct cpumask *mask;

	mask = irq_data_get_affinity_mask(data);
	seq_printf(m, "affinity: %*pbl\n", cpumask_pr_args(mask));
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	mask = irq_data_get_effective_affinity_mask(data);
	seq_printf(m, "effectiv: %*pbl\n", cpumask_pr_args(mask));
#endif
#ifdef CONFIG_GENERIC_PENDING_IRQ
	mask = desc->pending_mask;
	seq_printf(m, "pending: %*pbl\n", cpumask_pr_args(mask));
#endif
}
#else
/* No affinity masks to show on UP */
static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc) { }
#endif
|
|
|
|
|
|
|
|
static const struct irq_bit_descr irqchip_flags[] = {
|
|
|
|
BIT_MASK_DESCR(IRQCHIP_SET_TYPE_MASKED),
|
|
|
|
BIT_MASK_DESCR(IRQCHIP_EOI_IF_HANDLED),
|
|
|
|
BIT_MASK_DESCR(IRQCHIP_MASK_ON_SUSPEND),
|
|
|
|
BIT_MASK_DESCR(IRQCHIP_ONOFFLINE_ENABLED),
|
|
|
|
BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
|
|
|
|
BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
|
|
|
|
BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
|
2018-06-22 10:52:48 +01:00
|
|
|
BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
|
2019-01-31 14:53:58 +00:00
|
|
|
BIT_MASK_DESCR(IRQCHIP_SUPPORTS_NMI),
|
2020-09-28 10:02:01 +05:30
|
|
|
BIT_MASK_DESCR(IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND),
|
2022-04-19 15:18:37 +01:00
|
|
|
BIT_MASK_DESCR(IRQCHIP_IMMUTABLE),
|
2017-06-20 01:37:17 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
static void
|
|
|
|
irq_debug_show_chip(struct seq_file *m, struct irq_data *data, int ind)
|
|
|
|
{
|
|
|
|
struct irq_chip *chip = data->chip;
|
|
|
|
|
|
|
|
if (!chip) {
|
|
|
|
seq_printf(m, "chip: None\n");
|
|
|
|
return;
|
|
|
|
}
|
2022-02-10 09:27:21 +00:00
|
|
|
seq_printf(m, "%*schip: ", ind, "");
|
|
|
|
if (chip->irq_print_chip)
|
|
|
|
chip->irq_print_chip(data, m);
|
|
|
|
else
|
|
|
|
seq_printf(m, "%s", chip->name);
|
|
|
|
seq_printf(m, "\n%*sflags: 0x%lx\n", ind + 1, "", chip->flags);
|
2017-06-20 01:37:17 +02:00
|
|
|
irq_debug_show_bits(m, ind, chip->flags, irqchip_flags,
|
|
|
|
ARRAY_SIZE(irqchip_flags));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
irq_debug_show_data(struct seq_file *m, struct irq_data *data, int ind)
|
|
|
|
{
|
|
|
|
seq_printf(m, "%*sdomain: %s\n", ind, "",
|
|
|
|
data->domain ? data->domain->name : "");
|
|
|
|
seq_printf(m, "%*shwirq: 0x%lx\n", ind + 1, "", data->hwirq);
|
|
|
|
irq_debug_show_chip(m, data, ind + 1);
|
2017-09-13 23:29:06 +02:00
|
|
|
if (data->domain && data->domain->ops && data->domain->ops->debug_show)
|
|
|
|
data->domain->ops->debug_show(m, NULL, data, ind + 1);
|
2017-06-20 01:37:17 +02:00
|
|
|
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
|
|
|
|
if (!data->parent_data)
|
|
|
|
return;
|
|
|
|
seq_printf(m, "%*sparent:\n", ind + 1, "");
|
|
|
|
irq_debug_show_data(m, data->parent_data, ind + 4);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
|
|
|
static const struct irq_bit_descr irqdata_states[] = {
|
|
|
|
BIT_MASK_DESCR(IRQ_TYPE_EDGE_RISING),
|
|
|
|
BIT_MASK_DESCR(IRQ_TYPE_EDGE_FALLING),
|
|
|
|
BIT_MASK_DESCR(IRQ_TYPE_LEVEL_HIGH),
|
|
|
|
BIT_MASK_DESCR(IRQ_TYPE_LEVEL_LOW),
|
|
|
|
BIT_MASK_DESCR(IRQD_LEVEL),
|
|
|
|
|
|
|
|
BIT_MASK_DESCR(IRQD_ACTIVATED),
|
|
|
|
BIT_MASK_DESCR(IRQD_IRQ_STARTED),
|
|
|
|
BIT_MASK_DESCR(IRQD_IRQ_DISABLED),
|
|
|
|
BIT_MASK_DESCR(IRQD_IRQ_MASKED),
|
|
|
|
BIT_MASK_DESCR(IRQD_IRQ_INPROGRESS),
|
|
|
|
|
|
|
|
BIT_MASK_DESCR(IRQD_PER_CPU),
|
|
|
|
BIT_MASK_DESCR(IRQD_NO_BALANCING),
|
|
|
|
|
2017-06-20 01:37:52 +02:00
|
|
|
BIT_MASK_DESCR(IRQD_SINGLE_TARGET),
|
2017-06-20 01:37:17 +02:00
|
|
|
BIT_MASK_DESCR(IRQD_MOVE_PCNTXT),
|
|
|
|
BIT_MASK_DESCR(IRQD_AFFINITY_SET),
|
|
|
|
BIT_MASK_DESCR(IRQD_SETAFFINITY_PENDING),
|
|
|
|
BIT_MASK_DESCR(IRQD_AFFINITY_MANAGED),
|
2020-07-25 13:30:55 +01:00
|
|
|
BIT_MASK_DESCR(IRQD_AFFINITY_ON_ACTIVATE),
|
2017-06-20 01:37:17 +02:00
|
|
|
BIT_MASK_DESCR(IRQD_MANAGED_SHUTDOWN),
|
2017-12-29 16:44:34 +01:00
|
|
|
BIT_MASK_DESCR(IRQD_CAN_RESERVE),
|
2017-06-20 01:37:17 +02:00
|
|
|
|
|
|
|
BIT_MASK_DESCR(IRQD_FORWARDED_TO_VCPU),
|
|
|
|
|
|
|
|
BIT_MASK_DESCR(IRQD_WAKEUP_STATE),
|
|
|
|
BIT_MASK_DESCR(IRQD_WAKEUP_ARMED),
|
2020-07-25 13:30:55 +01:00
|
|
|
|
|
|
|
BIT_MASK_DESCR(IRQD_DEFAULT_TRIGGER_SET),
|
|
|
|
|
|
|
|
BIT_MASK_DESCR(IRQD_HANDLE_ENFORCE_IRQCTX),
|
2020-09-28 10:02:01 +05:30
|
|
|
|
|
|
|
BIT_MASK_DESCR(IRQD_IRQ_ENABLED_ON_SUSPEND),
|
genirq: Allow fasteoi handler to resend interrupts on concurrent handling
There is a class of interrupt controllers out there that, once they
have signalled a given interrupt number, will still signal incoming
instances of the *same* interrupt despite the original interrupt
not having been EOIed yet.
As long as the new interrupt reaches the *same* CPU, nothing bad
happens, as that CPU still has its interrupts globally disabled,
and we will only take the new interrupt once the interrupt has
been EOIed.
However, things become more "interesting" if an affinity change comes
in while the interrupt is being handled. More specifically, while
the per-irq lock is being dropped. This results in the affinity change
taking place immediately. At this point, there is nothing that prevents
the interrupt from firing on the new target CPU. We end-up with the
interrupt running concurrently on two CPUs, which isn't a good thing.
And that's where things become worse: the new CPU notices that the
interrupt handling is in progress (irq_may_run() return false), and
*drops the interrupt on the floor*.
The whole race looks like this:
CPU 0 | CPU 1
-----------------------------|-----------------------------
interrupt start |
handle_fasteoi_irq | set_affinity(CPU 1)
handler |
... | interrupt start
... | handle_fasteoi_irq -> early out
handle_fasteoi_irq return | interrupt end
interrupt end |
If the interrupt was an edge, too bad. The interrupt is lost, and
the system will eventually die one way or another. Not great.
A way to avoid this situation is to detect this problem at the point
we handle the interrupt on the new target. Instead of dropping the
interrupt, use the resend mechanism to force it to be replayed.
Also, in order to limit the impact of this workaround to the pathetic
architectures that require it, gate it behind a new irq flag aptly
named IRQD_RESEND_WHEN_IN_PROGRESS.
Suggested-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: James Gowans <jgowans@amazon.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Marc Zyngier <maz@kernel.org>
Cc: KarimAllah Raslan <karahmed@amazon.com>
Cc: Yipeng Zou <zouyipeng@huawei.com>
Cc: Zhang Jianhua <chris.zjh@huawei.com>
[maz: reworded commit mesage]
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230608120021.3273400-3-jgowans@amazon.com
2023-06-08 14:00:20 +02:00
|
|
|
|
|
|
|
BIT_MASK_DESCR(IRQD_RESEND_WHEN_IN_PROGRESS),
|
2017-06-20 01:37:17 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
static const struct irq_bit_descr irqdesc_states[] = {
|
|
|
|
BIT_MASK_DESCR(_IRQ_NOPROBE),
|
|
|
|
BIT_MASK_DESCR(_IRQ_NOREQUEST),
|
|
|
|
BIT_MASK_DESCR(_IRQ_NOTHREAD),
|
|
|
|
BIT_MASK_DESCR(_IRQ_NOAUTOEN),
|
|
|
|
BIT_MASK_DESCR(_IRQ_NESTED_THREAD),
|
|
|
|
BIT_MASK_DESCR(_IRQ_PER_CPU_DEVID),
|
|
|
|
BIT_MASK_DESCR(_IRQ_IS_POLLED),
|
|
|
|
BIT_MASK_DESCR(_IRQ_DISABLE_UNLAZY),
|
2020-05-19 14:58:13 +01:00
|
|
|
BIT_MASK_DESCR(_IRQ_HIDDEN),
|
2017-06-20 01:37:17 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
static const struct irq_bit_descr irqdesc_istates[] = {
|
|
|
|
BIT_MASK_DESCR(IRQS_AUTODETECT),
|
|
|
|
BIT_MASK_DESCR(IRQS_SPURIOUS_DISABLED),
|
|
|
|
BIT_MASK_DESCR(IRQS_POLL_INPROGRESS),
|
|
|
|
BIT_MASK_DESCR(IRQS_ONESHOT),
|
|
|
|
BIT_MASK_DESCR(IRQS_REPLAY),
|
|
|
|
BIT_MASK_DESCR(IRQS_WAITING),
|
|
|
|
BIT_MASK_DESCR(IRQS_PENDING),
|
|
|
|
BIT_MASK_DESCR(IRQS_SUSPENDED),
|
2019-01-31 14:53:58 +00:00
|
|
|
BIT_MASK_DESCR(IRQS_NMI),
|
2017-06-20 01:37:17 +02:00
|
|
|
};
|
|
|
|
|
|
|
|
|
|
|
|
static int irq_debug_show(struct seq_file *m, void *p)
|
|
|
|
{
|
|
|
|
struct irq_desc *desc = m->private;
|
|
|
|
struct irq_data *data;
|
|
|
|
|
|
|
|
raw_spin_lock_irq(&desc->lock);
|
|
|
|
data = irq_desc_get_irq_data(desc);
|
2019-03-25 21:32:28 +02:00
|
|
|
seq_printf(m, "handler: %ps\n", desc->handle_irq);
|
2017-09-13 23:29:05 +02:00
|
|
|
seq_printf(m, "device: %s\n", desc->dev_name);
|
2017-06-20 01:37:17 +02:00
|
|
|
seq_printf(m, "status: 0x%08x\n", desc->status_use_accessors);
|
|
|
|
irq_debug_show_bits(m, 0, desc->status_use_accessors, irqdesc_states,
|
|
|
|
ARRAY_SIZE(irqdesc_states));
|
|
|
|
seq_printf(m, "istate: 0x%08x\n", desc->istate);
|
|
|
|
irq_debug_show_bits(m, 0, desc->istate, irqdesc_istates,
|
|
|
|
ARRAY_SIZE(irqdesc_istates));
|
|
|
|
seq_printf(m, "ddepth: %u\n", desc->depth);
|
|
|
|
seq_printf(m, "wdepth: %u\n", desc->wake_depth);
|
|
|
|
seq_printf(m, "dstate: 0x%08x\n", irqd_get(data));
|
|
|
|
irq_debug_show_bits(m, 0, irqd_get(data), irqdata_states,
|
|
|
|
ARRAY_SIZE(irqdata_states));
|
|
|
|
seq_printf(m, "node: %d\n", irq_data_get_node(data));
|
|
|
|
irq_debug_show_masks(m, desc);
|
|
|
|
irq_debug_show_data(m, data, 0);
|
|
|
|
raw_spin_unlock_irq(&desc->lock);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int irq_debug_open(struct inode *inode, struct file *file)
|
|
|
|
{
|
|
|
|
return single_open(file, irq_debug_show, inode->i_private);
|
|
|
|
}
|
|
|
|
|
2017-08-18 09:11:56 +01:00
|
|
|
/*
 * Write callback: writing "trigger" injects the interrupt via the resend
 * machinery. Anything else is silently accepted and ignored.
 *
 * NOTE(review): the comparison is bounded by the write length, so any
 * prefix of "trigger" (e.g. "trig") matches as well — presumably
 * acceptable for a debug interface; confirm before tightening.
 */
static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct irq_desc *desc = file_inode(file)->i_private;
	char kbuf[8] = { 0, };
	size_t len;

	/* Leave room for the NUL terminator provided by the initializer */
	len = min(sizeof(kbuf) - 1, count);
	if (copy_from_user(kbuf, user_buf, len))
		return -EFAULT;

	if (!strncmp(kbuf, "trigger", len)) {
		int err = irq_inject_interrupt(irq_desc_get_irq(desc));

		return err ? err : count;
	}

	return count;
}
|
|
|
|
|
2017-06-20 01:37:17 +02:00
|
|
|
static const struct file_operations dfs_irq_ops = {
|
|
|
|
.open = irq_debug_open,
|
2017-08-18 09:11:56 +01:00
|
|
|
.write = irq_debug_write,
|
2017-06-20 01:37:17 +02:00
|
|
|
.read = seq_read,
|
|
|
|
.llseek = seq_lseek,
|
|
|
|
.release = single_release,
|
|
|
|
};
|
|
|
|
|
2017-09-13 23:29:05 +02:00
|
|
|
void irq_debugfs_copy_devname(int irq, struct device *dev)
|
|
|
|
{
|
|
|
|
struct irq_desc *desc = irq_to_desc(irq);
|
|
|
|
const char *name = dev_name(dev);
|
|
|
|
|
|
|
|
if (name)
|
|
|
|
desc->dev_name = kstrdup(name, GFP_KERNEL);
|
|
|
|
}
|
|
|
|
|
2017-06-20 01:37:17 +02:00
|
|
|
void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc)
|
|
|
|
{
|
|
|
|
char name [10];
|
|
|
|
|
|
|
|
if (!irq_dir || !desc || desc->debugfs_file)
|
|
|
|
return;
|
|
|
|
|
|
|
|
sprintf(name, "%d", irq);
|
2017-08-18 09:11:56 +01:00
|
|
|
desc->debugfs_file = debugfs_create_file(name, 0644, irq_dir, desc,
|
2017-06-20 01:37:17 +02:00
|
|
|
&dfs_irq_ops);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Boot-time setup: create the debugfs "irq" root with the domain
 * hierarchy below it, then the "irqs" directory and one file per
 * already-allocated interrupt. The sparse irq lock is held while
 * walking the active interrupts.
 */
static int __init irq_debugfs_init(void)
{
	struct dentry *root;
	int i;

	root = debugfs_create_dir("irq", NULL);

	irq_domain_debugfs_init(root);

	irq_dir = debugfs_create_dir("irqs", root);

	irq_lock_sparse();
	for_each_active_irq(i)
		irq_add_debugfs_entry(i, irq_to_desc(i));
	irq_unlock_sparse();

	return 0;
}
__initcall(irq_debugfs_init);
|