Merge branch 'irq/core' into tip/master

# New commits in irq/core:
    b4706d814921 ("genirq/kexec: Prevent redundant IRQ masking by checking state before shutdown")
    bad6722e478f ("kexec: Consolidate machine_kexec_mask_interrupts() implementation")
    429f49ad361c ("genirq: Reuse irq_thread_fn() for forced thread case")
    6f8b79683dfb ("genirq: Move irq_thread_fn() further up in the code")

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Commit: 2d7c58ef93
Author: Ingo Molnar
Date:   2025-01-11 17:04:59 +01:00

12 changed files with 66 additions and 126 deletions
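The consolidation in bad6722e478f replaces the per-architecture copies of machine_kexec_mask_interrupts() removed below with a single generic implementation in kernel/irq/kexec.c. A minimal sketch of what an architecture's crash-shutdown path looks like after the change; only the machine_kexec_mask_interrupts() call and its declaration in <linux/irq.h> come from this series, the surrounding function body is illustrative:

#include <linux/irq.h>		/* now declares machine_kexec_mask_interrupts() */
#include <linux/kexec.h>
#include <linux/smp.h>

void machine_crash_shutdown(struct pt_regs *regs)
{
	local_irq_disable();

	/* Illustrative arch-specific step: save register state for the crash dump. */
	crash_save_cpu(regs, smp_processor_id());

	/* Quiesce all interrupt chips through the consolidated generic helper. */
	machine_kexec_mask_interrupts();
}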


@@ -127,29 +127,6 @@ void crash_smp_send_stop(void)
	cpus_stopped = 1;
}

static void machine_kexec_mask_interrupts(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_chip *chip;

		chip = irq_desc_get_chip(desc);
		if (!chip)
			continue;

		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
			chip->irq_eoi(&desc->irq_data);

		if (chip->irq_mask)
			chip->irq_mask(&desc->irq_data);

		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
			chip->irq_disable(&desc->irq_data);
	}
}

void machine_crash_shutdown(struct pt_regs *regs)
{
	local_irq_disable();


@@ -149,6 +149,7 @@ config ARM64
	select GENERIC_IDLE_POLL_SETUP
	select GENERIC_IOREMAP
	select GENERIC_IRQ_IPI
	select GENERIC_IRQ_KEXEC_CLEAR_VM_FORWARD
	select GENERIC_IRQ_PROBE
	select GENERIC_IRQ_SHOW
	select GENERIC_IRQ_SHOW_LEVEL


@@ -207,37 +207,6 @@ void machine_kexec(struct kimage *kimage)
	BUG(); /* Should never get here. */
}

static void machine_kexec_mask_interrupts(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_chip *chip;
		int ret;

		chip = irq_desc_get_chip(desc);
		if (!chip)
			continue;

		/*
		 * First try to remove the active state. If this
		 * fails, try to EOI the interrupt.
		 */
		ret = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);

		if (ret && irqd_irq_inprogress(&desc->irq_data) &&
		    chip->irq_eoi)
			chip->irq_eoi(&desc->irq_data);

		if (chip->irq_mask)
			chip->irq_mask(&desc->irq_data);

		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
			chip->irq_disable(&desc->irq_data);
	}
}

/**
 * machine_crash_shutdown - shutdown non-crashing cpus and save registers
 */


@@ -61,7 +61,6 @@ struct pt_regs;
extern void kexec_smp_wait(void);	/* get and clear naca physid, wait for
					   master to copy new code to 0 */
extern void default_machine_kexec(struct kimage *image);
extern void machine_kexec_mask_interrupts(void);

void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
			 unsigned long start_address) __noreturn;


@@ -22,28 +22,6 @@
#include <asm/setup.h>
#include <asm/firmware.h>

void machine_kexec_mask_interrupts(void) {
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_chip *chip;

		chip = irq_desc_get_chip(desc);
		if (!chip)
			continue;

		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
			chip->irq_eoi(&desc->irq_data);

		if (chip->irq_mask)
			chip->irq_mask(&desc->irq_data);

		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
			chip->irq_disable(&desc->irq_data);
	}
}

#ifdef CONFIG_CRASH_DUMP
void machine_crash_shutdown(struct pt_regs *regs)
{


@@ -7,6 +7,7 @@
 * Copyright (C) 2005 IBM Corporation.
 */

#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/string.h>


@@ -114,29 +114,6 @@ void machine_shutdown(void)
#endif
}

static void machine_kexec_mask_interrupts(void)
{
	unsigned int i;
	struct irq_desc *desc;

	for_each_irq_desc(i, desc) {
		struct irq_chip *chip;

		chip = irq_desc_get_chip(desc);
		if (!chip)
			continue;

		if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
			chip->irq_eoi(&desc->irq_data);

		if (chip->irq_mask)
			chip->irq_mask(&desc->irq_data);

		if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
			chip->irq_disable(&desc->irq_data);
	}
}

/*
 * machine_crash_shutdown - Prepare to kexec after a kernel crash
 *


@@ -694,6 +694,9 @@ extern int irq_chip_request_resources_parent(struct irq_data *data);
extern void irq_chip_release_resources_parent(struct irq_data *data);
#endif

/* Disable or mask interrupts during a kernel kexec */
extern void machine_kexec_mask_interrupts(void);

/* Handling of unhandled and spurious interrupts: */
extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret);


@@ -141,6 +141,12 @@ config GENERIC_IRQ_DEBUGFS
	  If you don't know what to do here, say N.

# Clear forwarded VM interrupts during kexec.
# This option ensures the kernel clears active states for interrupts
# forwarded to virtual machines (VMs) during a machine kexec.
config GENERIC_IRQ_KEXEC_CLEAR_VM_FORWARD
	bool

endmenu

config GENERIC_IRQ_MULTI_HANDLER
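A note on the design choice: GENERIC_IRQ_KEXEC_CLEAR_VM_FORWARD is a bool symbol with no prompt, so it cannot be enabled from a config menu; an architecture opts in via select, as arm64 does in the Kconfig hunk above. Where it is not selected, the IS_ENABLED() check in the generic helper below evaluates to zero at compile time, the irq_set_irqchip_state() call is compiled out, and only the plain EOI fallback remains.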


@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0

obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o
obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o kexec.o
obj-$(CONFIG_IRQ_TIMINGS) += timings.o
ifeq ($(CONFIG_TEST_IRQ_TIMINGS),y)
	CFLAGS_timings.o += -DDEBUG

kernel/irq/kexec.c (new file, 36 lines)

@@ -0,0 +1,36 @@
// SPDX-License-Identifier: GPL-2.0

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqnr.h>

#include "internals.h"

void machine_kexec_mask_interrupts(void)
{
	struct irq_desc *desc;
	unsigned int i;

	for_each_irq_desc(i, desc) {
		struct irq_chip *chip;
		int check_eoi = 1;

		chip = irq_desc_get_chip(desc);
		if (!chip || !irqd_is_started(&desc->irq_data))
			continue;

		if (IS_ENABLED(CONFIG_GENERIC_IRQ_KEXEC_CLEAR_VM_FORWARD)) {
			/*
			 * First try to remove the active state from an interrupt which is forwarded
			 * to a VM. If the interrupt is not forwarded, try to EOI the interrupt.
			 */
			check_eoi = irq_set_irqchip_state(i, IRQCHIP_STATE_ACTIVE, false);
		}

		if (check_eoi && chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
			chip->irq_eoi(&desc->irq_data);

		irq_shutdown(desc);
	}
}
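The "remove the active state" step above relies on the generic irqchip state accessors declared in <linux/interrupt.h>. A minimal sketch of the same pattern in isolation; the helper name and error handling are illustrative, only irq_get_irqchip_state()/irq_set_irqchip_state() and IRQCHIP_STATE_ACTIVE come from the kernel API:

#include <linux/interrupt.h>
#include <linux/irq.h>

static void clear_forwarded_active_state(unsigned int irq)
{
	bool active = false;

	/* Ask the irqchip whether the interrupt is still active (e.g. forwarded to a VM). */
	if (irq_get_irqchip_state(irq, IRQCHIP_STATE_ACTIVE, &active))
		return;		/* chip does not implement state access */

	/* Deactivate it at the chip level; on failure the caller falls back to a plain EOI. */
	if (active)
		irq_set_irqchip_state(irq, IRQCHIP_STATE_ACTIVE, false);
}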


@@ -1181,49 +1181,42 @@ out_unlock:
	chip_bus_sync_unlock(desc);
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret = action->thread_fn(action->irq, action->dev_id);

	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
static irqreturn_t irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	ret = irq_thread_fn(desc, action);
	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow busses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
				 struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	if (ret == IRQ_HANDLED)
		atomic_inc(&desc->threads_handled);

	irq_finalize_oneshot(desc, action);
	return ret;
}

void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active))
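For context on the comments in this hunk: handlers registered with request_threaded_irq() run in a dedicated kernel thread via irq_thread_fn() and may sleep, whereas force-threaded handlers are wrapped by irq_forced_thread_fn() with its bh/irq protection. A minimal sketch of a driver using the threaded API; the demo_* names and the trivial handler bodies are illustrative:

#include <linux/interrupt.h>
#include <linux/delay.h>

/* Hard handler: runs in hard-irq context, only acknowledges and defers the work. */
static irqreturn_t demo_hardirq(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;		/* wake the threaded handler */
}

/* Threaded handler: runs in process context via irq_thread_fn(), so it may sleep. */
static irqreturn_t demo_thread(int irq, void *dev_id)
{
	msleep(1);			/* e.g. wait for a slow bus transaction */
	return IRQ_HANDLED;
}

static int demo_setup(unsigned int irq, void *dev)
{
	/* IRQF_ONESHOT keeps the line masked until the thread finishes. */
	return request_threaded_irq(irq, demo_hardirq, demo_thread,
				    IRQF_ONESHOT, "demo-dev", dev);
}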