um: move thread info into task
This selects the THREAD_INFO_IN_TASK option for UM and changes the way
that the current task is discovered. This is trivial, though, as UML
already tracks the current task in cpu_tasks[] and this can be used to
retrieve it.

Also remove the signal handler code that copies the thread information
into the IRQ stack. It is obsolete now, which also means that the
mentioned race condition cannot happen anymore.

Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Reviewed-by: Hajime Tazaki <thehajime@gmail.com>
Link: https://patch.msgid.link/20241111102910.46512-1-benjamin@sipsolutions.net
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
commit 2f681ba4b3
parent 0f659ff362
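Before the diff, a minimal user-space sketch of the two lookup schemes this
change trades between; the struct names, STACK_SZ, and the cpu_tasks sizing
below are illustrative stand-ins, not the kernel's definitions. Previously,
UML found the current task by masking a stack address down to the
thread_info at the stack base (the current_thread_info() removed below);
with THREAD_INFO_IN_TASK, current becomes a plain load from cpu_tasks[].

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define STACK_SZ (16 * 1024)    /* stand-in for THREAD_SIZE */

    struct task { int pid; };

    /* Old scheme: a thread_info at the base of a STACK_SZ-aligned stack,
     * recoverable by masking any address that lives on that stack. */
    struct thread_info { struct task *task; };

    static struct task *current_by_masking(void *sp)
    {
            uintptr_t base = (uintptr_t)sp & ~(uintptr_t)(STACK_SZ - 1);
            return ((struct thread_info *)base)->task;
    }

    /* New scheme: the current task is tracked in a per-CPU array. */
    static struct task *cpu_tasks[1];

    int main(void)
    {
            struct task t = { .pid = 1 };
            struct thread_info *ti = aligned_alloc(STACK_SZ, STACK_SZ);

            if (!ti)
                    return 1;
            ti->task = &t;
            /* any address inside the aligned block masks back to its base */
            printf("masking lookup:   pid %d\n",
                   current_by_masking((char *)ti + 1234)->pid);

            cpu_tasks[0] = &t;      /* what UML's set_current() now does */
            printf("cpu_tasks lookup: pid %d\n", cpu_tasks[0]->pid);

            free(ti);
            return 0;
    }

Any C11 compiler should build this; the array-load scheme is simpler and
is what makes the per-signal stack bookkeeping removed below unnecessary.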
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -34,6 +34,7 @@ config UML
 	select HAVE_RUST
 	select ARCH_HAS_UBSAN
 	select HAVE_ARCH_TRACEHOOK
+	select THREAD_INFO_IN_TASK
 
 config MMU
 	bool
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -1,7 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 generic-y += bug.h
 generic-y += compat.h
-generic-y += current.h
 generic-y += device.h
 generic-y += dma-mapping.h
 generic-y += emergency-restart.h
--- /dev/null
+++ b/arch/um/include/asm/current.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/compiler.h>
+#include <linux/threads.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+extern struct task_struct *cpu_tasks[NR_CPUS];
+
+static __always_inline struct task_struct *get_current(void)
+{
+	return cpu_tasks[0];
+}
+
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
--- a/arch/um/include/asm/thread_info.h
+++ b/arch/um/include/asm/thread_info.h
@@ -17,33 +17,17 @@
 #include <sysdep/ptrace_user.h>
 
 struct thread_info {
-	struct task_struct *task;	/* main task structure */
 	unsigned long flags;		/* low level flags */
 	__u32 cpu;			/* current CPU */
 	int preempt_count;		/* 0 => preemptable,
 					   <0 => BUG */
-	struct thread_info *real_thread; /* Points to non-IRQ stack */
 };
 
 #define INIT_THREAD_INFO(tsk)			\
 {						\
-	.task		= &tsk,			\
 	.flags		= 0,			\
 	.cpu		= 0,			\
 	.preempt_count	= INIT_PREEMPT_COUNT,	\
-	.real_thread	= NULL,			\
-}
-
-/* how to get the thread information struct from C */
-static inline struct thread_info *current_thread_info(void)
-{
-	struct thread_info *ti;
-	unsigned long mask = THREAD_SIZE - 1;
-	void *p;
-
-	asm volatile ("" : "=r" (p) : "0" (&ti));
-	ti = (struct thread_info *) (((unsigned long)p) & ~mask);
-	return ti;
 }
 
 #endif
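With THREAD_INFO_IN_TASK selected, the architecture no longer provides
current_thread_info() itself; the generic <linux/thread_info.h> derives it
from the task pointer, which is why the task back-pointer can be dropped
here. To my understanding the generic definition is roughly this,
relying on thread_info being the first member of task_struct:

    /* generic definition under CONFIG_THREAD_INFO_IN_TASK (paraphrased) */
    #define current_thread_info() ((struct thread_info *)current)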
--- a/arch/um/include/shared/as-layout.h
+++ b/arch/um/include/shared/as-layout.h
@@ -30,11 +30,8 @@
 
 #include <sysdep/ptrace.h>
 
-struct cpu_task {
-	void *task;
-};
-
-extern struct cpu_task cpu_tasks[];
+struct task_struct;
+extern struct task_struct *cpu_tasks[];
 
 extern unsigned long long physmem_size;
 
--- a/arch/um/kernel/dyn.lds.S
+++ b/arch/um/kernel/dyn.lds.S
@@ -116,8 +116,6 @@ SECTIONS
   .fini_array : { *(.fini_array) }
   .data : {
     INIT_TASK_DATA(KERNEL_STACK_SIZE)
-    . = ALIGN(KERNEL_STACK_SIZE);
-    *(.data..init_irqstack)
     DATA_DATA
     *(.data.* .gnu.linkonce.d.*)
     SORT(CONSTRUCTORS)
--- a/arch/um/kernel/irq.c
+++ b/arch/um/kernel/irq.c
@@ -674,115 +674,3 @@ void __init init_IRQ(void)
 	/* Initialize EPOLL Loop */
 	os_setup_epoll();
 }
-
-/*
- * IRQ stack entry and exit:
- *
- * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
- * and switch over to the IRQ stack after some preparation.  We use
- * sigaltstack to receive signals on a separate stack from the start.
- * These two functions make sure the rest of the kernel won't be too
- * upset by being on a different stack.  The IRQ stack has a
- * thread_info structure at the bottom so that current et al continue
- * to work.
- *
- * to_irq_stack copies the current task's thread_info to the IRQ stack
- * thread_info and sets the tasks's stack to point to the IRQ stack.
- *
- * from_irq_stack copies the thread_info struct back (flags may have
- * been modified) and resets the task's stack pointer.
- *
- * Tricky bits -
- *
- * What happens when two signals race each other?  UML doesn't block
- * signals with sigprocmask, SA_DEFER, or sa_mask, so a second signal
- * could arrive while a previous one is still setting up the
- * thread_info.
- *
- * There are three cases -
- *     The first interrupt on the stack - sets up the thread_info and
- * handles the interrupt
- *     A nested interrupt interrupting the copying of the thread_info -
- * can't handle the interrupt, as the stack is in an unknown state
- *     A nested interrupt not interrupting the copying of the
- * thread_info - doesn't do any setup, just handles the interrupt
- *
- * The first job is to figure out whether we interrupted stack setup.
- * This is done by xchging the signal mask with thread_info->pending.
- * If the value that comes back is zero, then there is no setup in
- * progress, and the interrupt can be handled.  If the value is
- * non-zero, then there is stack setup in progress.  In order to have
- * the interrupt handled, we leave our signal in the mask, and it will
- * be handled by the upper handler after it has set up the stack.
- *
- * Next is to figure out whether we are the outer handler or a nested
- * one.  As part of setting up the stack, thread_info->real_thread is
- * set to non-NULL (and is reset to NULL on exit).  This is the
- * nesting indicator.  If it is non-NULL, then the stack is already
- * set up and the handler can run.
- */
-
-static unsigned long pending_mask;
-
-unsigned long to_irq_stack(unsigned long *mask_out)
-{
-	struct thread_info *ti;
-	unsigned long mask, old;
-	int nested;
-
-	mask = xchg(&pending_mask, *mask_out);
-	if (mask != 0) {
-		/*
-		 * If any interrupts come in at this point, we want to
-		 * make sure that their bits aren't lost by our
-		 * putting our bit in.  So, this loop accumulates bits
-		 * until xchg returns the same value that we put in.
-		 * When that happens, there were no new interrupts,
-		 * and pending_mask contains a bit for each interrupt
-		 * that came in.
-		 */
-		old = *mask_out;
-		do {
-			old |= mask;
-			mask = xchg(&pending_mask, old);
-		} while (mask != old);
-		return 1;
-	}
-
-	ti = current_thread_info();
-	nested = (ti->real_thread != NULL);
-	if (!nested) {
-		struct task_struct *task;
-		struct thread_info *tti;
-
-		task = cpu_tasks[ti->cpu].task;
-		tti = task_thread_info(task);
-
-		*ti = *tti;
-		ti->real_thread = tti;
-		task->stack = ti;
-	}
-
-	mask = xchg(&pending_mask, 0);
-	*mask_out |= mask | nested;
-	return 0;
-}
-
-unsigned long from_irq_stack(int nested)
-{
-	struct thread_info *ti, *to;
-	unsigned long mask;
-
-	ti = current_thread_info();
-
-	pending_mask = 1;
-
-	to = ti->real_thread;
-	current->stack = to;
-	ti->real_thread = NULL;
-	*to = *ti;
-
-	mask = xchg(&pending_mask, 0);
-	return mask & ~1;
-}
-
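The race the commit message refers to is the one documented in the big
comment removed above: a second signal could arrive while the first was
still copying thread_info onto the IRQ stack, and to_irq_stack() guarded
against it with an xchg-accumulate loop on pending_mask. A self-contained
user-space sketch of that idiom, using the GCC/Clang __atomic builtins as
a stand-in for the kernel's xchg(); the function name accumulate and the
test values are mine:

    #include <stdio.h>

    static unsigned long pending_mask;  /* stand-in for the removed global */

    static unsigned long accumulate(unsigned long my_bit)
    {
            unsigned long old = my_bit;
            unsigned long mask = __atomic_exchange_n(&pending_mask, old,
                                                     __ATOMIC_SEQ_CST);

            /* A non-zero return meant another handler was mid-setup: keep
             * folding bits in until the exchange reads back exactly what
             * we last wrote, proving nothing new raced in meanwhile. */
            while (mask != 0 && mask != old) {
                    old |= mask;
                    mask = __atomic_exchange_n(&pending_mask, old,
                                               __ATOMIC_SEQ_CST);
            }
            return old;     /* union of our bit and any raced-in bits */
    }

    int main(void)
    {
            pending_mask = 1UL << 2;                /* simulate a raced-in signal */
            printf("%#lx\n", accumulate(1UL << 5)); /* prints 0x24 */
            return 0;
    }

With current no longer living at the stack base there is nothing to copy
per signal, so the whole mechanism, race handling included, can go.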
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -43,7 +43,8 @@
  * cares about its entry, so it's OK if another processor is modifying its
  * entry.
  */
-struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { NULL } };
+struct task_struct *cpu_tasks[NR_CPUS];
+EXPORT_SYMBOL(cpu_tasks);
 
 void free_stack(unsigned long stack, int order)
 {
@@ -64,7 +65,7 @@ unsigned long alloc_stack(int order, int atomic)
 
 static inline void set_current(struct task_struct *task)
 {
-	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task) { task });
+	cpu_tasks[task_thread_info(task)->cpu] = task;
 }
 
 struct task_struct *__switch_to(struct task_struct *from, struct task_struct *to)
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -22,15 +22,13 @@ static int __init start_kernel_proc(void *unused)
 {
 	block_signals_trace();
 
-	cpu_tasks[0].task = current;
-
 	start_kernel();
 	return 0;
 }
 
 extern int userspace_pid[];
 
-extern char cpu0_irqstack[];
+static char cpu0_irqstack[THREAD_SIZE] __aligned(THREAD_SIZE);
 
 int __init start_uml(void)
 {
--- a/arch/um/kernel/um_arch.c
+++ b/arch/um/kernel/um_arch.c
@@ -65,9 +65,6 @@ struct cpuinfo_um boot_cpu_data = {
 
 EXPORT_SYMBOL(boot_cpu_data);
 
-union thread_union cpu0_irqstack
-	__section(".data..init_irqstack") =
-		{ .thread_info = INIT_THREAD_INFO(init_task) };
 
 /* Changed in setup_arch, which is called in early boot */
 static char host_info[(__NEW_UTS_LEN + 1) * 5];
@@ -244,6 +241,8 @@ static struct notifier_block panic_exit_notifier = {
 
 void uml_finishsetup(void)
 {
+	cpu_tasks[0] = &init_task;
+
 	atomic_notifier_chain_register(&panic_notifier_list,
 				       &panic_exit_notifier);
 
@@ -418,7 +417,7 @@ void __init setup_arch(char **cmdline_p)
 {
 	u8 rng_seed[32];
 
-	stack_protections((unsigned long) &init_thread_info);
+	stack_protections((unsigned long) init_task.stack);
 	setup_physmem(uml_physmem, uml_reserved, physmem_size);
 	mem_total_pages(physmem_size, iomem_size);
 	uml_dtb_init();
--- a/arch/um/kernel/uml.lds.S
+++ b/arch/um/kernel/uml.lds.S
@@ -77,8 +77,6 @@ SECTIONS
   .data :
   {
     INIT_TASK_DATA(KERNEL_STACK_SIZE)
-    . = ALIGN(KERNEL_STACK_SIZE);
-    *(.data..init_irqstack)
     DATA_DATA
     *(.gnu.linkonce.d*)
     CONSTRUCTORS
--- a/arch/um/os-Linux/signal.c
+++ b/arch/um/os-Linux/signal.c
@@ -190,43 +190,8 @@ static void hard_handler(int sig, siginfo_t *si, void *p)
 {
 	ucontext_t *uc = p;
 	mcontext_t *mc = &uc->uc_mcontext;
-	unsigned long pending = 1UL << sig;
 
-	do {
-		int nested, bail;
-
-		/*
-		 * pending comes back with one bit set for each
-		 * interrupt that arrived while setting up the stack,
-		 * plus a bit for this interrupt, plus the zero bit is
-		 * set if this is a nested interrupt.
-		 * If bail is true, then we interrupted another
-		 * handler setting up the stack.  In this case, we
-		 * have to return, and the upper handler will deal
-		 * with this interrupt.
-		 */
-		bail = to_irq_stack(&pending);
-		if (bail)
-			return;
-
-		nested = pending & 1;
-		pending &= ~1;
-
-		while ((sig = ffs(pending)) != 0){
-			sig--;
-			pending &= ~(1 << sig);
-			(*handlers[sig])(sig, (struct siginfo *)si, mc);
-		}
-
-		/*
-		 * Again, pending comes back with a mask of signals
-		 * that arrived while tearing down the stack.  If this
-		 * is non-zero, we just go back, set up the stack
-		 * again, and handle the new interrupts.
-		 */
-		if (!nested)
-			pending = from_irq_stack(nested);
-	} while (pending);
+	(*handlers[sig])(sig, (struct siginfo *)si, mc);
 }
 
 void set_handler(int sig)
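hard_handler() can now dispatch in one line because, as the removed irq.c
comment explained, UML already receives signals on a dedicated stack from
the start via sigaltstack(), so there is no per-signal stack bookkeeping
left to do. A stand-alone sketch of that arrangement (illustrative only,
not UML's actual setup code; note printf() in a real handler is not
async-signal-safe and is used here purely for the demo):

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void handler(int sig)
    {
            int probe;
            /* demo only: printf is not async-signal-safe */
            printf("signal %d handled near %p (alternate stack)\n",
                   sig, (void *)&probe);
    }

    int main(void)
    {
            stack_t ss = { .ss_sp = malloc(SIGSTKSZ), .ss_size = SIGSTKSZ };
            struct sigaction sa;

            if (!ss.ss_sp || sigaltstack(&ss, NULL) != 0)
                    return 1;

            memset(&sa, 0, sizeof(sa));
            sa.sa_handler = handler;
            sa.sa_flags = SA_ONSTACK;   /* deliver on the alternate stack */
            sigaction(SIGUSR1, &sa, NULL);

            raise(SIGUSR1);
            free(ss.ss_sp);
            return 0;
    }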