Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2024-12-29 17:23:36 +00:00)
commit d090ec0df8
call_single_data_t is a size-aligned typedef of struct __call_single_data.
This alignment is desirable so that smp_call_function*() avoids bouncing
an extra cacheline when handed an unaligned csd, which would hurt
performance.

Since the removal of struct request->csd in commit 660e802c76
("blk-mq: use percpu csd to remote complete instead of per-rq csd") there
are no remaining users of smp_call_function*() with an unaligned csd.

Change every 'struct __call_single_data' function parameter to
'call_single_data_t', so that we get warnings if any new code tries to
introduce an smp_call_function*() call with an unaligned csd.
Signed-off-by: Leonardo Bras <leobras@redhat.com>
Reviewed-by: Guo Ren <guoren@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230831063129.335425-1-leobras@redhat.com
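For context, the definitions the commit message refers to look roughly like this (paraphrased from include/linux/smp_types.h and include/linux/smp.h; the exact field layout may differ across kernel versions):

struct __call_single_data {
	struct __call_single_node node;	/* llist node + CSD lock flags */
	smp_call_func_t func;		/* callback run on the target CPU */
	void *info;			/* argument handed to func */
};

/*
 * The typedef is aligned to the structure's own size, so a csd never
 * straddles a cacheline boundary and smp_call_function*() never has to
 * touch (and bounce) a second line.
 */
typedef struct __call_single_data call_single_data_t
	__aligned(sizeof(struct __call_single_data));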
kernel/up.c · 74 lines · 1.5 KiB · C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/hypervisor.h>

int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned long flags;

	if (cpu != 0)
		return -ENXIO;

	local_irq_save(flags);
	func(info);
	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);

int smp_call_function_single_async(int cpu, call_single_data_t *csd)
{
	unsigned long flags;

	local_irq_save(flags);
	csd->func(csd->info);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single_async);

/*
 * Preemption is disabled here to make sure the cond_func is called under the
 * same conditions in UP and SMP.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
			   void *info, bool wait, const struct cpumask *mask)
{
	unsigned long flags;

	preempt_disable();
	if ((!cond_func || cond_func(0, info)) && cpumask_test_cpu(0, mask)) {
		local_irq_save(flags);
		func(info);
		local_irq_restore(flags);
	}
	preempt_enable();
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);

int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
{
	int ret;

	if (cpu != 0)
		return -ENXIO;

	if (phys)
		hypervisor_pin_vcpu(0);
	ret = func(par);
	if (phys)
		hypervisor_pin_vcpu(-1);

	return ret;
}
EXPORT_SYMBOL_GPL(smp_call_on_cpu);
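Usage note: on SMP, smp_call_function_single_async() queues the csd and sends an IPI; this UP build simply runs the callback inline with interrupts disabled. A hypothetical caller sketch (names below are illustrative, not from the tree) keeps a per-CPU csd declared with the size-aligned typedef, matching the parameter type the commit above switches to:

#include <linux/smp.h>
#include <linux/percpu.h>

static void my_remote_work(void *info)
{
	/* On SMP this runs on the target CPU in IPI context;
	 * on UP it runs inline with IRQs disabled (see above). */
}

/* Declared as call_single_data_t, so it is size-aligned by construction. */
static DEFINE_PER_CPU(call_single_data_t, my_csd) =
	CSD_INIT(my_remote_work, NULL);

static void kick_cpu(int cpu)
{
	/* The csd's type matches the call_single_data_t * parameter,
	 * so it cannot straddle a cacheline. */
	smp_call_function_single_async(cpu, per_cpu_ptr(&my_csd, cpu));
}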