Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  sparc64: Fix cpumask related build failure
  smp_call_function_single(): be slightly less stupid, fix
  smp_call_function_single(): be slightly less stupid
  rcu: fix bug in rcutorture system-shutdown code
commit 1181a24499
@@ -47,6 +47,10 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 	(pcibus_to_node(bus) == -1 ? \
 	 CPU_MASK_ALL : \
 	 node_to_cpumask(pcibus_to_node(bus)))
+#define cpumask_of_pcibus(bus) \
+	(pcibus_to_node(bus) == -1 ? \
+	 CPU_MASK_ALL_PTR : \
+	 cpumask_of_node(pcibus_to_node(bus)))
 
 #define SD_NODE_INIT (struct sched_domain) { \
 	.min_interval		= 8, \
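The hunk above is the sparc64 cpumask fix: it adds a cpumask_of_pcibus() macro, the pointer-returning counterpart of pcibus_to_cpumask(), apparently needed because other code had begun using the pointer-based cpumask API on this architecture. A minimal illustrative sketch of how a caller might use it; the helper below is mine, not part of the patch:

#include <linux/pci.h>
#include <linux/cpumask.h>
#include <linux/topology.h>

/* Illustrative only: pick a CPU close to a given PCI bus. */
static int pick_cpu_near_bus(struct pci_bus *bus)
{
        /* Pointer form added by this hunk; no cpumask_t is copied by value. */
        const struct cpumask *mask = cpumask_of_pcibus(bus);

        return cpumask_first(mask);
}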
@@ -24,6 +24,9 @@ struct call_single_data {
 /* total number of cpus in this system (may exceed NR_CPUS) */
 extern unsigned int total_cpus;
 
+int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
+			     int wait);
+
 #ifdef CONFIG_SMP
 
 #include <linux/preempt.h>
@@ -79,8 +82,6 @@ smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
 	return 0;
 }
 
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-			     int wait);
 void __smp_call_function_single(int cpuid, struct call_single_data *data);
 
 /*
@@ -140,14 +141,6 @@ static inline int up_smp_call_function(void (*func)(void *), void *info)
 static inline void smp_send_reschedule(int cpu) { }
 #define num_booting_cpus()		1
 #define smp_prepare_boot_cpu()		do {} while (0)
-#define smp_call_function_single(cpuid, func, info, wait) \
-({ \
-	WARN_ON(cpuid != 0);	\
-	local_irq_disable();	\
-	(func)(info);		\
-	local_irq_enable();	\
-	0;			\
-})
 #define smp_call_function_mask(mask, func, info, wait) \
 			(up_smp_call_function(func, info))
 #define smp_call_function_many(mask, func, info, wait) \
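Together, the three hunks above (presumably include/linux/smp.h) move the smp_call_function_single() prototype out of the CONFIG_SMP-only region and delete the old uniprocessor macro stub; the UP body becomes a real function in the new kernel/up.c shown at the end of this diff. A small illustrative caller, assuming kernel context; the helper names are mine:

#include <linux/kernel.h>
#include <linux/smp.h>

/* Illustrative callback: runs on the target CPU. */
static void bump_counter(void *info)
{
        int *counter = info;

        (*counter)++;
}

/*
 * Illustrative caller: with one shared prototype, UP and SMP builds
 * type-check the callback and the return value the same way, instead
 * of expanding a local macro on UP.
 */
static int bump_on_cpu0(void)
{
        int counter = 0;
        int ret;

        ret = smp_call_function_single(0, bump_counter, &counter, 1);

        return ret ? ret : counter;     /* 1 on success */
}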
@@ -40,7 +40,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
-obj-$(CONFIG_USE_GENERIC_SMP_HELPERS) += smp.o
+ifeq ($(CONFIG_USE_GENERIC_SMP_HELPERS),y)
+obj-y += smp.o
+else
+obj-y += up.o
+endif
 obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
@@ -136,28 +136,46 @@ static int stutter_pause_test = 0;
 #endif
 int rcutorture_runnable = RCUTORTURE_RUNNABLE_INIT;
 
-#define FULLSTOP_SHUTDOWN 1	/* Bail due to system shutdown/panic. */
-#define FULLSTOP_CLEANUP  2	/* Orderly shutdown. */
-static int fullstop;		/* stop generating callbacks at test end. */
-DEFINE_MUTEX(fullstop_mutex);	/* protect fullstop transitions and */
-				/* spawning of kthreads. */
+/* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
+
+#define FULLSTOP_DONTSTOP 0	/* Normal operation. */
+#define FULLSTOP_SHUTDOWN 1	/* System shutdown with rcutorture running. */
+#define FULLSTOP_RMMOD    2	/* Normal rmmod of rcutorture. */
+static int fullstop = FULLSTOP_RMMOD;
+DEFINE_MUTEX(fullstop_mutex);	/* Protect fullstop transitions and spawning */
+				/* of kthreads. */
 
 /*
- * Detect and respond to a signal-based shutdown.
+ * Detect and respond to a system shutdown.
  */
 static int
 rcutorture_shutdown_notify(struct notifier_block *unused1,
 			   unsigned long unused2, void *unused3)
 {
-	if (fullstop)
-		return NOTIFY_DONE;
 	mutex_lock(&fullstop_mutex);
-	if (!fullstop)
+	if (fullstop == FULLSTOP_DONTSTOP)
 		fullstop = FULLSTOP_SHUTDOWN;
+	else
+		printk(KERN_WARNING /* but going down anyway, so... */
+		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
 	mutex_unlock(&fullstop_mutex);
 	return NOTIFY_DONE;
 }
 
+/*
+ * Absorb kthreads into a kernel function that won't return, so that
+ * they won't ever access module text or data again.
+ */
+static void rcutorture_shutdown_absorb(char *title)
+{
+	if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+		printk(KERN_NOTICE
+		       "rcutorture thread %s parking due to system shutdown\n",
+		       title);
+		schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
+	}
+}
+
 /*
  * Allocate an element from the rcu_tortures pool.
  */
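The rcutorture hunk above replaces the old fullstop flag with an explicit three-state machine, FULLSTOP_DONTSTOP / FULLSTOP_SHUTDOWN / FULLSTOP_RMMOD, so that a system shutdown racing with rmmod can be told apart, and adds rcutorture_shutdown_absorb() to park kthreads during shutdown so they never touch module text or data again. The following stripped-down userspace model of the interlock is only a sketch, with a pthread mutex standing in for fullstop_mutex; all names other than the FULLSTOP_* constants are mine:

#include <pthread.h>
#include <stdio.h>

/* Userspace model of rcutorture's fullstop interlock; not kernel code. */
enum fullstop_state {
        FULLSTOP_DONTSTOP,      /* Test running normally. */
        FULLSTOP_SHUTDOWN,      /* System shutdown arrived while running. */
        FULLSTOP_RMMOD,         /* No test running / orderly module removal. */
};

static enum fullstop_state fullstop = FULLSTOP_RMMOD;
static pthread_mutex_t fullstop_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Shutdown notifier: only a running test may be flipped to SHUTDOWN. */
static void shutdown_notify(void)
{
        pthread_mutex_lock(&fullstop_mutex);
        if (fullstop == FULLSTOP_DONTSTOP)
                fullstop = FULLSTOP_SHUTDOWN;
        else
                fprintf(stderr, "concurrent rmmod and shutdown!\n");
        pthread_mutex_unlock(&fullstop_mutex);
}

/* Cleanup (rmmod) path: back off if a shutdown already won the race. */
static int torture_cleanup(void)
{
        pthread_mutex_lock(&fullstop_mutex);
        if (fullstop == FULLSTOP_SHUTDOWN) {
                pthread_mutex_unlock(&fullstop_mutex);
                return -1;      /* shutdown in progress; do not tear down */
        }
        fullstop = FULLSTOP_RMMOD;
        pthread_mutex_unlock(&fullstop_mutex);
        return 0;
}

int main(void)
{
        fullstop = FULLSTOP_DONTSTOP;   /* test started */
        shutdown_notify();              /* reboot notifier fires */
        printf("cleanup %s\n", torture_cleanup() ? "deferred" : "ran");
        return 0;
}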
@@ -219,13 +237,14 @@ rcu_random(struct rcu_random_state *rrsp)
 }
 
 static void
-rcu_stutter_wait(void)
+rcu_stutter_wait(char *title)
 {
-	while ((stutter_pause_test || !rcutorture_runnable) && !fullstop) {
+	while (stutter_pause_test || !rcutorture_runnable) {
 		if (rcutorture_runnable)
 			schedule_timeout_interruptible(1);
 		else
 			schedule_timeout_interruptible(round_jiffies_relative(HZ));
+		rcutorture_shutdown_absorb(title);
 	}
 }
 
@@ -287,7 +306,7 @@ rcu_torture_cb(struct rcu_head *p)
 	int i;
 	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);
 
-	if (fullstop) {
+	if (fullstop != FULLSTOP_DONTSTOP) {
 		/* Test is ending, just drop callbacks on the floor. */
 		/* The next initialization will pick up the pieces. */
 		return;
@@ -619,10 +638,11 @@ rcu_torture_writer(void *arg)
 		}
 		rcu_torture_current_version++;
 		oldbatch = cur_ops->completed();
-		rcu_stutter_wait();
-	} while (!kthread_should_stop() && !fullstop);
+		rcu_stutter_wait("rcu_torture_writer");
+	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 	VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+	rcutorture_shutdown_absorb("rcu_torture_writer");
+	while (!kthread_should_stop())
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -643,11 +663,12 @@ rcu_torture_fakewriter(void *arg)
 		schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
 		udelay(rcu_random(&rand) & 0x3ff);
 		cur_ops->sync();
-		rcu_stutter_wait();
-	} while (!kthread_should_stop() && !fullstop);
+		rcu_stutter_wait("rcu_torture_fakewriter");
+	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 
 	VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+	rcutorture_shutdown_absorb("rcu_torture_fakewriter");
+	while (!kthread_should_stop())
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -752,12 +773,13 @@ rcu_torture_reader(void *arg)
 		preempt_enable();
 		cur_ops->readunlock(idx);
 		schedule();
-		rcu_stutter_wait();
-	} while (!kthread_should_stop() && !fullstop);
+		rcu_stutter_wait("rcu_torture_reader");
+	} while (!kthread_should_stop() && fullstop == FULLSTOP_DONTSTOP);
 	VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
+	rcutorture_shutdown_absorb("rcu_torture_reader");
 	if (irqreader && cur_ops->irqcapable)
 		del_timer_sync(&t);
-	while (!kthread_should_stop() && fullstop != FULLSTOP_SHUTDOWN)
+	while (!kthread_should_stop())
 		schedule_timeout_uninterruptible(1);
 	return 0;
 }
@@ -854,7 +876,8 @@ rcu_torture_stats(void *arg)
 	do {
 		schedule_timeout_interruptible(stat_interval * HZ);
 		rcu_torture_stats_print();
-	} while (!kthread_should_stop() && !fullstop);
+		rcutorture_shutdown_absorb("rcu_torture_stats");
+	} while (!kthread_should_stop());
 	VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
 	return 0;
 }
@@ -866,52 +889,49 @@ static int rcu_idle_cpu;	/* Force all torture tasks off this CPU */
  */
 static void rcu_torture_shuffle_tasks(void)
 {
-	cpumask_var_t tmp_mask;
+	cpumask_t tmp_mask;
 	int i;
 
-	if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL))
-		BUG();
-
-	cpumask_setall(tmp_mask);
+	cpus_setall(tmp_mask);
 	get_online_cpus();
 
 	/* No point in shuffling if there is only one online CPU (ex: UP) */
-	if (num_online_cpus() == 1)
-		goto out;
+	if (num_online_cpus() == 1) {
+		put_online_cpus();
+		return;
+	}
 
 	if (rcu_idle_cpu != -1)
-		cpumask_clear_cpu(rcu_idle_cpu, tmp_mask);
+		cpu_clear(rcu_idle_cpu, tmp_mask);
 
-	set_cpus_allowed_ptr(current, tmp_mask);
+	set_cpus_allowed_ptr(current, &tmp_mask);
 
 	if (reader_tasks) {
 		for (i = 0; i < nrealreaders; i++)
 			if (reader_tasks[i])
 				set_cpus_allowed_ptr(reader_tasks[i],
-						     tmp_mask);
+						     &tmp_mask);
 	}
 
 	if (fakewriter_tasks) {
 		for (i = 0; i < nfakewriters; i++)
 			if (fakewriter_tasks[i])
 				set_cpus_allowed_ptr(fakewriter_tasks[i],
-						     tmp_mask);
+						     &tmp_mask);
 	}
 
 	if (writer_task)
-		set_cpus_allowed_ptr(writer_task, tmp_mask);
+		set_cpus_allowed_ptr(writer_task, &tmp_mask);
 
 	if (stats_task)
-		set_cpus_allowed_ptr(stats_task, tmp_mask);
+		set_cpus_allowed_ptr(stats_task, &tmp_mask);
 
 	if (rcu_idle_cpu == -1)
 		rcu_idle_cpu = num_online_cpus() - 1;
 	else
 		rcu_idle_cpu--;
 
-out:
 	put_online_cpus();
-	free_cpumask_var(tmp_mask);
 }
 
 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
@@ -925,7 +945,8 @@ rcu_torture_shuffle(void *arg)
 	do {
 		schedule_timeout_interruptible(shuffle_interval * HZ);
 		rcu_torture_shuffle_tasks();
-	} while (!kthread_should_stop() && !fullstop);
+		rcutorture_shutdown_absorb("rcu_torture_shuffle");
+	} while (!kthread_should_stop());
 	VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
 	return 0;
 }
@@ -940,10 +961,11 @@ rcu_torture_stutter(void *arg)
 	do {
 		schedule_timeout_interruptible(stutter * HZ);
 		stutter_pause_test = 1;
-		if (!kthread_should_stop() && !fullstop)
+		if (!kthread_should_stop())
 			schedule_timeout_interruptible(stutter * HZ);
 		stutter_pause_test = 0;
-	} while (!kthread_should_stop() && !fullstop);
+		rcutorture_shutdown_absorb("rcu_torture_stutter");
+	} while (!kthread_should_stop());
 	VERBOSE_PRINTK_STRING("rcu_torture_stutter task stopping");
 	return 0;
 }
@@ -970,15 +992,16 @@ rcu_torture_cleanup(void)
 	int i;
 
 	mutex_lock(&fullstop_mutex);
-	if (!fullstop) {
-		/* If being signaled, let it happen, then exit. */
+	if (fullstop == FULLSTOP_SHUTDOWN) {
+		printk(KERN_WARNING /* but going down anyway, so... */
+		       "Concurrent 'rmmod rcutorture' and shutdown illegal!\n");
 		mutex_unlock(&fullstop_mutex);
-		schedule_timeout_interruptible(10 * HZ);
+		schedule_timeout_uninterruptible(10);
 		if (cur_ops->cb_barrier != NULL)
 			cur_ops->cb_barrier();
 		return;
 	}
-	fullstop = FULLSTOP_CLEANUP;
+	fullstop = FULLSTOP_RMMOD;
 	mutex_unlock(&fullstop_mutex);
 	unregister_reboot_notifier(&rcutorture_nb);
 	if (stutter_task) {
@@ -1078,7 +1101,7 @@ rcu_torture_init(void)
 	else
 		nrealreaders = 2 * num_online_cpus();
 	rcu_torture_print_module_parms("Start of test");
-	fullstop = 0;
+	fullstop = FULLSTOP_DONTSTOP;
 
 	/* Set up the freelist. */
 
kernel/up.c (new file, 20 lines)
@@ -0,0 +1,20 @@
+/*
+ * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+			     int wait)
+{
+	WARN_ON(cpu != 0);
+
+	local_irq_disable();
+	(func)(info);
+	local_irq_enable();
+
+	return 0;
+}
+EXPORT_SYMBOL(smp_call_function_single);
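kernel/up.c gives uniprocessor kernels a real, exported smp_call_function_single() that, like the SMP local-CPU case, runs the callback with interrupts disabled and warns if a nonzero CPU is requested. A hedged sketch of a throwaway module exercising it; the module and all names in it are illustrative, not from the patch:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/irqflags.h>

/* Records whether the callback ran with interrupts disabled. */
static void check_irqs_off(void *info)
{
        *(int *)info = irqs_disabled();
}

static int __init up_demo_init(void)
{
        int irqs_off = 0;
        int ret;

        /* On UP this lands in kernel/up.c; with generic SMP helpers, kernel/smp.c. */
        ret = smp_call_function_single(0, check_irqs_off, &irqs_off, 1);
        pr_info("up_demo: ret=%d irqs_disabled_in_callback=%d\n", ret, irqs_off);

        return 0;
}

static void __exit up_demo_exit(void)
{
}

module_init(up_demo_init);
module_exit(up_demo_exit);
MODULE_LICENSE("GPL");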