Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
workqueue: Avoid premature init of wq->node_nr_active[].max
System workqueues are allocated early during boot from workqueue_init_early(). While allocating unbound workqueues, wq_update_node_max_active() is invoked from apply_workqueue_attrs() and accesses NUMA topology to initialize wq->node_nr_active[].max. However, topology information may not be set up at this point. wq_update_node_max_active() is explicitly invoked from workqueue_init_topology() later when topology information is known to be available.

This doesn't seem to crash anything but it's doing useless work with dubious data. Let's skip the premature and duplicate node_max_active updates by initializing the field to WQ_DFL_MIN_ACTIVE on allocation and making wq_update_node_max_active() a noop until workqueue_init_topology().

Signed-off-by: Tejun Heo <tj@kernel.org>
---
 kernel/workqueue.c | 8 ++++++++
 1 file changed, 8 insertions(+)
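The fix follows a common boot-ordering pattern: install a conservative static default at allocation time and latch a flag once the authoritative data exists, so earlier callers become no-ops. A minimal userspace model of that pattern (names like topo_initialized, node_limit, and DFL_MIN_ACTIVE are illustrative stand-ins, not the kernel's symbols):

#include <stdbool.h>
#include <stdio.h>

#define DFL_MIN_ACTIVE 8        /* stand-in for WQ_DFL_MIN_ACTIVE */

static bool topo_initialized;   /* latched once topology is known */

struct node_limit {
	int max;
};

/* At allocation, start from the conservative default. */
static void node_limit_init(struct node_limit *nl)
{
	nl->max = DFL_MIN_ACTIVE;
}

/* Recompute from topology; a no-op until topology is available. */
static void update_node_max(struct node_limit *nl, int cpus_on_node)
{
	if (!topo_initialized)
		return;         /* early boot: keep the default */
	nl->max = cpus_on_node; /* placeholder for the real calculation */
}

int main(void)
{
	struct node_limit nl;

	node_limit_init(&nl);
	update_node_max(&nl, 16);       /* premature call: ignored */
	printf("before topology: max=%d\n", nl.max);

	topo_initialized = true;        /* workqueue_init_topology() analog */
	update_node_max(&nl, 16);       /* now takes effect */
	printf("after topology:  max=%d\n", nl.max);
	return 0;
}

The key property is that the premature call is harmless rather than wrong: the field already holds a usable default, and the authoritative recomputation happens exactly once, after the flag is latched.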
commit c5f8cd6c62
parent 15930da42f
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9221a4c57ae1..a65081ec6780 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -386,6 +386,8 @@ static const char *wq_affn_names[WQ_AFFN_NR_TYPES] = {
 	[WQ_AFFN_SYSTEM]		= "system",
 };
 
+static bool wq_topo_initialized __read_mostly = false;
+
 /*
  * Per-cpu work items which run for longer than the following threshold are
  * automatically considered CPU intensive and excluded from concurrency
@@ -1510,6 +1512,9 @@ static void wq_update_node_max_active(struct workqueue_struct *wq, int off_cpu)
 
 	lockdep_assert_held(&wq->mutex);
 
+	if (!wq_topo_initialized)
+		return;
+
 	if (off_cpu >= 0 && !cpumask_test_cpu(off_cpu, effective))
 		off_cpu = -1;
 
@@ -4356,6 +4361,7 @@ static void free_node_nr_active(struct wq_node_nr_active **nna_ar)
 
 static void init_node_nr_active(struct wq_node_nr_active *nna)
 {
+	nna->max = WQ_DFL_MIN_ACTIVE;
 	atomic_set(&nna->nr, 0);
 	raw_spin_lock_init(&nna->lock);
 	INIT_LIST_HEAD(&nna->pending_pwqs);
@@ -7400,6 +7406,8 @@ void __init workqueue_init_topology(void)
 	init_pod_type(&wq_pod_types[WQ_AFFN_CACHE], cpus_share_cache);
 	init_pod_type(&wq_pod_types[WQ_AFFN_NUMA], cpus_share_numa);
 
+	wq_topo_initialized = true;
+
 	mutex_lock(&wq_pool_mutex);
 
 	/*
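For context on what the deferred update eventually computes: in this series, wq_update_node_max_active() splits the workqueue's max_active across NUMA nodes roughly in proportion to each node's share of the effective CPUs, clamped to [min_active, max_active], which is why running it against unset topology data is useless work. A hedged userspace model of that proportional split (node CPU counts are hardcoded for illustration; the kernel derives them from the effective cpumask, and the exact in-kernel formula may differ):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int clamp_int(int val, int lo, int hi)
{
	return val < lo ? lo : (val > hi ? hi : val);
}

int main(void)
{
	/* Illustrative topology: CPU counts per NUMA node. */
	int node_cpus[] = { 4, 12 };
	int nr_nodes = 2, total_cpus = 16;
	int max_active = 256, min_active = 8;  /* min_active mirrors WQ_DFL_MIN_ACTIVE */

	for (int node = 0; node < nr_nodes; node++) {
		/* Each node gets its proportional share, bounded below by min_active. */
		int max = clamp_int(DIV_ROUND_UP(max_active * node_cpus[node], total_cpus),
				    min_active, max_active);
		printf("node %d: %d CPUs -> max %d\n", node, node_cpus[node], max);
	}
	return 0;
}

Until workqueue_init_topology() latches wq_topo_initialized, every node simply keeps the WQ_DFL_MIN_ACTIVE default installed by init_node_nr_active(), and the real per-node distribution runs once, with valid topology, when workqueue_init_topology() invokes wq_update_node_max_active().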