mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2025-01-09 06:43:09 +00:00)
workqueue: use manager lock only to protect worker_idr
worker_idr is highly bound to managers and is always/only accessed in the
manager lock context.  So we don't need pool->lock for it.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
commit 9625ab1727
parent cf416171e7
@@ -124,8 +124,7 @@ enum {
  *    cpu or grabbing pool->lock is enough for read access.  If
  *    POOL_DISASSOCIATED is set, it's identical to L.
  *
- * MG: pool->manager_mutex and pool->lock protected.  Writes require both
- *     locks.  Reads can happen under either lock.
+ * M: pool->manager_mutex protected.
  *
  * PL: wq_pool_mutex protected.
  *
@@ -164,7 +163,7 @@ struct worker_pool {
 	/* see manage_workers() for details on the two manager mutexes */
 	struct mutex		manager_arb;	/* manager arbitration */
 	struct mutex		manager_mutex;	/* manager exclusion */
-	struct idr		worker_idr;	/* MG: worker IDs and iteration */
+	struct idr		worker_idr;	/* M: worker IDs and iteration */
 
 	struct workqueue_attrs	*attrs;		/* I: worker attributes */
 	struct hlist_node	hash_node;	/* PL: unbound_pool_hash node */
@@ -340,16 +339,6 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 			   lockdep_is_held(&wq->mutex),			\
 			   "sched RCU or wq->mutex should be held")
 
-#ifdef CONFIG_LOCKDEP
-#define assert_manager_or_pool_lock(pool)				\
-	WARN_ONCE(debug_locks &&					\
-		  !lockdep_is_held(&(pool)->manager_mutex) &&		\
-		  !lockdep_is_held(&(pool)->lock),			\
-		  "pool->manager_mutex or ->lock should be held")
-#else
-#define assert_manager_or_pool_lock(pool)	do { } while (0)
-#endif
-
 #define for_each_cpu_worker_pool(pool, cpu)				\
 	for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0];		\
 	     (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
@@ -378,14 +367,14 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
  * @wi: integer used for iteration
  * @pool: worker_pool to iterate workers of
  *
- * This must be called with either @pool->manager_mutex or ->lock held.
+ * This must be called with @pool->manager_mutex.
  *
  * The if/else clause exists only for the lockdep assertion and can be
  * ignored.
  */
 #define for_each_pool_worker(worker, wi, pool)				\
 	idr_for_each_entry(&(pool)->worker_idr, (worker), (wi))		\
-		if (({ assert_manager_or_pool_lock((pool)); false; })) { } \
+		if (({ lockdep_assert_held(&pool->manager_mutex); false; })) { } \
 		else
 
 /**
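With worker_idr now written and iterated only under manager_mutex, the
hand-rolled either-lock assertion macro removed above is unnecessary: the
stock lockdep_assert_held() helper expresses the single-lock rule directly.
A minimal sketch of that idiom, with illustrative names (my_mutex, my_idr,
lookup_obj are not from this patch):

	#include <linux/idr.h>
	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(my_mutex);	/* protects my_idr */
	static DEFINE_IDR(my_idr);

	/* Callers must hold my_mutex; on lockdep-enabled builds this
	 * warns the first time the rule is violated. */
	static void *lookup_obj(int id)
	{
		lockdep_assert_held(&my_mutex);
		return idr_find(&my_idr, id);
	}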
@@ -1725,13 +1714,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	 * ID is needed to determine kthread name.  Allocate ID first
 	 * without installing the pointer.
 	 */
-	idr_preload(GFP_KERNEL);
-	spin_lock_irq(&pool->lock);
-
-	id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_NOWAIT);
-
-	spin_unlock_irq(&pool->lock);
-	idr_preload_end();
+	id = idr_alloc(&pool->worker_idr, NULL, 0, 0, GFP_KERNEL);
 	if (id < 0)
 		goto fail;
 
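This hunk is the payoff of the relaxed locking rule: because manager_mutex
is a sleeping lock, the ID can be allocated with GFP_KERNEL directly, and
the idr_preload()/GFP_NOWAIT dance that a spinlock forces is no longer
needed.  A sketch of the two patterns side by side (obj_idr, obj_lock,
obj_mutex and the function names are illustrative, not from the kernel):

	#include <linux/gfp.h>
	#include <linux/idr.h>
	#include <linux/mutex.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(obj_idr);
	static DEFINE_SPINLOCK(obj_lock);	/* variant 1: spinlock-protected */
	static DEFINE_MUTEX(obj_mutex);		/* variant 2: mutex-protected */

	/* Under a spinlock, idr_alloc() may not sleep: preload the idr's
	 * per-cpu cache while sleeping is still legal, then allocate
	 * with GFP_NOWAIT inside the critical section. */
	static int alloc_id_under_spinlock(void *obj)
	{
		int id;

		idr_preload(GFP_KERNEL);
		spin_lock(&obj_lock);
		id = idr_alloc(&obj_idr, obj, 0, 0, GFP_NOWAIT);
		spin_unlock(&obj_lock);
		idr_preload_end();
		return id;
	}

	/* Under a sleeping lock, idr_alloc() can take GFP_KERNEL
	 * directly and the preload dance disappears. */
	static int alloc_id_under_mutex(void *obj)
	{
		int id;

		mutex_lock(&obj_mutex);
		id = idr_alloc(&obj_idr, obj, 0, 0, GFP_KERNEL);
		mutex_unlock(&obj_mutex);
		return id;
	}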
@@ -1773,18 +1756,13 @@ static struct worker *create_worker(struct worker_pool *pool)
 		worker->flags |= WORKER_UNBOUND;
 
 	/* successful, commit the pointer to idr */
-	spin_lock_irq(&pool->lock);
 	idr_replace(&pool->worker_idr, worker, worker->id);
-	spin_unlock_irq(&pool->lock);
 
 	return worker;
 
 fail:
-	if (id >= 0) {
-		spin_lock_irq(&pool->lock);
+	if (id >= 0)
 		idr_remove(&pool->worker_idr, id);
-		spin_unlock_irq(&pool->lock);
-	}
 	kfree(worker);
 	return NULL;
 }
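Note that create_worker() keeps its two-phase idr use: idr_alloc() reserves
the ID with a NULL pointer (the ID is needed to name the kthread before the
worker fully exists), idr_replace() publishes the pointer only once setup
succeeds, and on failure the reserved slot is released with idr_remove().
A sketch of that reserve-then-commit pattern under a single mutex (struct
thing, thing_idr, thing_mutex, make_thing are illustrative, not from the
patch):

	#include <linux/gfp.h>
	#include <linux/idr.h>
	#include <linux/mutex.h>
	#include <linux/slab.h>

	struct thing { int id; };

	static DEFINE_MUTEX(thing_mutex);	/* protects thing_idr */
	static DEFINE_IDR(thing_idr);

	/* Hypothetical constructor that needs its own ID up front. */
	static struct thing *make_thing(int id)
	{
		struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

		if (t)
			t->id = id;
		return t;
	}

	static int create_thing(void)
	{
		struct thing *t;
		int id;

		mutex_lock(&thing_mutex);

		/* Phase 1: reserve the ID with a NULL pointer so it can
		 * be used before the object exists. */
		id = idr_alloc(&thing_idr, NULL, 0, 0, GFP_KERNEL);
		if (id < 0)
			goto out;

		t = make_thing(id);
		if (!t) {
			idr_remove(&thing_idr, id); /* release reservation */
			id = -ENOMEM;
			goto out;
		}

		/* Phase 2: commit; lookups now return the real pointer. */
		idr_replace(&thing_idr, t, id);
	out:
		mutex_unlock(&thing_mutex);
		return id;
	}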