Workqueue fixes for v5.19-rc1
Tetsuo's patch to trigger build warnings if system-wide wq's are flushed,
along with a TP type update and a trivial comment update.

-----BEGIN PGP SIGNATURE-----

iIQEABYIACwWIQTfIjM1kS57o3GsC/uxYfJx3gVYGQUCYqUyqQ4cdGpAa2VybmVs
Lm9yZwAKCRCxYfJx3gVYGQPtAQCQZuNFoWhCtdpjW/MWuGdY1pGGPMVl+60xwvew
Ad8gegD/eoAsXP1XAzJ9Z1BPqr/IxncfOgGGDGHbR1Ll39qLlwE=
=i3Sx
-----END PGP SIGNATURE-----

Merge tag 'wq-for-5.19-rc1-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue fixes from Tejun Heo:
 "Tetsuo's patch to trigger build warnings if system-wide wq's are
  flushed along with a TP type update and trivial comment update"

* tag 'wq-for-5.19-rc1-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: Switch to new kerneldoc syntax for named variable macro argument
  workqueue: Fix type of cpu in trace event
  workqueue: Wrap flush_workqueue() using a macro
commit b0cb8db396
include/linux/workqueue.h

@@ -406,7 +406,7 @@ alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @fmt: printf format for the name of the workqueue
  * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
- * @args...: args for @fmt
+ * @args: args for @fmt
  *
  * Allocate an ordered workqueue. An ordered workqueue executes at
  * most one work item at any given time in the queued order. They are
@@ -445,7 +445,7 @@ extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct delayed_work *dwork, unsigned long delay);
 extern bool queue_rcu_work(struct workqueue_struct *wq, struct rcu_work *rwork);
 
-extern void flush_workqueue(struct workqueue_struct *wq);
+extern void __flush_workqueue(struct workqueue_struct *wq);
 extern void drain_workqueue(struct workqueue_struct *wq);
 
 extern int schedule_on_each_cpu(work_func_t func);
@@ -563,15 +563,23 @@ static inline bool schedule_work(struct work_struct *work)
 	return queue_work(system_wq, work);
 }
 
+/*
+ * Detect attempt to flush system-wide workqueues at compile time when possible.
+ *
+ * See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
+ * for reasons and steps for converting system-wide workqueues into local workqueues.
+ */
+extern void __warn_flushing_systemwide_wq(void)
+	__compiletime_warning("Please avoid flushing system-wide workqueues.");
+
 /**
  * flush_scheduled_work - ensure that any scheduled work has run to completion.
  *
  * Forces execution of the kernel-global workqueue and blocks until its
  * completion.
  *
- * Think twice before calling this function! It's very easy to get into
- * trouble if you don't take great care. Either of the following situations
- * will lead to deadlock:
+ * It's very easy to get into trouble if you don't take great care.
+ * Either of the following situations will lead to deadlock:
  *
  * One of the work items currently on the workqueue needs to acquire
  * a lock held by your code or its caller.
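The deadlock the comment above warns about is easy to hit in practice. Below is a minimal, hypothetical driver-style sketch (the names dev_lock, dev_work_fn, dev_work and dev_teardown are invented for illustration): the work item needs a mutex that the caller of flush_scheduled_work() already holds, so neither side can make progress.

```c
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(dev_lock);

/* Work item queued elsewhere on the system workqueue via schedule_work(&dev_work). */
static void dev_work_fn(struct work_struct *work)
{
	mutex_lock(&dev_lock);		/* needs dev_lock to run */
	/* ... touch device state ... */
	mutex_unlock(&dev_lock);
}
static DECLARE_WORK(dev_work, dev_work_fn);

static void dev_teardown(void)
{
	mutex_lock(&dev_lock);
	/*
	 * Deadlock: flush_scheduled_work() waits for dev_work_fn() to
	 * finish, but dev_work_fn() is blocked on dev_lock, which this
	 * caller holds.
	 */
	flush_scheduled_work();
	mutex_unlock(&dev_lock);
}
```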
@@ -586,11 +594,51 @@ static inline bool schedule_work(struct work_struct *work)
  * need to know that a particular work item isn't queued and isn't running.
  * In such cases you should use cancel_delayed_work_sync() or
  * cancel_work_sync() instead.
+ *
+ * Please stop calling this function! A conversion to stop flushing system-wide
+ * workqueues is in progress. This function will be removed after all in-tree
+ * users stopped calling this function.
  */
-static inline void flush_scheduled_work(void)
-{
-	flush_workqueue(system_wq);
-}
+/*
+ * The background of commit 771c035372a036f8 ("deprecate the
+ * '__deprecated' attribute warnings entirely and for good") is that,
+ * since Linus builds all modules between every single pull he does,
+ * the standard kernel build needs to be _clean_ in order to be able to
+ * notice when new problems happen. Therefore, don't emit warning while
+ * there are in-tree users.
+ */
+#define flush_scheduled_work()						\
+({									\
+	if (0)								\
+		__warn_flushing_systemwide_wq();			\
+	__flush_workqueue(system_wq);					\
+})
+
+/*
+ * Although there is no longer in-tree caller, for now just emit warning
+ * in order to give out-of-tree callers time to update.
+ */
+#define flush_workqueue(wq)						\
+({									\
+	struct workqueue_struct *_wq = (wq);				\
+									\
+	if ((__builtin_constant_p(_wq == system_wq) &&			\
+	     _wq == system_wq) ||					\
+	    (__builtin_constant_p(_wq == system_highpri_wq) &&		\
+	     _wq == system_highpri_wq) ||				\
+	    (__builtin_constant_p(_wq == system_long_wq) &&		\
+	     _wq == system_long_wq) ||					\
+	    (__builtin_constant_p(_wq == system_unbound_wq) &&		\
+	     _wq == system_unbound_wq) ||				\
+	    (__builtin_constant_p(_wq == system_freezable_wq) &&	\
+	     _wq == system_freezable_wq) ||				\
+	    (__builtin_constant_p(_wq == system_power_efficient_wq) &&	\
+	     _wq == system_power_efficient_wq) ||			\
+	    (__builtin_constant_p(_wq == system_freezable_power_efficient_wq) && \
+	     _wq == system_freezable_power_efficient_wq))		\
+		__warn_flushing_systemwide_wq();			\
+	__flush_workqueue(_wq);						\
+})
 
 /**
  * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
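The detection in the flush_workqueue() wrapper above combines two compiler features: a function declared with a warning attribute only produces its diagnostic if a call to it survives dead-code elimination, and __builtin_constant_p() lets the branch fold to true only when the caller passes a system-wide workqueue as a compile-time-visible constant. The following is a minimal standalone sketch of the same pattern outside the kernel; the names my_flush, global_q, warn_flushing_global_q and do_flush are hypothetical, and it assumes GCC (or a Clang with __attribute__((warning(...)))) and -O2 so that dead calls are actually eliminated.

```c
/* Build with: gcc -O2 -Wall flush_warn_demo.c */

struct queue { int pending; };

static struct queue global_queue_storage;
struct queue *global_q = &global_queue_storage;	/* stand-in for system_wq */

/*
 * Counterpart of __compiletime_warning(): the message is emitted only if a
 * call to this function is still present after optimization.  The kernel
 * additionally provides a no-op definition in kernel/workqueue.c so that any
 * surviving call still links.
 */
void warn_flushing_global_q(void)
	__attribute__((warning("Please avoid flushing the global queue.")));

/* Stand-in for __flush_workqueue(): the real flushing primitive. */
static void do_flush(struct queue *q) { q->pending = 0; }

/* Warn when the argument is provably the global queue at compile time. */
#define my_flush(q)						\
({								\
	struct queue *_q = (q);					\
								\
	if (__builtin_constant_p(_q == global_q) &&		\
	    _q == global_q)					\
		warn_flushing_global_q();			\
	do_flush(_q);						\
})

int main(void)
{
	struct queue local = { .pending = 1 };

	my_flush(&local);	/* local queue: branch is dead, no warning */
	/*
	 * my_flush(global_q);
	 *
	 * With -O2 the compiler can fold the self-comparison, the call to
	 * warn_flushing_global_q() survives, and the build warns (and,
	 * without a no-op definition, would fail to link).
	 */
	return 0;
}
```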
include/trace/events/workqueue.h

@@ -22,7 +22,7 @@ struct pool_workqueue;
  */
 TRACE_EVENT(workqueue_queue_work,
 
-	TP_PROTO(unsigned int req_cpu, struct pool_workqueue *pwq,
+	TP_PROTO(int req_cpu, struct pool_workqueue *pwq,
 		 struct work_struct *work),
 
 	TP_ARGS(req_cpu, pwq, work),
|
|||||||
__field( void *, work )
|
__field( void *, work )
|
||||||
__field( void *, function)
|
__field( void *, function)
|
||||||
__string( workqueue, pwq->wq->name)
|
__string( workqueue, pwq->wq->name)
|
||||||
__field( unsigned int, req_cpu )
|
__field( int, req_cpu )
|
||||||
__field( unsigned int, cpu )
|
__field( int, cpu )
|
||||||
),
|
),
|
||||||
|
|
||||||
TP_fast_assign(
|
TP_fast_assign(
|
||||||
@ -43,7 +43,7 @@ TRACE_EVENT(workqueue_queue_work,
|
|||||||
__entry->cpu = pwq->pool->cpu;
|
__entry->cpu = pwq->pool->cpu;
|
||||||
),
|
),
|
||||||
|
|
||||||
TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%u cpu=%u",
|
TP_printk("work struct=%p function=%ps workqueue=%s req_cpu=%d cpu=%d",
|
||||||
__entry->work, __entry->function, __get_str(workqueue),
|
__entry->work, __entry->function, __get_str(workqueue),
|
||||||
__entry->req_cpu, __entry->cpu)
|
__entry->req_cpu, __entry->cpu)
|
||||||
);
|
);
|
||||||
|
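The unsigned int to int change matters because both fields can hold negative sentinel values (pwq->pool->cpu, for example, can be -1 for unbound pools), and printing such a value with %u turns it into a huge positive number in the trace output. A tiny plain-C illustration of the formatting difference, outside the kernel, with a hypothetical cpu variable:

```c
#include <stdio.h>

int main(void)
{
	int cpu = -1;	/* e.g. "not bound to a specific CPU" sentinel */

	/* What the old %u format showed in the trace output: */
	printf("req_cpu=%u\n", (unsigned int)cpu);	/* req_cpu=4294967295 */

	/* What the fixed %d format shows: */
	printf("req_cpu=%d\n", cpu);			/* req_cpu=-1 */
	return 0;
}
```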
kernel/workqueue.c

@@ -2788,13 +2788,13 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
 }
 
 /**
- * flush_workqueue - ensure that any scheduled work has run to completion.
+ * __flush_workqueue - ensure that any scheduled work has run to completion.
  * @wq: workqueue to flush
  *
  * This function sleeps until all work items which were queued on entry
  * have finished execution, but it is not livelocked by new incoming ones.
  */
-void flush_workqueue(struct workqueue_struct *wq)
+void __flush_workqueue(struct workqueue_struct *wq)
 {
 	struct wq_flusher this_flusher = {
 		.list = LIST_HEAD_INIT(this_flusher.list),
@@ -2943,7 +2943,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 out_unlock:
 	mutex_unlock(&wq->mutex);
 }
-EXPORT_SYMBOL(flush_workqueue);
+EXPORT_SYMBOL(__flush_workqueue);
 
 /**
  * drain_workqueue - drain a workqueue
@@ -2971,7 +2971,7 @@ void drain_workqueue(struct workqueue_struct *wq)
 	wq->flags |= __WQ_DRAINING;
 	mutex_unlock(&wq->mutex);
 reflush:
-	flush_workqueue(wq);
+	__flush_workqueue(wq);
 
 	mutex_lock(&wq->mutex);
 
@@ -6111,3 +6111,11 @@ void __init workqueue_init(void)
 	wq_online = true;
 	wq_watchdog_init();
 }
+
+/*
+ * Despite the naming, this is a no-op function which is here only for avoiding
+ * link error. Since compile-time warning may fail to catch, we will need to
+ * emit run-time warning from __flush_workqueue().
+ */
+void __warn_flushing_systemwide_wq(void) { }
+EXPORT_SYMBOL(__warn_flushing_systemwide_wq);
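For callers that still flush a system-wide workqueue, the conversion the comments above refer to (see the lkml link in the workqueue.h hunk) is to queue the work on a driver-local workqueue and flush that instead, so the flush only waits for the driver's own work items. A hedged sketch of the pattern, with invented names (mydrv_wq, mydrv_work_fn, mydrv_init, mydrv_exit):

```c
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *mydrv_wq;

static void mydrv_work_fn(struct work_struct *work)
{
	/* ... per-device processing ... */
}
static DECLARE_WORK(mydrv_work, mydrv_work_fn);

static int mydrv_init(void)
{
	mydrv_wq = alloc_workqueue("mydrv", 0, 0);
	if (!mydrv_wq)
		return -ENOMEM;

	queue_work(mydrv_wq, &mydrv_work);	/* instead of schedule_work() */
	return 0;
}

static void mydrv_exit(void)
{
	/* Waits only for this driver's work items, not the whole system. */
	flush_workqueue(mydrv_wq);
	destroy_workqueue(mydrv_wq);
}
```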