workqueue: Factor out work_grab_pending() from __cancel_work_sync()

The planned disable/enable support will need the same logic. Let's factor it
out. No functional changes.

v2: Update function comment to include @irq_flags.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <jiangshanlai@gmail.com>
commit 978b8409ea
parent e9a8e01f9b
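
For orientation before the diff: the factored-out helper gives any future caller a single entry point for claiming a work item's PENDING bit. Below is a rough sketch of the sort of caller the planned disable/enable support implies; work_disable_sketch() and the WORK_CANCEL_DELAYED choice are illustrative assumptions, not anything this commit adds.

/* Illustrative sketch only - not from this commit or the later series. */
static bool work_disable_sketch(struct work_struct *work)
{
	unsigned long irq_flags;
	bool pending;

	/* claims PENDING; sleeps if another task is mid-cancel on @work */
	pending = work_grab_pending(work, WORK_CANCEL_DELAYED, &irq_flags);
	/* ... a real implementation would record the disabled state here ... */
	local_irq_restore(irq_flags);	/* as the helper's comment requires */
	return pending;
}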
@@ -484,6 +484,12 @@ static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
 /* I: attributes used when instantiating ordered pools on demand */
 static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
 
+/*
+ * Used to synchronize multiple cancel_sync attempts on the same work item. See
+ * work_grab_pending() and __cancel_work_sync().
+ */
+static DECLARE_WAIT_QUEUE_HEAD(wq_cancel_waitq);
+
 /*
  * I: kthread_worker to release pwq's. pwq release needs to be bounced to a
  * process context while holding a pool lock. Bounce to a dedicated kthread
@@ -2147,6 +2153,75 @@ static int try_to_grab_pending(struct work_struct *work, u32 cflags,
 	return -EAGAIN;
 }
 
+struct cwt_wait {
+	wait_queue_entry_t	wait;
+	struct work_struct	*work;
+};
+
+static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
+{
+	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
+
+	if (cwait->work != key)
+		return 0;
+	return autoremove_wake_function(wait, mode, sync, key);
+}
+
+/**
+ * work_grab_pending - steal work item from worklist and disable irq
+ * @work: work item to steal
+ * @cflags: %WORK_CANCEL_ flags
+ * @irq_flags: place to store IRQ state
+ *
+ * Grab PENDING bit of @work. @work can be in any stable state - idle, on timer
+ * or on worklist.
+ *
+ * Must be called in process context. IRQ is disabled on return with IRQ state
+ * stored in *@irq_flags. The caller is responsible for re-enabling it using
+ * local_irq_restore().
+ *
+ * Returns %true if @work was pending. %false if idle.
+ */
+static bool work_grab_pending(struct work_struct *work, u32 cflags,
+			      unsigned long *irq_flags)
+{
+	struct cwt_wait cwait;
+	int ret;
+
+	might_sleep();
+repeat:
+	ret = try_to_grab_pending(work, cflags, irq_flags);
+	if (likely(ret >= 0))
+		return ret;
+	if (ret != -ENOENT)
+		goto repeat;
+
+	/*
+	 * Someone is already canceling. Wait for it to finish. flush_work()
+	 * doesn't work for PREEMPT_NONE because we may get woken up between
+	 * @work's completion and the other canceling task resuming and clearing
+	 * CANCELING - flush_work() will return false immediately as @work is no
+	 * longer busy, try_to_grab_pending() will return -ENOENT as @work is
+	 * still being canceled and the other canceling task won't be able to
+	 * clear CANCELING as we're hogging the CPU.
+	 *
+	 * Let's wait for completion using a waitqueue. As this may lead to the
+	 * thundering herd problem, use a custom wake function which matches
+	 * @work along with exclusive wait and wakeup.
+	 */
+	init_wait(&cwait.wait);
+	cwait.wait.func = cwt_wakefn;
+	cwait.work = work;
+
+	prepare_to_wait_exclusive(&wq_cancel_waitq, &cwait.wait,
+				  TASK_UNINTERRUPTIBLE);
+	if (work_is_canceling(work))
+		schedule();
+	finish_wait(&wq_cancel_waitq, &cwait.wait);
+
+	goto repeat;
+}
+
 /**
  * insert_work - insert a work into a pool
  * @pwq: pwq @work belongs to
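A note on the mechanism just added: cwt_wakefn() uses container_of() to step from the wait_queue_entry_t embedded in struct cwt_wait back to the containing structure, which is how each waiter carries its own @work key. A minimal, standalone userspace restatement of that idiom follows; all names are invented for the demo.

#include <stddef.h>
#include <stdio.h>

/* userspace restatement of the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* stand-ins for wait_queue_entry_t and struct work_struct */
struct wait_entry { int flags; };

struct cwt_wait_demo {
	struct wait_entry wait;		/* embedded, like cwait.wait */
	const char *work;		/* the per-waiter key */
};

int main(void)
{
	struct cwt_wait_demo cwait = { .work = "my work item" };
	/* the waitqueue only ever sees the embedded entry... */
	struct wait_entry *entry = &cwait.wait;
	/* ...and the wake function recovers the container from it */
	struct cwt_wait_demo *back =
		container_of(entry, struct cwt_wait_demo, wait);

	printf("recovered key: %s\n", back->work);
	return 0;
}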
@@ -4125,60 +4200,13 @@ static bool __cancel_work(struct work_struct *work, u32 cflags)
 	return ret;
 }
 
-struct cwt_wait {
-	wait_queue_entry_t	wait;
-	struct work_struct	*work;
-};
-
-static int cwt_wakefn(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
-{
-	struct cwt_wait *cwait = container_of(wait, struct cwt_wait, wait);
-
-	if (cwait->work != key)
-		return 0;
-	return autoremove_wake_function(wait, mode, sync, key);
-}
-
 static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
 {
-	static DECLARE_WAIT_QUEUE_HEAD(cancel_waitq);
 	unsigned long irq_flags;
-	int ret;
+	bool ret;
 
-	do {
-		ret = try_to_grab_pending(work, cflags, &irq_flags);
-		/*
-		 * If someone else is already canceling, wait for it to
-		 * finish. flush_work() doesn't work for PREEMPT_NONE
-		 * because we may get scheduled between @work's completion
-		 * and the other canceling task resuming and clearing
-		 * CANCELING - flush_work() will return false immediately
-		 * as @work is no longer busy, try_to_grab_pending() will
-		 * return -ENOENT as @work is still being canceled and the
-		 * other canceling task won't be able to clear CANCELING as
-		 * we're hogging the CPU.
-		 *
-		 * Let's wait for completion using a waitqueue. As this
-		 * may lead to the thundering herd problem, use a custom
-		 * wake function which matches @work along with exclusive
-		 * wait and wakeup.
-		 */
-		if (unlikely(ret == -ENOENT)) {
-			struct cwt_wait cwait;
-
-			init_wait(&cwait.wait);
-			cwait.wait.func = cwt_wakefn;
-			cwait.work = work;
-
-			prepare_to_wait_exclusive(&cancel_waitq, &cwait.wait,
-						  TASK_UNINTERRUPTIBLE);
-			if (work_is_canceling(work))
-				schedule();
-			finish_wait(&cancel_waitq, &cwait.wait);
-		}
-	} while (unlikely(ret < 0));
-
-	/* tell other tasks trying to grab @work to back off */
+	/* claim @work and tell other tasks trying to grab @work to back off */
+	ret = work_grab_pending(work, cflags, &irq_flags);
 	mark_work_canceling(work);
 	local_irq_restore(irq_flags);
 
@@ -4197,8 +4225,8 @@ static bool __cancel_work_sync(struct work_struct *work, u32 cflags)
 	 * visible there.
 	 */
 	smp_mb();
-	if (waitqueue_active(&cancel_waitq))
-		__wake_up(&cancel_waitq, TASK_NORMAL, 1, work);
+	if (waitqueue_active(&wq_cancel_waitq))
+		__wake_up(&wq_cancel_waitq, TASK_NORMAL, 1, work);
 
 	return ret;
 }
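
For readers unfamiliar with the keyed, exclusive wakeup this patch preserves, a rough userspace analogue follows. It is a sketch with invented names, not the kernel API: waiters sit on one shared list tagged with the item they wait for, and the waker wakes only the first match, mirroring cwt_wakefn()'s key filter plus the exclusive wait/wakeup that avoids a thundering herd.

#include <pthread.h>
#include <stdio.h>

struct waiter {
	const void *key;		/* stands in for cwait.work */
	int woken;
	pthread_cond_t cond;
	struct waiter *next;
};

static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
static struct waiter *q_head;
static int q_count;			/* demo-only, see main() */

static void wait_for_key(const void *key)
{
	struct waiter w = { .key = key };

	pthread_cond_init(&w.cond, NULL);
	pthread_mutex_lock(&q_lock);
	w.next = q_head;		/* enqueue: prepare_to_wait_exclusive() */
	q_head = &w;
	q_count++;
	while (!w.woken)
		pthread_cond_wait(&w.cond, &q_lock);
	for (struct waiter **pp = &q_head; *pp; pp = &(*pp)->next)
		if (*pp == &w) {	/* dequeue: finish_wait() */
			*pp = w.next;
			break;
		}
	pthread_mutex_unlock(&q_lock);
	pthread_cond_destroy(&w.cond);
}

static void wake_one(const void *key)
{
	pthread_mutex_lock(&q_lock);
	for (struct waiter *w = q_head; w; w = w->next)
		if (w->key == key) {	/* cwt_wakefn()'s filter */
			w->woken = 1;
			pthread_cond_signal(&w->cond);
			break;		/* exclusive: wake at most one */
		}
	pthread_mutex_unlock(&q_lock);
}

static void *waiter_fn(void *key)
{
	wait_for_key(key);
	printf("waiter for %s woken\n", (const char *)key);
	return NULL;
}

int main(void)
{
	static char work_a[] = "A", work_b[] = "B";
	pthread_t ta, tb;
	int n;

	pthread_create(&ta, NULL, waiter_fn, work_a);
	pthread_create(&tb, NULL, waiter_fn, work_b);
	do {				/* crude demo sync: wait for both to queue */
		pthread_mutex_lock(&q_lock);
		n = q_count;
		pthread_mutex_unlock(&q_lock);
	} while (n < 2);
	wake_one(work_a);		/* wakes only the waiter keyed on work_a */
	wake_one(work_b);
	pthread_join(ta, NULL);
	pthread_join(tb, NULL);
	return 0;
}

The kernel version gets the same one-waiter-per-key effect without a per-waiter condition variable: __wake_up() passes @work as the wake key and the custom wake function simply rejects non-matching entries.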