sched/wait: Disambiguate wq_entry->task_list and wq_head->task_list naming
So I've noticed a number of instances where it was not obvious from the
code whether ->task_list was for a wait-queue head or a wait-queue entry.

Furthermore, there are a number of wait-queue users where the lists are
not for 'tasks' but other entities (poll tables, etc.), in which case the
'task_list' name is actively confusing.

To clear this all up, name the wait-queue head and entry list structure
fields unambiguously:

        struct wait_queue_head::task_list   => ::head
        struct wait_queue_entry::task_list  => ::entry

For example, this code:

        rqw->wait.task_list.next != &wait->task_list

... was pretty unclear (to me) about what it was doing, while now it's
written this way:

        rqw->wait.head.next != &wait->entry

... which makes it pretty clear that we are iterating a list until we
see the head.

Other examples are:

        list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
        list_for_each_entry(wq, &fence->wait.task_list, task_list) {

... where it's unclear (to me) what we are iterating, and during review
it's hard to tell whether it's trying to walk a wait-queue entry (which
would be a bug), while now it's written as:

        list_for_each_entry_safe(pos, next, &x->head, entry) {
        list_for_each_entry(wq, &fence->wait.head, entry) {

Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 5822a454d6
commit 2055da9738
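To see the renamed shape concretely, here is a minimal userspace sketch of the intrusive-list layout behind the two structures: the head owns the anchor node (::head), and each waiter embeds a link node (::entry). The list helpers below are simplified mocks in the style of <linux/list.h>, not the kernel's code; locking is elided and the 'id' field is made up for printing.

/* hypothetical userspace mock -- not kernel code */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *l)
{
        l->next = l;
        l->prev = l;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

/* The renamed fields: the head anchors the list, each waiter embeds a link. */
struct wait_queue_head  { struct list_head head; };             /* was ->task_list */
struct wait_queue_entry { int id; struct list_head entry; };    /* was ->task_list */

int main(void)
{
        struct wait_queue_head wq;
        struct wait_queue_entry a = { .id = 1 }, b = { .id = 2 };
        struct list_head *pos;

        INIT_LIST_HEAD(&wq.head);
        list_add_tail(&a.entry, &wq.head);
        list_add_tail(&b.entry, &wq.head);

        /* "Iterate entries until we see the head" -- the pattern the
         * commit message cites, now readable at a glance: */
        for (pos = wq.head.next; pos != &wq.head; pos = pos->next)
                printf("waiter %d\n",
                       container_of(pos, struct wait_queue_entry, entry)->id);

        /* The blk-wbt style "am I first in line?" check: */
        printf("a is first: %d\n", wq.head.next == &a.entry);
        return 0;
}

Any C99 compiler builds this; it prints the two waiters in queue order and then evaluates the first-in-line test that appears in the blk-wbt hunk below.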
@@ -933,7 +933,7 @@ static int blk_mq_dispatch_wake(wait_queue_entry_t *wait, unsigned mode, int fla
 
         hctx = container_of(wait, struct blk_mq_hw_ctx, dispatch_wait);
 
-        list_del(&wait->task_list);
+        list_del(&wait->entry);
         clear_bit_unlock(BLK_MQ_S_TAG_WAITING, &hctx->state);
         blk_mq_run_hw_queue(hctx, true);
         return 1;
@@ -520,7 +520,7 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
          * in line to be woken up, wait for our turn.
          */
         if (waitqueue_active(&rqw->wait) &&
-            rqw->wait.task_list.next != &wait->task_list)
+            rqw->wait.head.next != &wait->entry)
                 return false;
 
         return atomic_inc_below(&rqw->inflight, get_limit(rwb, rw));
@@ -385,7 +385,7 @@ static int kyber_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
 
         for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
                 INIT_LIST_HEAD(&khd->rqs[i]);
-                INIT_LIST_HEAD(&khd->domain_wait[i].task_list);
+                INIT_LIST_HEAD(&khd->domain_wait[i].entry);
                 atomic_set(&khd->wait_index[i], 0);
         }
 
@@ -512,7 +512,7 @@ static int kyber_domain_wake(wait_queue_entry_t *wait, unsigned mode, int flags,
 {
         struct blk_mq_hw_ctx *hctx = READ_ONCE(wait->private);
 
-        list_del_init(&wait->task_list);
+        list_del_init(&wait->entry);
         blk_mq_run_hw_queue(hctx, true);
         return 1;
 }
@@ -536,7 +536,7 @@ static int kyber_get_domain_token(struct kyber_queue_data *kqd,
          * run when one becomes available. Note that this is serialized on
          * khd->lock, but we still need to be careful about the waker.
          */
-        if (list_empty_careful(&wait->task_list)) {
+        if (list_empty_careful(&wait->entry)) {
                 init_waitqueue_func_entry(wait, kyber_domain_wake);
                 wait->private = hctx;
                 ws = sbq_wait_ptr(domain_tokens,
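The list_empty_careful() test above guards re-arming the wait entry while a waker may be unlinking it concurrently. A sketch of that helper, mirroring the kernel one-liner's semantics in userspace (the test harness around it is hypothetical):

#include <assert.h>

struct list_head { struct list_head *next, *prev; };

/* Mirrors the kernel's list_empty_careful(): besides ->next it also
 * tests ->prev, so an entry another CPU is midway through removing
 * (only one side relinked so far) is not misreported as empty.
 * It is still no substitute for locking when re-adding entries. */
static int list_empty_careful(const struct list_head *head)
{
        struct list_head *next = head->next;

        return (next == head) && (next == head->prev);
}

int main(void)
{
        struct list_head e = { &e, &e };        /* detached entry */

        assert(list_empty_careful(&e));
        return 0;
}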
@@ -736,7 +736,7 @@ static int kyber_##name##_waiting_show(void *data, struct seq_file *m) \
         struct kyber_hctx_data *khd = hctx->sched_data;                 \
         wait_queue_entry_t *wait = &khd->domain_wait[domain];           \
                                                                         \
-        seq_printf(m, "%d\n", !list_empty_careful(&wait->task_list));   \
+        seq_printf(m, "%d\n", !list_empty_careful(&wait->entry));       \
         return 0;                                                       \
 }                                                                       \
 KYBER_DEBUGFS_DOMAIN_ATTRS(KYBER_READ, read)
@@ -160,31 +160,30 @@ static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
 
         /*
          * To prevent unbounded recursion as we traverse the graph of
-         * i915_sw_fences, we move the task_list from this, the next ready
-         * fence, to the tail of the original fence's task_list
+         * i915_sw_fences, we move the entry list from this, the next ready
+         * fence, to the tail of the original fence's entry list
          * (and so added to the list to be woken).
          */
 
         spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
         if (continuation) {
-                list_for_each_entry_safe(pos, next, &x->task_list, task_list) {
+                list_for_each_entry_safe(pos, next, &x->head, entry) {
                         if (pos->func == autoremove_wake_function)
                                 pos->func(pos, TASK_NORMAL, 0, continuation);
                         else
-                                list_move_tail(&pos->task_list, continuation);
+                                list_move_tail(&pos->entry, continuation);
                 }
         } else {
                 LIST_HEAD(extra);
 
                 do {
-                        list_for_each_entry_safe(pos, next,
-                                                 &x->task_list, task_list)
+                        list_for_each_entry_safe(pos, next, &x->head, entry)
                                 pos->func(pos, TASK_NORMAL, 0, &extra);
 
                         if (list_empty(&extra))
                                 break;
 
-                        list_splice_tail_init(&extra, &x->task_list);
+                        list_splice_tail_init(&extra, &x->head);
                 } while (1);
         }
         spin_unlock_irqrestore(&x->lock, flags);
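The rewritten loop above avoids recursion by draining the queue in passes: callbacks park follow-up work on a private 'extra' list, which the caller splices back and drains until nothing new appears. A self-contained userspace sketch of the same splice-and-drain pattern, with mocked list helpers and a hypothetical 'waiter' type standing in for wait-queue entries:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *l) { l->next = l->prev = l; }
static int list_empty(const struct list_head *l) { return l->next == l; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del_init(struct list_head *l)
{
        l->prev->next = l->next;
        l->next->prev = l->prev;
        INIT_LIST_HEAD(l);
}

static void list_splice_tail_init(struct list_head *src, struct list_head *dst)
{
        if (!list_empty(src)) {
                struct list_head *first = src->next, *last = src->prev;

                last->next = dst;
                first->prev = dst->prev;
                dst->prev->next = first;
                dst->prev = last;
                INIT_LIST_HEAD(src);
        }
}

struct waiter { int id; struct list_head entry; };

#define to_waiter(p) ((struct waiter *)((char *)(p) - offsetof(struct waiter, entry)))

int main(void)
{
        struct list_head head, extra, *pos, *next;
        struct waiter w[4] = { { .id = 0 }, { .id = 1 }, { .id = 2 }, { .id = 3 } };

        INIT_LIST_HEAD(&head);
        INIT_LIST_HEAD(&extra);
        for (int i = 0; i < 3; i++)
                list_add_tail(&w[i].entry, &head);

        /* Drain loop: a callback that would have recursed into a dependent
         * fence instead parks follow-up entries on 'extra'; the caller
         * splices them back onto 'head' and drains again until empty. */
        do {
                for (pos = head.next, next = pos->next; pos != &head;
                     pos = next, next = pos->next) {
                        struct waiter *cur = to_waiter(pos);

                        printf("wake %d\n", cur->id);
                        if (cur->id == 1)       /* waiter 1 "signals" a dependent */
                                list_add_tail(&w[3].entry, &extra);
                        list_del_init(pos);     /* autoremove-style */
                }
                if (list_empty(&extra))
                        break;
                list_splice_tail_init(&extra, &head);
        } while (1);
        return 0;
}

It wakes waiters 0-2 on the first pass, and the dependent waiter 3 on the second, with a bounded stack no matter how deep the dependency graph runs.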
@@ -256,7 +255,7 @@ void i915_sw_fence_commit(struct i915_sw_fence *fence)
 
 static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
 {
-        list_del(&wq->task_list);
+        list_del(&wq->entry);
         __i915_sw_fence_complete(wq->private, key);
         i915_sw_fence_put(wq->private);
         if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
@@ -275,7 +274,7 @@ static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
         if (fence == signaler)
                 return true;
 
-        list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+        list_for_each_entry(wq, &fence->wait.head, entry) {
                 if (wq->func != i915_sw_fence_wake)
                         continue;
 
@@ -293,7 +292,7 @@ static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
         if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
                 return;
 
-        list_for_each_entry(wq, &fence->wait.task_list, task_list) {
+        list_for_each_entry(wq, &fence->wait.head, entry) {
                 if (wq->func != i915_sw_fence_wake)
                         continue;
 
@@ -350,7 +349,7 @@ static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
                 pending |= I915_SW_FENCE_FLAG_ALLOC;
         }
 
-        INIT_LIST_HEAD(&wq->task_list);
+        INIT_LIST_HEAD(&wq->entry);
         wq->flags = pending;
         wq->func = i915_sw_fence_wake;
         wq->private = i915_sw_fence_get(fence);
@@ -709,7 +709,7 @@ static irqreturn_t dryice_irq(int irq, void *dev_id)
         /*If the write wait queue is empty then there is no pending
           operations. It means the interrupt is for DryIce -Security.
           IRQ must be returned as none.*/
-        if (list_empty_careful(&imxdi->write_wait.task_list))
+        if (list_empty_careful(&imxdi->write_wait.head))
                 return rc;
 
         /* DSR_WCF clears itself on DSR read */
|
@ -48,7 +48,7 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
|
||||
}
|
||||
|
||||
/* remove from the waitqueue */
|
||||
list_del(&wait->task_list);
|
||||
list_del(&wait->entry);
|
||||
|
||||
/* move onto the action list and queue for FS-Cache thread pool */
|
||||
ASSERT(monitor->op);
|
||||
|
@@ -1094,7 +1094,7 @@ static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, v
                  * can't use __remove_wait_queue(). whead->lock is held by
                  * the caller.
                  */
-                list_del_init(&wait->task_list);
+                list_del_init(&wait->entry);
         }
 
         spin_lock_irqsave(&ep->lock, flags);
@@ -61,7 +61,7 @@ void pin_kill(struct fs_pin *p)
                 rcu_read_unlock();
                 schedule();
                 rcu_read_lock();
-                if (likely(list_empty(&wait.task_list)))
+                if (likely(list_empty(&wait.entry)))
                         break;
                 /* OK, we know p couldn't have been freed yet */
                 spin_lock_irq(&p->wait.lock);
@@ -2206,8 +2206,7 @@ static void nilfs_segctor_wakeup(struct nilfs_sc_info *sci, int err)
         unsigned long flags;
 
         spin_lock_irqsave(&sci->sc_wait_request.lock, flags);
-        list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.task_list,
-                                 wq.task_list) {
+        list_for_each_entry_safe(wrq, n, &sci->sc_wait_request.head, wq.entry) {
                 if (!atomic_read(&wrq->done) &&
                     nilfs_cnt32_ge(sci->sc_seq_done, wrq->seq)) {
                         wrq->err = err;
@@ -46,7 +46,7 @@ static void run_down(struct slot_map *m)
         spin_lock(&m->q.lock);
         if (m->c != -1) {
                 for (;;) {
-                        if (likely(list_empty(&wait.task_list)))
+                        if (likely(list_empty(&wait.entry)))
                                 __add_wait_queue_entry_tail(&m->q, &wait);
                         set_current_state(TASK_UNINTERRUPTIBLE);
 
@@ -84,7 +84,7 @@ static int wait_for_free(struct slot_map *m)
 
         do {
                 long n = left, t;
-                if (likely(list_empty(&wait.task_list)))
+                if (likely(list_empty(&wait.entry)))
                         __add_wait_queue_entry_tail_exclusive(&m->q, &wait);
                 set_current_state(TASK_INTERRUPTIBLE);
 
@@ -108,8 +108,8 @@ static int wait_for_free(struct slot_map *m)
                         left = -EINTR;
         } while (left > 0);
 
-        if (!list_empty(&wait.task_list))
-                list_del(&wait.task_list);
+        if (!list_empty(&wait.entry))
+                list_del(&wait.entry);
         else if (left <= 0 && waitqueue_active(&m->q))
                 __wake_up_locked_key(&m->q, TASK_INTERRUPTIBLE, NULL);
         __set_current_state(TASK_RUNNING);
@@ -129,7 +129,7 @@ static int userfaultfd_wake_function(wait_queue_entry_t *wq, unsigned mode,
          * wouldn't be enough, the smp_mb__before_spinlock is
          * enough to avoid an explicit smp_mb() here.
          */
-        list_del_init(&wq->task_list);
+        list_del_init(&wq->entry);
 out:
         return ret;
 }
@@ -522,13 +522,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
          * and it's fine not to block on the spinlock. The uwq on this
          * kernel stack can be released after the list_del_init.
          */
-        if (!list_empty_careful(&uwq.wq.task_list)) {
+        if (!list_empty_careful(&uwq.wq.entry)) {
                 spin_lock(&ctx->fault_pending_wqh.lock);
                 /*
                  * No need of list_del_init(), the uwq on the stack
                  * will be freed shortly anyway.
                  */
-                list_del(&uwq.wq.task_list);
+                list_del(&uwq.wq.entry);
                 spin_unlock(&ctx->fault_pending_wqh.lock);
         }
 
@@ -869,7 +869,7 @@ static inline struct userfaultfd_wait_queue *find_userfault_in(
         if (!waitqueue_active(wqh))
                 goto out;
         /* walk in reverse to provide FIFO behavior to read userfaults */
-        wq = list_last_entry(&wqh->task_list, typeof(*wq), task_list);
+        wq = list_last_entry(&wqh->head, typeof(*wq), entry);
         uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
 out:
         return uwq;
@@ -1003,14 +1003,14 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
                          * changes __remove_wait_queue() to use
                          * list_del_init() in turn breaking the
                          * !list_empty_careful() check in
-                         * handle_userfault(). The uwq->wq.task_list
+                         * handle_userfault(). The uwq->wq.head list
                          * must never be empty at any time during the
                          * refile, or the waitqueue could disappear
                          * from under us. The "wait_queue_head_t"
                          * parameter of __remove_wait_queue() is unused
                          * anyway.
                          */
-                        list_del(&uwq->wq.task_list);
+                        list_del(&uwq->wq.entry);
                         __add_wait_queue(&ctx->fault_wqh, &uwq->wq);
 
                         write_seqcount_end(&ctx->refile_seq);
@@ -1032,7 +1032,7 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
                         fork_nctx = (struct userfaultfd_ctx *)
                                 (unsigned long)
                                 uwq->msg.arg.reserved.reserved1;
-                        list_move(&uwq->wq.task_list, &fork_event);
+                        list_move(&uwq->wq.entry, &fork_event);
                         spin_unlock(&ctx->event_wqh.lock);
                         ret = 0;
                         break;
@@ -1069,8 +1069,8 @@ static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait,
         if (!list_empty(&fork_event)) {
                 uwq = list_first_entry(&fork_event,
                                        typeof(*uwq),
-                                       wq.task_list);
-                list_del(&uwq->wq.task_list);
+                                       wq.entry);
+                list_del(&uwq->wq.entry);
                 __add_wait_queue(&ctx->event_wqh, &uwq->wq);
                 userfaultfd_event_complete(ctx, uwq);
         }
@@ -1752,12 +1752,12 @@ static void userfaultfd_show_fdinfo(struct seq_file *m, struct file *f)
         unsigned long pending = 0, total = 0;
 
         spin_lock(&ctx->fault_pending_wqh.lock);
-        list_for_each_entry(wq, &ctx->fault_pending_wqh.task_list, task_list) {
+        list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) {
                 uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
                 pending++;
                 total++;
         }
-        list_for_each_entry(wq, &ctx->fault_wqh.task_list, task_list) {
+        list_for_each_entry(wq, &ctx->fault_wqh.head, entry) {
                 uwq = container_of(wq, struct userfaultfd_wait_queue, wq);
                 total++;
         }
@@ -26,12 +26,12 @@ struct wait_queue_entry {
         unsigned int            flags;
         void                    *private;
         wait_queue_func_t       func;
-        struct list_head        task_list;
+        struct list_head        entry;
 };
 
 struct wait_queue_head {
         spinlock_t              lock;
-        struct list_head        task_list;
+        struct list_head        head;
 };
 typedef struct wait_queue_head wait_queue_head_t;
@@ -44,14 +44,14 @@ struct task_struct;
 #define __WAITQUEUE_INITIALIZER(name, tsk) {                            \
         .private        = tsk,                                          \
         .func           = default_wake_function,                        \
-        .task_list      = { NULL, NULL } }
+        .entry          = { NULL, NULL } }
 
 #define DECLARE_WAITQUEUE(name, tsk)                                    \
         struct wait_queue_entry name = __WAITQUEUE_INITIALIZER(name, tsk)
 
 #define __WAIT_QUEUE_HEAD_INITIALIZER(name) {                           \
         .lock           = __SPIN_LOCK_UNLOCKED(name.lock),              \
-        .task_list      = { &(name).task_list, &(name).task_list } }
+        .head           = { &(name).head, &(name).head } }
 
 #define DECLARE_WAIT_QUEUE_HEAD(name) \
         struct wait_queue_head name = __WAIT_QUEUE_HEAD_INITIALIZER(name)
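A note on the initializer above: pointing ->head at itself is what makes a circular list empty, and that self-reference is exactly the condition list_empty() (and hence waitqueue_active(), in the next hunk) tests. A runnable userspace sketch; the spinlock is elided and WQ_HEAD_INITIALIZER is a hypothetical stand-in for __WAIT_QUEUE_HEAD_INITIALIZER:

#include <assert.h>

struct list_head { struct list_head *next, *prev; };
struct wait_queue_head { struct list_head head; };      /* spinlock elided */

/* mirrors __WAIT_QUEUE_HEAD_INITIALIZER() minus the lock */
#define WQ_HEAD_INITIALIZER(name) { .head = { &(name).head, &(name).head } }

static int list_empty(const struct list_head *head)
{
        return head->next == head;
}

int main(void)
{
        static struct wait_queue_head wq = WQ_HEAD_INITIALIZER(wq);

        /* equivalent to !waitqueue_active(&wq) */
        assert(list_empty(&wq.head));
        return 0;
}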
@@ -121,7 +121,7 @@ init_waitqueue_func_entry(struct wait_queue_entry *wq_entry, wait_queue_func_t f
  */
 static inline int waitqueue_active(struct wait_queue_head *wq_head)
 {
-        return !list_empty(&wq_head->task_list);
+        return !list_empty(&wq_head->head);
 }
 
 /**
@@ -151,7 +151,7 @@ extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue
 
 static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-        list_add(&wq_entry->task_list, &wq_head->task_list);
+        list_add(&wq_entry->entry, &wq_head->head);
 }
 
 /*
@@ -166,7 +166,7 @@ __add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_en
 
 static inline void __add_wait_queue_entry_tail(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-        list_add_tail(&wq_entry->task_list, &wq_head->task_list);
+        list_add_tail(&wq_entry->entry, &wq_head->head);
 }
 
 static inline void
@@ -179,7 +179,7 @@ __add_wait_queue_entry_tail_exclusive(struct wait_queue_head *wq_head, struct wa
 static inline void
 __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
 {
-        list_del(&wq_entry->task_list);
+        list_del(&wq_entry->entry);
 }
 
 void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
@@ -952,7 +952,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
         struct wait_queue_entry name = {                                \
                 .private        = current,                              \
                 .func           = function,                             \
-                .task_list      = LIST_HEAD_INIT((name).task_list),     \
+                .entry          = LIST_HEAD_INIT((name).entry),         \
         }
 
 #define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
@@ -961,7 +961,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
         do {                                                            \
                 (wait)->private = current;                              \
                 (wait)->func    = autoremove_wake_function;             \
-                INIT_LIST_HEAD(&(wait)->task_list);                     \
+                INIT_LIST_HEAD(&(wait)->entry);                         \
                 (wait)->flags = 0;                                      \
         } while (0)
 
@@ -45,8 +45,8 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync
                 .wq_entry = {                                           \
                         .private        = current,                      \
                         .func           = wake_bit_function,            \
-                        .task_list      =                               \
-                                LIST_HEAD_INIT((name).wq_entry.task_list), \
+                        .entry          =                               \
+                                LIST_HEAD_INIT((name).wq_entry.entry),  \
                 },                                                      \
         }
 
@@ -16,7 +16,7 @@ void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, st
 {
         spin_lock_init(&wq_head->lock);
         lockdep_set_class_and_name(&wq_head->lock, key, name);
-        INIT_LIST_HEAD(&wq_head->task_list);
+        INIT_LIST_HEAD(&wq_head->head);
 }
 
 EXPORT_SYMBOL(__init_waitqueue_head);
@@ -68,7 +68,7 @@ static void __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
 {
         wait_queue_entry_t *curr, *next;
 
-        list_for_each_entry_safe(curr, next, &wq_head->task_list, task_list) {
+        list_for_each_entry_safe(curr, next, &wq_head->head, entry) {
                 unsigned flags = curr->flags;
 
                 if (curr->func(curr, mode, wake_flags, key) &&
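Why the _safe iterator in the loop above: curr->func() is often autoremove_wake_function(), which unlinks curr while the walk is in flight, and the caller stops early once enough exclusive waiters report being woken. A userspace mock of that control flow, with a hypothetical 'waiter' type and wake() standing in for the entry's callback:

#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *l) { l->next = l->prev = l; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

static void list_del_init(struct list_head *l)
{
        l->prev->next = l->next;
        l->next->prev = l->prev;
        INIT_LIST_HEAD(l);
}

struct waiter { int exclusive; struct list_head entry; };

/* stand-in for curr->func(): autoremove-style, reports "woken" */
static int wake(struct waiter *w)
{
        list_del_init(&w->entry);
        return 1;
}

int main(void)
{
        struct list_head head, *pos, *next;
        struct waiter a = { .exclusive = 0 }, b = { .exclusive = 1 };
        int nr_exclusive = 1;

        INIT_LIST_HEAD(&head);
        INIT_LIST_HEAD(&a.entry);
        INIT_LIST_HEAD(&b.entry);
        list_add_tail(&a.entry, &head);
        list_add_tail(&b.entry, &head);

        /* list_for_each_entry_safe(curr, next, &head, entry): 'next' is
         * cached before the callback runs, so self-removal is safe. */
        for (pos = head.next, next = pos->next; pos != &head;
             pos = next, next = pos->next) {
                struct waiter *curr =
                        (struct waiter *)((char *)pos - offsetof(struct waiter, entry));

                if (wake(curr) && curr->exclusive && !--nr_exclusive)
                        break;
        }
        printf("queue empty: %d\n", head.next == &head);
        return 0;
}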
@@ -176,7 +176,7 @@ prepare_to_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_ent
 
         wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE;
         spin_lock_irqsave(&wq_head->lock, flags);
-        if (list_empty(&wq_entry->task_list))
+        if (list_empty(&wq_entry->entry))
                 __add_wait_queue(wq_head, wq_entry);
         set_current_state(state);
         spin_unlock_irqrestore(&wq_head->lock, flags);
@@ -190,7 +190,7 @@ prepare_to_wait_exclusive(struct wait_queue_head *wq_head, struct wait_queue_ent
 
         wq_entry->flags |= WQ_FLAG_EXCLUSIVE;
         spin_lock_irqsave(&wq_head->lock, flags);
-        if (list_empty(&wq_entry->task_list))
+        if (list_empty(&wq_entry->entry))
                 __add_wait_queue_entry_tail(wq_head, wq_entry);
         set_current_state(state);
         spin_unlock_irqrestore(&wq_head->lock, flags);
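Taken together, the prepare_to_wait*() helpers keep one invariant on ->entry: add it only if it is currently unlinked, and finish_wait() (further down) leaves it unlinked again. A userspace sketch of just those list-state transitions; locking, task state and scheduling are all elided, and the mock_* names are hypothetical:

#include <assert.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *l) { l->next = l->prev = l; }
static int list_empty(const struct list_head *l) { return l->next == l; }

static void list_add(struct list_head *n, struct list_head *h)
{
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
}

static void list_del_init(struct list_head *l)
{
        l->prev->next = l->next;
        l->next->prev = l->prev;
        INIT_LIST_HEAD(l);
}

struct wait_queue_head  { struct list_head head; };     /* lock elided */
struct wait_queue_entry { struct list_head entry; };

/* prepare_to_wait()-style: queue the entry only if not already queued */
static void mock_prepare_to_wait(struct wait_queue_head *wq, struct wait_queue_entry *w)
{
        if (list_empty(&w->entry))
                list_add(&w->entry, &wq->head);
        /* set_current_state(state) would happen here */
}

/* finish_wait()-style: dequeue; safe if a waker already removed us */
static void mock_finish_wait(struct wait_queue_entry *w)
{
        if (!list_empty(&w->entry))
                list_del_init(&w->entry);
}

int main(void)
{
        struct wait_queue_head wq;
        struct wait_queue_entry w;

        INIT_LIST_HEAD(&wq.head);
        INIT_LIST_HEAD(&w.entry);

        mock_prepare_to_wait(&wq, &w);
        assert(!list_empty(&wq.head));          /* waitqueue_active() */

        /* calling prepare again (as the wait loop does) must not double-add */
        mock_prepare_to_wait(&wq, &w);
        assert(wq.head.next == &w.entry && w.entry.next == &wq.head);

        mock_finish_wait(&w);
        assert(list_empty(&wq.head) && list_empty(&w.entry));
        return 0;
}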
@@ -202,7 +202,7 @@ void init_wait_entry(struct wait_queue_entry *wq_entry, int flags)
         wq_entry->flags = flags;
         wq_entry->private = current;
         wq_entry->func = autoremove_wake_function;
-        INIT_LIST_HEAD(&wq_entry->task_list);
+        INIT_LIST_HEAD(&wq_entry->entry);
 }
 EXPORT_SYMBOL(init_wait_entry);
 
@@ -225,10 +225,10 @@ long prepare_to_wait_event(struct wait_queue_head *wq_head, struct wait_queue_en
                  * can't see us, it should wake up another exclusive waiter if
                  * we fail.
                  */
-                list_del_init(&wq_entry->task_list);
+                list_del_init(&wq_entry->entry);
                 ret = -ERESTARTSYS;
         } else {
-                if (list_empty(&wq_entry->task_list)) {
+                if (list_empty(&wq_entry->entry)) {
                         if (wq_entry->flags & WQ_FLAG_EXCLUSIVE)
                                 __add_wait_queue_entry_tail(wq_head, wq_entry);
                         else
|
||||
*/
|
||||
int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
|
||||
{
|
||||
if (likely(list_empty(&wait->task_list)))
|
||||
if (likely(list_empty(&wait->entry)))
|
||||
__add_wait_queue_entry_tail(wq, wait);
|
||||
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
@@ -267,7 +267,7 @@ EXPORT_SYMBOL(do_wait_intr);
 
 int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
 {
-        if (likely(list_empty(&wait->task_list)))
+        if (likely(list_empty(&wait->entry)))
                 __add_wait_queue_entry_tail(wq, wait);
 
         set_current_state(TASK_INTERRUPTIBLE);
@@ -308,9 +308,9 @@ void finish_wait(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_en
          * have _one_ other CPU that looks at or modifies
          * the list).
          */
-        if (!list_empty_careful(&wq_entry->task_list)) {
+        if (!list_empty_careful(&wq_entry->entry)) {
                 spin_lock_irqsave(&wq_head->lock, flags);
-                list_del_init(&wq_entry->task_list);
+                list_del_init(&wq_entry->entry);
                 spin_unlock_irqrestore(&wq_head->lock, flags);
         }
 }
@@ -321,7 +321,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
         int ret = default_wake_function(wq_entry, mode, sync, key);
 
         if (ret)
-                list_del_init(&wq_entry->task_list);
+                list_del_init(&wq_entry->entry);
         return ret;
 }
 EXPORT_SYMBOL(autoremove_wake_function);
@@ -205,8 +205,8 @@ int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_en
                 .wq_entry = {                                           \
                         .private        = current,                      \
                         .func           = wake_atomic_t_function,       \
-                        .task_list      =                               \
-                                LIST_HEAD_INIT((name).wq_entry.task_list), \
+                        .entry          =                               \
+                                LIST_HEAD_INIT((name).wq_entry.entry),  \
                 },                                                      \
         }
 
@@ -845,7 +845,7 @@ static inline int wait_on_page_bit_common(wait_queue_head_t *q,
         for (;;) {
                 spin_lock_irq(&q->lock);
 
-                if (likely(list_empty(&wait->task_list))) {
+                if (likely(list_empty(&wait->entry))) {
                         if (lock)
                                 __add_wait_queue_entry_tail_exclusive(q, wait);
                         else
@@ -1570,7 +1570,7 @@ bool mem_cgroup_oom_synchronize(bool handle)
         owait.wait.flags = 0;
         owait.wait.func = memcg_oom_wake_function;
         owait.wait.private = current;
-        INIT_LIST_HEAD(&owait.wait.task_list);
+        INIT_LIST_HEAD(&owait.wait.entry);
 
         prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
         mem_cgroup_mark_under_oom(memcg);
@@ -1905,7 +1905,7 @@ alloc_nohuge:           page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
 static int synchronous_wake_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
 {
         int ret = default_wake_function(wait, mode, sync, key);
-        list_del_init(&wait->task_list);
+        list_del_init(&wait->entry);
         return ret;
 }
 
@@ -2840,7 +2840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                         spin_lock(&inode->i_lock);
                         inode->i_private = NULL;
                         wake_up_all(&shmem_falloc_waitq);
-                        WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.task_list));
+                        WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.head));
                         spin_unlock(&inode->i_lock);
                         error = 0;
                         goto out;