Lines matching "wake-up" in kernel/sched/wait.c

1 // SPDX-License-Identifier: GPL-2.0-only
11 spin_lock_init(&wq_head->lock); in __init_waitqueue_head()
12 lockdep_set_class_and_name(&wq_head->lock, key, name); in __init_waitqueue_head()
13 INIT_LIST_HEAD(&wq_head->head); in __init_waitqueue_head()
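
A minimal usage sketch of how callers reach __init_waitqueue_head(); struct my_device and example_wq are illustrative names (reused by the later sketches), the wait.h calls are the kernel's own:

	#include <linux/wait.h>

	/* Static initialization: the common case for file-scope queues. */
	static DECLARE_WAIT_QUEUE_HEAD(example_wq);

	/* Hypothetical per-device structure with an embedded queue. */
	struct my_device {
		wait_queue_head_t wq;
	};

	static void my_device_init(struct my_device *dev)
	{
		/* init_waitqueue_head() is a macro wrapping
		 * __init_waitqueue_head(), supplying a static lockdep
		 * key and the variable name. */
		init_waitqueue_head(&dev->wq);
	}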
22 wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE; in add_wait_queue()
23 spin_lock_irqsave(&wq_head->lock, flags); in add_wait_queue()
25 spin_unlock_irqrestore(&wq_head->lock, flags); in add_wait_queue()
33 wq_entry->flags |= WQ_FLAG_EXCLUSIVE; in add_wait_queue_exclusive()
34 spin_lock_irqsave(&wq_head->lock, flags); in add_wait_queue_exclusive()
36 spin_unlock_irqrestore(&wq_head->lock, flags); in add_wait_queue_exclusive()
44 wq_entry->flags |= WQ_FLAG_EXCLUSIVE | WQ_FLAG_PRIORITY; in add_wait_queue_priority()
45 spin_lock_irqsave(&wq_head->lock, flags); in add_wait_queue_priority()
47 spin_unlock_irqrestore(&wq_head->lock, flags); in add_wait_queue_priority()
55 spin_lock_irqsave(&wq_head->lock, flags); in remove_wait_queue()
57 spin_unlock_irqrestore(&wq_head->lock, flags); in remove_wait_queue()
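
The three add variants differ only in flags and queue position: plain entries lose WQ_FLAG_EXCLUSIVE and sit near the head, exclusive entries gain it and go to the tail, and priority entries additionally get WQ_FLAG_PRIORITY. A sketch, assuming example_wq from above:

	static void queueing_flavours(void)
	{
		DECLARE_WAITQUEUE(plain, current);	/* wakes via default_wake_function() */
		DECLARE_WAITQUEUE(excl, current);

		add_wait_queue(&example_wq, &plain);		/* head of the list */
		add_wait_queue_exclusive(&example_wq, &excl);	/* tail of the list */
		/* add_wait_queue_priority() would also set WQ_FLAG_PRIORITY and
		 * queue ahead of the other waiters, letting that entry consume
		 * the event outright (see the comment before __wake_up_common()). */

		remove_wait_queue(&example_wq, &plain);
		remove_wait_queue(&example_wq, &excl);
	}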
69 * The core wakeup function. Non-exclusive wakeups (nr_exclusive == 0) just
70 * wake everything up. If it's an exclusive wakeup (nr_exclusive == small +ve
71 * number) then we wake that number of exclusive tasks, and potentially all
72 * the non-exclusive tasks. Normally, exclusive tasks will be at the end of
73 * the list and any non-exclusive tasks will be woken first. A priority task
74 * may be at the head of the list, and can consume the event without any other
75 * tasks being woken.
77 * There are circumstances in which we can try to wake a task which has already
78 * started to run but is not in state TASK_RUNNING. try_to_wake_up() returns
79 * zero in this (rare) case, and we handle it by continuing to scan the queue.
88 lockdep_assert_held(&wq_head->lock); in __wake_up_common()
90 if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) { in __wake_up_common()
93 list_del(&bookmark->entry); in __wake_up_common()
94 bookmark->flags = 0; in __wake_up_common()
96 curr = list_first_entry(&wq_head->head, wait_queue_entry_t, entry); in __wake_up_common()
98 if (&curr->entry == &wq_head->head) in __wake_up_common()
101 list_for_each_entry_safe_from(curr, next, &wq_head->head, entry) { in __wake_up_common()
102 unsigned flags = curr->flags; in __wake_up_common()
108 ret = curr->func(curr, mode, wake_flags, key); in __wake_up_common()
111 if (ret && (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive) in __wake_up_common()
115 (&next->entry != &wq_head->head)) { in __wake_up_common()
116 bookmark->flags = WQ_FLAG_BOOKMARK; in __wake_up_common()
117 list_add_tail(&bookmark->entry, &next->entry); in __wake_up_common()
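
The curr->func(curr, mode, wake_flags, key) call at line 108 is the per-entry wake callback, and its return value is what decrements nr_exclusive. As a sketch of that contract, here is a callback mirroring the kernel's own autoremove_wake_function() (my_autoremove_wake is a hypothetical name):

	static int my_autoremove_wake(struct wait_queue_entry *wq_entry,
				      unsigned int mode, int sync, void *key)
	{
		int ret = default_wake_function(wq_entry, mode, sync, key);

		/* Dequeue only on a real wakeup: a zero return means the task
		 * had already started running, and __wake_up_common() keeps
		 * scanning without charging it against nr_exclusive. */
		if (ret)
			list_del_init_careful(&wq_entry->entry);

		return ret;
	}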
137 spin_lock_irqsave(&wq_head->lock, flags); in __wake_up_common_lock()
140 spin_unlock_irqrestore(&wq_head->lock, flags); in __wake_up_common_lock()
145 * __wake_up - wake up threads blocked on a waitqueue.
148 * @nr_exclusive: how many wake-one or wake-many threads to wake up
151 * If this function wakes up a task, it executes a full memory barrier before
152 * accessing the task state.
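
A waker-side sketch, with data_ready a hypothetical condition flag; the ordering (publish the condition first, wake second) is what the barrier guarantee above pairs with:

	static bool data_ready;		/* hypothetical condition */

	static void producer(void)
	{
		data_ready = true;	/* publish the condition...          */
		wake_up(&example_wq);	/* ...then wake: expands to
					 * __wake_up(&example_wq, TASK_NORMAL,
					 * 1, NULL): one exclusive waiter
					 * plus all non-exclusive ones      */
	}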
184 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
189 * The sync wakeup differs in that the waker knows that it will schedule
190 * away soon, so while the target thread will be woken up, it will not
191 * be migrated to another CPU - ie. the two threads are 'synchronized'
192 * with each other. This can prevent needless bouncing between CPUs.
194 * On UP it can prevent extra preemption.
196 * If this function wakes up a task, it executes a full memory barrier before
197 * accessing the task state.
210 * __wake_up_locked_sync_key - wake up a thread blocked on a locked waitqueue.
215 * The sync wakeup differs in that the waker knows that it will schedule
216 * away soon, so while the target thread will be woken up, it will not
217 * be migrated to another CPU - ie. the two threads are 'synchronized'
218 * with each other. This can prevent needless bouncing between CPUs.
220 * On UP it can prevent extra preemption.
222 * If this function wakes up a task, it executes a full memory barrier before
223 * accessing the task state.
233 * __wake_up_sync - see __wake_up_sync_key()
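
As a usage sketch: the _sync variants sit behind wrappers such as wake_up_interruptible_sync(), typically called when the waker is itself about to block (producer/consumer hand-off). All helper names below besides the wait.h calls are illustrative:

	static void producer_then_sleep(void)
	{
		data_ready = true;
		/* Sync hint: we will schedule away ourselves, so do not
		 * migrate the woken consumer to another CPU. */
		wake_up_interruptible_sync(&example_wq);
		schedule_timeout_interruptible(HZ);	/* waker leaves the CPU */
	}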
242 * Note: we use "set_current_state()" _after_ the wait-queue add,
243 * because we need a memory barrier there on SMP, so that any
244 * wake-function that tests for the wait-queue being active
245 * will be guaranteed to see waitqueue addition _or_ subsequent
246 * tests in this thread will see the wakeup having taken place.
247 *
248 * The spin_unlock() itself is semi-permeable and only protects
249 * one way (it only protects stuff inside the critical region and
250 * stops them from bleeding out - it would still allow subsequent
251 * loads to move into the critical region).
258 wq_entry->flags &= ~WQ_FLAG_EXCLUSIVE; in prepare_to_wait()
259 spin_lock_irqsave(&wq_head->lock, flags); in prepare_to_wait()
260 if (list_empty(&wq_entry->entry)) in prepare_to_wait()
263 spin_unlock_irqrestore(&wq_head->lock, flags); in prepare_to_wait()
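
The comment above describes the ordering that the canonical open-coded wait loop relies on. A sketch, with data_ready again standing in for the real predicate the waker sets before calling wake_up():

	static int consumer(void)
	{
		DEFINE_WAIT(wait);	/* entry using autoremove_wake_function() */
		int err = 0;

		for (;;) {
			prepare_to_wait(&example_wq, &wait, TASK_INTERRUPTIBLE);
			if (data_ready)
				break;
			if (signal_pending(current)) {
				err = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		finish_wait(&example_wq, &wait);
		return err;
	}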
274 wq_entry->flags |= WQ_FLAG_EXCLUSIVE; in prepare_to_wait_exclusive()
275 spin_lock_irqsave(&wq_head->lock, flags); in prepare_to_wait_exclusive()
276 if (list_empty(&wq_entry->entry)) { in prepare_to_wait_exclusive()
277 was_empty = list_empty(&wq_head->head); in prepare_to_wait_exclusive()
281 spin_unlock_irqrestore(&wq_head->lock, flags); in prepare_to_wait_exclusive()
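
The exclusive flavour of the same loop; each wake_up() then wakes at most one of these waiters (the classic fix for thundering herds on accept()-style queues). Note that in this version prepare_to_wait_exclusive() also reports, via was_empty, whether the queue was empty beforehand:

	static void exclusive_consumer(void)
	{
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait_exclusive(&example_wq, &wait,
						  TASK_UNINTERRUPTIBLE);
			if (data_ready)
				break;
			schedule();
		}
		finish_wait(&example_wq, &wait);
	}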
288 wq_entry->flags = flags; in init_wait_entry()
289 wq_entry->private = current; in init_wait_entry()
290 wq_entry->func = autoremove_wake_function; in init_wait_entry()
291 INIT_LIST_HEAD(&wq_entry->entry); in init_wait_entry()
300 spin_lock_irqsave(&wq_head->lock, flags); in prepare_to_wait_event()
306 * The caller will recheck the condition and return success if in prepare_to_wait_event()
307 * we were already woken up, we can not miss the event because in prepare_to_wait_event()
308 * wakeup locks/unlocks the same wq_head->lock. in prepare_to_wait_event()
310 * But we need to ensure that set-condition + wakeup after that in prepare_to_wait_event()
311 * can't see us, it should wake up another exclusive waiter if in prepare_to_wait_event()
312 * we fail. in prepare_to_wait_event()
314 list_del_init(&wq_entry->entry); in prepare_to_wait_event()
315 ret = -ERESTARTSYS; in prepare_to_wait_event()
317 if (list_empty(&wq_entry->entry)) { in prepare_to_wait_event()
318 if (wq_entry->flags & WQ_FLAG_EXCLUSIVE) in prepare_to_wait_event()
325 spin_unlock_irqrestore(&wq_head->lock, flags); in prepare_to_wait_event()
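
prepare_to_wait_event() is not normally called directly: it is the helper the ___wait_event() macro loop uses, i.e. what sits behind the wait_event*() family. Caller-side, the whole loop above collapses to a sketch like:

	static int consumer_macro(void)
	{
		/* Returns -ERESTARTSYS if a signal arrives first; the
		 * list_del_init() path above ensures another exclusive
		 * waiter is woken if we bail out after being selected. */
		return wait_event_interruptible(example_wq, data_ready);
	}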
332 * Note! These two wait functions are entered with the
333 * wait-queue lock held (and interrupts off in the _irq
334 * case), so there is no race with testing the wakeup
335 * condition in the caller before they add the wait
336 * entry to the wake queue.
340 if (likely(list_empty(&wait->entry))) in do_wait_intr()
345 return -ERESTARTSYS; in do_wait_intr()
347 spin_unlock(&wq->lock); in do_wait_intr()
349 spin_lock(&wq->lock); in do_wait_intr()
357 if (likely(list_empty(&wait->entry))) in do_wait_intr_irq()
362 return -ERESTARTSYS; in do_wait_intr_irq()
364 spin_unlock_irq(&wq->lock); in do_wait_intr_irq()
366 spin_lock_irq(&wq->lock); in do_wait_intr_irq()
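
do_wait_intr() and do_wait_intr_irq() back the wait_event_interruptible_locked() / _locked_irq() macros, which are entered with wq.lock already held, as the comment above says. A sketch (consume_event is a hypothetical helper):

	static int locked_consumer(void)
	{
		int err;

		spin_lock(&example_wq.lock);
		/* No race: waiters and wakers both take example_wq.lock,
		 * which is why do_wait_intr() can safely drop and retake
		 * it around schedule(). */
		err = wait_event_interruptible_locked(example_wq, data_ready);
		if (!err)
			consume_event();
		spin_unlock(&example_wq.lock);
		return err;
	}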
373 * finish_wait - clean up after waiting in a queue
387 * We can check for list emptiness outside the lock in finish_wait()
388 * IFF: in finish_wait()
389 * - we use the "careful" check that verifies both in finish_wait()
390 * the next and prev pointers, so that there cannot in finish_wait()
391 * be any half-pending updates in progress on other in finish_wait()
392 * CPU's that we haven't seen yet (and that might in finish_wait()
393 * still change the stack area. in finish_wait()
394 * and in finish_wait()
395 * - all other users take the lock (ie we can only in finish_wait()
396 * have _one_ other CPU that looks at or modifies in finish_wait()
397 * the list). in finish_wait()
399 if (!list_empty_careful(&wq_entry->entry)) { in finish_wait()
400 spin_lock_irqsave(&wq_head->lock, flags); in finish_wait()
401 list_del_init(&wq_entry->entry); in finish_wait()
402 spin_unlock_irqrestore(&wq_head->lock, flags); in finish_wait()
412 list_del_init_careful(&wq_entry->entry); in autoremove_wake_function()
420 return (current->flags & PF_KTHREAD) && kthread_should_stop(); in is_kthread_should_stop()
424 * DEFINE_WAIT_FUNC(wait, woken_wake_func);
425 *
426 * add_wait_queue(&wq_head, &wait);
427 * for (;;) {
428 *     if (condition)
429 *         break;
430 *
431 *     // in wait_woken()                         // in woken_wake_function()
432 *
433 *     p->state = mode;                           wq_entry->flags |= WQ_FLAG_WOKEN;
434 *     smp_mb(); // A                             try_to_wake_up():
435 *     if (!(wq_entry->flags & WQ_FLAG_WOKEN))       <full barrier>
436 *         schedule()                                if (p->state & mode)
437 *     p->state = TASK_RUNNING;                         p->state = TASK_RUNNING;
438 *     wq_entry->flags &= ~WQ_FLAG_WOKEN;         ~~~~~~~~~~~~~~~~~~
439 *     smp_mb(); // B                             condition = true;
440 * }                                              smp_mb(); // C
441 * remove_wait_queue(&wq_head, &wait);            wq_entry->flags |= WQ_FLAG_WOKEN;
446 * The below executes an smp_mb(), which matches with the full barrier in wait_woken()
447 * executed by the try_to_wake_up() in woken_wake_function() such that in wait_woken()
448 * either we see the store to wq_entry->flags in woken_wake_function() in wait_woken()
449 * or woken_wake_function() sees our store to current->state. in wait_woken()
452 if (!(wq_entry->flags & WQ_FLAG_WOKEN) && !is_kthread_should_stop()) in wait_woken()
457 * The below executes an smp_mb(), which matches with the smp_mb() (C) in wait_woken()
458 * in woken_wake_function() such that either we see the wait condition in wait_woken()
459 * being true or the store to wq_entry->flags in woken_wake_function() in wait_woken()
460 * follows ours in the coherence order. in wait_woken()
462 smp_store_mb(wq_entry->flags, wq_entry->flags & ~WQ_FLAG_WOKEN); /* B */ in wait_woken()
472 wq_entry->flags |= WQ_FLAG_WOKEN; in woken_wake_function()
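
Finally, the waiter-side pattern from the diagram above, as used in e.g. network drivers; DEFINE_WAIT_FUNC() pairs the stack entry with woken_wake_function() so the wakeup is handed off through WQ_FLAG_WOKEN rather than task state alone:

	static void woken_consumer(void)
	{
		DEFINE_WAIT_FUNC(wait, woken_wake_function);
		long t = MAX_SCHEDULE_TIMEOUT;

		add_wait_queue(&example_wq, &wait);
		while (!data_ready) {
			if (signal_pending(current))
				break;
			/* Sleeps unless WQ_FLAG_WOKEN was already set between
			 * our add_wait_queue() and here (barrier pair A/C). */
			t = wait_woken(&wait, TASK_INTERRUPTIBLE, t);
			if (!t)
				break;	/* timed out */
		}
		remove_wait_queue(&example_wq, &wait);
	}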