Lines Matching +full:wait +full:- +full:state
1 /* SPDX-License-Identifier: GPL-2.0 */
16 * pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
17 * pv_kick(cpu) -- wakes a suspended vcpu
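The two lines above are the whole hypervisor-facing contract this file builds on. A minimal user-space stand-in may help when reading the call sites further down; the semaphore backing and the explicit cpu argument are modelling liberties, not the real hypervisor interface.

        #include <semaphore.h>

        #define MAX_VCPUS 64

        /* One "halt" semaphore per simulated vCPU. */
        static sem_t vcpu_halt_sem[MAX_VCPUS];

        static void pv_sim_init(void)
        {
                for (int i = 0; i < MAX_VCPUS; i++)
                        sem_init(&vcpu_halt_sem[i], 0, 0);
        }

        /* Suspend only if *ptr still holds val; a kick posted earlier is not lost. */
        static void pv_wait(unsigned char *ptr, unsigned char val, int cpu)
        {
                if (__atomic_load_n(ptr, __ATOMIC_ACQUIRE) != val)
                        return;                         /* value already changed: keep running */
                sem_wait(&vcpu_halt_sem[cpu]);          /* "halt" until pv_kick(cpu) */
        }

        /* Wake a suspended vCPU; kicking a running vCPU is harmless. */
        static void pv_kick(int cpu)
        {
                sem_post(&vcpu_halt_sem[cpu]);
        }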
31 * mitigates the slight slowdown for non-overcommitted guests with this
32 * aggressive wait-early mechanism.
53 u8 state; member
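The state member above drives most of the protocol in the matched lines. A reduced sketch of the queue node and its state values, limited to the fields these lines reference; the real struct embeds the native MCS node (which carries locked) and is cacheline-aligned.

        /* Per-vCPU queue-node sketch; field set reduced to what this listing uses. */
        enum vcpu_state {
                vcpu_running = 0,       /* spinning or runnable */
                vcpu_halted,            /* called pv_wait() in pv_wait_node() */
                vcpu_hashed,            /* hashed; will be kicked via the lock byte */
        };

        struct pv_node_sketch {
                int locked;             /* handed the MCS lock by our predecessor */
                int cpu;                /* target for pv_kick() */
                unsigned char state;    /* one of the vcpu_* values above */
        };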
63 * The pending bit is set by the queue head vCPU of the MCS wait queue in
67 * enter the MCS wait queue. So lock starvation shouldn't happen as long
72 * mode spinning on the lock unless the MCS wait queue is empty. In this
85 * present in the MCS wait queue but the pending bit isn't set. in pv_hybrid_queued_unfair_trylock()
88 int val = atomic_read(&lock->val); in pv_hybrid_queued_unfair_trylock()
91 (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) { in pv_hybrid_queued_unfair_trylock()
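Lines 85-91 show the hybrid trylock policy: keep stealing the lock while queued waiters exist but the pending bit is clear, and fall back to fair queueing once the queue drains or pending is set. A compileable sketch of that decision loop under an assumed, simplified 32-bit word layout; note the kernel cmpxchg-es only the locked byte, whereas this sketch exchanges the whole word, so a concurrently changing tail can make it retry.

        #include <stdbool.h>

        /* Assumed word layout: bit 0 = locked, bit 8 = pending, bits 16.. = waiter tail. */
        #define Q_LOCKED_VAL            (1u << 0)
        #define Q_PENDING_VAL           (1u << 8)
        #define Q_LOCKED_PENDING_MASK   (Q_LOCKED_VAL | Q_PENDING_VAL)
        #define Q_TAIL_MASK             (~0u << 16)

        static bool hybrid_unfair_trylock_sketch(unsigned int *lock)
        {
                for (;;) {
                        unsigned int val = __atomic_load_n(lock, __ATOMIC_RELAXED);
                        unsigned int expect = val;

                        /* Steal only when both locked and pending are clear. */
                        if (!(val & Q_LOCKED_PENDING_MASK) &&
                            __atomic_compare_exchange_n(lock, &expect, val | Q_LOCKED_VAL,
                                                        false, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                                return true;    /* stole the lock */

                        if (!(val & Q_TAIL_MASK) || (val & Q_PENDING_VAL))
                                return false;   /* queue empty or pending set: queue up fairly */
                }
        }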
111 WRITE_ONCE(lock->pending, 1); in set_pending()
121 return !READ_ONCE(lock->locked) && in trylock_clear_pending()
122 (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL, in trylock_clear_pending()
128 atomic_or(_Q_PENDING_VAL, &lock->val); in set_pending()
133 int val = atomic_read(&lock->val); in trylock_clear_pending()
146 val = atomic_cmpxchg_acquire(&lock->val, old, new); in trylock_clear_pending()
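Lines 128-146 are the variant used when pending and locked cannot be updated as a single halfword, so the pending-to-locked transition becomes a cmpxchg loop over the full lock word. A sketch of that loop, reusing the simplified masks from the previous sketch.

        /* Clear our pending bit and grab the lock in one RmW; bail out if it is held. */
        static bool trylock_clear_pending_sketch(unsigned int *lock)
        {
                unsigned int val = __atomic_load_n(lock, __ATOMIC_RELAXED);

                for (;;) {
                        if (val & Q_LOCKED_VAL)
                                return false;           /* owner still present */

                        unsigned int new = (val & ~Q_PENDING_VAL) | Q_LOCKED_VAL;

                        /* On failure val is refreshed with the current word and we retry. */
                        if (__atomic_compare_exchange_n(lock, &val, new, false,
                                                        __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
                                return true;
                }
        }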
158 * Hashing is done on a per-cacheline basis to minimize the need to access
164 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
196 * Allocate space from bootmem which should be page-size aligned in __pv_init_lock_hash()
208 for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0; \
210 offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
220 if (!cmpxchg(&he->lock, NULL, lock)) { in pv_hash()
221 WRITE_ONCE(he->node, node); in pv_hash()
223 return &he->lock; in pv_hash()
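Lines 158-223 describe the per-cacheline hash: the probe starts at the first entry of a cache line (the "hash &= ~(PV_HE_PER_LINE - 1)" in the macro), walks the table linearly with wrap-around, and claims a free slot by cmpxchg-ing the lock pointer into it. A stand-alone sketch with assumed sizes and a placeholder hash function.

        #include <stddef.h>
        #include <stdint.h>

        #define CACHE_LINE_BYTES        64
        #define LOCK_HASH_BITS          10              /* assumed: 1024 entries */
        #define LOCK_HASH_SIZE          (1u << LOCK_HASH_BITS)

        struct lock_hash_entry {
                uintptr_t lock;                         /* key: lock address, 0 == free slot */
                void *node;                             /* value: the waiting node */
        };

        #define HE_PER_LINE     (CACHE_LINE_BYTES / sizeof(struct lock_hash_entry))

        static struct lock_hash_entry lock_hash[LOCK_HASH_SIZE];

        /* Insert (lock, node); probing starts at the beginning of a cache line. */
        static uintptr_t *hash_insert(uintptr_t lock, void *node)
        {
                size_t hash = (lock >> 4) & (LOCK_HASH_SIZE - 1);   /* stand-in for hash_ptr() */
                size_t start = hash & ~(HE_PER_LINE - 1);           /* round down to line start */

                for (size_t off = 0; off < LOCK_HASH_SIZE; off++) {
                        struct lock_hash_entry *he =
                                &lock_hash[(start + off) & (LOCK_HASH_SIZE - 1)];
                        uintptr_t expect = 0;

                        /* Claim a free slot with a CAS on the key, then publish the value. */
                        if (__atomic_compare_exchange_n(&he->lock, &expect, lock, false,
                                                        __ATOMIC_ACQ_REL, __ATOMIC_RELAXED)) {
                                he->node = node;
                                return &he->lock;
                        }
                }
                return NULL;    /* a free entry is guaranteed in the kernel; here we give up */
        }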
246 if (READ_ONCE(he->lock) == lock) { in pv_unhash()
247 node = READ_ONCE(he->node); in pv_unhash()
248 WRITE_ONCE(he->lock, NULL); in pv_unhash()
256 * having the lock owner do the unhash -- IFF the unlock sees the in pv_unhash()
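The matching lookup for lines 246-248, reusing the table from the previous sketch: find the entry keyed by this lock, read its node, then clear the key to release the slot. As the line 256 comment says, the unlocker only gets here after observing the slow-path marker, so in the kernel a hit is guaranteed.

        /* Find the entry for this lock, free the slot, and hand back the node. */
        static void *hash_remove(uintptr_t lock)
        {
                size_t hash = (lock >> 4) & (LOCK_HASH_SIZE - 1);
                size_t start = hash & ~(HE_PER_LINE - 1);

                for (size_t off = 0; off < LOCK_HASH_SIZE; off++) {
                        struct lock_hash_entry *he =
                                &lock_hash[(start + off) & (LOCK_HASH_SIZE - 1)];

                        if (__atomic_load_n(&he->lock, __ATOMIC_ACQUIRE) == lock) {
                                void *node = he->node;

                                __atomic_store_n(&he->lock, (uintptr_t)0, __ATOMIC_RELEASE);
                                return node;
                        }
                }
                return NULL;    /* the kernel treats a miss as a bug (see the line 256 comment) */
        }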
264 * in a running state.
272 return READ_ONCE(prev->state) != vcpu_running; in pv_wait_early()
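The return on line 272 is the wait-early mechanism from the header comment: a queued vCPU stops spinning once the vCPU owning the previous node is no longer running, and it only peeks at that state occasionally to limit cross-vCPU cacheline traffic. A sketch reusing the node type from the earlier struct sketch; the check period is an assumed value, not the kernel's constant.

        #define PREV_CHECK_PERIOD       256     /* assumed rate limit for this sketch */

        static bool wait_early_sketch(struct pv_node_sketch *prev, int loop)
        {
                /* Only sample prev->state once per period to avoid bouncing its cacheline. */
                if (loop % PREV_CHECK_PERIOD != 0)
                        return false;

                return __atomic_load_n(&prev->state, __ATOMIC_RELAXED) != vcpu_running;
        }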
284 pn->cpu = smp_processor_id(); in pv_init_node()
285 pn->state = vcpu_running; in pv_init_node()
289 * Wait for node->locked to become true, halt the vcpu after a short spin.
301 for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) { in pv_wait_node()
302 if (READ_ONCE(node->locked)) in pv_wait_node()
312 * Order pn->state vs pn->locked thusly: in pv_wait_node()
314 * [S] pn->state = vcpu_halted [S] next->locked = 1 in pv_wait_node()
316 * [L] pn->locked [RmW] pn->state = vcpu_hashed in pv_wait_node()
320 smp_store_mb(pn->state, vcpu_halted); in pv_wait_node()
322 if (!READ_ONCE(node->locked)) { in pv_wait_node()
325 pv_wait(&pn->state, vcpu_halted); in pv_wait_node()
333 cmpxchg(&pn->state, vcpu_halted, vcpu_running); in pv_wait_node()
337 * spurious wakeup and the vCPU should wait again. However, in pv_wait_node()
343 !READ_ONCE(node->locked)); in pv_wait_node()
347 * By now our node->locked should be 1 and our caller will not actually in pv_wait_node()
348 * spin-wait for it. We do however rely on our caller to do a in pv_wait_node()
349 * load-acquire for us. in pv_wait_node()
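Lines 289-349 are the per-node wait loop: spin for a bounded count, publish vcpu_halted with a full barrier, re-check locked, and only then call pv_wait(); on wakeup, flip the state back to vcpu_running unless pv_kick_node() already advanced it to vcpu_hashed. A simplified shape of that loop using the stand-ins defined above; the seq_cst store/load pair models the smp_store_mb()/READ_ONCE() fence on lines 312-322 that closes the lost-wakeup window.

        #define SPIN_LIMIT      (1 << 15)       /* stand-in for SPIN_THRESHOLD */

        static void wait_node_sketch(struct pv_node_sketch *pn, struct pv_node_sketch *prev)
        {
                for (;;) {
                        for (int loop = SPIN_LIMIT; loop; loop--) {
                                if (__atomic_load_n(&pn->locked, __ATOMIC_RELAXED))
                                        return;         /* predecessor handed us the MCS lock */
                                if (wait_early_sketch(prev, loop))
                                        break;          /* previous vCPU preempted: halt early */
                        }

                        /* [S] state = halted; full barrier; [L] locked. */
                        __atomic_store_n(&pn->state, vcpu_halted, __ATOMIC_SEQ_CST);

                        if (!__atomic_load_n(&pn->locked, __ATOMIC_SEQ_CST))
                                pv_wait(&pn->state, vcpu_halted, pn->cpu);

                        /* Keep vcpu_hashed if pv_kick_node() advanced us; else back to running. */
                        unsigned char expect = vcpu_halted;
                        __atomic_compare_exchange_n(&pn->state, &expect, vcpu_running, false,
                                                    __ATOMIC_SEQ_CST, __ATOMIC_RELAXED);
                        /* If locked is still clear this was a spurious wakeup; spin again. */
                }
        }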
354 * Called after setting next->locked = 1 when we're the lock owner.
356 * Instead of waking the waiters stuck in pv_wait_node() advance their state
365 * If the vCPU is indeed halted, advance its state to match that of in pv_kick_node()
367 * observe its next->locked value and advance itself. in pv_kick_node()
371 * The write to next->locked in arch_mcs_spin_unlock_contended() in pv_kick_node()
372 * must be ordered before the read of pn->state in the cmpxchg() in pv_kick_node()
376 * dependency will order the reading of pn->state before any in pv_kick_node()
380 if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed) in pv_kick_node()
391 WRITE_ONCE(lock->locked, _Q_SLOW_VAL); in pv_kick_node()
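pv_kick_node() (lines 354-391) runs in the lock holder right after it hands the MCS lock to its successor: if the successor already declared itself halted, advance it straight to vcpu_hashed, hash the lock on its behalf, and tag the lock byte so the eventual unlock takes the kick path. A sketch built on the earlier stand-ins; the slow-path marker value is an assumption of the sketch.

        #define Q_SLOW_VAL      3       /* assumed slow-path marker stored in the lock byte */

        static void kick_node_sketch(unsigned char *locked, struct pv_node_sketch *pn)
        {
                unsigned char expect = vcpu_halted;

                /* Relaxed suffices here: ordering comes from the barrier that published next->locked. */
                if (!__atomic_compare_exchange_n(&pn->state, &expect, vcpu_hashed, false,
                                                 __ATOMIC_RELAXED, __ATOMIC_RELAXED))
                        return;         /* vCPU is running; it will notice its locked flag itself */

                /*
                 * Pre-hash the lock for the sleeping waiter and tag the lock byte so the
                 * unlocker takes the kick path.  Plain stores suffice in the kernel because
                 * the same vCPU that does this will later perform the unlock.
                 */
                __atomic_store_n(locked, Q_SLOW_VAL, __ATOMIC_RELAXED);
                hash_insert((uintptr_t)locked, pn);
        }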
396 * Wait for l->locked to become clear and acquire the lock;
411 * If pv_kick_node() already advanced our state, we don't need to in pv_wait_head_or_lock()
414 if (READ_ONCE(pn->state) == vcpu_hashed) in pv_wait_head_or_lock()
424 * Set correct vCPU state to be used by queue node wait-early in pv_wait_head_or_lock()
427 WRITE_ONCE(pn->state, vcpu_running); in pv_wait_head_or_lock()
434 for (loop = SPIN_THRESHOLD; loop; loop--) { in pv_wait_head_or_lock()
450 * [S] <hash> [RmW] l->locked == _Q_SLOW_VAL in pv_wait_head_or_lock()
452 * [RmW] l->locked = _Q_SLOW_VAL [L] <unhash> in pv_wait_head_or_lock()
456 if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) { in pv_wait_head_or_lock()
462 WRITE_ONCE(lock->locked, _Q_LOCKED_VAL); in pv_wait_head_or_lock()
467 WRITE_ONCE(pn->state, vcpu_hashed); in pv_wait_head_or_lock()
470 pv_wait(&lock->locked, _Q_SLOW_VAL); in pv_wait_head_or_lock()
474 * able to acquire the lock before it has to wait again. in pv_wait_head_or_lock()
485 return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL); in pv_wait_head_or_lock()
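The key step of pv_wait_head_or_lock() (lines 450-470) is hash-then-mark: hash the lock first, then flip the lock byte to the slow value with a fully ordered exchange; reading back 0 means the lock was just released, so the hash entry is undone and the lock taken instead of sleeping. A sketch of that single step, built on the previous stand-ins; the caller is assumed to loop around it the way the kernel loop on line 434 does.

        static bool head_wait_once_sketch(unsigned char *locked, struct pv_node_sketch *pn)
        {
                /* Hash before publishing Q_SLOW_VAL so the unlocker can always find the entry. */
                uintptr_t *slot = hash_insert((uintptr_t)locked, pn);

                /* Fully ordered exchange stands in for the kernel's xchg() on line 456. */
                if (__atomic_exchange_n(locked, Q_SLOW_VAL, __ATOMIC_SEQ_CST) == 0) {
                        /* Lock was just released: undo the hash entry and take it ourselves. */
                        __atomic_store_n(slot, (uintptr_t)0, __ATOMIC_RELAXED);
                        __atomic_store_n(locked, (unsigned char)Q_LOCKED_VAL, __ATOMIC_RELAXED);
                        return true;
                }

                /* Still held: record that we are hashed, then sleep until kicked. */
                __atomic_store_n(&pn->state, vcpu_hashed, __ATOMIC_RELAXED);
                pv_wait(locked, Q_SLOW_VAL, pn->cpu);
                return false;   /* woken (possibly spuriously); the caller spins and retries */
        }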
489 * Include the architecture specific callee-save thunk of the
491 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
510 (unsigned long)lock, atomic_read(&lock->val)); in __pv_queued_spin_unlock_slowpath()
515 * A failed cmpxchg doesn't provide any memory-ordering guarantees, in __pv_queued_spin_unlock_slowpath()
533 smp_store_release(&lock->locked, 0); in __pv_queued_spin_unlock_slowpath()
543 pv_kick(node->cpu); in __pv_queued_spin_unlock_slowpath()
556 locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0); in __pv_queued_spin_unlock()
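Line 556 is the unlock fast path: a release-cmpxchg from the plain locked value to 0. Anything else falls into the slow path of lines 510-543: order the hash lookup after the slow-value read, unhash, release the lock, then kick the waiter's vCPU. A sketch combining both paths with the stand-ins above.

        static void queued_unlock_sketch(unsigned char *locked)
        {
                unsigned char expect = Q_LOCKED_VAL;

                /* Fast path (line 556): drop a plainly-held lock with a release cmpxchg. */
                if (__atomic_compare_exchange_n(locked, &expect, 0, false,
                                                __ATOMIC_RELEASE, __ATOMIC_RELAXED))
                        return;

                if (expect != Q_SLOW_VAL)
                        return;         /* unexpected value; the kernel WARN()s here (line 510) */

                /*
                 * The failed cmpxchg gives no ordering, so fence before the hash lookup;
                 * this stands in for the smp_rmb() mentioned on line 515.
                 */
                __atomic_thread_fence(__ATOMIC_ACQUIRE);

                struct pv_node_sketch *node = hash_remove((uintptr_t)locked);

                /* Release the lock first, then kick; kicking a running vCPU is harmless. */
                __atomic_store_n(locked, 0, __ATOMIC_RELEASE);
                if (node)
                        pv_kick(node->cpu);
        }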