Lines Matching +full:lock

1 /* SPDX-License-Identifier: GPL-2.0 */
16 * pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
17 * pv_kick(cpu) -- wakes a suspended vcpu
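The two hooks pair up as a publish-then-sleep / update-then-kick handshake. A minimal usage sketch (illustrative only, not code from this file; wait_flag, waiter_side, waker_side and sleeper_cpu are made-up names):

static u8 wait_flag = 1;		/* shared one-byte wait condition */

/* sleeping vCPU: re-check around every wait, pv_wait() may return early */
static void waiter_side(void)
{
	while (READ_ONCE(wait_flag) == 1)
		pv_wait(&wait_flag, 1);	/* halt iff wait_flag is still 1 */
}

/* waking vCPU: make the new value visible first, then kick the sleeper */
static void waker_side(int sleeper_cpu)
{
	smp_store_release(&wait_flag, 0);
	pv_kick(sleeper_cpu);		/* make the halted vCPU runnable */
}

The same pairing shows up further down: pv_wait_head_or_lock() sleeps on &lock->locked with pv_wait(), and the unlock slowpath clears that byte with smp_store_release() before calling pv_kick().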
30 * not running. The one lock stealing attempt allowed at slowpath entry
31 * mitigates the slight slowdown for non-overcommitted guests with this
32 * aggressive wait-early mechanism.
57 * Hybrid PV queued/unfair lock
60 * it will be called once when a lock waiter enters the PV slowpath before
64 * pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
65 * When that bit becomes visible to the incoming waiters, no lock stealing
67 * enter the MCS wait queue. So lock starvation shouldn't happen as long
69 * and hence disabling lock stealing.
71 * When the pending bit isn't set, the lock waiters will stay in the unfair
72 * mode spinning on the lock unless the MCS wait queue is empty. In this
73 * case, the lock waiters will enter the queued mode slowpath trying to
76 * This hybrid PV queued/unfair lock combines the best attributes of a
77 * queued lock (no lock starvation) and an unfair lock (good performance
81 static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
84 * Stay in unfair lock mode as long as queued mode waiters are
88 int val = atomic_read(&lock->val);
91 (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0)) {
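Only the "lock"-matching lines of pv_hybrid_queued_unfair_trylock() appear above. A sketch of how the surrounding loop plausibly fits together (simplified, lock-event accounting omitted; treat it as an approximation rather than the verbatim upstream body):

static inline bool pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
{
	/*
	 * Keep attempting the unfair grab until either the lock is stolen,
	 * or the pending bit appears / the MCS queue drains, in which case
	 * fall back to the fair queued slowpath by returning false.
	 */
	for (;;) {
		int val = atomic_read(&lock->val);

		if (!(val & _Q_LOCKED_PENDING_MASK) &&
		    (cmpxchg_acquire(&lock->locked, 0, _Q_LOCKED_VAL) == 0))
			return true;		/* lock stolen unfairly */

		if (!(val & _Q_TAIL_MASK) || (val & _Q_PENDING_MASK))
			return false;		/* queue empty or stealing disabled */

		cpu_relax();
	}
}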
106 * is actively spinning on the lock and no lock stealing is allowed.
109 static __always_inline void set_pending(struct qspinlock *lock)
111 WRITE_ONCE(lock->pending, 1);
117 * lock just to be sure that it will get it.
119 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
121 return !READ_ONCE(lock->locked) &&
122 (cmpxchg_acquire(&lock->locked_pending, _Q_PENDING_VAL,
126 static __always_inline void set_pending(struct qspinlock *lock)
128 atomic_or(_Q_PENDING_VAL, &lock->val);
131 static __always_inline int trylock_clear_pending(struct qspinlock *lock)
133 int val = atomic_read(&lock->val);
146 val = atomic_cmpxchg_acquire(&lock->val, old, new);
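The pending-bit helpers are defined twice, once for the byte-addressable case (#if _Q_PENDING_BITS == 8, lines 109-122 above) and once generically on the whole lock word (lines 126-146); only their "lock"-matching lines are listed. A sketch of how the generic trylock_clear_pending() loop plausibly reads in full:

static __always_inline int trylock_clear_pending(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

	for (;;) {
		int old, new;

		if (val & _Q_LOCKED_MASK)
			break;			/* lock is held, give up */

		/* try to clear pending and set locked in one cmpxchg */
		old = val;
		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
		val = atomic_cmpxchg_acquire(&lock->val, old, new);

		if (val == old)
			return 1;		/* pending -> locked, lock acquired */
	}
	return 0;
}

The acquire semantics of the cmpxchg is what makes a successful pending-to-locked transition a valid lock acquisition.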
156 * Lock and MCS node addresses hash table for fast lookup
158 * Hashing is done on a per-cacheline basis to minimize the need to access
164 * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
172 struct qspinlock *lock;
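Line 172 is a member of the hash-entry type. A sketch of the entry and of the sizing constants the comment above describes (the names are grounded in the lines below, the exact definitions are assumed):

struct pv_hash_entry {
	struct qspinlock *lock;		/* hashed lock, NULL while the slot is free */
	struct pv_node   *node;		/* queue-head vCPU blocked on that lock     */
};

#define PV_HE_PER_LINE	(SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
#define PV_HE_MIN	(PAGE_SIZE / sizeof(struct pv_hash_entry))

With a 16-byte entry on 64-bit (8 bytes on 32-bit), this matches the 256/512 entries per 4k page quoted in the comment.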
196 * Allocate space from bootmem which should be page-size aligned
208 for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0; \
210 offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
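Lines 208 and 210 are the first and last legs of the hash-bucket iterator macro (named for_each_hash_entry() upstream). The missing middle leg is the loop-termination test; a best-guess reconstruction of the whole macro:

#define for_each_hash_entry(he, offset, hash)						\
	for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;	\
	     offset < (1 << pv_lock_hash_bits);						\
	     offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])

Masking the hash down to a cacheline-aligned slot and probing linearly from there keeps each lookup within as few cachelines as possible, which is the point of the per-cacheline hashing mentioned above.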
212 static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
214 unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
220 if (!cmpxchg(&he->lock, NULL, lock)) {
221 WRITE_ONCE(he->node, node);
223 return &he->lock;
229 * This is guaranteed by ensuring every blocked lock only ever consumes
233 * The single entry is guaranteed by having the lock owner unhash
239 static struct pv_node *pv_unhash(struct qspinlock *lock)
241 unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
246 if (READ_ONCE(he->lock) == lock) {
247 node = READ_ONCE(he->node);
248 WRITE_ONCE(he->lock, NULL);
256 * having the lock owner do the unhash -- IFF the unlock sees the
272 return READ_ONCE(prev->state) != vcpu_running;
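Line 272 is the tail of pv_wait_early(), the helper that lets a queued vCPU stop spinning early when its predecessor is not running. A sketch of the full helper, assuming the usual throttling mask (the 0xff value is an assumption, not shown in the listing):

#define PV_PREV_CHECK_MASK	0xff	/* assumed: probe prev->state once per 256 spins */

static inline bool pv_wait_early(struct pv_node *prev, int loop)
{
	if ((loop & PV_PREV_CHECK_MASK) != 0)
		return false;		/* skip the remote read most iterations */

	return READ_ONCE(prev->state) != vcpu_running;
}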
284 pn->cpu = smp_processor_id();
285 pn->state = vcpu_running;
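pn->cpu and pn->state above live in the paravirt queue node, which overlays the plain MCS node. A sketch of the layout and of pv_init_node() as implied by these lines (the field order and vcpu_state values are the conventional ones, assumed here):

enum vcpu_state {
	vcpu_running = 0,
	vcpu_halted,		/* vCPU asked the hypervisor to halt it     */
	vcpu_hashed,		/* halted and the lock is in the hash table */
};

struct pv_node {
	struct mcs_spinlock	mcs;	/* must stay first: cast target     */
	int			cpu;	/* for pv_kick() at unlock time     */
	u8			state;	/* running / halted / hashed        */
};

static void pv_init_node(struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	pn->cpu = smp_processor_id();
	pn->state = vcpu_running;
}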
289 * Wait for node->locked to become true, halt the vcpu after a short spin.
301 for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
302 if (READ_ONCE(node->locked))
312 * Order pn->state vs pn->locked thusly:
314 * [S] pn->state = vcpu_halted [S] next->locked = 1
316 * [L] pn->locked [RmW] pn->state = vcpu_hashed
320 smp_store_mb(pn->state, vcpu_halted);
322 if (!READ_ONCE(node->locked)) {
325 pv_wait(&pn->state, vcpu_halted);
331 * to hash this lock.
333 cmpxchg(&pn->state, vcpu_halted, vcpu_running);
340 * MCS lock will be released soon.
343 !READ_ONCE(node->locked));
347 * By now our node->locked should be 1 and our caller will not actually
348 * spin-wait for it. We do however rely on our caller to do a
349 * load-acquire for us.
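A condensed sketch of the control flow the pv_wait_node() lines above come from (lock-event accounting and most comments dropped; not the verbatim upstream body):

static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct pv_node *pp = (struct pv_node *)prev;
	int loop;

	for (;;) {
		/* spin a while first; halting and kicking a vCPU is expensive */
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (READ_ONCE(node->locked))
				return;
			if (pv_wait_early(pp, loop))
				break;		/* predecessor is not running */
			cpu_relax();
		}

		/* publish vcpu_halted with a full barrier, then re-check */
		smp_store_mb(pn->state, vcpu_halted);

		if (!READ_ONCE(node->locked))
			pv_wait(&pn->state, vcpu_halted);

		/*
		 * Go back to vcpu_running unless pv_kick_node() already moved
		 * us to vcpu_hashed (it has then hashed the lock on our behalf).
		 */
		cmpxchg(&pn->state, vcpu_halted, vcpu_running);

		/* a spurious wakeup simply goes around the loop once more */
	}
}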
354 * Called after setting next->locked = 1 when we're the lock owner.
360 static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
367 * observe its next->locked value and advance itself.
371 * The write to next->locked in arch_mcs_spin_unlock_contended()
372 * must be ordered before the read of pn->state in the cmpxchg()
376 * dependency will order the reading of pn->state before any
380 if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)
385 * Put the lock into the hash table and set the _Q_SLOW_VAL.
391 WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
392 (void)pv_hash(lock, pn);
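Stitching the pv_kick_node() lines together gives roughly the following (a sketch, with the long memory-ordering comment compressed):

static void pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;

	/*
	 * Act only if the successor was actually seen as halted; a running
	 * vCPU will observe its next->locked value and advance by itself.
	 * The cmpxchg() pairs with the smp_store_mb() in pv_wait_node().
	 */
	if (cmpxchg_relaxed(&pn->state, vcpu_halted, vcpu_hashed)
	    != vcpu_halted)
		return;

	/*
	 * Hash the lock on the halted vCPU's behalf and flag the lock word
	 * as _Q_SLOW_VAL so the eventual unlock takes the kicking slowpath.
	 * The same vCPU re-reads these at unlock time, so plain stores do.
	 */
	WRITE_ONCE(lock->locked, _Q_SLOW_VAL);
	(void)pv_hash(lock, pn);
}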
396 * Wait for l->locked to become clear and acquire the lock;
400 * The current value of the lock will be returned for additional processing.
403 pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
414 if (READ_ONCE(pn->state) == vcpu_hashed)
424 * Set correct vCPU state to be used by queue node wait-early
427 WRITE_ONCE(pn->state, vcpu_running);
430 * Set the pending bit in the active lock spinning loop to
431 * disable lock stealing before attempting to acquire the lock.
433 set_pending(lock);
434 for (loop = SPIN_THRESHOLD; loop; loop--) {
435 if (trylock_clear_pending(lock))
439 clear_pending(lock);
443 lp = pv_hash(lock, pn);
450 * [S] <hash> [Rmw] l->locked == _Q_SLOW_VAL
452 * [RmW] l->locked = _Q_SLOW_VAL [L] <unhash>
456 if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
458 * The lock was free and now we own the lock.
459 * Change the lock value back to _Q_LOCKED_VAL
462 WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
467 WRITE_ONCE(pn->state, vcpu_hashed);
470 pv_wait(&lock->locked, _Q_SLOW_VAL);
473 * Because of lock stealing, the queue head vCPU may not be
474 * able to acquire the lock before it has to wait again.
485 return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
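A condensed sketch of the waiting loop those pv_wait_head_or_lock() lines belong to (event counters and the full ordering comments dropped; an approximation, not the verbatim upstream body):

static u32
pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
{
	struct pv_node *pn = (struct pv_node *)node;
	struct qspinlock **lp = NULL;
	int loop;

	/* if pv_kick_node() already hashed the lock, don't hash it twice */
	if (READ_ONCE(pn->state) == vcpu_hashed)
		lp = (struct qspinlock **)1;

	for (;;) {
		WRITE_ONCE(pn->state, vcpu_running);

		/* spin with the pending bit set so the lock cannot be stolen */
		set_pending(lock);
		for (loop = SPIN_THRESHOLD; loop; loop--) {
			if (trylock_clear_pending(lock))
				goto gotlock;
			cpu_relax();
		}
		clear_pending(lock);

		if (!lp) {
			lp = pv_hash(lock, pn);
			/*
			 * Hash before setting _Q_SLOW_VAL; if the lock turns
			 * out to be free, undo the hashing and take it.
			 */
			if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
				WRITE_ONCE(lock->locked, _Q_LOCKED_VAL);
				WRITE_ONCE(*lp, NULL);
				goto gotlock;
			}
		}
		WRITE_ONCE(pn->state, vcpu_hashed);
		pv_wait(&lock->locked, _Q_SLOW_VAL);
		/* loop again: a stealer may have grabbed the lock meanwhile */
	}

gotlock:
	/* the successful cmpxchg/xchg above provided the ACQUIRE ordering */
	return (u32)(atomic_read(&lock->val) | _Q_LOCKED_VAL);
}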
493 __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
499 "pvqspinlock: lock 0x%lx has corrupted value 0x%x!\n",
500 (unsigned long)lock, atomic_read(&lock->val));
505 * A failed cmpxchg doesn't provide any memory-ordering guarantees,
507 * pv_unhash *after* we've read the lock being _Q_SLOW_VAL.
517 node = pv_unhash(lock);
521 * release the lock.
523 smp_store_release(&lock->locked, 0);
526 * At this point the memory pointed at by lock can be freed/reused,
533 pv_kick(node->cpu);
537 * Include the architecture specific callee-save thunk of the
539 * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
547 __visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
553 * unhash. Otherwise it would be possible to have multiple @lock
556 locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
560 __pv_queued_spin_unlock_slowpath(lock, locked);
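Putting the __pv_queued_spin_unlock() lines together, the non-optimized unlock reads roughly as follows (a sketch; the comment text is paraphrased):

__visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	u8 locked;

	/*
	 * Fast path: a lock that was never flagged _Q_SLOW_VAL is released
	 * by a single RELEASE cmpxchg of the locked byte.  Seeing anything
	 * else (i.e. _Q_SLOW_VAL) means a vCPU is hashed and halted, so go
	 * to the unhash-and-kick slowpath.  Using cmpxchg rather than a
	 * plain store avoids racing with a waiter that is still hashing.
	 */
	locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);
	if (likely(locked == _Q_LOCKED_VAL))
		return;

	__pv_queued_spin_unlock_slowpath(lock, locked);
}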