Lines Matching refs:paravirt
75 static __always_inline int get_steal_spins(bool paravirt, bool sleepy) in get_steal_spins() argument
77 if (paravirt && sleepy) in get_steal_spins()
83 static __always_inline int get_remote_steal_spins(bool paravirt, bool sleepy) in get_remote_steal_spins() argument
85 if (paravirt && sleepy) in get_remote_steal_spins()
91 static __always_inline int get_head_spins(bool paravirt, bool sleepy) in get_head_spins() argument
93 if (paravirt && sleepy) in get_head_spins()
287 static __always_inline bool __yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt, bool mustq) in __yield_to_locked_owner() argument
295 if (!paravirt) in __yield_to_locked_owner()
340 static __always_inline bool yield_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt) in yield_to_locked_owner() argument
342 return __yield_to_locked_owner(lock, val, paravirt, false); in yield_to_locked_owner()
346 static __always_inline bool yield_head_to_locked_owner(struct qspinlock *lock, u32 val, bool paravirt) in yield_head_to_locked_owner() argument
353 return __yield_to_locked_owner(lock, val, paravirt, mustq); in yield_head_to_locked_owner()
356 static __always_inline void propagate_yield_cpu(struct qnode *node, u32 val, int *set_yield_cpu, bool paravirt) in propagate_yield_cpu() argument
361 if (!paravirt) in propagate_yield_cpu()
384 static __always_inline bool yield_to_prev(struct qspinlock *lock, struct qnode *node, u32 val, bool paravirt) in yield_to_prev() argument
391 if (!paravirt) in yield_to_prev()
456 static __always_inline bool steal_break(u32 val, int iters, bool paravirt, bool sleepy) in steal_break() argument
458 if (iters >= get_steal_spins(paravirt, sleepy)) in steal_break()
462 (iters >= get_remote_steal_spins(paravirt, sleepy))) { in steal_break()
470 static __always_inline bool try_to_steal_lock(struct qspinlock *lock, bool paravirt) in try_to_steal_lock() argument
498 preempted = yield_to_locked_owner(lock, val, paravirt); in try_to_steal_lock()
501 if (paravirt && pv_sleepy_lock) { in try_to_steal_lock()
533 } while (!steal_break(val, iters, paravirt, sleepy)); in try_to_steal_lock()
540 static __always_inline void queued_spin_lock_mcs_queue(struct qspinlock *lock, bool paravirt) in queued_spin_lock_mcs_queue() argument
600 if (yield_to_prev(lock, node, old, paravirt)) in queued_spin_lock_mcs_queue()
607 if (paravirt && pv_yield_propagate_owner && node->yield_cpu != -1) in queued_spin_lock_mcs_queue()
635 if (paravirt && pv_sleepy_lock && maybe_stealers) { in queued_spin_lock_mcs_queue()
651 propagate_yield_cpu(node, val, &set_yield_cpu, paravirt); in queued_spin_lock_mcs_queue()
652 preempted = yield_head_to_locked_owner(lock, val, paravirt); in queued_spin_lock_mcs_queue()
659 if (paravirt && preempted) { in queued_spin_lock_mcs_queue()
668 if (!mustq && iters >= get_head_spins(paravirt, sleepy)) { in queued_spin_lock_mcs_queue()
704 if (paravirt && pv_prod_head) { in queued_spin_lock_mcs_queue()
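
The matches above all gate behaviour on the paravirt flag, usually combined with the sleepy heuristic, to choose how long to keep spinning before giving up (stealing the lock or yielding). Below is a minimal, self-contained user-space sketch of that pattern only, not the kernel code: the tunables steal_spins and sleepy_factor and their values are illustrative stand-ins for the real qspinlock parameters.

    /*
     * Sketch of the paravirt/sleepy spin-budget pattern seen in the
     * listing above.  Compile with: cc -o sketch sketch.c
     */
    #include <stdbool.h>
    #include <stdio.h>

    static int steal_spins = 256;     /* stand-in for the base steal budget   */
    static int sleepy_factor = 4;     /* stand-in multiplier for "sleepy" mode */

    static inline int get_steal_spins(bool paravirt, bool sleepy)
    {
            /* Only a paravirt guest that has observed preempted vCPUs
             * ("sleepy") gets the inflated budget; otherwise the plain
             * value is used. */
            if (paravirt && sleepy)
                    return steal_spins * sleepy_factor;
            return steal_spins;
    }

    static inline bool steal_break(int iters, bool paravirt, bool sleepy)
    {
            /* Stop trying to steal once the budget for this mode runs out. */
            return iters >= get_steal_spins(paravirt, sleepy);
    }

    int main(void)
    {
            printf("bare metal budget:      %d\n", get_steal_spins(false, false));
            printf("paravirt sleepy budget: %d\n", get_steal_spins(true, true));
            printf("break after 300 iters (paravirt, sleepy)? %s\n",
                   steal_break(300, true, true) ? "yes" : "no");
            return 0;
    }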