Searched refs:vcpu_is_preempted (Results 1 – 16 of 16) sorted by relevance
72 unsigned char vcpu_is_preempted[2]; member
76 .vcpu_is_preempted = { 0x31, 0xc0 }, // xor %eax, %eax
116 case PARAVIRT_PATCH(lock.vcpu_is_preempted): in native_patch()
118 return PATCH(lock, vcpu_is_preempted, insn_buff, len); in native_patch()
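These hits are the x86 paravirt patching table: on bare metal the indirect call through pv_ops.lock.vcpu_is_preempted is patched at boot into the two bytes shown above. A minimal sketch of what those bytes mean (the array name below is illustrative, not from the hits):

    /*
     * Sketch only: 0x31 0xc0 encodes "xor %eax, %eax", so a patched
     * call site simply zeroes %eax, i.e. vcpu_is_preempted() becomes
     * an inline "return false" with no call overhead on bare metal.
     */
    static const unsigned char vcpu_is_preempted_native_insns[2] = { 0x31, 0xc0 };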
32 return pv_ops.lock.vcpu_is_preempted.func == in pv_is_native_vcpu_is_preempted()
523 if (vcpu_is_preempted(cpu)) { in kvm_smp_send_call_func_ipi()
853 pv_ops.lock.vcpu_is_preempted = in kvm_spinlock_init()
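These hits are in the KVM guest code: kvm_spinlock_init() points pv_ops.lock.vcpu_is_preempted at a KVM-specific helper, and kvm_smp_send_call_func_ipi() uses the result to yield to preempted IPI targets. A hedged sketch of the KVM predicate, assuming the usual steal_time layout (the per-CPU variable and flag names are reconstructed, not shown in the hits):

    /*
     * Sketch: the host sets KVM_VCPU_PREEMPTED in the guest's per-vCPU
     * steal_time record whenever it deschedules that vCPU, so the guest
     * only has to read one byte to answer the query.
     */
    __visible bool __kvm_vcpu_is_preempted(long cpu)
    {
            struct kvm_steal_time *src = &per_cpu(steal_time, cpu);

            return !!(src->preempted & KVM_VCPU_PREEMPTED);
    }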
445 .lock.vcpu_is_preempted =
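This hit is the default pv_ops initializer. Until hypervisor-specific init code rewires the slot, it points at a native stub that always reports "not preempted"; a sketch of that stub, with the conventional name assumed:

    /*
     * Sketch of the native default this initializer points at (name
     * assumed): on bare metal a CPU is never considered preempted.
     */
    __visible bool __native_vcpu_is_preempted(long cpu)
    {
            return false;
    }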
58 #define vcpu_is_preempted vcpu_is_preempted macro
59 static inline bool vcpu_is_preempted(long cpu) in vcpu_is_preempted() function
320 struct paravirt_callee_save vcpu_is_preempted; member
662 return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu); in pv_vcpu_is_preempted()
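The three hits directly above form the x86 plumbing: a paravirt_callee_save slot in the ops structure, a pv_vcpu_is_preempted() wrapper that emits the patchable call, and the vcpu_is_preempted() macro that generic code sees. A condensed sketch of how they chain together; only the matched lines are verbatim, the rest is reconstructed:

    /* Emits the patchable, callee-save paravirt call that the ops slot feeds. */
    static __always_inline bool pv_vcpu_is_preempted(long cpu)
    {
            return PVOP_CALLEE1(bool, lock.vcpu_is_preempted, cpu);
    }

    /* The x86 definition generic code picks up instead of the "always false" fallback. */
    #define vcpu_is_preempted vcpu_is_preempted
    static inline bool vcpu_is_preempted(long cpu)
    {
            return pv_vcpu_is_preempted(cpu);
    }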
39 #define vcpu_is_preempted vcpu_is_preempted macro
40 static inline bool vcpu_is_preempted(int cpu) in vcpu_is_preempted() function
80 pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(hv_vcpu_is_preempted); in hv_init_spinlocks()
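Hyper-V also rewires the slot, but the hit only shows the registration; the body below is an assumption based on the enlightenments covering only the qspinlock wait/kick side, not a preemption hint:

    /* Sketch (assumed body): without a preemption hint the hook just says "not preempted". */
    static bool hv_vcpu_is_preempted(int vcpu)
    {
            return false;
    }
    PV_CALLEE_SAVE_REGS_THUNK(hv_vcpu_is_preempted);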
25 #define vcpu_is_preempted arch_vcpu_is_preempted macro
135 pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen); in xen_init_spinlocks()
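Xen reuses its steal-time accounting: the registered callback, xen_vcpu_stolen(), reports a vCPU as preempted when the shared runstate says it is runnable but not actually running. A sketch under that assumption (the per-CPU variable name is reconstructed, not from the hit):

    /*
     * Sketch: a vCPU whose Xen runstate is RUNSTATE_runnable has been
     * descheduled by the hypervisor, which is exactly what the lock
     * spinning heuristics want to know.
     */
    bool xen_vcpu_stolen(int vcpu)
    {
            return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
    }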
143 if (need_resched() || vcpu_is_preempted(node_cpu(node->prev))) in osq_lock()
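The first generic consumer is the optimistic spin queue: a waiter stops spinning not only when it needs to reschedule but also when the CPU holding the node ahead of it is a preempted vCPU. A trimmed sketch of that loop; only the condition is from the hit, the surrounding lines are reconstructed from context:

    /* Spin on our node, but bail to the slow path if spinning is pointless. */
    while (!READ_ONCE(node->locked)) {
            if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
                    goto unqueue;

            cpu_relax();
    }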
569 vcpu_is_preempted(task_cpu(owner))) { in mutex_spin_on_owner()
605 retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner)); in mutex_can_spin_on_owner()
653 return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner)); in owner_on_cpu()
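The mutex and rwsem spin-on-owner paths apply the same idea through a small predicate: spinning on a lock owner only pays off while the owner is genuinely executing, i.e. it is on a CPU and that CPU's vCPU has not been preempted by the host. A sketch of the helper the owner_on_cpu() hit comes from; the body matches the matched line, the comment is added:

    /*
     * Sketch: "on a CPU" is not enough under virtualization; the vCPU
     * the owner sits on must itself be running, otherwise the spinner
     * would burn cycles waiting on a descheduled guest CPU.
     */
    static inline bool owner_on_cpu(struct task_struct *owner)
    {
            return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
    }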
1845 #ifndef vcpu_is_preempted
1846 static inline bool vcpu_is_preempted(int cpu) in vcpu_is_preempted() function
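This is the generic fallback in the scheduler header: any architecture that does not override the macro simply never reports a CPU as preempted. Reconstructed from the two matched lines, with the obvious body filled in:

    #ifndef vcpu_is_preempted
    /* Default: architectures without a hypervisor hint never report preemption. */
    static inline bool vcpu_is_preempted(int cpu)
    {
            return false;
    }
    #endif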
4646 if (vcpu_is_preempted(cpu)) in available_idle_cpu()
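The last hit is in the scheduler's wakeup path: an idle CPU only counts as available for placing a task if its vCPU is actually running. A hedged sketch reconstructed around the matched condition:

    /*
     * Sketch: a CPU can look idle from inside the guest while its vCPU
     * is preempted on the host, in which case placing a task there
     * would not make it run any sooner.
     */
    int available_idle_cpu(int cpu)
    {
            if (!idle_cpu(cpu))
                    return 0;

            if (vcpu_is_preempted(cpu))
                    return 0;

            return 1;
    }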