Lines matching +full:guest +full:-side in arch/powerpc/kvm/book3s_hv_builtin.c (Linux KVM on PowerPC, HV real-mode built-in code); each hit is annotated with the function it falls in.
// SPDX-License-Identifier: GPL-2.0-only
return -EINVAL;  in early_parse_kvm_cma_resv()
VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);  in kvm_alloc_hpt_cma()
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",  in kvm_cma_reserve()
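Both the VM_BUG_ON() and the order argument passed to the CMA reservation turn on the same arithmetic: a hash-page-table allocation must cover at least one KVM_CMA_CHUNK_ORDER-sized chunk of the reserved region. Below is a minimal userspace sketch of that check; the constants and the order_base_2() helper are illustrative stand-ins, not the kernel's definitions.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT          16   /* stand-in: 64K pages, common on ppc64 */
#define KVM_CMA_CHUNK_ORDER 18   /* stand-in: 256K chunks */

/* Smallest order with 2^order >= n, like the kernel's order_base_2(). */
static unsigned int order_base_2(unsigned long n)
{
        unsigned int order = 0;

        while ((1UL << order) < n)
                ++order;
        return order;
}

int main(void)
{
        unsigned long nr_pages = 1UL << (KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

        /* Mirrors the VM_BUG_ON(): an HPT smaller than one CMA chunk
         * would strand the rest of the chunk. */
        assert(order_base_2(nr_pages) >= KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);
        printf("nr_pages=%lu order=%u\n", nr_pages, order_base_2(nr_pages));
        return 0;
}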
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded.  If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;  in kvmppc_rm_h_confer()
int ptid = local_paca->kvm_hstate.ptid;  in kvmppc_rm_h_confer()
set_bit(ptid, &vc->conferring_threads);  in kvmppc_rm_h_confer()
threads_ceded = vc->napping_threads;  in kvmppc_rm_h_confer()
threads_conferring = vc->conferring_threads;  in kvmppc_rm_h_confer()
clear_bit(ptid, &vc->conferring_threads);  in kvmppc_rm_h_confer()
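The spin-and-yield logic this comment describes is compact enough to model in plain C: spin for a bounded time, and report H_TOO_HARD (bounce to the virtual-mode handler) only once every thread that entered the guest has either napped or is also conferring. Everything below is a stand-in for the kernel's per-vcore state, sketched from the fragment above.

#include <stdio.h>

#define H_SUCCESS  0
#define H_TOO_HARD 9999

struct vcore_model {
        unsigned long entry_map;          /* threads that entered the guest */
        unsigned long napping_threads;    /* threads that ceded */
        unsigned long conferring_threads; /* threads inside H_CONFER */
};

static long rm_h_confer_model(struct vcore_model *vc, int ptid, int spins)
{
        long rv = H_SUCCESS;              /* => don't yield */

        vc->conferring_threads |= 1UL << ptid;
        while (spins-- > 0) {             /* stands in for the timebase deadline */
                unsigned long running = vc->entry_map;
                unsigned long ceded = vc->napping_threads;
                unsigned long conferring = vc->conferring_threads;

                /* Everyone still in the guest is idle: worth yielding. */
                if ((ceded | conferring) == running) {
                        rv = H_TOO_HARD;  /* => do yield */
                        break;
                }
        }
        vc->conferring_threads &= ~(1UL << ptid);
        return rv;
}

int main(void)
{
        struct vcore_model vc = { .entry_map = 0x3, .napping_threads = 0x2 };

        /* Thread 0 confers while thread 1 naps: all idle, so yield. */
        printf("%s\n", rm_h_confer_model(&vc, 0, 10) == H_TOO_HARD
               ? "yield" : "keep running");
        return 0;
}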
if (cmd < hcall_real_table_end - hcall_real_table &&  in kvmppc_hcall_impl_hv_realmode()
ppc_md.get_random_seed(&vcpu->arch.regs.gpr[4]))  in kvmppc_rm_h_random()
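For context, the H_RANDOM fragment above writes the seed straight into the guest's GPR4. A hedged model of that convention follows; fake_darn() and the function-pointer shape of get_random_seed are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

#define H_SUCCESS  0
#define H_HARDWARE (-1)

/* Stand-in for ppc_md.get_random_seed; may be NULL on machines
 * without a hardware RNG. */
static bool (*get_random_seed)(unsigned long *v);

static bool fake_darn(unsigned long *v) { *v = 0x1234abcdUL; return true; }

static long h_random_model(unsigned long *gprs)
{
        /* The result goes in GPR4, per the PAPR hcall return convention. */
        if (get_random_seed && get_random_seed(&gprs[4]))
                return H_SUCCESS;
        return H_HARDWARE;
}

int main(void)
{
        unsigned long gprs[32] = { 0 };

        get_random_seed = fake_darn;
        printf("rc=%ld gpr4=%#lx\n", h_random_model(gprs), gprs[4]);
        return 0;
}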
xics_phys = paca_ptrs[cpu]->kvm_hstate.xics_phys;  in kvmhv_rm_send_ipi()
int cpu = vc->pcpu;  in kvmhv_interrupt_vcore()
struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;  in kvmhv_commence_exit()
int ptid = local_paca->kvm_hstate.ptid;  in kvmhv_commence_exit()
struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;  in kvmhv_commence_exit()
/* Set our bit in the threads-exiting-guest map in the 0xff00  in kvmhv_commence_exit()
   bits of vcore->entry_exit_map */  in kvmhv_commence_exit()
ee = vc->entry_exit_map;  in kvmhv_commence_exit()
} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);  in kvmhv_commence_exit()
 * Trigger the other threads in this vcore to exit the guest.  in kvmhv_commence_exit()
 * If this is a hypervisor decrementer interrupt then they  in kvmhv_commence_exit()
 * will be already on their way out of the guest.  in kvmhv_commence_exit()
 * If we are doing dynamic micro-threading, interrupt the other  in kvmhv_commence_exit()
 * subcores to stop them from passing a decrementer interrupt  in kvmhv_commence_exit()
 * to the guest.  in kvmhv_commence_exit()
vc = sip->vc[i];  in kvmhv_commence_exit()
ee = vc->entry_exit_map;  in kvmhv_commence_exit()
} while (cmpxchg(&vc->entry_exit_map, ee,  in kvmhv_commence_exit()
	 ee | VCORE_EXIT_REQ) != ee);  in kvmhv_commence_exit()
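The entry_exit_map manipulation above is a lock-free bit-set: retry cmpxchg() until our exit bit lands, then use the pre-update value to decide whether we were the first thread out. The same protocol in portable C11 atomics; the bare global and the 0x100 << ptid encoding mirror the fragment's "exit bits live in 0xff00" convention, but this is a sketch, not kernel code.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int entry_exit_map;

/* Returns the map value seen before our bit went in, like `ee` above. */
static unsigned int commence_exit_model(int ptid)
{
        unsigned int me = 0x100u << ptid;  /* our exit bit, in the 0xff00 byte */
        unsigned int ee = atomic_load(&entry_exit_map);

        /* Retry until our compare-and-swap wins; ee is reloaded on failure. */
        while (!atomic_compare_exchange_weak(&entry_exit_map, &ee, ee | me))
                ;
        return ee;
}

int main(void)
{
        unsigned int ee = commence_exit_model(2);

        /* Only the first thread out ((ee >> 8) == 0) interrupts the others. */
        printf("first out: %s\n", (ee >> 8) == 0 ? "yes" : "no");
        return 0;
}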
for (i = 0; i < pimap->n_mapped; i++) {  in get_irqmap()
if (xisr == pimap->mapped[i].r_hwirq) {  in get_irqmap()
return &pimap->mapped[i];  in get_irqmap()
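get_irqmap() can scan mapped[] without a lock because entries are append-only and a read barrier in the reader pairs with ordered stores in the writer. A C11 model of the same idea, using an acquire load of the published count in place of the kernel's explicit barriers; names and types are illustrative stand-ins.

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct irq_map_entry { unsigned int r_hwirq; unsigned int v_hwirq; };

struct irqmap_model {
        _Atomic int n_mapped;              /* writer publishes with release */
        struct irq_map_entry mapped[16];
};

static struct irq_map_entry *get_irqmap_model(struct irqmap_model *pimap,
                                              unsigned int xisr)
{
        /* Acquire pairs with the writer's release store of n_mapped, so every
         * entry below the count is fully initialised before we read it. */
        int n = atomic_load_explicit(&pimap->n_mapped, memory_order_acquire);

        for (int i = 0; i < n; i++)
                if (pimap->mapped[i].r_hwirq == xisr)
                        return &pimap->mapped[i];
        return NULL;
}

int main(void)
{
        static struct irqmap_model pimap;

        pimap.mapped[0] = (struct irq_map_entry){ .r_hwirq = 0x42, .v_hwirq = 7 };
        atomic_store_explicit(&pimap.n_mapped, 1, memory_order_release);

        struct irq_map_entry *m = get_irqmap_model(&pimap, 0x42);
        printf("v_hwirq=%u\n", m ? m->v_hwirq : 0);
        return 0;
}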
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
vcpu = local_paca->kvm_hstate.kvm_vcpu;  in kvmppc_check_passthru()
pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);  in kvmppc_check_passthru()
local_paca->kvm_hstate.saved_xirr = 0;  in kvmppc_check_passthru()
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
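The return-code contract is easiest to see from the caller's side. A hedged sketch of how an exit path might dispatch on these values; the messages and the helper name are invented for illustration.

#include <stdio.h>

static void dispatch_one_intr(long rc)
{
        switch (rc) {
        case 0:   /* nothing pending in the ICP */
                break;
        case 1:   /* host must handle it: exit to the host ICP driver */
                printf("exit to host\n");
                break;
        case 2:   /* passthrough needing completion in the host */
                printf("complete passthrough in host\n");
                break;
        case -1:  /* guest wakeup IPI, already cleared: just re-enter */
                printf("re-enter guest\n");
                break;
        case -2:  /* PCI passthrough interrupt already delivered */
                printf("handled in real mode\n");
                break;
        }
}

int main(void) { dispatch_one_intr(-1); return 0; }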
host_ipi = local_paca->kvm_hstate.host_ipi;  in kvmppc_read_one_intr()
xics_phys = local_paca->kvm_hstate.xics_phys;  in kvmppc_read_one_intr()
local_paca->kvm_hstate.saved_xirr = h_xirr;  in kvmppc_read_one_intr()
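saved_xirr is kept in host endianness even though the XIRR register is read big-endian. A small userspace model of that conversion, with glibc's be32toh()/htobe32() standing in for the kernel's be32_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t xirr_be = htobe32(0xff000010); /* as read from the MMIO reg */
        uint32_t h_xirr = be32toh(xirr_be);     /* host-endian copy to save */
        uint32_t xisr = h_xirr & 0xffffff;      /* low 24 bits: the source */

        printf("h_xirr=%#x xisr=%#x cppr=%#x\n", h_xirr, xisr, h_xirr >> 24);
        return 0;
}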
 * Ensure that the store/load complete to guarantee all side  in kvmppc_read_one_intr()
 * effects of loading from XIRR have completed  in kvmppc_read_one_intr()
 * Need to ensure side effects of above stores  in kvmppc_read_one_intr()
 * complete before proceeding  in kvmppc_read_one_intr()
 * We need to re-check host IPI now in case it got set in the  in kvmppc_read_one_intr()
 * meantime. If it's clear, we bounce the interrupt to the  in kvmppc_read_one_intr()
 * guest.  in kvmppc_read_one_intr()
host_ipi = local_paca->kvm_hstate.host_ipi;  in kvmppc_read_one_intr()
/* Let side effects complete */  in kvmppc_read_one_intr()
local_paca->kvm_hstate.saved_xirr = 0;  in kvmppc_read_one_intr()
return -1;  in kvmppc_read_one_intr()
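The re-check above guards a classic race: after clearing the IPI source, the host may have raised its flag in the window, so we must look again and resend if we raced. A C11 model of that clear, fence, re-check, undo-if-raced shape; state, priorities, and names are stand-ins.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define IPI_PRIORITY 4

static _Atomic int host_ipi;     /* set by the host when it wants this cpu */
static _Atomic int mfrr = 0xff;  /* 0xff = no IPI pending */

/* Returns true if the IPI was for the guest, false if the host won the race. */
static bool take_ipi_for_guest(void)
{
        atomic_store(&mfrr, 0xff);                 /* clear/EOI the IPI */
        atomic_thread_fence(memory_order_seq_cst); /* like the smp_mb() above */

        if (atomic_load(&host_ipi)) {
                /* Raced with the host: resend the IPI we just swallowed. */
                atomic_store(&mfrr, IPI_PRIORITY);
                return false;
        }
        return true;                               /* guest wakeup, rc = -1 path */
}

int main(void)
{
        printf("%s\n", take_ipi_for_guest() ? "guest IPI" : "host raced");
        return 0;
}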
vcpu->arch.ceded = 0;  in kvmppc_end_cede()
if (vcpu->arch.timer_running) {  in kvmppc_end_cede()
hrtimer_try_to_cancel(&vcpu->arch.dec_timer);  in kvmppc_end_cede()
vcpu->arch.timer_running = 0;  in kvmppc_end_cede()
/* Guest must always run with ME enabled, HV disabled. */  in kvmppc_set_msr_hv()
vcpu->arch.shregs.msr = msr;  in kvmppc_set_msr_hv()
new_msr = vcpu->arch.intr_msr;  in inject_interrupt()
 * AIL does not apply to SRESET, MCE, or HMI (which is never  in inject_interrupt()
 * delivered to the guest), and does not apply if IR=0 or DR=0.  in inject_interrupt()
(vcpu->arch.vcore->lpcr & LPCR_AIL) == LPCR_AIL_3 &&  in inject_interrupt()
vcpu->arch.shregs.msr = new_msr;  in inject_interrupt()
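The LPCR_AIL test decides whether the vector is taken relocated: with AIL=3 and the guest translating (IR=1, DR=1), delivery moves to the 0xc000000000004000 region with translation kept on. A sketch of that adjustment; the bit values and the offset follow my reading of the fragment and of Power ISA conventions, so treat them as assumptions.

#include <stdint.h>
#include <stdio.h>

#define MSR_IR (1UL << 5)                /* instruction relocation */
#define MSR_DR (1UL << 4)                /* data relocation */
#define AIL_OFFSET 0xc000000000004000UL  /* AIL=3 relocated vector base */

static uint64_t adjust_vector(uint64_t vec, uint64_t msr, int ail,
                              uint64_t *new_msr)
{
        /* AIL only applies when the guest was translating (IR=1 and DR=1). */
        if (ail == 3 && (msr & (MSR_IR | MSR_DR)) == (MSR_IR | MSR_DR)) {
                *new_msr |= MSR_IR | MSR_DR;  /* deliver with translation on */
                return vec + AIL_OFFSET;
        }
        return vec;                           /* real-mode delivery at vec */
}

int main(void)
{
        uint64_t new_msr = 0;
        uint64_t pc = adjust_vector(0x500 /* external interrupt vector */,
                                    MSR_IR | MSR_DR, 3, &new_msr);

        printf("deliver at %#lx\n", (unsigned long)pc);
        return 0;
}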
 * Is there a PRIV_DOORBELL pending for the guest (on POWER9)?
 * Can we inject a Decrementer or an External interrupt?
ext = (vcpu->arch.pending_exceptions >> BOOK3S_IRQPRIO_EXTERNAL) & 1;  in kvmppc_guest_entry_inject_int()
if (vcpu->arch.shregs.msr & MSR_EE) {  in kvmppc_guest_entry_inject_int()
if (vcpu->arch.doorbell_request) {  in kvmppc_guest_entry_inject_int()
vcpu->arch.vcore->dpdes = 1;  in kvmppc_guest_entry_inject_int()
vcpu->arch.doorbell_request = 0;  in kvmppc_guest_entry_inject_int()
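Before entry, the pending-EXTERNAL flag is folded into LPCR at the MER (mediated external request) bit, so the guest takes the interrupt as soon as it sets MSR_EE. A tiny model of that bit insertion; the shift value is a stand-in, not necessarily the real LPCR_MER_SH.

#include <stdio.h>

#define LPCR_MER_SH 11   /* stand-in bit position for illustration */

static unsigned long fold_mer(unsigned long lpcr, unsigned long pending,
                              int ext_prio)
{
        /* Extract the pending-EXTERNAL bit, as the fragment above does. */
        unsigned long ext = (pending >> ext_prio) & 1;

        return lpcr | (ext << LPCR_MER_SH);
}

int main(void)
{
        unsigned long lpcr = fold_mer(0, 1UL << 3, 3);

        printf("MER %s\n", (lpcr >> LPCR_MER_SH) & 1 ? "set" : "clear");
        return 0;
}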
for (set = 0; set < kvm->arch.tlb_sets; ++set) {  in flush_guest_tlb()
if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) {  in kvmppc_check_need_tlb_flush()
cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush);  in kvmppc_check_need_tlb_flush()
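kvmppc_check_need_tlb_flush() treats the cpumask as a set of cores whose TLB may still hold stale guest translations: test the bit, flush every TLB set, and only then clear the bit. A model with a plain bitmask in place of cpumask_t and a stubbed flush:

#include <stdio.h>

static unsigned long need_tlb_flush;     /* one bit per cpu, stand-in cpumask */

static void flush_guest_tlb_stub(void)
{
        printf("flushing all TLB sets on this core\n");
}

static void check_need_tlb_flush(int pcpu)
{
        if (need_tlb_flush & (1UL << pcpu)) {
                flush_guest_tlb_stub();
                /* Clear the bit only after the flush, as the kernel does. */
                need_tlb_flush &= ~(1UL << pcpu);
        }
}

int main(void)
{
        need_tlb_flush = 1UL << 2;
        check_need_tlb_flush(2);         /* flushes */
        check_need_tlb_flush(2);         /* no-op now */
        return 0;
}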