Lines matching refs: svm

194 void avic_init_vmcb(struct vcpu_svm *svm)  in avic_init_vmcb()  argument
196 struct vmcb *vmcb = svm->vmcb; in avic_init_vmcb()
197 struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm); in avic_init_vmcb()
198 phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page)); in avic_init_vmcb()
206 if (kvm_apicv_activated(svm->vcpu.kvm)) in avic_init_vmcb()
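The matches above (source lines 194-206) fall in avic_init_vmcb(), which wires the per-vCPU AVIC backing page and the per-VM logical/physical APIC ID tables into the VMCB. The identifiers match KVM's AMD AVIC code (arch/x86/kvm/svm/avic.c in recent kernels); the sketch below is a reconstruction from this listing and that code, so field names such as avic_logical_id_table_page and constants such as AVIC_HPA_MASK are assumptions that may differ between kernel versions.

    /* Sketch: program the AVIC pages into the VMCB. */
    void avic_init_vmcb(struct vcpu_svm *svm)
    {
        struct vmcb *vmcb = svm->vmcb;
        struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm);
        phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
        phys_addr_t lpa = __sme_set(page_to_phys(kvm_svm->avic_logical_id_table_page));
        phys_addr_t ppa = __sme_set(page_to_phys(kvm_svm->avic_physical_id_table_page));

        vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
        vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
        vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
        vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;

        /* Source line 206: only turn AVIC on if APICv is currently active. */
        if (kvm_apicv_activated(svm->vcpu.kvm))
            vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
        else
            vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
    }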
265 struct vcpu_svm *svm = to_svm(vcpu); in avic_init_backing_page() local
270 if (!svm->vcpu.arch.apic->regs) in avic_init_backing_page()
281 svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs); in avic_init_backing_page()
288 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) & in avic_init_backing_page()
293 svm->avic_physical_id_cache = entry; in avic_init_backing_page()
298 int avic_incomplete_ipi_interception(struct vcpu_svm *svm) in avic_incomplete_ipi_interception() argument
300 u32 icrh = svm->vmcb->control.exit_info_1 >> 32; in avic_incomplete_ipi_interception()
301 u32 icrl = svm->vmcb->control.exit_info_1; in avic_incomplete_ipi_interception()
302 u32 id = svm->vmcb->control.exit_info_2 >> 32; in avic_incomplete_ipi_interception()
303 u32 index = svm->vmcb->control.exit_info_2 & 0xFF; in avic_incomplete_ipi_interception()
304 struct kvm_lapic *apic = svm->vcpu.arch.apic; in avic_incomplete_ipi_interception()
306 trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index); in avic_incomplete_ipi_interception()
327 struct kvm *kvm = svm->vcpu.kvm; in avic_incomplete_ipi_interception()
328 struct kvm_lapic *apic = svm->vcpu.arch.apic; in avic_incomplete_ipi_interception()
348 index, svm->vcpu.vcpu_id, icrh, icrl); in avic_incomplete_ipi_interception()
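Source lines 298-348 belong to avic_incomplete_ipi_interception(), the handler for the AVIC_INCOMPLETE_IPI #VMEXIT: exit_info_1 carries the ICR value (high word in the upper 32 bits) and exit_info_2 carries the failure reason plus the offending index. A condensed reconstruction; the wake-up path for the not-running case has been refactored into helpers in newer kernels, so treat the loop below as illustrative:

    int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
    {
        u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
        u32 icrl = svm->vmcb->control.exit_info_1;
        u32 id = svm->vmcb->control.exit_info_2 >> 32;
        u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
        struct kvm_lapic *apic = svm->vcpu.arch.apic;

        trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);

        switch (id) {
        case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
            /*
             * Hardware only accelerates fixed, edge-triggered IPIs;
             * replay everything else through the emulated local APIC.
             */
            kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
            kvm_lapic_reg_write(apic, APIC_ICR, icrl);
            break;
        case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
            /*
             * The IRR bits are already set; kick the destination vCPUs
             * that were not running (lines 327-328 sit in this block).
             */
            struct kvm *kvm = svm->vcpu.kvm;
            struct kvm_vcpu *vcpu;
            int i;

            kvm_for_each_vcpu(i, vcpu, kvm) {
                if (kvm_apic_match_dest(vcpu, apic,
                                        icrl & APIC_SHORT_MASK,
                                        GET_APIC_DEST_FIELD(icrh),
                                        icrl & APIC_DEST_MASK))
                    kvm_vcpu_wake_up(vcpu);
            }
            break;
        }
        case AVIC_IPI_FAILURE_INVALID_TARGET:
            WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
                      index, svm->vcpu.vcpu_id, icrh, icrl);
            break;
        case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
            WARN_ONCE(1, "Invalid backing page\n");
            break;
        default:
            pr_err("Unknown IPI interception\n");
        }

        return 1;
    }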
410 struct vcpu_svm *svm = to_svm(vcpu); in avic_invalidate_logical_id_entry() local
411 bool flat = svm->dfr_reg == APIC_DFR_FLAT; in avic_invalidate_logical_id_entry()
412 u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat); in avic_invalidate_logical_id_entry()
421 struct vcpu_svm *svm = to_svm(vcpu); in avic_handle_ldr_update() local
425 if (ldr == svm->ldr_reg) in avic_handle_ldr_update()
434 svm->ldr_reg = ldr; in avic_handle_ldr_update()
442 struct vcpu_svm *svm = to_svm(vcpu); in avic_handle_apic_id_update() local
462 if (svm->ldr_reg) in avic_handle_apic_id_update()
470 struct vcpu_svm *svm = to_svm(vcpu); in avic_handle_dfr_update() local
473 if (svm->dfr_reg == dfr) in avic_handle_dfr_update()
477 svm->dfr_reg = dfr; in avic_handle_dfr_update()
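Source lines 410-477 cover the APIC register trap handlers: avic_invalidate_logical_id_entry() drops the vCPU's current logical APIC ID table entry, while avic_handle_ldr_update() and avic_handle_dfr_update() re-derive it when the guest rewrites APIC_LDR or APIC_DFR (avic_handle_apic_id_update(), lines 442-462, does the analogous move in the physical ID table). A sketch, assuming the file-local helpers avic_get_logical_id_entry() and avic_ldr_write() from upstream avic.c:

    static void avic_invalidate_logical_id_entry(struct kvm_vcpu *vcpu)
    {
        struct vcpu_svm *svm = to_svm(vcpu);
        bool flat = svm->dfr_reg == APIC_DFR_FLAT;
        u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat);

        if (entry)
            clear_bit(AVIC_LOGICAL_ID_ENTRY_VALID_BIT, (unsigned long *)entry);
    }

    static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
    {
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);
        u32 id = kvm_xapic_id(vcpu->arch.apic);
        int ret = 0;

        if (ldr == svm->ldr_reg)        /* line 425: nothing changed */
            return 0;

        avic_invalidate_logical_id_entry(vcpu);

        if (ldr)
            ret = avic_ldr_write(vcpu, id, ldr);
        if (!ret)
            svm->ldr_reg = ldr;         /* line 434 */
        return ret;
    }

    static void avic_handle_dfr_update(struct kvm_vcpu *vcpu)
    {
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);

        if (svm->dfr_reg == dfr)        /* line 473 */
            return;

        avic_invalidate_logical_id_entry(vcpu);
        svm->dfr_reg = dfr;             /* line 477 */
    }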
480 static int avic_unaccel_trap_write(struct vcpu_svm *svm) in avic_unaccel_trap_write() argument
482 struct kvm_lapic *apic = svm->vcpu.arch.apic; in avic_unaccel_trap_write()
483 u32 offset = svm->vmcb->control.exit_info_1 & in avic_unaccel_trap_write()
488 if (avic_handle_apic_id_update(&svm->vcpu)) in avic_unaccel_trap_write()
492 if (avic_handle_ldr_update(&svm->vcpu)) in avic_unaccel_trap_write()
496 avic_handle_dfr_update(&svm->vcpu); in avic_unaccel_trap_write()
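Source lines 480-496 are avic_unaccel_trap_write(), the trap-style (post-write) path: it dispatches on the APIC register offset to the handlers above, then replays the already-completed write into the emulated local APIC so KVM's software state stays in sync. A reconstruction from upstream avic.c:

    static int avic_unaccel_trap_write(struct vcpu_svm *svm)
    {
        struct kvm_lapic *apic = svm->vcpu.arch.apic;
        u32 offset = svm->vmcb->control.exit_info_1 &
                     AVIC_UNACCEL_ACCESS_OFFSET_MASK;

        switch (offset) {
        case APIC_ID:
            if (avic_handle_apic_id_update(&svm->vcpu))
                return 0;
            break;
        case APIC_LDR:
            if (avic_handle_ldr_update(&svm->vcpu))
                return 0;
            break;
        case APIC_DFR:
            avic_handle_dfr_update(&svm->vcpu);
            break;
        default:
            break;
        }

        /* Replay the (already completed) guest write into the emulated APIC. */
        kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));

        return 1;
    }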
536 int avic_unaccelerated_access_interception(struct vcpu_svm *svm) in avic_unaccelerated_access_interception() argument
539 u32 offset = svm->vmcb->control.exit_info_1 & in avic_unaccelerated_access_interception()
541 u32 vector = svm->vmcb->control.exit_info_2 & in avic_unaccelerated_access_interception()
543 bool write = (svm->vmcb->control.exit_info_1 >> 32) & in avic_unaccelerated_access_interception()
547 trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset, in avic_unaccelerated_access_interception()
552 ret = avic_unaccel_trap_write(svm); in avic_unaccelerated_access_interception()
555 ret = kvm_emulate_instruction(&svm->vcpu, 0); in avic_unaccelerated_access_interception()
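Source lines 536-555 are avic_unaccelerated_access_interception(), the AVIC_NOACCEL #VMEXIT handler. Bit 32 of exit_info_1 distinguishes writes, and the register offset decides whether this is a trap (write already done, handled by avic_unaccel_trap_write()) or a fault that needs full instruction emulation. Reconstruction, assuming the file-local helper is_avic_unaccelerated_access_trap():

    int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
    {
        u32 offset = svm->vmcb->control.exit_info_1 &
                     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
        u32 vector = svm->vmcb->control.exit_info_2 &
                     AVIC_UNACCEL_ACCESS_VECTOR_MASK;
        bool write = (svm->vmcb->control.exit_info_1 >> 32) &
                     AVIC_UNACCEL_ACCESS_WRITE_MASK;
        bool trap = is_avic_unaccelerated_access_trap(offset);
        int ret;

        trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
                                            trap, write, vector);
        if (trap) {
            /* Trap: the write has already hit the backing page. */
            WARN_ONCE(!write, "svm: Handling trap read.\n");
            ret = avic_unaccel_trap_write(svm);
        } else {
            /* Fault: emulate the whole instruction. */
            ret = kvm_emulate_instruction(&svm->vcpu, 0);
        }

        return ret;
    }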
561 int avic_init_vcpu(struct vcpu_svm *svm) in avic_init_vcpu() argument
564 struct kvm_vcpu *vcpu = &svm->vcpu; in avic_init_vcpu()
569 ret = avic_init_backing_page(&svm->vcpu); in avic_init_vcpu()
573 INIT_LIST_HEAD(&svm->ir_list); in avic_init_vcpu()
574 spin_lock_init(&svm->ir_list_lock); in avic_init_vcpu()
575 svm->dfr_reg = APIC_DFR_FLAT; in avic_init_vcpu()
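Source lines 561-575 are avic_init_vcpu(), the per-vCPU setup: install the backing page and initialise the interrupt-remapping list used for IOMMU posted interrupts. Sketch; the `avic` module-parameter check shown is version dependent:

    int avic_init_vcpu(struct vcpu_svm *svm)
    {
        struct kvm_vcpu *vcpu = &svm->vcpu;
        int ret;

        if (!avic || !irqchip_in_kernel(vcpu->kvm))
            return 0;

        ret = avic_init_backing_page(&svm->vcpu);
        if (ret)
            return ret;

        /* Per-vCPU list of IOMMU IRTEs posting to this vCPU (lines 573-574). */
        INIT_LIST_HEAD(&svm->ir_list);
        spin_lock_init(&svm->ir_list_lock);
        svm->dfr_reg = APIC_DFR_FLAT;

        return ret;
    }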
617 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_pi_irte_mode() local
626 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_set_pi_irte_mode()
628 if (list_empty(&svm->ir_list)) in svm_set_pi_irte_mode()
631 list_for_each_entry(ir, &svm->ir_list, node) { in svm_set_pi_irte_mode()
640 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_set_pi_irte_mode()
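Source lines 617-640 are svm_set_pi_irte_mode(), which walks the vCPU's ir_list under ir_list_lock and flips each remapped interrupt between IOMMU guest (posted) mode and legacy mode when APICv is activated or deactivated. Sketch, assuming the amd_iommu_{activate,deactivate}_guest_mode() IOMMU API and the file-local struct amd_svm_iommu_ir:

    static int svm_set_pi_irte_mode(struct kvm_vcpu *vcpu, bool activate)
    {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct amd_svm_iommu_ir *ir;
        unsigned long flags;
        int ret = 0;

        if (!kvm_arch_has_assigned_device(vcpu->kvm))
            return 0;

        /* Update every IRTE currently targeting this vCPU (lines 626-640). */
        spin_lock_irqsave(&svm->ir_list_lock, flags);

        if (list_empty(&svm->ir_list))
            goto out;

        list_for_each_entry(ir, &svm->ir_list, node) {
            if (activate)
                ret = amd_iommu_activate_guest_mode(ir->data);
            else
                ret = amd_iommu_deactivate_guest_mode(ir->data);
            if (ret)
                break;
        }
    out:
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
        return ret;
    }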
646 struct vcpu_svm *svm = to_svm(vcpu); in svm_refresh_apicv_exec_ctrl() local
647 struct vmcb *vmcb = svm->vmcb; in svm_refresh_apicv_exec_ctrl()
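Source lines 646-647 are the locals of svm_refresh_apicv_exec_ctrl(), which toggles the AVIC enable bit in int_ctl when the APICv activation state changes and then propagates the new state to the IOMMU via svm_set_pi_irte_mode(). A rough sketch only; the dirty-tracking call and the re-sync of the logical ID table on re-activation differ across kernel versions:

    void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
    {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;
        bool activated = kvm_vcpu_apicv_active(vcpu);

        if (activated)
            vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
        else
            vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
        vmcb_mark_dirty(vmcb, VMCB_AVIC);

        svm_set_pi_irte_mode(vcpu, activated);
    }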
701 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) in svm_ir_list_del() argument
706 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_del()
707 list_for_each_entry(cur, &svm->ir_list, node) { in svm_ir_list_del()
714 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_del()
717 static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) in svm_ir_list_add() argument
729 struct kvm *kvm = svm->vcpu.kvm; in svm_ir_list_add()
754 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_add()
755 list_add(&ir->node, &svm->ir_list); in svm_ir_list_add()
756 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_add()
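Source lines 701-756 are svm_ir_list_del() and svm_ir_list_add(), which maintain the per-vCPU list of IOMMU interrupt-remapping entries that post into this vCPU; the add path first unhooks the entry from the previous vCPU when an existing IRTE is being retargeted (identified via prev_ga_tag). Condensed sketch, assuming the struct amd_svm_iommu_ir node type and the AVIC_GATAG_TO_VCPUID() macro from upstream avic.c:

    static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
    {
        struct amd_svm_iommu_ir *cur;
        unsigned long flags;

        spin_lock_irqsave(&svm->ir_list_lock, flags);
        list_for_each_entry(cur, &svm->ir_list, node) {
            if (cur->data != pi->ir_data)
                continue;
            list_del(&cur->node);
            kfree(cur);
            break;
        }
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
    }

    static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
    {
        struct amd_svm_iommu_ir *ir;
        unsigned long flags;

        /* Retargeted IRTE: drop it from the previous vCPU's list first. */
        if (pi->ir_data && pi->prev_ga_tag) {
            struct kvm_vcpu *prev_vcpu =
                kvm_get_vcpu_by_id(svm->vcpu.kvm,
                                   AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag));

            if (!prev_vcpu)
                return -EINVAL;
            svm_ir_list_del(to_svm(prev_vcpu), pi);
        }

        ir = kzalloc(sizeof(*ir), GFP_KERNEL_ACCOUNT);
        if (!ir)
            return -ENOMEM;
        ir->data = pi->ir_data;

        spin_lock_irqsave(&svm->ir_list_lock, flags);
        list_add(&ir->node, &svm->ir_list);
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
        return 0;
    }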
774 struct vcpu_data *vcpu_info, struct vcpu_svm **svm) in get_pi_vcpu_info() argument
790 *svm = to_svm(vcpu); in get_pi_vcpu_info()
791 vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page)); in get_pi_vcpu_info()
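Source lines 774-791 are get_pi_vcpu_info(), which decides whether an MSI routing entry can be posted directly to a single vCPU; when it can, it returns that vCPU's vcpu_svm and points pi_desc_addr at its AVIC backing page. Sketch:

    static int
    get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
                     struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
    {
        struct kvm_lapic_irq irq;
        struct kvm_vcpu *vcpu = NULL;

        kvm_set_msi_irq(kvm, e, &irq);

        /* Posting requires a single, postable destination vCPU. */
        if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu) ||
            !kvm_irq_is_postable(&irq))
            return -1;

        *svm = to_svm(vcpu);
        vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
        vcpu_info->vector = irq.vector;

        return 0;
    }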
826 struct vcpu_svm *svm = NULL; in svm_update_pi_irte() local
838 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set && in svm_update_pi_irte()
839 kvm_vcpu_apicv_active(&svm->vcpu)) { in svm_update_pi_irte()
843 pi.base = __sme_set(page_to_phys(svm->avic_backing_page) & in svm_update_pi_irte()
846 svm->vcpu.vcpu_id); in svm_update_pi_irte()
859 svm_ir_list_add(svm, &pi); in svm_update_pi_irte()
889 if (!ret && svm) { in svm_update_pi_irte()
890 trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, in svm_update_pi_irte()
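Source lines 826-890 are from svm_update_pi_irte(), the irqbypass hook that switches an assigned device's IRTE into IOMMU guest (posted) mode. Only the interesting branch is sketched below; the fallback to legacy remapping and the ir_list cleanup on the "unset" path are elided. Names such as AVIC_GATAG() and avic_vm_id are taken from upstream avic.c and should be treated as assumptions:

    int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
                           uint32_t guest_irq, bool set)
    {
        struct kvm_kernel_irq_routing_entry *e;
        struct kvm_irq_routing_table *irq_rt;
        int idx, ret = -EINVAL;

        if (!kvm_arch_has_assigned_device(kvm) ||
            !irq_remapping_cap(IRQ_POSTING_CAP))
            return 0;

        idx = srcu_read_lock(&kvm->irq_srcu);
        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);

        hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
            struct vcpu_data vcpu_info;
            struct vcpu_svm *svm = NULL;

            if (e->type != KVM_IRQ_ROUTING_MSI)
                continue;

            if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
                kvm_vcpu_apicv_active(&svm->vcpu)) {
                struct amd_iommu_pi_data pi;

                /* Point the IRTE at this vCPU's AVIC backing page. */
                pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
                                    AVIC_HPA_MASK);
                pi.ga_tag = AVIC_GATAG(to_kvm_svm(kvm)->avic_vm_id,
                                       svm->vcpu.vcpu_id);
                pi.is_guest_mode = true;
                pi.vcpu_data = &vcpu_info;
                ret = irq_set_vcpu_affinity(host_irq, &pi);

                /* Track the IRTE so vcpu load/put can retarget it (line 859). */
                if (!ret && pi.is_guest_mode)
                    svm_ir_list_add(svm, &pi);
            }
            /* Legacy-mode fallback and ir_list cleanup for !set elided. */

            if (!ret && svm) {
                trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id,
                                         e->gsi, vcpu_info.vector,
                                         vcpu_info.pi_desc_addr, set);
            }
        }

        srcu_read_unlock(&kvm->irq_srcu, idx);
        return ret;
    }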
930 struct vcpu_svm *svm = to_svm(vcpu); in avic_update_iommu_vcpu_affinity() local
939 spin_lock_irqsave(&svm->ir_list_lock, flags); in avic_update_iommu_vcpu_affinity()
941 if (list_empty(&svm->ir_list)) in avic_update_iommu_vcpu_affinity()
944 list_for_each_entry(ir, &svm->ir_list, node) { in avic_update_iommu_vcpu_affinity()
950 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in avic_update_iommu_vcpu_affinity()
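Source lines 930-950 are avic_update_iommu_vcpu_affinity(), called on vCPU load/put to retarget every posted interrupt aimed at this vCPU to its new physical CPU (or to flag it as not running). Sketch, assuming the amd_iommu_update_ga() IOMMU API:

    static int
    avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
    {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct amd_svm_iommu_ir *ir;
        unsigned long flags;
        int ret = 0;

        if (!kvm_arch_has_assigned_device(vcpu->kvm))
            return 0;

        /* Retarget every IRTE posting into this vCPU (lines 939-950). */
        spin_lock_irqsave(&svm->ir_list_lock, flags);

        if (list_empty(&svm->ir_list))
            goto out;

        list_for_each_entry(ir, &svm->ir_list, node) {
            ret = amd_iommu_update_ga(cpu, r, ir->data);
            if (ret)
                break;
        }
    out:
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
        return ret;
    }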
959 struct vcpu_svm *svm = to_svm(vcpu); in avic_vcpu_load() local
971 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_load()
978 if (svm->avic_is_running) in avic_vcpu_load()
981 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_load()
983 svm->avic_is_running); in avic_vcpu_load()
989 struct vcpu_svm *svm = to_svm(vcpu); in avic_vcpu_put() local
994 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_put()
999 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_put()
1007 struct vcpu_svm *svm = to_svm(vcpu); in avic_set_running() local
1009 svm->avic_is_running = is_run; in avic_set_running()
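Source lines 959-1009 are avic_vcpu_load(), avic_vcpu_put() and avic_set_running(): on sched-in the physical APIC ID table entry gets the new host APIC ID and the IsRunning bit, on sched-out IsRunning is cleared, and halt/unhalt toggles avic_is_running so that IPIs to a halted vCPU fault into avic_incomplete_ipi_interception() and wake it. Condensed sketch, assuming the AVIC_PHYSICAL_ID_ENTRY_* masks from upstream avic.c:

    void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
    {
        struct vcpu_svm *svm = to_svm(vcpu);
        int h_physical_id = kvm_cpu_get_apicid(cpu);
        u64 entry;

        if (!kvm_vcpu_apicv_active(vcpu))
            return;

        entry = READ_ONCE(*(svm->avic_physical_id_cache));

        /* Record the new host CPU and (possibly) set IsRunning. */
        entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
        entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
        entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
        if (svm->avic_is_running)                       /* line 978 */
            entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;

        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
        avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
                                        svm->avic_is_running);
    }

    void avic_vcpu_put(struct kvm_vcpu *vcpu)
    {
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 entry;

        if (!kvm_vcpu_apicv_active(vcpu))
            return;

        entry = READ_ONCE(*(svm->avic_physical_id_cache));
        if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
            avic_update_iommu_vcpu_affinity(vcpu, -1, 0);

        entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
        WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
    }

    /* Called on halt/unhalt: reflect runnability into the tables above. */
    static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
    {
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->avic_is_running = is_run;
        if (is_run)
            avic_vcpu_load(vcpu, vcpu->cpu);
        else
            avic_vcpu_put(vcpu);
    }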