Lines matching refs:svm (KVM AMD SVM support, arch/x86/kvm/svm.c)

388 static void svm_complete_interrupts(struct vcpu_svm *svm);
390 static int nested_svm_exit_handled(struct vcpu_svm *svm);
391 static int nested_svm_intercept(struct vcpu_svm *svm);
392 static int nested_svm_vmexit(struct vcpu_svm *svm);
393 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
484 static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data) in avic_update_vapic_bar() argument
486 svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK; in avic_update_vapic_bar()
487 mark_dirty(svm->vmcb, VMCB_AVIC); in avic_update_vapic_bar()
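
avic_update_vapic_bar() ends by marking the AVIC area dirty. The mark_dirty()/mark_all_dirty() helpers never mention "svm" themselves, so they do not show up in this listing; a minimal sketch of what they do, reconstructed on the assumption that they manage the VMCB clean-bits field the way the rest of this file uses them:

/* Sketch (assumed from the VMCB clean-bits scheme): clearing a clean bit
 * tells the CPU that KVM modified the corresponding VMCB area, so the
 * hardware must reload it instead of using its cached copy. */
static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;	/* invalidate every cached area */
}
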
492 struct vcpu_svm *svm = to_svm(vcpu); in avic_vcpu_is_running() local
493 u64 *entry = svm->avic_physical_id_cache; in avic_vcpu_is_running()
501 static void recalc_intercepts(struct vcpu_svm *svm) in recalc_intercepts() argument
506 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in recalc_intercepts()
508 if (!is_guest_mode(&svm->vcpu)) in recalc_intercepts()
511 c = &svm->vmcb->control; in recalc_intercepts()
512 h = &svm->nested.hsave->control; in recalc_intercepts()
513 g = &svm->nested; in recalc_intercepts()
521 static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm) in get_host_vmcb() argument
523 if (is_guest_mode(&svm->vcpu)) in get_host_vmcb()
524 return svm->nested.hsave; in get_host_vmcb()
526 return svm->vmcb; in get_host_vmcb()
529 static inline void set_cr_intercept(struct vcpu_svm *svm, int bit) in set_cr_intercept() argument
531 struct vmcb *vmcb = get_host_vmcb(svm); in set_cr_intercept()
535 recalc_intercepts(svm); in set_cr_intercept()
538 static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit) in clr_cr_intercept() argument
540 struct vmcb *vmcb = get_host_vmcb(svm); in clr_cr_intercept()
544 recalc_intercepts(svm); in clr_cr_intercept()
547 static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit) in is_cr_intercept() argument
549 struct vmcb *vmcb = get_host_vmcb(svm); in is_cr_intercept()
554 static inline void set_dr_intercepts(struct vcpu_svm *svm) in set_dr_intercepts() argument
556 struct vmcb *vmcb = get_host_vmcb(svm); in set_dr_intercepts()
575 recalc_intercepts(svm); in set_dr_intercepts()
578 static inline void clr_dr_intercepts(struct vcpu_svm *svm) in clr_dr_intercepts() argument
580 struct vmcb *vmcb = get_host_vmcb(svm); in clr_dr_intercepts()
584 recalc_intercepts(svm); in clr_dr_intercepts()
587 static inline void set_exception_intercept(struct vcpu_svm *svm, int bit) in set_exception_intercept() argument
589 struct vmcb *vmcb = get_host_vmcb(svm); in set_exception_intercept()
593 recalc_intercepts(svm); in set_exception_intercept()
596 static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit) in clr_exception_intercept() argument
598 struct vmcb *vmcb = get_host_vmcb(svm); in clr_exception_intercept()
602 recalc_intercepts(svm); in clr_exception_intercept()
605 static inline void set_intercept(struct vcpu_svm *svm, int bit) in set_intercept() argument
607 struct vmcb *vmcb = get_host_vmcb(svm); in set_intercept()
611 recalc_intercepts(svm); in set_intercept()
614 static inline void clr_intercept(struct vcpu_svm *svm, int bit) in clr_intercept() argument
616 struct vmcb *vmcb = get_host_vmcb(svm); in clr_intercept()
620 recalc_intercepts(svm); in clr_intercept()
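
All of the set_*/clr_* intercept helpers above share one shape; the actual bit-manipulation lines are elided from this listing because they do not contain "svm". A sketch of the full pattern for set_cr_intercept(), with the elided middle line reconstructed as an assumption from the helper's name:

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	/* While L2 runs, edits made on L1's behalf go to nested.hsave. */
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);	/* assumed elided line */

	recalc_intercepts(svm);	/* re-merge L1's and L2's intercept masks */
}
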
623 static inline bool vgif_enabled(struct vcpu_svm *svm) in vgif_enabled() argument
625 return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK); in vgif_enabled()
628 static inline void enable_gif(struct vcpu_svm *svm) in enable_gif() argument
630 if (vgif_enabled(svm)) in enable_gif()
631 svm->vmcb->control.int_ctl |= V_GIF_MASK; in enable_gif()
633 svm->vcpu.arch.hflags |= HF_GIF_MASK; in enable_gif()
636 static inline void disable_gif(struct vcpu_svm *svm) in disable_gif() argument
638 if (vgif_enabled(svm)) in disable_gif()
639 svm->vmcb->control.int_ctl &= ~V_GIF_MASK; in disable_gif()
641 svm->vcpu.arch.hflags &= ~HF_GIF_MASK; in disable_gif()
644 static inline bool gif_set(struct vcpu_svm *svm) in gif_set() argument
646 if (vgif_enabled(svm)) in gif_set()
647 return !!(svm->vmcb->control.int_ctl & V_GIF_MASK); in gif_set()
649 return !!(svm->vcpu.arch.hflags & HF_GIF_MASK); in gif_set()
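
The Global Interrupt Flag lives in one of two places, which is why every helper above branches on vgif_enabled(): with hardware vGIF it is the V_GIF bit in int_ctl, otherwise KVM tracks it in software via HF_GIF_MASK. A sketch of enable_gif() with the elided else branch restored as an assumption:

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))		/* hardware-virtualized GIF */
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else				/* assumed elided branch */
		svm->vcpu.arch.hflags |= HF_GIF_MASK;	/* software GIF */
}
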
758 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_interrupt_shadow() local
761 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) in svm_get_interrupt_shadow()
768 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_interrupt_shadow() local
771 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
773 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
779 struct vcpu_svm *svm = to_svm(vcpu); in skip_emulated_instruction() local
781 if (nrips && svm->vmcb->control.next_rip != 0) { in skip_emulated_instruction()
783 svm->next_rip = svm->vmcb->control.next_rip; in skip_emulated_instruction()
786 if (!svm->next_rip) { in skip_emulated_instruction()
790 if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE) in skip_emulated_instruction()
792 __func__, kvm_rip_read(vcpu), svm->next_rip); in skip_emulated_instruction()
793 kvm_rip_write(vcpu, svm->next_rip); in skip_emulated_instruction()
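
skip_emulated_instruction() prefers the hardware-recorded next_rip (the NRIP-save feature) and only decodes the instruction when that is unavailable. A condensed sketch of the flow, assuming the elided fallback calls kvm_emulate_instruction() with EMULTYPE_SKIP as mainline KVM of this era does:

if (nrips && svm->vmcb->control.next_rip != 0) {
	/* The CPU recorded the next RIP at VMEXIT; trust it. */
	svm->next_rip = svm->vmcb->control.next_rip;
}

if (!svm->next_rip) {
	/* Assumed elided fallback: decode the instruction to skip it. */
	if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
		return 0;
} else {
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		pr_err("%s: ip 0x%lx next 0x%llx\n",	/* assumed elided */
		       __func__, kvm_rip_read(vcpu), svm->next_rip);
	kvm_rip_write(vcpu, svm->next_rip);
}
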
802 struct vcpu_svm *svm = to_svm(vcpu); in svm_queue_exception() local
813 nested_svm_check_exception(svm, nr, has_error_code, error_code)) in svm_queue_exception()
816 kvm_deliver_exception_payload(&svm->vcpu); in svm_queue_exception()
819 unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
828 (void)skip_emulated_instruction(&svm->vcpu); in svm_queue_exception()
829 rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
830 svm->int3_rip = rip + svm->vmcb->save.cs.base; in svm_queue_exception()
831 svm->int3_injected = rip - old_rip; in svm_queue_exception()
834 svm->vmcb->control.event_inj = nr in svm_queue_exception()
838 svm->vmcb->control.event_inj_err = error_code; in svm_queue_exception()
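
Lines 834-838 build the VMCB EVENTINJ field, but the OR-ed flag lines are elided here. A sketch of the full composition, assuming the standard SVM_EVTINJ_* flags:

svm->vmcb->control.event_inj = nr		/* exception vector */
	| SVM_EVTINJ_VALID			/* assumed elided flags: */
	| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
	| SVM_EVTINJ_TYPE_EXEPT;		/* inject as an exception */
svm->vmcb->control.event_inj_err = error_code;
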
1144 static void svm_enable_lbrv(struct vcpu_svm *svm) in svm_enable_lbrv() argument
1146 u32 *msrpm = svm->msrpm; in svm_enable_lbrv()
1148 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; in svm_enable_lbrv()
1155 static void svm_disable_lbrv(struct vcpu_svm *svm) in svm_disable_lbrv() argument
1157 u32 *msrpm = svm->msrpm; in svm_disable_lbrv()
1159 svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK; in svm_disable_lbrv()
1166 static void disable_nmi_singlestep(struct vcpu_svm *svm) in disable_nmi_singlestep() argument
1168 svm->nmi_singlestep = false; in disable_nmi_singlestep()
1170 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) { in disable_nmi_singlestep()
1172 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in disable_nmi_singlestep()
1173 svm->vmcb->save.rflags &= ~X86_EFLAGS_TF; in disable_nmi_singlestep()
1174 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in disable_nmi_singlestep()
1175 svm->vmcb->save.rflags &= ~X86_EFLAGS_RF; in disable_nmi_singlestep()
1267 struct vcpu_svm *svm = to_svm(vcpu); in grow_ple_window() local
1268 struct vmcb_control_area *control = &svm->vmcb->control; in grow_ple_window()
1277 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in grow_ple_window()
1285 struct vcpu_svm *svm = to_svm(vcpu); in shrink_ple_window() local
1286 struct vmcb_control_area *control = &svm->vmcb->control; in shrink_ple_window()
1295 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in shrink_ple_window()
1450 struct vcpu_svm *svm = to_svm(vcpu); in svm_read_l1_tsc_offset() local
1453 return svm->nested.hsave->control.tsc_offset; in svm_read_l1_tsc_offset()
1460 struct vcpu_svm *svm = to_svm(vcpu); in svm_write_l1_tsc_offset() local
1465 g_tsc_offset = svm->vmcb->control.tsc_offset - in svm_write_l1_tsc_offset()
1466 svm->nested.hsave->control.tsc_offset; in svm_write_l1_tsc_offset()
1467 svm->nested.hsave->control.tsc_offset = offset; in svm_write_l1_tsc_offset()
1471 svm->vmcb->control.tsc_offset - g_tsc_offset, in svm_write_l1_tsc_offset()
1474 svm->vmcb->control.tsc_offset = offset + g_tsc_offset; in svm_write_l1_tsc_offset()
1476 mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in svm_write_l1_tsc_offset()
1477 return svm->vmcb->control.tsc_offset; in svm_write_l1_tsc_offset()
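
svm_write_l1_tsc_offset() has to preserve L2's share of the offset while replacing L1's. A sketch of the arithmetic used above (assumption: names match the listing; the guest-mode check and tracepoint are elided). While L2 runs, the active vmcb carries the combined offset, and hsave keeps L1's own, so L2's contribution can be recovered by subtraction:

u64 g_tsc_offset = svm->vmcb->control.tsc_offset -
		   svm->nested.hsave->control.tsc_offset; /* = L2's share */
svm->nested.hsave->control.tsc_offset = offset;           /* new L1 offset */
svm->vmcb->control.tsc_offset = offset + g_tsc_offset;    /* recombine */
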
1480 static void avic_init_vmcb(struct vcpu_svm *svm) in avic_init_vmcb() argument
1482 struct vmcb *vmcb = svm->vmcb; in avic_init_vmcb()
1483 struct kvm_svm *kvm_svm = to_kvm_svm(svm->vcpu.kvm); in avic_init_vmcb()
1484 phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page)); in avic_init_vmcb()
1495 static void init_vmcb(struct vcpu_svm *svm) in init_vmcb() argument
1497 struct vmcb_control_area *control = &svm->vmcb->control; in init_vmcb()
1498 struct vmcb_save_area *save = &svm->vmcb->save; in init_vmcb()
1500 svm->vcpu.arch.hflags = 0; in init_vmcb()
1502 set_cr_intercept(svm, INTERCEPT_CR0_READ); in init_vmcb()
1503 set_cr_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1504 set_cr_intercept(svm, INTERCEPT_CR4_READ); in init_vmcb()
1505 set_cr_intercept(svm, INTERCEPT_CR0_WRITE); in init_vmcb()
1506 set_cr_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1507 set_cr_intercept(svm, INTERCEPT_CR4_WRITE); in init_vmcb()
1508 if (!kvm_vcpu_apicv_active(&svm->vcpu)) in init_vmcb()
1509 set_cr_intercept(svm, INTERCEPT_CR8_WRITE); in init_vmcb()
1511 set_dr_intercepts(svm); in init_vmcb()
1513 set_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1514 set_exception_intercept(svm, UD_VECTOR); in init_vmcb()
1515 set_exception_intercept(svm, MC_VECTOR); in init_vmcb()
1516 set_exception_intercept(svm, AC_VECTOR); in init_vmcb()
1517 set_exception_intercept(svm, DB_VECTOR); in init_vmcb()
1525 set_exception_intercept(svm, GP_VECTOR); in init_vmcb()
1527 set_intercept(svm, INTERCEPT_INTR); in init_vmcb()
1528 set_intercept(svm, INTERCEPT_NMI); in init_vmcb()
1529 set_intercept(svm, INTERCEPT_SMI); in init_vmcb()
1530 set_intercept(svm, INTERCEPT_SELECTIVE_CR0); in init_vmcb()
1531 set_intercept(svm, INTERCEPT_RDPMC); in init_vmcb()
1532 set_intercept(svm, INTERCEPT_CPUID); in init_vmcb()
1533 set_intercept(svm, INTERCEPT_INVD); in init_vmcb()
1534 set_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1535 set_intercept(svm, INTERCEPT_INVLPGA); in init_vmcb()
1536 set_intercept(svm, INTERCEPT_IOIO_PROT); in init_vmcb()
1537 set_intercept(svm, INTERCEPT_MSR_PROT); in init_vmcb()
1538 set_intercept(svm, INTERCEPT_TASK_SWITCH); in init_vmcb()
1539 set_intercept(svm, INTERCEPT_SHUTDOWN); in init_vmcb()
1540 set_intercept(svm, INTERCEPT_VMRUN); in init_vmcb()
1541 set_intercept(svm, INTERCEPT_VMMCALL); in init_vmcb()
1542 set_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb()
1543 set_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb()
1544 set_intercept(svm, INTERCEPT_STGI); in init_vmcb()
1545 set_intercept(svm, INTERCEPT_CLGI); in init_vmcb()
1546 set_intercept(svm, INTERCEPT_SKINIT); in init_vmcb()
1547 set_intercept(svm, INTERCEPT_WBINVD); in init_vmcb()
1548 set_intercept(svm, INTERCEPT_XSETBV); in init_vmcb()
1549 set_intercept(svm, INTERCEPT_RDPRU); in init_vmcb()
1550 set_intercept(svm, INTERCEPT_RSM); in init_vmcb()
1552 if (!kvm_mwait_in_guest(svm->vcpu.kvm)) { in init_vmcb()
1553 set_intercept(svm, INTERCEPT_MONITOR); in init_vmcb()
1554 set_intercept(svm, INTERCEPT_MWAIT); in init_vmcb()
1557 if (!kvm_hlt_in_guest(svm->vcpu.kvm)) in init_vmcb()
1558 set_intercept(svm, INTERCEPT_HLT); in init_vmcb()
1561 control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); in init_vmcb()
1583 svm_set_efer(&svm->vcpu, 0); in init_vmcb()
1585 kvm_set_rflags(&svm->vcpu, 2); in init_vmcb()
1587 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; in init_vmcb()
1593 svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); in init_vmcb()
1594 kvm_mmu_reset_context(&svm->vcpu); in init_vmcb()
1602 clr_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1603 clr_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1604 clr_cr_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1605 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1606 save->g_pat = svm->vcpu.arch.pat; in init_vmcb()
1610 svm->asid_generation = 0; in init_vmcb()
1612 svm->nested.vmcb = 0; in init_vmcb()
1613 svm->vcpu.arch.hflags = 0; in init_vmcb()
1619 set_intercept(svm, INTERCEPT_PAUSE); in init_vmcb()
1621 clr_intercept(svm, INTERCEPT_PAUSE); in init_vmcb()
1624 if (kvm_vcpu_apicv_active(&svm->vcpu)) in init_vmcb()
1625 avic_init_vmcb(svm); in init_vmcb()
1632 clr_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb()
1633 clr_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb()
1634 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; in init_vmcb()
1638 clr_intercept(svm, INTERCEPT_STGI); in init_vmcb()
1639 clr_intercept(svm, INTERCEPT_CLGI); in init_vmcb()
1640 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; in init_vmcb()
1643 if (sev_guest(svm->vcpu.kvm)) { in init_vmcb()
1644 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; in init_vmcb()
1645 clr_exception_intercept(svm, UD_VECTOR); in init_vmcb()
1648 mark_all_dirty(svm->vmcb); in init_vmcb()
1650 enable_gif(svm); in init_vmcb()
1703 struct vcpu_svm *svm = to_svm(vcpu); in avic_init_backing_page() local
1712 if (!svm->vcpu.arch.apic->regs) in avic_init_backing_page()
1715 svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs); in avic_init_backing_page()
1722 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) & in avic_init_backing_page()
1727 svm->avic_physical_id_cache = entry; in avic_init_backing_page()
2006 struct vcpu_svm *svm = to_svm(vcpu); in avic_update_iommu_vcpu_affinity() local
2015 spin_lock_irqsave(&svm->ir_list_lock, flags); in avic_update_iommu_vcpu_affinity()
2017 if (list_empty(&svm->ir_list)) in avic_update_iommu_vcpu_affinity()
2020 list_for_each_entry(ir, &svm->ir_list, node) { in avic_update_iommu_vcpu_affinity()
2026 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in avic_update_iommu_vcpu_affinity()
2035 struct vcpu_svm *svm = to_svm(vcpu); in avic_vcpu_load() local
2047 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_load()
2054 if (svm->avic_is_running) in avic_vcpu_load()
2057 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_load()
2059 svm->avic_is_running); in avic_vcpu_load()
2065 struct vcpu_svm *svm = to_svm(vcpu); in avic_vcpu_put() local
2070 entry = READ_ONCE(*(svm->avic_physical_id_cache)); in avic_vcpu_put()
2075 WRITE_ONCE(*(svm->avic_physical_id_cache), entry); in avic_vcpu_put()
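
avic_vcpu_load()/avic_vcpu_put() edit this vCPU's entry in the AVIC physical APIC ID table; the bit manipulation between the READ_ONCE and WRITE_ONCE is elided above. A sketch of the update done on load, assuming the AVIC_PHYSICAL_ID_ENTRY_* masks from the SVM headers and h_physical_id as the target CPU's APIC ID:

entry = READ_ONCE(*(svm->avic_physical_id_cache));

/* Point the entry at the physical CPU this vCPU is loading onto. */
entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);

/* IS_RUNNING tells remote CPUs they may post interrupts via doorbell
 * instead of falling back to a conventional IPI plus KVM wakeup. */
entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
if (svm->avic_is_running)
	entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;

WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
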
2083 struct vcpu_svm *svm = to_svm(vcpu); in avic_set_running() local
2085 svm->avic_is_running = is_run; in avic_set_running()
2094 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_reset() local
2099 svm->spec_ctrl = 0; in svm_vcpu_reset()
2100 svm->virt_spec_ctrl = 0; in svm_vcpu_reset()
2103 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | in svm_vcpu_reset()
2105 if (kvm_vcpu_is_reset_bsp(&svm->vcpu)) in svm_vcpu_reset()
2106 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; in svm_vcpu_reset()
2108 init_vmcb(svm); in svm_vcpu_reset()
2114 avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE); in svm_vcpu_reset()
2117 static int avic_init_vcpu(struct vcpu_svm *svm) in avic_init_vcpu() argument
2121 if (!kvm_vcpu_apicv_active(&svm->vcpu)) in avic_init_vcpu()
2124 ret = avic_init_backing_page(&svm->vcpu); in avic_init_vcpu()
2128 INIT_LIST_HEAD(&svm->ir_list); in avic_init_vcpu()
2129 spin_lock_init(&svm->ir_list_lock); in avic_init_vcpu()
2130 svm->dfr_reg = APIC_DFR_FLAT; in avic_init_vcpu()
2137 struct vcpu_svm *svm; in svm_create_vcpu() local
2147 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL_ACCOUNT); in svm_create_vcpu()
2148 if (!svm) { in svm_create_vcpu()
2153 svm->vcpu.arch.user_fpu = kmem_cache_zalloc(x86_fpu_cache, in svm_create_vcpu()
2155 if (!svm->vcpu.arch.user_fpu) { in svm_create_vcpu()
2161 svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, in svm_create_vcpu()
2163 if (!svm->vcpu.arch.guest_fpu) { in svm_create_vcpu()
2169 err = kvm_vcpu_init(&svm->vcpu, kvm, id); in svm_create_vcpu()
2190 err = avic_init_vcpu(svm); in svm_create_vcpu()
2197 svm->avic_is_running = true; in svm_create_vcpu()
2199 svm->nested.hsave = page_address(hsave_page); in svm_create_vcpu()
2201 svm->msrpm = page_address(msrpm_pages); in svm_create_vcpu()
2202 svm_vcpu_init_msrpm(svm->msrpm); in svm_create_vcpu()
2204 svm->nested.msrpm = page_address(nested_msrpm_pages); in svm_create_vcpu()
2205 svm_vcpu_init_msrpm(svm->nested.msrpm); in svm_create_vcpu()
2207 svm->vmcb = page_address(page); in svm_create_vcpu()
2208 clear_page(svm->vmcb); in svm_create_vcpu()
2209 svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT); in svm_create_vcpu()
2210 svm->asid_generation = 0; in svm_create_vcpu()
2211 init_vmcb(svm); in svm_create_vcpu()
2213 svm_init_osvw(&svm->vcpu); in svm_create_vcpu()
2215 return &svm->vcpu; in svm_create_vcpu()
2226 kvm_vcpu_uninit(&svm->vcpu); in svm_create_vcpu()
2228 kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu); in svm_create_vcpu()
2230 kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu); in svm_create_vcpu()
2232 kmem_cache_free(kvm_vcpu_cache, svm); in svm_create_vcpu()
2247 struct vcpu_svm *svm = to_svm(vcpu); in svm_free_vcpu() local
2254 svm_clear_current_vmcb(svm->vmcb); in svm_free_vcpu()
2256 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); in svm_free_vcpu()
2257 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); in svm_free_vcpu()
2258 __free_page(virt_to_page(svm->nested.hsave)); in svm_free_vcpu()
2259 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); in svm_free_vcpu()
2261 kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.user_fpu); in svm_free_vcpu()
2262 kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu); in svm_free_vcpu()
2263 kmem_cache_free(kvm_vcpu_cache, svm); in svm_free_vcpu()
2268 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_load() local
2273 svm->asid_generation = 0; in svm_vcpu_load()
2274 mark_all_dirty(svm->vmcb); in svm_vcpu_load()
2280 savesegment(fs, svm->host.fs); in svm_vcpu_load()
2281 savesegment(gs, svm->host.gs); in svm_vcpu_load()
2282 svm->host.ldt = kvm_read_ldt(); in svm_vcpu_load()
2285 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); in svm_vcpu_load()
2296 wrmsrl(MSR_TSC_AUX, svm->tsc_aux); in svm_vcpu_load()
2298 if (sd->current_vmcb != svm->vmcb) { in svm_vcpu_load()
2299 sd->current_vmcb = svm->vmcb; in svm_vcpu_load()
2307 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_put() local
2313 kvm_load_ldt(svm->host.ldt); in svm_vcpu_put()
2315 loadsegment(fs, svm->host.fs); in svm_vcpu_put()
2317 load_gs_index(svm->host.gs); in svm_vcpu_put()
2320 loadsegment(gs, svm->host.gs); in svm_vcpu_put()
2324 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); in svm_vcpu_put()
2339 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_rflags() local
2340 unsigned long rflags = svm->vmcb->save.rflags; in svm_get_rflags()
2342 if (svm->nmi_singlestep) { in svm_get_rflags()
2344 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in svm_get_rflags()
2346 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in svm_get_rflags()
2377 static void svm_set_vintr(struct vcpu_svm *svm) in svm_set_vintr() argument
2379 set_intercept(svm, INTERCEPT_VINTR); in svm_set_vintr()
2382 static void svm_clear_vintr(struct vcpu_svm *svm) in svm_clear_vintr() argument
2384 clr_intercept(svm, INTERCEPT_VINTR); in svm_clear_vintr()
2490 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_idt() local
2492 dt->size = svm->vmcb->save.idtr.limit; in svm_get_idt()
2493 dt->address = svm->vmcb->save.idtr.base; in svm_get_idt()
2498 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_idt() local
2500 svm->vmcb->save.idtr.limit = dt->size; in svm_set_idt()
2501 svm->vmcb->save.idtr.base = dt->address; in svm_set_idt()
2502 mark_dirty(svm->vmcb, VMCB_DT); in svm_set_idt()
2507 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_gdt() local
2509 dt->size = svm->vmcb->save.gdtr.limit; in svm_get_gdt()
2510 dt->address = svm->vmcb->save.gdtr.base; in svm_get_gdt()
2515 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_gdt() local
2517 svm->vmcb->save.gdtr.limit = dt->size; in svm_set_gdt()
2518 svm->vmcb->save.gdtr.base = dt->address; in svm_set_gdt()
2519 mark_dirty(svm->vmcb, VMCB_DT); in svm_set_gdt()
2534 static void update_cr0_intercept(struct vcpu_svm *svm) in update_cr0_intercept() argument
2536 ulong gcr0 = svm->vcpu.arch.cr0; in update_cr0_intercept()
2537 u64 *hcr0 = &svm->vmcb->save.cr0; in update_cr0_intercept()
2542 mark_dirty(svm->vmcb, VMCB_CR); in update_cr0_intercept()
2545 clr_cr_intercept(svm, INTERCEPT_CR0_READ); in update_cr0_intercept()
2546 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE); in update_cr0_intercept()
2548 set_cr_intercept(svm, INTERCEPT_CR0_READ); in update_cr0_intercept()
2549 set_cr_intercept(svm, INTERCEPT_CR0_WRITE); in update_cr0_intercept()
2555 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr0() local
2561 svm->vmcb->save.efer |= EFER_LMA | EFER_LME; in svm_set_cr0()
2566 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); in svm_set_cr0()
2582 svm->vmcb->save.cr0 = cr0; in svm_set_cr0()
2583 mark_dirty(svm->vmcb, VMCB_CR); in svm_set_cr0()
2584 update_cr0_intercept(svm); in svm_set_cr0()
2610 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_segment() local
2633 svm->vmcb->save.cpl = (var->dpl & 3); in svm_set_segment()
2635 mark_dirty(svm->vmcb, VMCB_SEG); in svm_set_segment()
2640 struct vcpu_svm *svm = to_svm(vcpu); in update_bp_intercept() local
2642 clr_exception_intercept(svm, BP_VECTOR); in update_bp_intercept()
2646 set_exception_intercept(svm, BP_VECTOR); in update_bp_intercept()
2651 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) in new_asid() argument
2656 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; in new_asid()
2659 svm->asid_generation = sd->asid_generation; in new_asid()
2660 svm->vmcb->control.asid = sd->next_asid++; in new_asid()
2662 mark_dirty(svm->vmcb, VMCB_ASID); in new_asid()
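
new_asid() hands out per-CPU ASIDs and only flushes the TLB when the space wraps; the wrap check is elided from the listing. A sketch with the assumed elided lines restored:

static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
{
	/* Assumed elided: on exhaustion, start a new generation and flush. */
	if (sd->next_asid > sd->max_asid) {
		++sd->asid_generation;
		sd->next_asid = sd->min_asid;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->asid_generation = sd->asid_generation;
	svm->vmcb->control.asid = sd->next_asid++;

	mark_dirty(svm->vmcb, VMCB_ASID);
}
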
2672 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr6() local
2674 svm->vmcb->save.dr6 = value; in svm_set_dr6()
2675 mark_dirty(svm->vmcb, VMCB_DR); in svm_set_dr6()
2680 struct vcpu_svm *svm = to_svm(vcpu); in svm_sync_dirty_debug_regs() local
2687 vcpu->arch.dr7 = svm->vmcb->save.dr7; in svm_sync_dirty_debug_regs()
2690 set_dr_intercepts(svm); in svm_sync_dirty_debug_regs()
2695 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr7() local
2697 svm->vmcb->save.dr7 = value; in svm_set_dr7()
2698 mark_dirty(svm->vmcb, VMCB_DR); in svm_set_dr7()
2701 static int pf_interception(struct vcpu_svm *svm) in pf_interception() argument
2703 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); in pf_interception()
2704 u64 error_code = svm->vmcb->control.exit_info_1; in pf_interception()
2706 return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address, in pf_interception()
2708 svm->vmcb->control.insn_bytes : NULL, in pf_interception()
2709 svm->vmcb->control.insn_len); in pf_interception()
2712 static int npf_interception(struct vcpu_svm *svm) in npf_interception() argument
2714 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); in npf_interception()
2715 u64 error_code = svm->vmcb->control.exit_info_1; in npf_interception()
2718 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, in npf_interception()
2720 svm->vmcb->control.insn_bytes : NULL, in npf_interception()
2721 svm->vmcb->control.insn_len); in npf_interception()
2724 static int db_interception(struct vcpu_svm *svm) in db_interception() argument
2726 struct kvm_run *kvm_run = svm->vcpu.run; in db_interception()
2727 struct kvm_vcpu *vcpu = &svm->vcpu; in db_interception()
2729 if (!(svm->vcpu.guest_debug & in db_interception()
2731 !svm->nmi_singlestep) { in db_interception()
2732 kvm_queue_exception(&svm->vcpu, DB_VECTOR); in db_interception()
2736 if (svm->nmi_singlestep) { in db_interception()
2737 disable_nmi_singlestep(svm); in db_interception()
2742 if (svm->vcpu.guest_debug & in db_interception()
2746 svm->vmcb->save.cs.base + svm->vmcb->save.rip; in db_interception()
2754 static int bp_interception(struct vcpu_svm *svm) in bp_interception() argument
2756 struct kvm_run *kvm_run = svm->vcpu.run; in bp_interception()
2759 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; in bp_interception()
2764 static int ud_interception(struct vcpu_svm *svm) in ud_interception() argument
2766 return handle_ud(&svm->vcpu); in ud_interception()
2769 static int ac_interception(struct vcpu_svm *svm) in ac_interception() argument
2771 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0); in ac_interception()
2775 static int gp_interception(struct vcpu_svm *svm) in gp_interception() argument
2777 struct kvm_vcpu *vcpu = &svm->vcpu; in gp_interception()
2778 u32 error_code = svm->vmcb->control.exit_info_1; in gp_interception()
2832 static void svm_handle_mce(struct vcpu_svm *svm) in svm_handle_mce() argument
2841 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu); in svm_handle_mce()
2857 static int mc_interception(struct vcpu_svm *svm) in mc_interception() argument
2862 static int shutdown_interception(struct vcpu_svm *svm) in shutdown_interception() argument
2864 struct kvm_run *kvm_run = svm->vcpu.run; in shutdown_interception()
2870 clear_page(svm->vmcb); in shutdown_interception()
2871 init_vmcb(svm); in shutdown_interception()
2877 static int io_interception(struct vcpu_svm *svm) in io_interception() argument
2879 struct kvm_vcpu *vcpu = &svm->vcpu; in io_interception()
2880 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ in io_interception()
2884 ++svm->vcpu.stat.io_exits; in io_interception()
2892 svm->next_rip = svm->vmcb->control.exit_info_2; in io_interception()
2894 return kvm_fast_pio(&svm->vcpu, size, port, in); in io_interception()
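
io_interception() decodes everything it needs from exit_info_1. A sketch of the decode, assuming the SVM_IOIO_* masks; string I/O (elided above) is punted to the emulator:

u32 io_info = svm->vmcb->control.exit_info_1;
int size, in, string;
unsigned port;

string = (io_info & SVM_IOIO_STR_MASK) != 0;	/* assumed elided lines */
in     = (io_info & SVM_IOIO_TYPE_MASK) != 0;	/* IN vs. OUT */
if (string)
	return kvm_emulate_instruction(vcpu, 0);

port = io_info >> 16;
size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
svm->next_rip = svm->vmcb->control.exit_info_2;	/* RIP after the insn */

return kvm_fast_pio(&svm->vcpu, size, port, in);
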
2897 static int nmi_interception(struct vcpu_svm *svm) in nmi_interception() argument
2902 static int intr_interception(struct vcpu_svm *svm) in intr_interception() argument
2904 ++svm->vcpu.stat.irq_exits; in intr_interception()
2908 static int nop_on_interception(struct vcpu_svm *svm) in nop_on_interception() argument
2913 static int halt_interception(struct vcpu_svm *svm) in halt_interception() argument
2915 return kvm_emulate_halt(&svm->vcpu); in halt_interception()
2918 static int vmmcall_interception(struct vcpu_svm *svm) in vmmcall_interception() argument
2920 return kvm_emulate_hypercall(&svm->vcpu); in vmmcall_interception()
2925 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_cr3() local
2927 return svm->nested.nested_cr3; in nested_svm_get_tdp_cr3()
2932 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_pdptr() local
2933 u64 cr3 = svm->nested.nested_cr3; in nested_svm_get_tdp_pdptr()
2947 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_set_tdp_cr3() local
2949 svm->vmcb->control.nested_cr3 = __sme_set(root); in nested_svm_set_tdp_cr3()
2950 mark_dirty(svm->vmcb, VMCB_NPT); in nested_svm_set_tdp_cr3()
2956 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_inject_npf_exit() local
2958 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) { in nested_svm_inject_npf_exit()
2963 svm->vmcb->control.exit_code = SVM_EXIT_NPF; in nested_svm_inject_npf_exit()
2964 svm->vmcb->control.exit_code_hi = 0; in nested_svm_inject_npf_exit()
2965 svm->vmcb->control.exit_info_1 = (1ULL << 32); in nested_svm_inject_npf_exit()
2966 svm->vmcb->control.exit_info_2 = fault->address; in nested_svm_inject_npf_exit()
2969 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL; in nested_svm_inject_npf_exit()
2970 svm->vmcb->control.exit_info_1 |= fault->error_code; in nested_svm_inject_npf_exit()
2976 if (svm->vmcb->control.exit_info_1 & (2ULL << 32)) in nested_svm_inject_npf_exit()
2977 svm->vmcb->control.exit_info_1 &= ~1; in nested_svm_inject_npf_exit()
2979 nested_svm_vmexit(svm); in nested_svm_inject_npf_exit()
3003 static int nested_svm_check_permissions(struct vcpu_svm *svm) in nested_svm_check_permissions() argument
3005 if (!(svm->vcpu.arch.efer & EFER_SVME) || in nested_svm_check_permissions()
3006 !is_paging(&svm->vcpu)) { in nested_svm_check_permissions()
3007 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in nested_svm_check_permissions()
3011 if (svm->vmcb->save.cpl) { in nested_svm_check_permissions()
3012 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_check_permissions()
3019 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr, in nested_svm_check_exception() argument
3024 if (!is_guest_mode(&svm->vcpu)) in nested_svm_check_exception()
3027 vmexit = nested_svm_intercept(svm); in nested_svm_check_exception()
3031 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; in nested_svm_check_exception()
3032 svm->vmcb->control.exit_code_hi = 0; in nested_svm_check_exception()
3033 svm->vmcb->control.exit_info_1 = error_code; in nested_svm_check_exception()
3039 if (svm->vcpu.arch.exception.nested_apf) in nested_svm_check_exception()
3040 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token; in nested_svm_check_exception()
3041 else if (svm->vcpu.arch.exception.has_payload) in nested_svm_check_exception()
3042 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload; in nested_svm_check_exception()
3044 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; in nested_svm_check_exception()
3046 svm->nested.exit_required = true; in nested_svm_check_exception()
3051 static inline bool nested_svm_intr(struct vcpu_svm *svm) in nested_svm_intr() argument
3053 if (!is_guest_mode(&svm->vcpu)) in nested_svm_intr()
3056 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) in nested_svm_intr()
3059 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK)) in nested_svm_intr()
3067 if (svm->nested.exit_required) in nested_svm_intr()
3070 svm->vmcb->control.exit_code = SVM_EXIT_INTR; in nested_svm_intr()
3071 svm->vmcb->control.exit_info_1 = 0; in nested_svm_intr()
3072 svm->vmcb->control.exit_info_2 = 0; in nested_svm_intr()
3074 if (svm->nested.intercept & 1ULL) { in nested_svm_intr()
3081 svm->nested.exit_required = true; in nested_svm_intr()
3082 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); in nested_svm_intr()
3090 static inline bool nested_svm_nmi(struct vcpu_svm *svm) in nested_svm_nmi() argument
3092 if (!is_guest_mode(&svm->vcpu)) in nested_svm_nmi()
3095 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI))) in nested_svm_nmi()
3098 svm->vmcb->control.exit_code = SVM_EXIT_NMI; in nested_svm_nmi()
3099 svm->nested.exit_required = true; in nested_svm_nmi()
3104 static int nested_svm_intercept_ioio(struct vcpu_svm *svm) in nested_svm_intercept_ioio() argument
3111 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT))) in nested_svm_intercept_ioio()
3114 port = svm->vmcb->control.exit_info_1 >> 16; in nested_svm_intercept_ioio()
3115 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> in nested_svm_intercept_ioio()
3117 gpa = svm->nested.vmcb_iopm + (port / 8); in nested_svm_intercept_ioio()
3123 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len)) in nested_svm_intercept_ioio()
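
For a nested IOIO exit, KVM consults L1's I/O permission bitmap in guest memory: one bit per port, so an access of `size` bytes at `port` covers `size` consecutive bits starting at bit port % 8 of byte port / 8. A sketch of the elided mask arithmetic, assuming mainline's variable names:

port = svm->vmcb->control.exit_info_1 >> 16;
size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
	SVM_IOIO_SIZE_SHIFT;
gpa  = svm->nested.vmcb_iopm + (port / 8);

start_bit = port % 8;				/* assumed elided lines */
iopm_len  = (start_bit + size > 8) ? 2 : 1;	/* may straddle a byte */
mask      = (0xf >> (4 - size)) << start_bit;	/* 'size' one-bits */

if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
	return NESTED_EXIT_DONE;		/* unreadable: exit to L1 */

return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
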
3129 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) in nested_svm_exit_handled_msr() argument
3134 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) in nested_svm_exit_handled_msr()
3137 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in nested_svm_exit_handled_msr()
3139 write = svm->vmcb->control.exit_info_1 & 1; in nested_svm_exit_handled_msr()
3148 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4)) in nested_svm_exit_handled_msr()
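
The MSR permission bitmap uses two bits per MSR (read intercept, write intercept). The elided lines compute which u32 of L1's bitmap to fetch and which bit to test; a sketch, assuming svm_msrpm_offset() as defined earlier in this file:

msr    = svm->vcpu.arch.regs[VCPU_REGS_RCX];	/* RDMSR/WRMSR target */
offset = svm_msrpm_offset(msr);			/* assumed elided lines */
write  = svm->vmcb->control.exit_info_1 & 1;	/* 1 = WRMSR */
mask   = 1 << ((2 * (msr & 0xf)) + write);	/* 2 bits per MSR */

if (offset == MSR_INVALID)
	return NESTED_EXIT_DONE;

offset *= 4;	/* offset is in u32 units, the guest read is in bytes */

if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset,
			&value, 4))
	return NESTED_EXIT_DONE;

return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
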
3155 static int nested_svm_intercept_db(struct vcpu_svm *svm) in nested_svm_intercept_db() argument
3160 if (!svm->nmi_singlestep) in nested_svm_intercept_db()
3164 if (kvm_get_dr(&svm->vcpu, 6, &dr6)) in nested_svm_intercept_db()
3170 if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) { in nested_svm_intercept_db()
3171 disable_nmi_singlestep(svm); in nested_svm_intercept_db()
3179 static int nested_svm_exit_special(struct vcpu_svm *svm) in nested_svm_exit_special() argument
3181 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_exit_special()
3195 if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0) in nested_svm_exit_special()
3208 static int nested_svm_intercept(struct vcpu_svm *svm) in nested_svm_intercept() argument
3210 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_intercept()
3215 vmexit = nested_svm_exit_handled_msr(svm); in nested_svm_intercept()
3218 vmexit = nested_svm_intercept_ioio(svm); in nested_svm_intercept()
3222 if (svm->nested.intercept_cr & bit) in nested_svm_intercept()
3228 if (svm->nested.intercept_dr & bit) in nested_svm_intercept()
3234 if (svm->nested.intercept_exceptions & excp_bits) { in nested_svm_intercept()
3236 vmexit = nested_svm_intercept_db(svm); in nested_svm_intercept()
3242 svm->vcpu.arch.exception.nested_apf != 0) in nested_svm_intercept()
3252 if (svm->nested.intercept & exit_bits) in nested_svm_intercept()
3260 static int nested_svm_exit_handled(struct vcpu_svm *svm) in nested_svm_exit_handled() argument
3264 vmexit = nested_svm_intercept(svm); in nested_svm_exit_handled()
3267 nested_svm_vmexit(svm); in nested_svm_exit_handled()
3304 static int nested_svm_vmexit(struct vcpu_svm *svm) in nested_svm_vmexit() argument
3308 struct vmcb *hsave = svm->nested.hsave; in nested_svm_vmexit()
3309 struct vmcb *vmcb = svm->vmcb; in nested_svm_vmexit()
3319 rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb), &map); in nested_svm_vmexit()
3322 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_vmexit()
3329 leave_guest_mode(&svm->vcpu); in nested_svm_vmexit()
3330 svm->nested.vmcb = 0; in nested_svm_vmexit()
3333 disable_gif(svm); in nested_svm_vmexit()
3341 nested_vmcb->save.efer = svm->vcpu.arch.efer; in nested_svm_vmexit()
3342 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmexit()
3343 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmexit()
3345 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmexit()
3346 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmexit()
3364 if (svm->nrips_enabled) in nested_svm_vmexit()
3387 svm->vmcb->control.pause_filter_count; in nested_svm_vmexit()
3389 svm->vmcb->control.pause_filter_thresh; in nested_svm_vmexit()
3392 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK)) in nested_svm_vmexit()
3398 svm->vcpu.arch.tsc_offset = svm->vmcb->control.tsc_offset; in nested_svm_vmexit()
3399 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmexit()
3400 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmexit()
3402 svm->nested.nested_cr3 = 0; in nested_svm_vmexit()
3405 svm->vmcb->save.es = hsave->save.es; in nested_svm_vmexit()
3406 svm->vmcb->save.cs = hsave->save.cs; in nested_svm_vmexit()
3407 svm->vmcb->save.ss = hsave->save.ss; in nested_svm_vmexit()
3408 svm->vmcb->save.ds = hsave->save.ds; in nested_svm_vmexit()
3409 svm->vmcb->save.gdtr = hsave->save.gdtr; in nested_svm_vmexit()
3410 svm->vmcb->save.idtr = hsave->save.idtr; in nested_svm_vmexit()
3411 kvm_set_rflags(&svm->vcpu, hsave->save.rflags); in nested_svm_vmexit()
3412 svm_set_efer(&svm->vcpu, hsave->save.efer); in nested_svm_vmexit()
3413 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE); in nested_svm_vmexit()
3414 svm_set_cr4(&svm->vcpu, hsave->save.cr4); in nested_svm_vmexit()
3416 svm->vmcb->save.cr3 = hsave->save.cr3; in nested_svm_vmexit()
3417 svm->vcpu.arch.cr3 = hsave->save.cr3; in nested_svm_vmexit()
3419 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3); in nested_svm_vmexit()
3421 kvm_rax_write(&svm->vcpu, hsave->save.rax); in nested_svm_vmexit()
3422 kvm_rsp_write(&svm->vcpu, hsave->save.rsp); in nested_svm_vmexit()
3423 kvm_rip_write(&svm->vcpu, hsave->save.rip); in nested_svm_vmexit()
3424 svm->vmcb->save.dr7 = 0; in nested_svm_vmexit()
3425 svm->vmcb->save.cpl = 0; in nested_svm_vmexit()
3426 svm->vmcb->control.exit_int_info = 0; in nested_svm_vmexit()
3428 mark_all_dirty(svm->vmcb); in nested_svm_vmexit()
3430 kvm_vcpu_unmap(&svm->vcpu, &map, true); in nested_svm_vmexit()
3432 nested_svm_uninit_mmu_context(&svm->vcpu); in nested_svm_vmexit()
3433 kvm_mmu_reset_context(&svm->vcpu); in nested_svm_vmexit()
3434 kvm_mmu_load(&svm->vcpu); in nested_svm_vmexit()
3440 svm->vcpu.arch.nmi_injected = false; in nested_svm_vmexit()
3441 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmexit()
3442 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmexit()
3447 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) in nested_svm_vmrun_msrpm() argument
3456 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT))) in nested_svm_vmrun_msrpm()
3467 offset = svm->nested.vmcb_msrpm + (p * 4); in nested_svm_vmrun_msrpm()
3469 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4)) in nested_svm_vmrun_msrpm()
3472 svm->nested.msrpm[p] = svm->msrpm[p] | value; in nested_svm_vmrun_msrpm()
3475 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm)); in nested_svm_vmrun_msrpm()
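
nested_svm_vmrun_msrpm() merges KVM's own MSR bitmap with L1's so that an MSR access exits if either party wants it intercepted. The loop body around lines 3467-3472 is only partially listed; a sketch, assuming the msrpm_offsets[] table this file builds at init time to enumerate the u32s KVM may leave as zero:

for (i = 0; i < MSRPM_OFFSETS; i++) {
	u32 value, p;
	u64 offset;

	if (msrpm_offsets[i] == 0xffffffff)	/* end of used offsets */
		break;

	p      = msrpm_offsets[i];
	offset = svm->nested.vmcb_msrpm + (p * 4);

	if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
		return false;

	/* Union of KVM's and L1's intercept bits. */
	svm->nested.msrpm[p] = svm->msrpm[p] | value;
}

svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
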
3495 static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa, in enter_svm_guest_mode() argument
3498 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF) in enter_svm_guest_mode()
3499 svm->vcpu.arch.hflags |= HF_HIF_MASK; in enter_svm_guest_mode()
3501 svm->vcpu.arch.hflags &= ~HF_HIF_MASK; in enter_svm_guest_mode()
3504 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3; in enter_svm_guest_mode()
3505 nested_svm_init_mmu_context(&svm->vcpu); in enter_svm_guest_mode()
3509 svm->vmcb->save.es = nested_vmcb->save.es; in enter_svm_guest_mode()
3510 svm->vmcb->save.cs = nested_vmcb->save.cs; in enter_svm_guest_mode()
3511 svm->vmcb->save.ss = nested_vmcb->save.ss; in enter_svm_guest_mode()
3512 svm->vmcb->save.ds = nested_vmcb->save.ds; in enter_svm_guest_mode()
3513 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr; in enter_svm_guest_mode()
3514 svm->vmcb->save.idtr = nested_vmcb->save.idtr; in enter_svm_guest_mode()
3515 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags); in enter_svm_guest_mode()
3516 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer); in enter_svm_guest_mode()
3517 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0); in enter_svm_guest_mode()
3518 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4); in enter_svm_guest_mode()
3520 svm->vmcb->save.cr3 = nested_vmcb->save.cr3; in enter_svm_guest_mode()
3521 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3; in enter_svm_guest_mode()
3523 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3); in enter_svm_guest_mode()
3526 kvm_mmu_reset_context(&svm->vcpu); in enter_svm_guest_mode()
3528 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2; in enter_svm_guest_mode()
3529 kvm_rax_write(&svm->vcpu, nested_vmcb->save.rax); in enter_svm_guest_mode()
3530 kvm_rsp_write(&svm->vcpu, nested_vmcb->save.rsp); in enter_svm_guest_mode()
3531 kvm_rip_write(&svm->vcpu, nested_vmcb->save.rip); in enter_svm_guest_mode()
3534 svm->vmcb->save.rax = nested_vmcb->save.rax; in enter_svm_guest_mode()
3535 svm->vmcb->save.rsp = nested_vmcb->save.rsp; in enter_svm_guest_mode()
3536 svm->vmcb->save.rip = nested_vmcb->save.rip; in enter_svm_guest_mode()
3537 svm->vmcb->save.dr7 = nested_vmcb->save.dr7; in enter_svm_guest_mode()
3538 svm->vmcb->save.dr6 = nested_vmcb->save.dr6; in enter_svm_guest_mode()
3539 svm->vmcb->save.cpl = nested_vmcb->save.cpl; in enter_svm_guest_mode()
3541 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL; in enter_svm_guest_mode()
3542 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL; in enter_svm_guest_mode()
3545 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr; in enter_svm_guest_mode()
3546 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr; in enter_svm_guest_mode()
3547 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions; in enter_svm_guest_mode()
3548 svm->nested.intercept = nested_vmcb->control.intercept; in enter_svm_guest_mode()
3550 svm_flush_tlb(&svm->vcpu, true); in enter_svm_guest_mode()
3551 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK; in enter_svm_guest_mode()
3553 svm->vcpu.arch.hflags |= HF_VINTR_MASK; in enter_svm_guest_mode()
3555 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK; in enter_svm_guest_mode()
3557 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) { in enter_svm_guest_mode()
3559 clr_cr_intercept(svm, INTERCEPT_CR8_READ); in enter_svm_guest_mode()
3560 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); in enter_svm_guest_mode()
3564 clr_intercept(svm, INTERCEPT_VMMCALL); in enter_svm_guest_mode()
3566 svm->vcpu.arch.tsc_offset += nested_vmcb->control.tsc_offset; in enter_svm_guest_mode()
3567 svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset; in enter_svm_guest_mode()
3569 svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext; in enter_svm_guest_mode()
3570 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector; in enter_svm_guest_mode()
3571 svm->vmcb->control.int_state = nested_vmcb->control.int_state; in enter_svm_guest_mode()
3572 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj; in enter_svm_guest_mode()
3573 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err; in enter_svm_guest_mode()
3575 svm->vmcb->control.pause_filter_count = in enter_svm_guest_mode()
3577 svm->vmcb->control.pause_filter_thresh = in enter_svm_guest_mode()
3580 kvm_vcpu_unmap(&svm->vcpu, map, true); in enter_svm_guest_mode()
3583 enter_guest_mode(&svm->vcpu); in enter_svm_guest_mode()
3589 recalc_intercepts(svm); in enter_svm_guest_mode()
3591 svm->nested.vmcb = vmcb_gpa; in enter_svm_guest_mode()
3593 enable_gif(svm); in enter_svm_guest_mode()
3595 mark_all_dirty(svm->vmcb); in enter_svm_guest_mode()
3598 static int nested_svm_vmrun(struct vcpu_svm *svm) in nested_svm_vmrun() argument
3602 struct vmcb *hsave = svm->nested.hsave; in nested_svm_vmrun()
3603 struct vmcb *vmcb = svm->vmcb; in nested_svm_vmrun()
3607 vmcb_gpa = svm->vmcb->save.rax; in nested_svm_vmrun()
3609 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map); in nested_svm_vmrun()
3611 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_vmrun()
3614 return kvm_skip_emulated_instruction(&svm->vcpu); in nested_svm_vmrun()
3617 ret = kvm_skip_emulated_instruction(&svm->vcpu); in nested_svm_vmrun()
3627 kvm_vcpu_unmap(&svm->vcpu, &map, true); in nested_svm_vmrun()
3632 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa, in nested_svm_vmrun()
3644 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmrun()
3645 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmrun()
3657 hsave->save.efer = svm->vcpu.arch.efer; in nested_svm_vmrun()
3658 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmrun()
3659 hsave->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmrun()
3660 hsave->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmrun()
3661 hsave->save.rip = kvm_rip_read(&svm->vcpu); in nested_svm_vmrun()
3667 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmrun()
3671 enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, &map); in nested_svm_vmrun()
3673 if (!nested_svm_vmrun_msrpm(svm)) { in nested_svm_vmrun()
3674 svm->vmcb->control.exit_code = SVM_EXIT_ERR; in nested_svm_vmrun()
3675 svm->vmcb->control.exit_code_hi = 0; in nested_svm_vmrun()
3676 svm->vmcb->control.exit_info_1 = 0; in nested_svm_vmrun()
3677 svm->vmcb->control.exit_info_2 = 0; in nested_svm_vmrun()
3679 nested_svm_vmexit(svm); in nested_svm_vmrun()
3701 static int vmload_interception(struct vcpu_svm *svm) in vmload_interception() argument
3707 if (nested_svm_check_permissions(svm)) in vmload_interception()
3710 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); in vmload_interception()
3713 kvm_inject_gp(&svm->vcpu, 0); in vmload_interception()
3719 ret = kvm_skip_emulated_instruction(&svm->vcpu); in vmload_interception()
3721 nested_svm_vmloadsave(nested_vmcb, svm->vmcb); in vmload_interception()
3722 kvm_vcpu_unmap(&svm->vcpu, &map, true); in vmload_interception()
3727 static int vmsave_interception(struct vcpu_svm *svm) in vmsave_interception() argument
3733 if (nested_svm_check_permissions(svm)) in vmsave_interception()
3736 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); in vmsave_interception()
3739 kvm_inject_gp(&svm->vcpu, 0); in vmsave_interception()
3745 ret = kvm_skip_emulated_instruction(&svm->vcpu); in vmsave_interception()
3747 nested_svm_vmloadsave(svm->vmcb, nested_vmcb); in vmsave_interception()
3748 kvm_vcpu_unmap(&svm->vcpu, &map, true); in vmsave_interception()
3753 static int vmrun_interception(struct vcpu_svm *svm) in vmrun_interception() argument
3755 if (nested_svm_check_permissions(svm)) in vmrun_interception()
3758 return nested_svm_vmrun(svm); in vmrun_interception()
3761 static int stgi_interception(struct vcpu_svm *svm) in stgi_interception() argument
3765 if (nested_svm_check_permissions(svm)) in stgi_interception()
3772 if (vgif_enabled(svm)) in stgi_interception()
3773 clr_intercept(svm, INTERCEPT_STGI); in stgi_interception()
3775 ret = kvm_skip_emulated_instruction(&svm->vcpu); in stgi_interception()
3776 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in stgi_interception()
3778 enable_gif(svm); in stgi_interception()
3783 static int clgi_interception(struct vcpu_svm *svm) in clgi_interception() argument
3787 if (nested_svm_check_permissions(svm)) in clgi_interception()
3790 ret = kvm_skip_emulated_instruction(&svm->vcpu); in clgi_interception()
3792 disable_gif(svm); in clgi_interception()
3795 if (!kvm_vcpu_apicv_active(&svm->vcpu)) { in clgi_interception()
3796 svm_clear_vintr(svm); in clgi_interception()
3797 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; in clgi_interception()
3798 mark_dirty(svm->vmcb, VMCB_INTR); in clgi_interception()
3804 static int invlpga_interception(struct vcpu_svm *svm) in invlpga_interception() argument
3806 struct kvm_vcpu *vcpu = &svm->vcpu; in invlpga_interception()
3808 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu), in invlpga_interception()
3809 kvm_rax_read(&svm->vcpu)); in invlpga_interception()
3812 kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu)); in invlpga_interception()
3814 return kvm_skip_emulated_instruction(&svm->vcpu); in invlpga_interception()
3817 static int skinit_interception(struct vcpu_svm *svm) in skinit_interception() argument
3819 trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu)); in skinit_interception()
3821 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in skinit_interception()
3825 static int wbinvd_interception(struct vcpu_svm *svm) in wbinvd_interception() argument
3827 return kvm_emulate_wbinvd(&svm->vcpu); in wbinvd_interception()
3830 static int xsetbv_interception(struct vcpu_svm *svm) in xsetbv_interception() argument
3832 u64 new_bv = kvm_read_edx_eax(&svm->vcpu); in xsetbv_interception()
3833 u32 index = kvm_rcx_read(&svm->vcpu); in xsetbv_interception()
3835 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { in xsetbv_interception()
3836 return kvm_skip_emulated_instruction(&svm->vcpu); in xsetbv_interception()
3842 static int rdpru_interception(struct vcpu_svm *svm) in rdpru_interception() argument
3844 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in rdpru_interception()
3848 static int task_switch_interception(struct vcpu_svm *svm) in task_switch_interception() argument
3852 int int_type = svm->vmcb->control.exit_int_info & in task_switch_interception()
3854 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; in task_switch_interception()
3856 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; in task_switch_interception()
3858 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; in task_switch_interception()
3862 tss_selector = (u16)svm->vmcb->control.exit_info_1; in task_switch_interception()
3864 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
3867 else if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
3878 svm->vcpu.arch.nmi_injected = false; in task_switch_interception()
3881 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
3885 (u32)svm->vmcb->control.exit_info_2; in task_switch_interception()
3887 kvm_clear_exception_queue(&svm->vcpu); in task_switch_interception()
3890 kvm_clear_interrupt_queue(&svm->vcpu); in task_switch_interception()
3901 if (!skip_emulated_instruction(&svm->vcpu)) in task_switch_interception()
3908 return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason, in task_switch_interception()
3912 static int cpuid_interception(struct vcpu_svm *svm) in cpuid_interception() argument
3914 return kvm_emulate_cpuid(&svm->vcpu); in cpuid_interception()
3917 static int iret_interception(struct vcpu_svm *svm) in iret_interception() argument
3919 ++svm->vcpu.stat.nmi_window_exits; in iret_interception()
3920 clr_intercept(svm, INTERCEPT_IRET); in iret_interception()
3921 svm->vcpu.arch.hflags |= HF_IRET_MASK; in iret_interception()
3922 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); in iret_interception()
3923 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in iret_interception()
3927 static int invlpg_interception(struct vcpu_svm *svm) in invlpg_interception() argument
3930 return kvm_emulate_instruction(&svm->vcpu, 0); in invlpg_interception()
3932 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); in invlpg_interception()
3933 return kvm_skip_emulated_instruction(&svm->vcpu); in invlpg_interception()
3936 static int emulate_on_interception(struct vcpu_svm *svm) in emulate_on_interception() argument
3938 return kvm_emulate_instruction(&svm->vcpu, 0); in emulate_on_interception()
3941 static int rsm_interception(struct vcpu_svm *svm) in rsm_interception() argument
3943 return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2); in rsm_interception()
3946 static int rdpmc_interception(struct vcpu_svm *svm) in rdpmc_interception() argument
3951 return emulate_on_interception(svm); in rdpmc_interception()
3953 err = kvm_rdpmc(&svm->vcpu); in rdpmc_interception()
3954 return kvm_complete_insn_gp(&svm->vcpu, err); in rdpmc_interception()
3957 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, in check_selective_cr0_intercepted() argument
3960 unsigned long cr0 = svm->vcpu.arch.cr0; in check_selective_cr0_intercepted()
3964 intercept = svm->nested.intercept; in check_selective_cr0_intercepted()
3966 if (!is_guest_mode(&svm->vcpu) || in check_selective_cr0_intercepted()
3974 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; in check_selective_cr0_intercepted()
3975 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); in check_selective_cr0_intercepted()
3983 static int cr_interception(struct vcpu_svm *svm) in cr_interception() argument
3990 return emulate_on_interception(svm); in cr_interception()
3992 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) in cr_interception()
3993 return emulate_on_interception(svm); in cr_interception()
3995 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in cr_interception()
3996 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) in cr_interception()
3999 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; in cr_interception()
4004 val = kvm_register_read(&svm->vcpu, reg); in cr_interception()
4007 if (!check_selective_cr0_intercepted(svm, val)) in cr_interception()
4008 err = kvm_set_cr0(&svm->vcpu, val); in cr_interception()
4014 err = kvm_set_cr3(&svm->vcpu, val); in cr_interception()
4017 err = kvm_set_cr4(&svm->vcpu, val); in cr_interception()
4020 err = kvm_set_cr8(&svm->vcpu, val); in cr_interception()
4024 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
4030 val = kvm_read_cr0(&svm->vcpu); in cr_interception()
4033 val = svm->vcpu.arch.cr2; in cr_interception()
4036 val = kvm_read_cr3(&svm->vcpu); in cr_interception()
4039 val = kvm_read_cr4(&svm->vcpu); in cr_interception()
4042 val = kvm_get_cr8(&svm->vcpu); in cr_interception()
4046 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
4049 kvm_register_write(&svm->vcpu, reg, val); in cr_interception()
4051 return kvm_complete_insn_gp(&svm->vcpu, err); in cr_interception()
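
cr_interception() recovers both the CR number and the direction from the exit code: SVM numbers the exits SVM_EXIT_READ_CR0..CR15 followed by SVM_EXIT_WRITE_CR0..CR15, so after subtracting SVM_EXIT_READ_CR0 a value of 16 or more means a write. A sketch of the decode, assuming that layout:

reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
	cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;	/* treat as CR0 write */
else
	cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;

if (cr >= 16) {		/* mov to cr */
	cr -= 16;
	val = kvm_register_read(&svm->vcpu, reg);
	/* ... dispatch to kvm_set_cr0/cr3/cr4/cr8 as listed above ... */
} else {		/* mov from cr */
	/* ... read via kvm_read_cr0/cr3/cr4, kvm_get_cr8 ... */
	kvm_register_write(&svm->vcpu, reg, val);
}
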
4054 static int dr_interception(struct vcpu_svm *svm) in dr_interception() argument
4059 if (svm->vcpu.guest_debug == 0) { in dr_interception()
4065 clr_dr_intercepts(svm); in dr_interception()
4066 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in dr_interception()
4071 return emulate_on_interception(svm); in dr_interception()
4073 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in dr_interception()
4074 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; in dr_interception()
4077 if (!kvm_require_dr(&svm->vcpu, dr - 16)) in dr_interception()
4079 val = kvm_register_read(&svm->vcpu, reg); in dr_interception()
4080 kvm_set_dr(&svm->vcpu, dr - 16, val); in dr_interception()
4082 if (!kvm_require_dr(&svm->vcpu, dr)) in dr_interception()
4084 kvm_get_dr(&svm->vcpu, dr, &val); in dr_interception()
4085 kvm_register_write(&svm->vcpu, reg, val); in dr_interception()
4088 return kvm_skip_emulated_instruction(&svm->vcpu); in dr_interception()
4091 static int cr8_write_interception(struct vcpu_svm *svm) in cr8_write_interception() argument
4093 struct kvm_run *kvm_run = svm->vcpu.run; in cr8_write_interception()
4096 u8 cr8_prev = kvm_get_cr8(&svm->vcpu); in cr8_write_interception()
4098 r = cr_interception(svm); in cr8_write_interception()
4099 if (lapic_in_kernel(&svm->vcpu)) in cr8_write_interception()
4101 if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) in cr8_write_interception()
4125 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_msr() local
4129 msr_info->data = svm->vmcb->save.star; in svm_get_msr()
4133 msr_info->data = svm->vmcb->save.lstar; in svm_get_msr()
4136 msr_info->data = svm->vmcb->save.cstar; in svm_get_msr()
4139 msr_info->data = svm->vmcb->save.kernel_gs_base; in svm_get_msr()
4142 msr_info->data = svm->vmcb->save.sfmask; in svm_get_msr()
4146 msr_info->data = svm->vmcb->save.sysenter_cs; in svm_get_msr()
4149 msr_info->data = svm->sysenter_eip; in svm_get_msr()
4152 msr_info->data = svm->sysenter_esp; in svm_get_msr()
4157 msr_info->data = svm->tsc_aux; in svm_get_msr()
4165 msr_info->data = svm->vmcb->save.dbgctl; in svm_get_msr()
4168 msr_info->data = svm->vmcb->save.br_from; in svm_get_msr()
4171 msr_info->data = svm->vmcb->save.br_to; in svm_get_msr()
4174 msr_info->data = svm->vmcb->save.last_excp_from; in svm_get_msr()
4177 msr_info->data = svm->vmcb->save.last_excp_to; in svm_get_msr()
4180 msr_info->data = svm->nested.hsave_msr; in svm_get_msr()
4183 msr_info->data = svm->nested.vm_cr_msr; in svm_get_msr()
4191 msr_info->data = svm->spec_ctrl; in svm_get_msr()
4198 msr_info->data = svm->virt_spec_ctrl; in svm_get_msr()
4218 msr_info->data = svm->msr_decfg; in svm_get_msr()
4226 static int rdmsr_interception(struct vcpu_svm *svm) in rdmsr_interception() argument
4228 return kvm_emulate_rdmsr(&svm->vcpu); in rdmsr_interception()
4233 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_vm_cr() local
4241 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) in svm_set_vm_cr()
4244 svm->nested.vm_cr_msr &= ~chg_mask; in svm_set_vm_cr()
4245 svm->nested.vm_cr_msr |= (data & chg_mask); in svm_set_vm_cr()
4247 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; in svm_set_vm_cr()
4258 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_msr() local
4267 svm->vmcb->save.g_pat = data; in svm_set_msr()
4268 mark_dirty(svm->vmcb, VMCB_NPT); in svm_set_msr()
4280 svm->spec_ctrl = data; in svm_set_msr()
4296 set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); in svm_set_msr()
4312 set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); in svm_set_msr()
4322 svm->virt_spec_ctrl = data; in svm_set_msr()
4325 svm->vmcb->save.star = data; in svm_set_msr()
4329 svm->vmcb->save.lstar = data; in svm_set_msr()
4332 svm->vmcb->save.cstar = data; in svm_set_msr()
4335 svm->vmcb->save.kernel_gs_base = data; in svm_set_msr()
4338 svm->vmcb->save.sfmask = data; in svm_set_msr()
4342 svm->vmcb->save.sysenter_cs = data; in svm_set_msr()
4345 svm->sysenter_eip = data; in svm_set_msr()
4346 svm->vmcb->save.sysenter_eip = data; in svm_set_msr()
4349 svm->sysenter_esp = data; in svm_set_msr()
4350 svm->vmcb->save.sysenter_esp = data; in svm_set_msr()
4361 svm->tsc_aux = data; in svm_set_msr()
4362 wrmsrl(MSR_TSC_AUX, svm->tsc_aux); in svm_set_msr()
4373 svm->vmcb->save.dbgctl = data; in svm_set_msr()
4374 mark_dirty(svm->vmcb, VMCB_LBR); in svm_set_msr()
4376 svm_enable_lbrv(svm); in svm_set_msr()
4378 svm_disable_lbrv(svm); in svm_set_msr()
4381 svm->nested.hsave_msr = data; in svm_set_msr()
4403 svm->msr_decfg = data; in svm_set_msr()
4416 static int wrmsr_interception(struct vcpu_svm *svm) in wrmsr_interception() argument
4418 return kvm_emulate_wrmsr(&svm->vcpu); in wrmsr_interception()
4421 static int msr_interception(struct vcpu_svm *svm) in msr_interception() argument
4423 if (svm->vmcb->control.exit_info_1) in msr_interception()
4424 return wrmsr_interception(svm); in msr_interception()
4426 return rdmsr_interception(svm); in msr_interception()
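msr_interception() is fully visible in the fragments above except for the else keyword; reassembled, it dispatches on exit_info_1, which the hardware sets to 1 for WRMSR and 0 for RDMSR:

static int msr_interception(struct vcpu_svm *svm)
{
	if (svm->vmcb->control.exit_info_1)
		return wrmsr_interception(svm);
	else
		return rdmsr_interception(svm);
}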
4429 static int interrupt_window_interception(struct vcpu_svm *svm) in interrupt_window_interception() argument
4431 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in interrupt_window_interception()
4432 svm_clear_vintr(svm); in interrupt_window_interception()
4433 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; in interrupt_window_interception()
4434 mark_dirty(svm->vmcb, VMCB_INTR); in interrupt_window_interception()
4435 ++svm->vcpu.stat.irq_window_exits; in interrupt_window_interception()
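interrupt_window_interception() reassembles directly from the five fragments; only the return value (assumed to be the usual "resume guest" 1) is elided:

static int interrupt_window_interception(struct vcpu_svm *svm)
{
	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
	svm_clear_vintr(svm);
	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
	mark_dirty(svm->vmcb, VMCB_INTR);
	++svm->vcpu.stat.irq_window_exits;
	return 1;
}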
4439 static int pause_interception(struct vcpu_svm *svm) in pause_interception() argument
4441 struct kvm_vcpu *vcpu = &svm->vcpu; in pause_interception()
4451 static int nop_interception(struct vcpu_svm *svm) in nop_interception() argument
4453 return kvm_skip_emulated_instruction(&(svm->vcpu)); in nop_interception()
4456 static int monitor_interception(struct vcpu_svm *svm) in monitor_interception() argument
4459 return nop_interception(svm); in monitor_interception()
4462 static int mwait_interception(struct vcpu_svm *svm) in mwait_interception() argument
4465 return nop_interception(svm); in mwait_interception()
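MONITOR and MWAIT are both emulated as a NOP that merely skips the instruction; a sketch of the trio, with the one-time warning text approximated:

static int nop_interception(struct vcpu_svm *svm)
{
	return kvm_skip_emulated_instruction(&(svm->vcpu));
}

static int monitor_interception(struct vcpu_svm *svm)
{
	printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
	return nop_interception(svm);
}

static int mwait_interception(struct vcpu_svm *svm)
{
	printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
	return nop_interception(svm);
}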
4475 static int avic_incomplete_ipi_interception(struct vcpu_svm *svm) in avic_incomplete_ipi_interception() argument
4477 u32 icrh = svm->vmcb->control.exit_info_1 >> 32; in avic_incomplete_ipi_interception()
4478 u32 icrl = svm->vmcb->control.exit_info_1; in avic_incomplete_ipi_interception()
4479 u32 id = svm->vmcb->control.exit_info_2 >> 32; in avic_incomplete_ipi_interception()
4480 u32 index = svm->vmcb->control.exit_info_2 & 0xFF; in avic_incomplete_ipi_interception()
4481 struct kvm_lapic *apic = svm->vcpu.arch.apic; in avic_incomplete_ipi_interception()
4483 trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index); in avic_incomplete_ipi_interception()
4504 struct kvm *kvm = svm->vcpu.kvm; in avic_incomplete_ipi_interception()
4505 struct kvm_lapic *apic = svm->vcpu.arch.apic; in avic_incomplete_ipi_interception()
4525 index, svm->vcpu.vcpu_id, icrh, icrl); in avic_incomplete_ipi_interception()
4587 struct vcpu_svm *svm = to_svm(vcpu); in avic_invalidate_logical_id_entry() local
4588 bool flat = svm->dfr_reg == APIC_DFR_FLAT; in avic_invalidate_logical_id_entry()
4589 u32 *entry = avic_get_logical_id_entry(vcpu, svm->ldr_reg, flat); in avic_invalidate_logical_id_entry()
4598 struct vcpu_svm *svm = to_svm(vcpu); in avic_handle_ldr_update() local
4602 if (ldr == svm->ldr_reg) in avic_handle_ldr_update()
4611 svm->ldr_reg = ldr; in avic_handle_ldr_update()
4619 struct vcpu_svm *svm = to_svm(vcpu); in avic_handle_apic_id_update() local
4639 if (svm->ldr_reg) in avic_handle_apic_id_update()
4647 struct vcpu_svm *svm = to_svm(vcpu); in avic_handle_dfr_update() local
4650 if (svm->dfr_reg == dfr) in avic_handle_dfr_update()
4654 svm->dfr_reg = dfr; in avic_handle_dfr_update()
4657 static int avic_unaccel_trap_write(struct vcpu_svm *svm) in avic_unaccel_trap_write() argument
4659 struct kvm_lapic *apic = svm->vcpu.arch.apic; in avic_unaccel_trap_write()
4660 u32 offset = svm->vmcb->control.exit_info_1 & in avic_unaccel_trap_write()
4665 if (avic_handle_apic_id_update(&svm->vcpu)) in avic_unaccel_trap_write()
4669 if (avic_handle_ldr_update(&svm->vcpu)) in avic_unaccel_trap_write()
4673 avic_handle_dfr_update(&svm->vcpu); in avic_unaccel_trap_write()
4713 static int avic_unaccelerated_access_interception(struct vcpu_svm *svm) in avic_unaccelerated_access_interception() argument
4716 u32 offset = svm->vmcb->control.exit_info_1 & in avic_unaccelerated_access_interception()
4718 u32 vector = svm->vmcb->control.exit_info_2 & in avic_unaccelerated_access_interception()
4720 bool write = (svm->vmcb->control.exit_info_1 >> 32) & in avic_unaccelerated_access_interception()
4724 trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset, in avic_unaccelerated_access_interception()
4729 ret = avic_unaccel_trap_write(svm); in avic_unaccelerated_access_interception()
4732 ret = kvm_emulate_instruction(&svm->vcpu, 0); in avic_unaccelerated_access_interception()
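The unaccelerated-access handler splits on whether the exit was a trap (hardware already completed the APIC write, only bookkeeping remains) or a fault (the access must be emulated). A sketch assuming the is_avic_unaccelerated_access_trap() offset classifier used by this file:

static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
{
	int ret = 0;
	u32 offset = svm->vmcb->control.exit_info_1 &
		     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
	u32 vector = svm->vmcb->control.exit_info_2 &
		     AVIC_UNACCEL_ACCESS_VECTOR_MASK;
	bool write = (svm->vmcb->control.exit_info_1 >> 32) &
		     AVIC_UNACCEL_ACCESS_WRITE_MASK;
	bool trap = is_avic_unaccelerated_access_trap(offset);

	trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
					    trap, write, vector);
	if (trap) {
		/* trap exit: the register write already happened */
		ret = avic_unaccel_trap_write(svm);
	} else {
		/* fault exit: emulate the access in software */
		ret = kvm_emulate_instruction(&svm->vcpu, 0);
	}

	return ret;
}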
4738 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
4808 struct vcpu_svm *svm = to_svm(vcpu); in dump_vmcb() local
4809 struct vmcb_control_area *control = &svm->vmcb->control; in dump_vmcb()
4810 struct vmcb_save_area *save = &svm->vmcb->save; in dump_vmcb()
4932 struct vcpu_svm *svm = to_svm(vcpu); in handle_exit() local
4934 u32 exit_code = svm->vmcb->control.exit_code; in handle_exit()
4938 if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE)) in handle_exit()
4939 vcpu->arch.cr0 = svm->vmcb->save.cr0; in handle_exit()
4941 vcpu->arch.cr3 = svm->vmcb->save.cr3; in handle_exit()
4943 if (unlikely(svm->nested.exit_required)) { in handle_exit()
4944 nested_svm_vmexit(svm); in handle_exit()
4945 svm->nested.exit_required = false; in handle_exit()
4953 trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code, in handle_exit()
4954 svm->vmcb->control.exit_info_1, in handle_exit()
4955 svm->vmcb->control.exit_info_2, in handle_exit()
4956 svm->vmcb->control.exit_int_info, in handle_exit()
4957 svm->vmcb->control.exit_int_info_err, in handle_exit()
4960 vmexit = nested_svm_exit_special(svm); in handle_exit()
4963 vmexit = nested_svm_exit_handled(svm); in handle_exit()
4969 svm_complete_interrupts(svm); in handle_exit()
4971 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { in handle_exit()
4974 = svm->vmcb->control.exit_code; in handle_exit()
4979 if (is_external_interrupt(svm->vmcb->control.exit_int_info) && in handle_exit()
4985 __func__, svm->vmcb->control.exit_int_info, in handle_exit()
5000 return svm_exit_handlers[exit_code](svm); in handle_exit()
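The tail of handle_exit() bounds-checks exit_code against svm_exit_handlers[] before the indirect call on line 5000; a sketch with the exact error-path details approximated:

	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code]) {
		WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
		dump_vmcb(vcpu);
		/* surface an internal error to userspace instead of crashing */
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return 0;
	}

	return svm_exit_handlers[exit_code](svm);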
5012 static void pre_sev_run(struct vcpu_svm *svm, int cpu) in pre_sev_run() argument
5015 int asid = sev_get_asid(svm->vcpu.kvm); in pre_sev_run()
5018 svm->vmcb->control.asid = asid; in pre_sev_run()
5026 if (sd->sev_vmcbs[asid] == svm->vmcb && in pre_sev_run()
5027 svm->last_cpu == cpu) in pre_sev_run()
5030 svm->last_cpu = cpu; in pre_sev_run()
5031 sd->sev_vmcbs[asid] = svm->vmcb; in pre_sev_run()
5032 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in pre_sev_run()
5033 mark_dirty(svm->vmcb, VMCB_ASID); in pre_sev_run()
5036 static void pre_svm_run(struct vcpu_svm *svm) in pre_svm_run() argument
5042 if (sev_guest(svm->vcpu.kvm)) in pre_svm_run()
5043 return pre_sev_run(svm, cpu); in pre_svm_run()
5046 if (svm->asid_generation != sd->asid_generation) in pre_svm_run()
5047 new_asid(svm, sd); in pre_svm_run()
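pre_svm_run() ties the two fragments above together: SEV guests take the pre_sev_run() path with their pre-allocated ASID, everyone else gets a fresh ASID when the per-CPU generation counter has moved. A sketch assuming the per-CPU svm_data used elsewhere in this file:

static void pre_svm_run(struct vcpu_svm *svm)
{
	int cpu = raw_smp_processor_id();
	struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

	if (sev_guest(svm->vcpu.kvm))
		return pre_sev_run(svm, cpu);

	if (svm->asid_generation != sd->asid_generation)
		new_asid(svm, sd);
}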
5052 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_nmi() local
5054 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; in svm_inject_nmi()
5056 set_intercept(svm, INTERCEPT_IRET); in svm_inject_nmi()
5060 static inline void svm_inject_irq(struct vcpu_svm *svm, int irq) in svm_inject_irq() argument
5065 control = &svm->vmcb->control; in svm_inject_irq()
5070 mark_dirty(svm->vmcb, VMCB_INTR); in svm_inject_irq()
5075 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_irq() local
5077 BUG_ON(!(gif_set(svm))); in svm_set_irq()
5082 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | in svm_set_irq()
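NMI and external-interrupt injection both go through vmcb->control.event_inj; NMI injection additionally masks NMIs and intercepts IRET to learn when the handler finishes. A sketch of the pair, with the stats/tracepoint lines filled in from context:

static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
	vcpu->arch.hflags |= HF_NMI_MASK;
	set_intercept(svm, INTERCEPT_IRET);
	++vcpu->stat.nmi_injections;
}

static void svm_set_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	BUG_ON(!(gif_set(svm)));

	trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
	++vcpu->stat.irq_injections;

	svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
		SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}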
5093 struct vcpu_svm *svm = to_svm(vcpu); in update_cr8_intercept() local
5099 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); in update_cr8_intercept()
5105 set_cr_intercept(svm, INTERCEPT_CR8_WRITE); in update_cr8_intercept()
5129 struct vcpu_svm *svm = to_svm(vcpu); in svm_refresh_apicv_exec_ctrl() local
5130 struct vmcb *vmcb = svm->vmcb; in svm_refresh_apicv_exec_ctrl()
5164 static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) in svm_ir_list_del() argument
5169 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_del()
5170 list_for_each_entry(cur, &svm->ir_list, node) { in svm_ir_list_del()
5177 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_del()
5180 static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi) in svm_ir_list_add() argument
5192 struct kvm *kvm = svm->vcpu.kvm; in svm_ir_list_add()
5217 spin_lock_irqsave(&svm->ir_list_lock, flags); in svm_ir_list_add()
5218 list_add(&ir->node, &svm->ir_list); in svm_ir_list_add()
5219 spin_unlock_irqrestore(&svm->ir_list_lock, flags); in svm_ir_list_add()
5237 struct vcpu_data *vcpu_info, struct vcpu_svm **svm) in get_pi_vcpu_info() argument
5253 *svm = to_svm(vcpu); in get_pi_vcpu_info()
5254 vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page)); in get_pi_vcpu_info()
5289 struct vcpu_svm *svm = NULL; in svm_update_pi_irte() local
5301 if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set && in svm_update_pi_irte()
5302 kvm_vcpu_apicv_active(&svm->vcpu)) { in svm_update_pi_irte()
5306 pi.base = __sme_set(page_to_phys(svm->avic_backing_page) & in svm_update_pi_irte()
5309 svm->vcpu.vcpu_id); in svm_update_pi_irte()
5322 svm_ir_list_add(svm, &pi); in svm_update_pi_irte()
5351 if (!ret && svm) { in svm_update_pi_irte()
5352 trace_kvm_pi_irte_update(host_irq, svm->vcpu.vcpu_id, in svm_update_pi_irte()
5371 struct vcpu_svm *svm = to_svm(vcpu); in svm_nmi_allowed() local
5372 struct vmcb *vmcb = svm->vmcb; in svm_nmi_allowed()
5375 !(svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_nmi_allowed()
5376 ret = ret && gif_set(svm) && nested_svm_nmi(svm); in svm_nmi_allowed()
5383 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_nmi_mask() local
5385 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_get_nmi_mask()
5390 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nmi_mask() local
5393 svm->vcpu.arch.hflags |= HF_NMI_MASK; in svm_set_nmi_mask()
5394 set_intercept(svm, INTERCEPT_IRET); in svm_set_nmi_mask()
5396 svm->vcpu.arch.hflags &= ~HF_NMI_MASK; in svm_set_nmi_mask()
5397 clr_intercept(svm, INTERCEPT_IRET); in svm_set_nmi_mask()
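The NMI mask accessors reassemble fully from the fragments; masking pairs HF_NMI_MASK with the IRET intercept so the mask can be dropped when the guest's NMI handler returns:

static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
}

static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (masked) {
		svm->vcpu.arch.hflags |= HF_NMI_MASK;
		set_intercept(svm, INTERCEPT_IRET);
	} else {
		svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
		clr_intercept(svm, INTERCEPT_IRET);
	}
}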
5403 struct vcpu_svm *svm = to_svm(vcpu); in svm_interrupt_allowed() local
5404 struct vmcb *vmcb = svm->vmcb; in svm_interrupt_allowed()
5407 if (!gif_set(svm) || in svm_interrupt_allowed()
5414 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK); in svm_interrupt_allowed()
5421 struct vcpu_svm *svm = to_svm(vcpu); in enable_irq_window() local
5434 if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) { in enable_irq_window()
5435 svm_set_vintr(svm); in enable_irq_window()
5436 svm_inject_irq(svm, 0x0); in enable_irq_window()
5442 struct vcpu_svm *svm = to_svm(vcpu); in enable_nmi_window() local
5444 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) in enable_nmi_window()
5448 if (!gif_set(svm)) { in enable_nmi_window()
5449 if (vgif_enabled(svm)) in enable_nmi_window()
5450 set_intercept(svm, INTERCEPT_STGI); in enable_nmi_window()
5454 if (svm->nested.exit_required) in enable_nmi_window()
5461 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); in enable_nmi_window()
5462 svm->nmi_singlestep = true; in enable_nmi_window()
5463 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); in enable_nmi_window()
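enable_nmi_window() has three early-outs before falling back to single-stepping over whatever blocks the NMI; a sketch with the elided returns filled in:

static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
	    == HF_NMI_MASK)
		return; /* the pending IRET will cause a vmexit anyway */

	if (!gif_set(svm)) {
		if (vgif_enabled(svm))
			set_intercept(svm, INTERCEPT_STGI);
		return; /* STGI will cause a vmexit */
	}

	if (svm->nested.exit_required)
		return; /* a nested vmexit is already pending */

	/*
	 * Something blocks NMI injection (IRET, exception injection,
	 * interrupt shadow): single-step over the blocking instruction.
	 */
	svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
	svm->nmi_singlestep = true;
	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
}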
5478 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb() local
5481 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in svm_flush_tlb()
5483 svm->asid_generation--; in svm_flush_tlb()
5488 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb_gva() local
5490 invlpga(gva, svm->vmcb->control.asid); in svm_flush_tlb_gva()
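The two TLB-flush paths reassemble as below; the FLUSHBYASID feature test is the assumed elided condition choosing between a targeted ASID flush and forcing a new ASID:

static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	else
		svm->asid_generation--;	/* forces new_asid() on next run */
}

static void svm_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t gva)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	invlpga(gva, svm->vmcb->control.asid);
}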
5499 struct vcpu_svm *svm = to_svm(vcpu); in sync_cr8_to_lapic() local
5504 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) { in sync_cr8_to_lapic()
5505 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; in sync_cr8_to_lapic()
5512 struct vcpu_svm *svm = to_svm(vcpu); in sync_lapic_to_cr8() local
5520 svm->vmcb->control.int_ctl &= ~V_TPR_MASK; in sync_lapic_to_cr8()
5521 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; in sync_lapic_to_cr8()
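The two CR8 sync helpers mirror each other around a vmexit/vmentry: V_TPR is copied out to the local APIC on exit and back in before entry. A sketch assuming the svm_nested_virtualize_tpr() guard used by this file:

static void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm_nested_virtualize_tpr(vcpu))
		return;

	if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
		int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
		kvm_set_cr8(vcpu, cr8);
	}
}

static void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 cr8;

	if (svm_nested_virtualize_tpr(vcpu) ||
	    kvm_vcpu_apicv_active(vcpu))
		return;

	cr8 = kvm_get_cr8(vcpu);
	svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
	svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
}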
5524 static void svm_complete_interrupts(struct vcpu_svm *svm) in svm_complete_interrupts() argument
5528 u32 exitintinfo = svm->vmcb->control.exit_int_info; in svm_complete_interrupts()
5529 unsigned int3_injected = svm->int3_injected; in svm_complete_interrupts()
5531 svm->int3_injected = 0; in svm_complete_interrupts()
5537 if ((svm->vcpu.arch.hflags & HF_IRET_MASK) in svm_complete_interrupts()
5538 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) { in svm_complete_interrupts()
5539 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); in svm_complete_interrupts()
5540 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
5543 svm->vcpu.arch.nmi_injected = false; in svm_complete_interrupts()
5544 kvm_clear_exception_queue(&svm->vcpu); in svm_complete_interrupts()
5545 kvm_clear_interrupt_queue(&svm->vcpu); in svm_complete_interrupts()
5550 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
5557 svm->vcpu.arch.nmi_injected = true; in svm_complete_interrupts()
5567 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip)) in svm_complete_interrupts()
5568 kvm_rip_write(&svm->vcpu, in svm_complete_interrupts()
5569 kvm_rip_read(&svm->vcpu) - in svm_complete_interrupts()
5574 u32 err = svm->vmcb->control.exit_int_info_err; in svm_complete_interrupts()
5575 kvm_requeue_exception_e(&svm->vcpu, vector, err); in svm_complete_interrupts()
5578 kvm_requeue_exception(&svm->vcpu, vector); in svm_complete_interrupts()
5581 kvm_queue_interrupt(&svm->vcpu, vector, false); in svm_complete_interrupts()
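svm_complete_interrupts() decodes exit_int_info to re-queue whatever event was in flight when the vmexit interrupted delivery; the core switch, sketched with the INT3 rewind and soft-interrupt cases compressed into a comment:

	u32 exitintinfo = svm->vmcb->control.exit_int_info;
	unsigned int vector, type;

	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
		return;

	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);

	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;

	switch (type) {
	case SVM_EXITINTINFO_TYPE_NMI:
		svm->vcpu.arch.nmi_injected = true;
		break;
	case SVM_EXITINTINFO_TYPE_EXEPT:
		/* INT3 rewind via int3_injected/int3_rip happens first */
		if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
			u32 err = svm->vmcb->control.exit_int_info_err;
			kvm_requeue_exception_e(&svm->vcpu, vector, err);
		} else
			kvm_requeue_exception(&svm->vcpu, vector);
		break;
	case SVM_EXITINTINFO_TYPE_INTR:
		kvm_queue_interrupt(&svm->vcpu, vector, false);
		break;
	default:
		break;
	}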
5590 struct vcpu_svm *svm = to_svm(vcpu); in svm_cancel_injection() local
5591 struct vmcb_control_area *control = &svm->vmcb->control; in svm_cancel_injection()
5596 svm_complete_interrupts(svm); in svm_cancel_injection()
5601 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_run() local
5603 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_vcpu_run()
5604 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_vcpu_run()
5605 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_vcpu_run()
5611 if (unlikely(svm->nested.exit_required)) in svm_vcpu_run()
5620 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { in svm_vcpu_run()
5626 disable_nmi_singlestep(svm); in svm_vcpu_run()
5630 pre_svm_run(svm); in svm_vcpu_run()
5634 svm->vmcb->save.cr2 = vcpu->arch.cr2; in svm_vcpu_run()
5649 x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); in svm_vcpu_run()
5716 : [svm]"a"(svm), in svm_vcpu_run()
5747 wrmsrl(MSR_GS_BASE, svm->host.gs_base); in svm_vcpu_run()
5749 loadsegment(fs, svm->host.fs); in svm_vcpu_run()
5751 loadsegment(gs, svm->host.gs); in svm_vcpu_run()
5771 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); in svm_vcpu_run()
5777 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); in svm_vcpu_run()
5779 vcpu->arch.cr2 = svm->vmcb->save.cr2; in svm_vcpu_run()
5780 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; in svm_vcpu_run()
5781 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; in svm_vcpu_run()
5782 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; in svm_vcpu_run()
5784 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
5785 kvm_before_interrupt(&svm->vcpu); in svm_vcpu_run()
5792 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
5793 kvm_after_interrupt(&svm->vcpu); in svm_vcpu_run()
5797 svm->next_rip = 0; in svm_vcpu_run()
5799 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; in svm_vcpu_run()
5802 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) in svm_vcpu_run()
5803 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason(); in svm_vcpu_run()
5814 if (unlikely(svm->vmcb->control.exit_code == in svm_vcpu_run()
5816 svm_handle_mce(svm); in svm_vcpu_run()
5818 mark_all_clean(svm->vmcb); in svm_vcpu_run()
5824 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr3() local
5826 svm->vmcb->save.cr3 = __sme_set(root); in svm_set_cr3()
5827 mark_dirty(svm->vmcb, VMCB_CR); in svm_set_cr3()
5832 struct vcpu_svm *svm = to_svm(vcpu); in set_tdp_cr3() local
5834 svm->vmcb->control.nested_cr3 = __sme_set(root); in set_tdp_cr3()
5835 mark_dirty(svm->vmcb, VMCB_NPT); in set_tdp_cr3()
5838 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu); in set_tdp_cr3()
5839 mark_dirty(svm->vmcb, VMCB_CR); in set_tdp_cr3()
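The two CR3 setters reassemble directly from the fragments: the shadow-paging path writes the guest-visible save.cr3, while the NPT path writes nested_cr3 and also keeps save.cr3 in sync (e.g. for live migration):

static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.cr3 = __sme_set(root);
	mark_dirty(svm->vmcb, VMCB_CR);
}

static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.nested_cr3 = __sme_set(root);
	mark_dirty(svm->vmcb, VMCB_NPT);

	/* also update the guest-visible CR3 */
	svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
	mark_dirty(svm->vmcb, VMCB_CR);
}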
5894 struct vcpu_svm *svm = to_svm(vcpu); in svm_cpuid_update() local
5897 svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS); in svm_cpuid_update()
6057 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_intercept() local
6060 struct vmcb *vmcb = svm->vmcb; in svm_check_intercept()
6086 intercept = svm->nested.intercept; in svm_check_intercept()
6165 vmexit = nested_svm_exit_handled(svm); in svm_check_intercept()
6201 struct vcpu_svm *svm = to_svm(vcpu); in svm_smi_allowed() local
6204 if (!gif_set(svm)) in svm_smi_allowed()
6207 if (is_guest_mode(&svm->vcpu) && in svm_smi_allowed()
6208 svm->nested.intercept & (1ULL << INTERCEPT_SMI)) { in svm_smi_allowed()
6210 svm->vmcb->control.exit_code = SVM_EXIT_SMI; in svm_smi_allowed()
6211 svm->nested.exit_required = true; in svm_smi_allowed()
6220 struct vcpu_svm *svm = to_svm(vcpu); in svm_pre_enter_smm() local
6227 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb); in svm_pre_enter_smm()
6229 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_pre_enter_smm()
6230 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_pre_enter_smm()
6231 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_pre_enter_smm()
6233 ret = nested_svm_vmexit(svm); in svm_pre_enter_smm()
6242 struct vcpu_svm *svm = to_svm(vcpu); in svm_pre_leave_smm() local
6252 if (kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb), &map) == -EINVAL) in svm_pre_leave_smm()
6255 enter_svm_guest_mode(svm, vmcb, nested_vmcb, &map); in svm_pre_leave_smm()
6262 struct vcpu_svm *svm = to_svm(vcpu); in enable_smi_window() local
6264 if (!gif_set(svm)) { in enable_smi_window()
6265 if (vgif_enabled(svm)) in enable_smi_window()
6266 set_intercept(svm, INTERCEPT_STGI); in enable_smi_window()
7168 struct vcpu_svm *svm = to_svm(vcpu); in svm_apic_init_signal_blocked() local
7177 return !gif_set(svm) || in svm_apic_init_signal_blocked()
7178 (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT)); in svm_apic_init_signal_blocked()
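svm_apic_init_signal_blocked() reassembles to a single predicate: INIT is blocked while GIF is clear, or while L1 intercepts INIT for its nested guest:

static bool svm_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !gif_set(svm) ||
	       (svm->vmcb->control.intercept & (1ULL << INTERCEPT_INIT));
}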