Lines Matching refs:svm

299 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_efer() local
314 svm_set_gif(svm, true); in svm_set_efer()
317 clr_exception_intercept(svm, GP_VECTOR); in svm_set_efer()
325 svm_free_nested(svm); in svm_set_efer()
328 int ret = svm_allocate_nested(svm); in svm_set_efer()
340 set_exception_intercept(svm, GP_VECTOR); in svm_set_efer()
344 svm->vmcb->save.efer = efer | EFER_SVME; in svm_set_efer()
345 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_set_efer()
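
Taken together, the svm_set_efer() hits above show the EFER.SVME toggle: nested state is freed when the guest clears SVME, allocated when it sets SVME, and the value written to the VMCB always has SVME ORed back in. A minimal user-space sketch of that control flow (the *_model names are hypothetical, not KVM's):

#include <stdbool.h>
#include <stdint.h>

#define EFER_SVME (1ULL << 12)  /* EFER.SVME is bit 12 per the AMD APM */

struct vcpu_model { bool nested_allocated; uint64_t vmcb_efer; };

static int set_efer_model(struct vcpu_model *v, uint64_t efer)
{
    if (!(efer & EFER_SVME))
        v->nested_allocated = false;   /* svm_free_nested() analogue */
    else if (!v->nested_allocated)
        v->nested_allocated = true;    /* svm_allocate_nested() analogue */

    v->vmcb_efer = efer | EFER_SVME;   /* SVME is always kept set in the VMCB */
    return 0;
}
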
351 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_interrupt_shadow() local
354 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) in svm_get_interrupt_shadow()
361 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_interrupt_shadow() local
364 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
366 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
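
The interrupt-shadow accessors above reduce to reading and read-modify-writing one flag bit in int_state. A self-contained sketch, assuming SVM_INTERRUPT_SHADOW_MASK is a single bit:

#include <stdint.h>

#define INT_SHADOW_MASK 0x1ULL  /* stand-in for SVM_INTERRUPT_SHADOW_MASK */

static uint32_t get_interrupt_shadow_model(uint64_t int_state)
{
    return (int_state & INT_SHADOW_MASK) ? 1 : 0;
}

static uint64_t set_interrupt_shadow_model(uint64_t int_state, int mask)
{
    int_state &= ~INT_SHADOW_MASK;    /* always clear first... */
    if (mask)
        int_state |= INT_SHADOW_MASK; /* ...then set if requested */
    return int_state;
}
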
373 struct vcpu_svm *svm = to_svm(vcpu); in __svm_skip_emulated_instruction() local
383 if (nrips && svm->vmcb->control.next_rip != 0) { in __svm_skip_emulated_instruction()
385 svm->next_rip = svm->vmcb->control.next_rip; in __svm_skip_emulated_instruction()
388 if (!svm->next_rip) { in __svm_skip_emulated_instruction()
390 old_rflags = svm->vmcb->save.rflags; in __svm_skip_emulated_instruction()
396 svm->vmcb->save.rflags = old_rflags; in __svm_skip_emulated_instruction()
398 kvm_rip_write(vcpu, svm->next_rip); in __svm_skip_emulated_instruction()
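
The skip logic above has a fast path and a slow path: when NRIPS is available and the CPU reported a next_rip, that value is used directly; otherwise the instruction must be decoded to find its length, with RFLAGS saved and restored around the emulation. A sketch of the RIP selection (emulate_insn_len() is a hypothetical stand-in for KVM's emulator):

#include <stdint.h>

/* Stub decoder for illustration only; KVM's emulator does the real work. */
static uint64_t emulate_insn_len(uint64_t rip)
{
    (void)rip;
    return 2;  /* pretend every instruction is two bytes */
}

struct skip_ctx { int nrips; uint64_t hw_next_rip, cur_rip; };

static uint64_t next_rip_model(const struct skip_ctx *c)
{
    if (c->nrips && c->hw_next_rip != 0)
        return c->hw_next_rip;                        /* hardware fast path */
    return c->cur_rip + emulate_insn_len(c->cur_rip); /* emulation fallback */
}
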
416 struct vcpu_svm *svm = to_svm(vcpu); in svm_update_soft_interrupt_rip() local
444 svm->soft_int_injected = true; in svm_update_soft_interrupt_rip()
445 svm->soft_int_csbase = svm->vmcb->save.cs.base; in svm_update_soft_interrupt_rip()
446 svm->soft_int_old_rip = old_rip; in svm_update_soft_interrupt_rip()
447 svm->soft_int_next_rip = rip; in svm_update_soft_interrupt_rip()
453 svm->vmcb->control.next_rip = rip; in svm_update_soft_interrupt_rip()
461 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_exception() local
469 svm->vmcb->control.event_inj = ex->vector in svm_inject_exception()
473 svm->vmcb->control.event_inj_err = ex->error_code; in svm_inject_exception()
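
The event_inj write above packs the exception into the VMCB EVENTINJ field; per the AMD APM the vector sits in bits 7:0, the type in bits 10:8, the error-code-valid flag in bit 11, and the valid flag in bit 31. A sketch of the encoding:

#include <stdint.h>

#define EVTINJ_TYPE_EXEPT (3u << 8)   /* exception type */
#define EVTINJ_VALID_ERR  (1u << 11)  /* an error code is pushed */
#define EVTINJ_VALID      (1u << 31)  /* injection pending */

static uint32_t encode_exception(uint8_t vector, int has_error_code)
{
    return vector | EVTINJ_TYPE_EXEPT | EVTINJ_VALID |
           (has_error_code ? EVTINJ_VALID_ERR : 0);
}
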
687 struct vcpu_svm *svm = to_svm(vcpu); in set_shadow_msr_intercept() local
695 set_bit(slot, svm->shadow_msr_intercept.read); in set_shadow_msr_intercept()
697 clear_bit(slot, svm->shadow_msr_intercept.read); in set_shadow_msr_intercept()
700 set_bit(slot, svm->shadow_msr_intercept.write); in set_shadow_msr_intercept()
702 clear_bit(slot, svm->shadow_msr_intercept.write); in set_shadow_msr_intercept()
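
set_shadow_msr_intercept() above mirrors the requested read/write intercept state into per-vCPU bitmaps so it can be replayed later (see svm_msr_filter_changed() further down). A self-contained model of the bit bookkeeping, with a hypothetical slot count:

#include <limits.h>
#include <stdbool.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define MSR_SLOTS 256  /* hypothetical number of direct-access slots */

struct shadow_model {
    unsigned long read[MSR_SLOTS / BITS_PER_LONG + 1];
    unsigned long write[MSR_SLOTS / BITS_PER_LONG + 1];
};

static void assign_bit_model(unsigned long *map, unsigned int slot, bool set)
{
    unsigned long bit = 1UL << (slot % BITS_PER_LONG);

    if (set)
        map[slot / BITS_PER_LONG] |= bit;   /* set_bit() analogue */
    else
        map[slot / BITS_PER_LONG] &= ~bit;  /* clear_bit() analogue */
}
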
741 struct vcpu_svm *svm = to_svm(vcpu); in set_msr_interception_bitmap() local
772 svm->nested.force_msr_bitmap_recalc = true; in set_msr_interception_bitmap()
808 void svm_set_x2apic_msr_interception(struct vcpu_svm *svm, bool intercept) in svm_set_x2apic_msr_interception() argument
812 if (intercept == svm->x2avic_msrs_intercepted) in svm_set_x2apic_msr_interception()
816 !apic_x2apic_mode(svm->vcpu.arch.apic)) in svm_set_x2apic_msr_interception()
825 set_msr_interception(&svm->vcpu, svm->msrpm, index, in svm_set_x2apic_msr_interception()
829 svm->x2avic_msrs_intercepted = intercept; in svm_set_x2apic_msr_interception()
839 struct vcpu_svm *svm = to_svm(vcpu); in svm_msr_filter_changed() local
849 u32 read = test_bit(i, svm->shadow_msr_intercept.read); in svm_msr_filter_changed()
850 u32 write = test_bit(i, svm->shadow_msr_intercept.write); in svm_msr_filter_changed()
852 set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write); in svm_msr_filter_changed()
912 struct vcpu_svm *svm = to_svm(vcpu); in svm_enable_lbrv() local
914 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; in svm_enable_lbrv()
915 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); in svm_enable_lbrv()
916 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); in svm_enable_lbrv()
917 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); in svm_enable_lbrv()
918 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); in svm_enable_lbrv()
922 svm_copy_lbrs(svm->vmcb, svm->vmcb01.ptr); in svm_enable_lbrv()
927 struct vcpu_svm *svm = to_svm(vcpu); in svm_disable_lbrv() local
929 svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK; in svm_disable_lbrv()
930 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); in svm_disable_lbrv()
931 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); in svm_disable_lbrv()
932 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); in svm_disable_lbrv()
933 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0); in svm_disable_lbrv()
940 svm_copy_lbrs(svm->vmcb01.ptr, svm->vmcb); in svm_disable_lbrv()
943 static int svm_get_lbr_msr(struct vcpu_svm *svm, u32 index) in svm_get_lbr_msr() argument
953 (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) ? in svm_get_lbr_msr()
954 svm->vmcb : svm->vmcb01.ptr; in svm_get_lbr_msr()
968 KVM_BUG(false, svm->vcpu.kvm, in svm_get_lbr_msr()
976 struct vcpu_svm *svm = to_svm(vcpu); in svm_update_lbrv() local
978 bool enable_lbrv = svm_get_lbr_msr(svm, MSR_IA32_DEBUGCTLMSR) & in svm_update_lbrv()
981 bool current_enable_lbrv = !!(svm->vmcb->control.virt_ext & in svm_update_lbrv()
984 if (unlikely(is_guest_mode(vcpu) && svm->lbrv_enabled)) in svm_update_lbrv()
985 if (unlikely(svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK)) in svm_update_lbrv()
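
The LBR cluster above follows one rule: while LBR_CTL_ENABLE_MASK is set in virt_ext the LBR MSRs live in the currently active VMCB, otherwise in vmcb01, and svm_copy_lbrs() moves them whenever LBR virtualization is switched on or off. The selection in svm_get_lbr_msr() is essentially:

#include <stdint.h>

#define LBR_CTL_ENABLE 0x1ULL  /* assumed bit 0, as in LBR_CTL_ENABLE_MASK */

struct vmcb_model { uint64_t virt_ext, dbgctl; };

/* Pick the VMCB that currently owns the LBR state. */
static const struct vmcb_model *lbr_vmcb(const struct vmcb_model *active,
                                         const struct vmcb_model *vmcb01)
{
    return (active->virt_ext & LBR_CTL_ENABLE) ? active : vmcb01;
}
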
997 void disable_nmi_singlestep(struct vcpu_svm *svm) in disable_nmi_singlestep() argument
999 svm->nmi_singlestep = false; in disable_nmi_singlestep()
1001 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) { in disable_nmi_singlestep()
1003 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in disable_nmi_singlestep()
1004 svm->vmcb->save.rflags &= ~X86_EFLAGS_TF; in disable_nmi_singlestep()
1005 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in disable_nmi_singlestep()
1006 svm->vmcb->save.rflags &= ~X86_EFLAGS_RF; in disable_nmi_singlestep()
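
disable_nmi_singlestep() above must undo only the TF/RF bits that KVM itself set: the guest's original RFLAGS were stashed in nmi_singlestep_guest_rflags, and each flag is cleared only if the guest did not already have it set. In isolation:

#include <stdint.h>

#define X86_EFLAGS_TF (1ULL << 8)   /* trap flag */
#define X86_EFLAGS_RF (1ULL << 16)  /* resume flag */

static uint64_t restore_rflags(uint64_t rflags, uint64_t saved_guest_rflags)
{
    if (!(saved_guest_rflags & X86_EFLAGS_TF))
        rflags &= ~X86_EFLAGS_TF;
    if (!(saved_guest_rflags & X86_EFLAGS_RF))
        rflags &= ~X86_EFLAGS_RF;
    return rflags;
}
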
1012 struct vcpu_svm *svm = to_svm(vcpu); in grow_ple_window() local
1013 struct vmcb_control_area *control = &svm->vmcb->control; in grow_ple_window()
1025 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in grow_ple_window()
1033 struct vcpu_svm *svm = to_svm(vcpu); in shrink_ple_window() local
1034 struct vmcb_control_area *control = &svm->vmcb->control; in shrink_ple_window()
1046 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in shrink_ple_window()
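
grow_ple_window() and shrink_ple_window() above adjust the PAUSE filter count between vmexits and clamp it to configured bounds. A generic sketch of the pattern, with stand-in tunables rather than KVM's actual module parameters:

static unsigned int grow_window(unsigned int old, unsigned int base,
                                unsigned int grow, unsigned int max)
{
    unsigned int val = grow ? old * grow : base;

    return val > max ? max : val;
}

static unsigned int shrink_window(unsigned int old, unsigned int base,
                                  unsigned int shrink, unsigned int min)
{
    unsigned int val = shrink ? old / shrink : base;

    return val < min ? min : val;
}
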
1085 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_l2_tsc_offset() local
1087 return svm->nested.ctl.tsc_offset; in svm_get_l2_tsc_offset()
1092 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_l2_tsc_multiplier() local
1094 return svm->tsc_ratio_msr; in svm_get_l2_tsc_multiplier()
1099 struct vcpu_svm *svm = to_svm(vcpu); in svm_write_tsc_offset() local
1101 svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset; in svm_write_tsc_offset()
1102 svm->vmcb->control.tsc_offset = offset; in svm_write_tsc_offset()
1103 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in svm_write_tsc_offset()
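
svm_write_tsc_offset() above keeps L1's offset in vmcb01 while the active VMCB carries the offset for whichever level is running; the hardware then derives the guest-visible TSC from the host TSC. A simplified form of that relationship (real hardware uses an 8.32 fixed-point ratio, flattened to a plain multiplier here):

#include <stdint.h>

static uint64_t guest_tsc_model(uint64_t host_tsc, uint64_t ratio,
                                uint64_t offset)
{
    /* guest_tsc = host_tsc * ratio + offset, per the AMD APM */
    return host_tsc * ratio + offset;
}
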
1114 struct vcpu_svm *svm) in svm_recalc_instruction_intercepts() argument
1122 !guest_cpuid_has(&svm->vcpu, X86_FEATURE_INVPCID)) in svm_recalc_instruction_intercepts()
1123 svm_set_intercept(svm, INTERCEPT_INVPCID); in svm_recalc_instruction_intercepts()
1125 svm_clr_intercept(svm, INTERCEPT_INVPCID); in svm_recalc_instruction_intercepts()
1130 svm_clr_intercept(svm, INTERCEPT_RDTSCP); in svm_recalc_instruction_intercepts()
1132 svm_set_intercept(svm, INTERCEPT_RDTSCP); in svm_recalc_instruction_intercepts()
1138 struct vcpu_svm *svm = to_svm(vcpu); in init_vmcb_after_set_cpuid() local
1146 svm_set_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb_after_set_cpuid()
1147 svm_set_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb_after_set_cpuid()
1148 svm->vmcb->control.virt_ext &= ~VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; in init_vmcb_after_set_cpuid()
1150 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 0, 0); in init_vmcb_after_set_cpuid()
1151 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 0, 0); in init_vmcb_after_set_cpuid()
1153 svm->v_vmload_vmsave_enabled = false; in init_vmcb_after_set_cpuid()
1160 svm_clr_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb_after_set_cpuid()
1161 svm_clr_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb_after_set_cpuid()
1162 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; in init_vmcb_after_set_cpuid()
1165 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_EIP, 1, 1); in init_vmcb_after_set_cpuid()
1166 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SYSENTER_ESP, 1, 1); in init_vmcb_after_set_cpuid()
1172 struct vcpu_svm *svm = to_svm(vcpu); in init_vmcb() local
1173 struct vmcb *vmcb = svm->vmcb01.ptr; in init_vmcb()
1177 svm_set_intercept(svm, INTERCEPT_CR0_READ); in init_vmcb()
1178 svm_set_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1179 svm_set_intercept(svm, INTERCEPT_CR4_READ); in init_vmcb()
1180 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); in init_vmcb()
1181 svm_set_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1182 svm_set_intercept(svm, INTERCEPT_CR4_WRITE); in init_vmcb()
1184 svm_set_intercept(svm, INTERCEPT_CR8_WRITE); in init_vmcb()
1186 set_dr_intercepts(svm); in init_vmcb()
1188 set_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1189 set_exception_intercept(svm, UD_VECTOR); in init_vmcb()
1190 set_exception_intercept(svm, MC_VECTOR); in init_vmcb()
1191 set_exception_intercept(svm, AC_VECTOR); in init_vmcb()
1192 set_exception_intercept(svm, DB_VECTOR); in init_vmcb()
1201 set_exception_intercept(svm, GP_VECTOR); in init_vmcb()
1203 svm_set_intercept(svm, INTERCEPT_INTR); in init_vmcb()
1204 svm_set_intercept(svm, INTERCEPT_NMI); in init_vmcb()
1207 svm_set_intercept(svm, INTERCEPT_SMI); in init_vmcb()
1209 svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0); in init_vmcb()
1210 svm_set_intercept(svm, INTERCEPT_RDPMC); in init_vmcb()
1211 svm_set_intercept(svm, INTERCEPT_CPUID); in init_vmcb()
1212 svm_set_intercept(svm, INTERCEPT_INVD); in init_vmcb()
1213 svm_set_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1214 svm_set_intercept(svm, INTERCEPT_INVLPGA); in init_vmcb()
1215 svm_set_intercept(svm, INTERCEPT_IOIO_PROT); in init_vmcb()
1216 svm_set_intercept(svm, INTERCEPT_MSR_PROT); in init_vmcb()
1217 svm_set_intercept(svm, INTERCEPT_TASK_SWITCH); in init_vmcb()
1218 svm_set_intercept(svm, INTERCEPT_SHUTDOWN); in init_vmcb()
1219 svm_set_intercept(svm, INTERCEPT_VMRUN); in init_vmcb()
1220 svm_set_intercept(svm, INTERCEPT_VMMCALL); in init_vmcb()
1221 svm_set_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb()
1222 svm_set_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb()
1223 svm_set_intercept(svm, INTERCEPT_STGI); in init_vmcb()
1224 svm_set_intercept(svm, INTERCEPT_CLGI); in init_vmcb()
1225 svm_set_intercept(svm, INTERCEPT_SKINIT); in init_vmcb()
1226 svm_set_intercept(svm, INTERCEPT_WBINVD); in init_vmcb()
1227 svm_set_intercept(svm, INTERCEPT_XSETBV); in init_vmcb()
1228 svm_set_intercept(svm, INTERCEPT_RDPRU); in init_vmcb()
1229 svm_set_intercept(svm, INTERCEPT_RSM); in init_vmcb()
1232 svm_set_intercept(svm, INTERCEPT_MONITOR); in init_vmcb()
1233 svm_set_intercept(svm, INTERCEPT_MWAIT); in init_vmcb()
1237 svm_set_intercept(svm, INTERCEPT_HLT); in init_vmcb()
1240 control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); in init_vmcb()
1267 svm_clr_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1268 clr_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1269 svm_clr_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1270 svm_clr_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1274 svm->current_vmcb->asid_generation = 0; in init_vmcb()
1275 svm->asid = 0; in init_vmcb()
1277 svm->nested.vmcb12_gpa = INVALID_GPA; in init_vmcb()
1278 svm->nested.last_vmcb12_gpa = INVALID_GPA; in init_vmcb()
1284 svm_set_intercept(svm, INTERCEPT_PAUSE); in init_vmcb()
1286 svm_clr_intercept(svm, INTERCEPT_PAUSE); in init_vmcb()
1289 svm_recalc_instruction_intercepts(vcpu, svm); in init_vmcb()
1296 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); in init_vmcb()
1299 avic_init_vmcb(svm, vmcb); in init_vmcb()
1302 svm_clr_intercept(svm, INTERCEPT_STGI); in init_vmcb()
1303 svm_clr_intercept(svm, INTERCEPT_CLGI); in init_vmcb()
1304 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; in init_vmcb()
1308 sev_init_vmcb(svm); in init_vmcb()
1315 enable_gif(svm); in init_vmcb()
1320 struct vcpu_svm *svm = to_svm(vcpu); in __svm_vcpu_reset() local
1322 svm_vcpu_init_msrpm(vcpu, svm->msrpm); in __svm_vcpu_reset()
1326 svm->tsc_ratio_msr = kvm_caps.default_tsc_scaling_ratio; in __svm_vcpu_reset()
1329 sev_es_vcpu_reset(svm); in __svm_vcpu_reset()
1334 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_reset() local
1336 svm->spec_ctrl = 0; in svm_vcpu_reset()
1337 svm->virt_spec_ctrl = 0; in svm_vcpu_reset()
1345 void svm_switch_vmcb(struct vcpu_svm *svm, struct kvm_vmcb_info *target_vmcb) in svm_switch_vmcb() argument
1347 svm->current_vmcb = target_vmcb; in svm_switch_vmcb()
1348 svm->vmcb = target_vmcb->ptr; in svm_switch_vmcb()
1353 struct vcpu_svm *svm; in svm_vcpu_create() local
1359 svm = to_svm(vcpu); in svm_vcpu_create()
1384 err = avic_init_vcpu(svm); in svm_vcpu_create()
1388 svm->msrpm = svm_vcpu_alloc_msrpm(); in svm_vcpu_create()
1389 if (!svm->msrpm) { in svm_vcpu_create()
1394 svm->x2avic_msrs_intercepted = true; in svm_vcpu_create()
1396 svm->vmcb01.ptr = page_address(vmcb01_page); in svm_vcpu_create()
1397 svm->vmcb01.pa = __sme_set(page_to_pfn(vmcb01_page) << PAGE_SHIFT); in svm_vcpu_create()
1398 svm_switch_vmcb(svm, &svm->vmcb01); in svm_vcpu_create()
1401 svm->sev_es.vmsa = page_address(vmsa_page); in svm_vcpu_create()
1403 svm->guest_state_loaded = false; in svm_vcpu_create()
1426 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_free() local
1433 svm_clear_current_vmcb(svm->vmcb); in svm_vcpu_free()
1436 svm_free_nested(svm); in svm_vcpu_free()
1440 __free_page(pfn_to_page(__sme_clr(svm->vmcb01.pa) >> PAGE_SHIFT)); in svm_vcpu_free()
1441 __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE)); in svm_vcpu_free()
1446 struct vcpu_svm *svm = to_svm(vcpu); in svm_prepare_switch_to_guest() local
1450 sev_es_unmap_ghcb(svm); in svm_prepare_switch_to_guest()
1452 if (svm->guest_state_loaded) in svm_prepare_switch_to_guest()
1471 kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull); in svm_prepare_switch_to_guest()
1473 svm->guest_state_loaded = true; in svm_prepare_switch_to_guest()
1483 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_load() local
1486 if (sd->current_vmcb != svm->vmcb) { in svm_vcpu_load()
1487 sd->current_vmcb = svm->vmcb; in svm_vcpu_load()
1506 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_rflags() local
1507 unsigned long rflags = svm->vmcb->save.rflags; in svm_get_rflags()
1509 if (svm->nmi_singlestep) { in svm_get_rflags()
1511 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in svm_get_rflags()
1513 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in svm_get_rflags()
1559 static void svm_set_vintr(struct vcpu_svm *svm) in svm_set_vintr() argument
1566 WARN_ON(kvm_vcpu_apicv_activated(&svm->vcpu)); in svm_set_vintr()
1568 svm_set_intercept(svm, INTERCEPT_VINTR); in svm_set_vintr()
1574 control = &svm->vmcb->control; in svm_set_vintr()
1579 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_set_vintr()
1582 static void svm_clear_vintr(struct vcpu_svm *svm) in svm_clear_vintr() argument
1584 svm_clr_intercept(svm, INTERCEPT_VINTR); in svm_clear_vintr()
1587 svm->vmcb->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; in svm_clear_vintr()
1588 if (is_guest_mode(&svm->vcpu)) { in svm_clear_vintr()
1589 svm->vmcb01.ptr->control.int_ctl &= ~V_IRQ_INJECTION_BITS_MASK; in svm_clear_vintr()
1591 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != in svm_clear_vintr()
1592 (svm->nested.ctl.int_ctl & V_TPR_MASK)); in svm_clear_vintr()
1594 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & in svm_clear_vintr()
1597 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; in svm_clear_vintr()
1600 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_clear_vintr()
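
svm_clear_vintr() above has to do more than drop the intercept: the virtual-IRQ fields in int_ctl are cleared in both the active VMCB and vmcb01, and when L2 is running the V_IRQ state that L1 requested is merged back in. A hedged sketch, with a stand-in for the real field mask:

#include <stdbool.h>
#include <stdint.h>

#define V_IRQ_BITS_MODEL 0xffULL  /* stand-in for V_IRQ_INJECTION_BITS_MASK */

static uint64_t clear_vintr_model(uint64_t int_ctl, uint64_t nested_int_ctl,
                                  bool guest_mode)
{
    int_ctl &= ~V_IRQ_BITS_MODEL;  /* drop KVM's own virtual IRQ request */
    if (guest_mode)                /* restore what L1 asked for */
        int_ctl |= nested_int_ctl & V_IRQ_BITS_MODEL;
    return int_ctl;
}
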
1716 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_idt() local
1718 dt->size = svm->vmcb->save.idtr.limit; in svm_get_idt()
1719 dt->address = svm->vmcb->save.idtr.base; in svm_get_idt()
1724 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_idt() local
1726 svm->vmcb->save.idtr.limit = dt->size; in svm_set_idt()
1727 svm->vmcb->save.idtr.base = dt->address; in svm_set_idt()
1728 vmcb_mark_dirty(svm->vmcb, VMCB_DT); in svm_set_idt()
1733 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_gdt() local
1735 dt->size = svm->vmcb->save.gdtr.limit; in svm_get_gdt()
1736 dt->address = svm->vmcb->save.gdtr.base; in svm_get_gdt()
1741 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_gdt() local
1743 svm->vmcb->save.gdtr.limit = dt->size; in svm_set_gdt()
1744 svm->vmcb->save.gdtr.base = dt->address; in svm_set_gdt()
1745 vmcb_mark_dirty(svm->vmcb, VMCB_DT); in svm_set_gdt()
1750 struct vcpu_svm *svm = to_svm(vcpu); in sev_post_set_cr3() local
1761 svm->vmcb->save.cr3 = cr3; in sev_post_set_cr3()
1762 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in sev_post_set_cr3()
1768 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr0() local
1776 svm->vmcb->save.efer |= EFER_LMA | EFER_LME; in svm_set_cr0()
1781 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); in svm_set_cr0()
1801 svm->vmcb->save.cr0 = hcr0; in svm_set_cr0()
1802 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_set_cr0()
1813 svm_clr_intercept(svm, INTERCEPT_CR0_READ); in svm_set_cr0()
1814 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); in svm_set_cr0()
1816 svm_set_intercept(svm, INTERCEPT_CR0_READ); in svm_set_cr0()
1817 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); in svm_set_cr0()
1852 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_segment() local
1875 svm->vmcb->save.cpl = (var->dpl & 3); in svm_set_segment()
1877 vmcb_mark_dirty(svm->vmcb, VMCB_SEG); in svm_set_segment()
1882 struct vcpu_svm *svm = to_svm(vcpu); in svm_update_exception_bitmap() local
1884 clr_exception_intercept(svm, BP_VECTOR); in svm_update_exception_bitmap()
1888 set_exception_intercept(svm, BP_VECTOR); in svm_update_exception_bitmap()
1892 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) in new_asid() argument
1897 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; in new_asid()
1898 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in new_asid()
1901 svm->current_vmcb->asid_generation = sd->asid_generation; in new_asid()
1902 svm->asid = sd->next_asid++; in new_asid()
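
new_asid() above implements a generation-based allocator: ASIDs are handed out sequentially per physical CPU, and when the pool runs out the generation is bumped and a full TLB flush is requested so stale translations cannot survive the wrap. A sketch:

#include <stdint.h>

struct asid_pool { uint32_t min_asid, max_asid, next_asid, generation; };

static uint32_t new_asid_model(struct asid_pool *sd, int *flush_all)
{
    if (sd->next_asid > sd->max_asid) {
        sd->generation++;
        sd->next_asid = sd->min_asid;  /* restart the sequential scan */
        *flush_all = 1;                /* TLB_CONTROL_FLUSH_ALL_ASID analogue */
    }
    return sd->next_asid++;
}
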
1905 static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value) in svm_set_dr6() argument
1907 struct vmcb *vmcb = svm->vmcb; in svm_set_dr6()
1909 if (svm->vcpu.arch.guest_state_protected) in svm_set_dr6()
1920 struct vcpu_svm *svm = to_svm(vcpu); in svm_sync_dirty_debug_regs() local
1933 vcpu->arch.dr6 = svm->vmcb->save.dr6; in svm_sync_dirty_debug_regs()
1934 vcpu->arch.dr7 = svm->vmcb->save.dr7; in svm_sync_dirty_debug_regs()
1936 set_dr_intercepts(svm); in svm_sync_dirty_debug_regs()
1941 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr7() local
1946 svm->vmcb->save.dr7 = value; in svm_set_dr7()
1947 vmcb_mark_dirty(svm->vmcb, VMCB_DR); in svm_set_dr7()
1952 struct vcpu_svm *svm = to_svm(vcpu); in pf_interception() local
1954 u64 fault_address = svm->vmcb->control.exit_info_2; in pf_interception()
1955 u64 error_code = svm->vmcb->control.exit_info_1; in pf_interception()
1959 svm->vmcb->control.insn_bytes : NULL, in pf_interception()
1960 svm->vmcb->control.insn_len); in pf_interception()
1965 struct vcpu_svm *svm = to_svm(vcpu); in npf_interception() local
1967 u64 fault_address = svm->vmcb->control.exit_info_2; in npf_interception()
1968 u64 error_code = svm->vmcb->control.exit_info_1; in npf_interception()
1973 svm->vmcb->control.insn_bytes : NULL, in npf_interception()
1974 svm->vmcb->control.insn_len); in npf_interception()
1980 struct vcpu_svm *svm = to_svm(vcpu); in db_interception() local
1984 !svm->nmi_singlestep) { in db_interception()
1985 u32 payload = svm->vmcb->save.dr6 ^ DR6_ACTIVE_LOW; in db_interception()
1990 if (svm->nmi_singlestep) { in db_interception()
1991 disable_nmi_singlestep(svm); in db_interception()
1999 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; in db_interception()
2000 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7; in db_interception()
2002 svm->vmcb->save.cs.base + svm->vmcb->save.rip; in db_interception()
2012 struct vcpu_svm *svm = to_svm(vcpu); in bp_interception() local
2016 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; in bp_interception()
2100 struct vcpu_svm *svm = to_svm(vcpu); in shutdown_interception() local
2117 clear_page(svm->vmcb); in shutdown_interception()
2126 struct vcpu_svm *svm = to_svm(vcpu); in io_interception() local
2127 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ in io_interception()
2139 return sev_es_string_io(svm, size, port, in); in io_interception()
2144 svm->next_rip = svm->vmcb->control.exit_info_2; in io_interception()
2167 struct vcpu_svm *svm = to_svm(vcpu); in vmload_vmsave_interception() local
2175 ret = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); in vmload_vmsave_interception()
2187 svm_copy_vmloadsave_state(svm->vmcb, vmcb12); in vmload_vmsave_interception()
2188 svm->sysenter_eip_hi = 0; in vmload_vmsave_interception()
2189 svm->sysenter_esp_hi = 0; in vmload_vmsave_interception()
2191 svm_copy_vmloadsave_state(vmcb12, svm->vmcb); in vmload_vmsave_interception()
2258 struct vcpu_svm *svm = to_svm(vcpu); in emulate_svm_instr() local
2263 ret = nested_svm_simple_vmexit(svm, guest_mode_exit_codes[opcode]); in emulate_svm_instr()
2281 struct vcpu_svm *svm = to_svm(vcpu); in gp_interception() local
2282 u32 error_code = svm->vmcb->control.exit_info_1; in gp_interception()
2308 if (svm->vmcb->save.rax & ~PAGE_MASK) in gp_interception()
2319 void svm_set_gif(struct vcpu_svm *svm, bool value) in svm_set_gif() argument
2329 svm_clr_intercept(svm, INTERCEPT_STGI); in svm_set_gif()
2330 if (svm_is_intercept(svm, INTERCEPT_VINTR)) in svm_set_gif()
2331 svm_clear_vintr(svm); in svm_set_gif()
2333 enable_gif(svm); in svm_set_gif()
2334 if (svm->vcpu.arch.smi_pending || in svm_set_gif()
2335 svm->vcpu.arch.nmi_pending || in svm_set_gif()
2336 kvm_cpu_has_injectable_intr(&svm->vcpu) || in svm_set_gif()
2337 kvm_apic_has_pending_init_or_sipi(&svm->vcpu)) in svm_set_gif()
2338 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_set_gif()
2340 disable_gif(svm); in svm_set_gif()
2348 svm_clear_vintr(svm); in svm_set_gif()
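
svm_set_gif() above captures the GIF semantics: raising GIF can make events that were latched while it was clear deliverable again, so KVM queues a KVM_REQ_EVENT re-evaluation, and any pending VINTR window becomes meaningless and is torn down. Reduced to its core:

#include <stdbool.h>

struct gif_vcpu { bool gif, smi_pending, nmi_pending, irq_pending, req_event; };

static void set_gif_model(struct gif_vcpu *v, bool value)
{
    v->gif = value;
    if (value && (v->smi_pending || v->nmi_pending || v->irq_pending))
        v->req_event = true;  /* KVM_REQ_EVENT analogue */
}
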
2403 struct vcpu_svm *svm = to_svm(vcpu); in task_switch_interception() local
2406 int int_type = svm->vmcb->control.exit_int_info & in task_switch_interception()
2408 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; in task_switch_interception()
2410 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; in task_switch_interception()
2412 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; in task_switch_interception()
2416 tss_selector = (u16)svm->vmcb->control.exit_info_1; in task_switch_interception()
2418 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2421 else if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2435 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2439 (u32)svm->vmcb->control.exit_info_2; in task_switch_interception()
2469 struct vcpu_svm *svm = to_svm(vcpu); in iret_interception() local
2474 svm_clr_intercept(svm, INTERCEPT_IRET); in iret_interception()
2475 svm->nmi_iret_rip = kvm_rip_read(vcpu); in iret_interception()
2503 struct vcpu_svm *svm = to_svm(vcpu); in check_selective_cr0_intercepted() local
2508 (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) in check_selective_cr0_intercepted()
2515 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; in check_selective_cr0_intercepted()
2516 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); in check_selective_cr0_intercepted()
2526 struct vcpu_svm *svm = to_svm(vcpu); in cr_interception() local
2534 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) in cr_interception()
2537 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in cr_interception()
2538 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) in cr_interception()
2541 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; in cr_interception()
2600 struct vcpu_svm *svm = to_svm(vcpu); in cr_trap() local
2605 new_value = (unsigned long)svm->vmcb->control.exit_info_1; in cr_trap()
2607 cr = svm->vmcb->control.exit_code - SVM_EXIT_CR0_WRITE_TRAP; in cr_trap()
2635 struct vcpu_svm *svm = to_svm(vcpu); in dr_interception() local
2646 clr_dr_intercepts(svm); in dr_interception()
2654 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in dr_interception()
2655 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; in dr_interception()
2722 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_msr() local
2726 if (!msr_info->host_initiated && !svm->tsc_scaling_enabled) in svm_get_msr()
2728 msr_info->data = svm->tsc_ratio_msr; in svm_get_msr()
2731 msr_info->data = svm->vmcb01.ptr->save.star; in svm_get_msr()
2735 msr_info->data = svm->vmcb01.ptr->save.lstar; in svm_get_msr()
2738 msr_info->data = svm->vmcb01.ptr->save.cstar; in svm_get_msr()
2741 msr_info->data = svm->vmcb01.ptr->save.kernel_gs_base; in svm_get_msr()
2744 msr_info->data = svm->vmcb01.ptr->save.sfmask; in svm_get_msr()
2748 msr_info->data = svm->vmcb01.ptr->save.sysenter_cs; in svm_get_msr()
2751 msr_info->data = (u32)svm->vmcb01.ptr->save.sysenter_eip; in svm_get_msr()
2753 msr_info->data |= (u64)svm->sysenter_eip_hi << 32; in svm_get_msr()
2756 msr_info->data = svm->vmcb01.ptr->save.sysenter_esp; in svm_get_msr()
2758 msr_info->data |= (u64)svm->sysenter_esp_hi << 32; in svm_get_msr()
2761 msr_info->data = svm->tsc_aux; in svm_get_msr()
2768 msr_info->data = svm_get_lbr_msr(svm, msr_info->index); in svm_get_msr()
2771 msr_info->data = svm->nested.hsave_msr; in svm_get_msr()
2774 msr_info->data = svm->nested.vm_cr_msr; in svm_get_msr()
2782 msr_info->data = svm->vmcb->save.spec_ctrl; in svm_get_msr()
2784 msr_info->data = svm->spec_ctrl; in svm_get_msr()
2791 msr_info->data = svm->virt_spec_ctrl; in svm_get_msr()
2811 msr_info->data = svm->msr_decfg; in svm_get_msr()
2821 struct vcpu_svm *svm = to_svm(vcpu); in svm_complete_emulated_msr() local
2822 if (!err || !sev_es_guest(vcpu->kvm) || WARN_ON_ONCE(!svm->sev_es.ghcb)) in svm_complete_emulated_msr()
2825 ghcb_set_sw_exit_info_1(svm->sev_es.ghcb, 1); in svm_complete_emulated_msr()
2826 ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, in svm_complete_emulated_msr()
2835 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_vm_cr() local
2843 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) in svm_set_vm_cr()
2846 svm->nested.vm_cr_msr &= ~chg_mask; in svm_set_vm_cr()
2847 svm->nested.vm_cr_msr |= (data & chg_mask); in svm_set_vm_cr()
2849 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; in svm_set_vm_cr()
2860 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_msr() local
2868 if (!svm->tsc_scaling_enabled) { in svm_set_msr()
2880 if (data != 0 && data != svm->tsc_ratio_msr) in svm_set_msr()
2888 svm->tsc_ratio_msr = data; in svm_set_msr()
2890 if (svm->tsc_scaling_enabled && is_guest_mode(vcpu)) in svm_set_msr()
2898 svm->vmcb01.ptr->save.g_pat = data; in svm_set_msr()
2900 nested_vmcb02_compute_g_pat(svm); in svm_set_msr()
2901 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); in svm_set_msr()
2912 svm->vmcb->save.spec_ctrl = data; in svm_set_msr()
2914 svm->spec_ctrl = data; in svm_set_msr()
2929 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); in svm_set_msr()
2944 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); in svm_set_msr()
2954 svm->virt_spec_ctrl = data; in svm_set_msr()
2957 svm->vmcb01.ptr->save.star = data; in svm_set_msr()
2961 svm->vmcb01.ptr->save.lstar = data; in svm_set_msr()
2964 svm->vmcb01.ptr->save.cstar = data; in svm_set_msr()
2967 svm->vmcb01.ptr->save.kernel_gs_base = data; in svm_set_msr()
2970 svm->vmcb01.ptr->save.sfmask = data; in svm_set_msr()
2974 svm->vmcb01.ptr->save.sysenter_cs = data; in svm_set_msr()
2977 svm->vmcb01.ptr->save.sysenter_eip = (u32)data; in svm_set_msr()
2985 svm->sysenter_eip_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; in svm_set_msr()
2988 svm->vmcb01.ptr->save.sysenter_esp = (u32)data; in svm_set_msr()
2989 svm->sysenter_esp_hi = guest_cpuid_is_intel(vcpu) ? (data >> 32) : 0; in svm_set_msr()
3003 svm->tsc_aux = data; in svm_set_msr()
3014 if (svm->vmcb->control.virt_ext & LBR_CTL_ENABLE_MASK) in svm_set_msr()
3015 svm->vmcb->save.dbgctl = data; in svm_set_msr()
3017 svm->vmcb01.ptr->save.dbgctl = data; in svm_set_msr()
3032 svm->nested.hsave_msr = data & PAGE_MASK; in svm_set_msr()
3054 svm->msr_decfg = data; in svm_set_msr()
3112 struct vcpu_svm *svm = to_svm(vcpu); in invpcid_interception() local
3126 type = svm->vmcb->control.exit_info_2; in invpcid_interception()
3127 gva = svm->vmcb->control.exit_info_1; in invpcid_interception()
3208 struct vcpu_svm *svm = to_svm(vcpu); in dump_vmcb() local
3209 struct vmcb_control_area *control = &svm->vmcb->control; in dump_vmcb()
3210 struct vmcb_save_area *save = &svm->vmcb->save; in dump_vmcb()
3211 struct vmcb_save_area *save01 = &svm->vmcb01.ptr->save; in dump_vmcb()
3219 svm->current_vmcb->ptr, vcpu->arch.last_vmentry_cpu); in dump_vmcb()
3386 struct vcpu_svm *svm = to_svm(vcpu); in svm_handle_exit() local
3388 u32 exit_code = svm->vmcb->control.exit_code; in svm_handle_exit()
3394 if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE)) in svm_handle_exit()
3395 vcpu->arch.cr0 = svm->vmcb->save.cr0; in svm_handle_exit()
3397 vcpu->arch.cr3 = svm->vmcb->save.cr3; in svm_handle_exit()
3405 vmexit = nested_svm_exit_special(svm); in svm_handle_exit()
3408 vmexit = nested_svm_exit_handled(svm); in svm_handle_exit()
3414 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { in svm_handle_exit()
3417 = svm->vmcb->control.exit_code; in svm_handle_exit()
3440 struct vcpu_svm *svm = to_svm(vcpu); in pre_svm_run() local
3447 if (unlikely(svm->current_vmcb->cpu != vcpu->cpu)) { in pre_svm_run()
3448 svm->current_vmcb->asid_generation = 0; in pre_svm_run()
3449 vmcb_mark_all_dirty(svm->vmcb); in pre_svm_run()
3450 svm->current_vmcb->cpu = vcpu->cpu; in pre_svm_run()
3454 return pre_sev_run(svm, vcpu->cpu); in pre_svm_run()
3457 if (svm->current_vmcb->asid_generation != sd->asid_generation) in pre_svm_run()
3458 new_asid(svm, sd); in pre_svm_run()
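
pre_svm_run() above handles physical-CPU migration: if the vCPU last ran elsewhere, its cached ASID generation is zeroed (forcing the new_asid() path on the mismatch check that follows) and the whole VMCB is marked dirty, since the new CPU has no clean cached copy. As a sketch:

#include <stdbool.h>
#include <stdint.h>

struct vmcb_cache { int cpu; uint32_t asid_generation; bool all_dirty; };

static bool pre_run_model(struct vmcb_cache *cur, int this_cpu)
{
    if (cur->cpu != this_cpu) {   /* vCPU migrated since its last run */
        cur->asid_generation = 0; /* guarantees an ASID refresh below */
        cur->all_dirty = true;    /* vmcb_mark_all_dirty() analogue */
        cur->cpu = this_cpu;
        return true;
    }
    return false;
}
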
3463 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_nmi() local
3465 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; in svm_inject_nmi()
3467 if (svm->nmi_l1_to_l2) in svm_inject_nmi()
3472 svm_set_intercept(svm, INTERCEPT_IRET); in svm_inject_nmi()
3478 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_irq() local
3494 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | in svm_inject_irq()
3550 struct vcpu_svm *svm = to_svm(vcpu); in svm_update_cr8_intercept() local
3562 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); in svm_update_cr8_intercept()
3568 svm_set_intercept(svm, INTERCEPT_CR8_WRITE); in svm_update_cr8_intercept()
3573 struct vcpu_svm *svm = to_svm(vcpu); in svm_nmi_blocked() local
3574 struct vmcb *vmcb = svm->vmcb; in svm_nmi_blocked()
3577 if (!gif_set(svm)) in svm_nmi_blocked()
3580 if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) in svm_nmi_blocked()
3591 struct vcpu_svm *svm = to_svm(vcpu); in svm_nmi_allowed() local
3592 if (svm->nested.nested_run_pending) in svm_nmi_allowed()
3599 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) in svm_nmi_allowed()
3611 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nmi_mask() local
3616 svm_set_intercept(svm, INTERCEPT_IRET); in svm_set_nmi_mask()
3620 svm_clr_intercept(svm, INTERCEPT_IRET); in svm_set_nmi_mask()
3626 struct vcpu_svm *svm = to_svm(vcpu); in svm_interrupt_blocked() local
3627 struct vmcb *vmcb = svm->vmcb; in svm_interrupt_blocked()
3629 if (!gif_set(svm)) in svm_interrupt_blocked()
3634 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) in svm_interrupt_blocked()
3635 ? !(svm->vmcb01.ptr->save.rflags & X86_EFLAGS_IF) in svm_interrupt_blocked()
3640 if (nested_exit_on_intr(svm)) in svm_interrupt_blocked()
3652 struct vcpu_svm *svm = to_svm(vcpu); in svm_interrupt_allowed() local
3654 if (svm->nested.nested_run_pending) in svm_interrupt_allowed()
3664 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm)) in svm_interrupt_allowed()
3672 struct vcpu_svm *svm = to_svm(vcpu); in svm_enable_irq_window() local
3682 if (vgif || gif_set(svm)) { in svm_enable_irq_window()
3696 svm_set_vintr(svm); in svm_enable_irq_window()
3702 struct vcpu_svm *svm = to_svm(vcpu); in svm_enable_nmi_window() local
3707 if (!gif_set(svm)) { in svm_enable_nmi_window()
3709 svm_set_intercept(svm, INTERCEPT_STGI); in svm_enable_nmi_window()
3717 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); in svm_enable_nmi_window()
3718 svm->nmi_singlestep = true; in svm_enable_nmi_window()
3719 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); in svm_enable_nmi_window()
3724 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb_current() local
3734 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in svm_flush_tlb_current()
3736 svm->current_vmcb->asid_generation--; in svm_flush_tlb_current()
3741 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb_gva() local
3743 invlpga(gva, svm->vmcb->control.asid); in svm_flush_tlb_gva()
3748 struct vcpu_svm *svm = to_svm(vcpu); in sync_cr8_to_lapic() local
3753 if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) { in sync_cr8_to_lapic()
3754 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; in sync_cr8_to_lapic()
3761 struct vcpu_svm *svm = to_svm(vcpu); in sync_lapic_to_cr8() local
3769 svm->vmcb->control.int_ctl &= ~V_TPR_MASK; in sync_lapic_to_cr8()
3770 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; in sync_lapic_to_cr8()
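
The two sync helpers above mirror the 4-bit task priority between the local APIC's CR8/TPR and the V_TPR field of int_ctl, in whichever direction the exit requires. The lapic-to-VMCB direction is just:

#include <stdint.h>

#define V_TPR_MASK 0x0fu  /* low four bits of int_ctl hold the priority */

static uint32_t sync_cr8_into_int_ctl(uint32_t int_ctl, uint8_t cr8)
{
    int_ctl &= ~V_TPR_MASK;
    return int_ctl | (cr8 & V_TPR_MASK);
}
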
3778 struct vcpu_svm *svm = to_svm(vcpu); in svm_complete_soft_interrupt() local
3790 kvm_is_linear_rip(vcpu, svm->soft_int_old_rip + svm->soft_int_csbase)) in svm_complete_soft_interrupt()
3791 svm->vmcb->control.next_rip = svm->soft_int_next_rip; in svm_complete_soft_interrupt()
3801 kvm_is_linear_rip(vcpu, svm->soft_int_next_rip + svm->soft_int_csbase)) in svm_complete_soft_interrupt()
3802 kvm_rip_write(vcpu, svm->soft_int_old_rip); in svm_complete_soft_interrupt()
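
svm_complete_soft_interrupt() above guards re-injection with linear-RIP checks: the saved next_rip (or the rewind to old_rip) is applied only if RIP still points at the original injection site, i.e. the linear address formed from the stashed CS base still matches. The comparison amounts to:

#include <stdbool.h>
#include <stdint.h>

/* kvm_is_linear_rip() analogue: does the current linear RIP equal the
 * one recorded when the soft interrupt was injected? */
static bool rip_matches(uint64_t cur_linear_rip, uint64_t saved_rip,
                        uint64_t saved_csbase)
{
    return cur_linear_rip == saved_rip + saved_csbase;
}
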
3807 struct vcpu_svm *svm = to_svm(vcpu); in svm_complete_interrupts() local
3810 u32 exitintinfo = svm->vmcb->control.exit_int_info; in svm_complete_interrupts()
3811 bool nmi_l1_to_l2 = svm->nmi_l1_to_l2; in svm_complete_interrupts()
3812 bool soft_int_injected = svm->soft_int_injected; in svm_complete_interrupts()
3814 svm->nmi_l1_to_l2 = false; in svm_complete_interrupts()
3815 svm->soft_int_injected = false; in svm_complete_interrupts()
3823 kvm_rip_read(vcpu) != svm->nmi_iret_rip)) { in svm_complete_interrupts()
3846 svm->nmi_l1_to_l2 = nmi_l1_to_l2; in svm_complete_interrupts()
3856 u32 err = svm->vmcb->control.exit_int_info_err; in svm_complete_interrupts()
3876 struct vcpu_svm *svm = to_svm(vcpu); in svm_cancel_injection() local
3877 struct vmcb_control_area *control = &svm->vmcb->control; in svm_cancel_injection()
3901 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_enter_exit() local
3906 __svm_sev_es_vcpu_run(svm, spec_ctrl_intercepted); in svm_vcpu_enter_exit()
3908 __svm_vcpu_run(svm, spec_ctrl_intercepted); in svm_vcpu_enter_exit()
3915 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_run() local
3920 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_vcpu_run()
3921 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_vcpu_run()
3922 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_vcpu_run()
3930 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { in svm_vcpu_run()
3936 disable_nmi_singlestep(svm); in svm_vcpu_run()
3944 if (unlikely(svm->asid != svm->vmcb->control.asid)) { in svm_vcpu_run()
3945 svm->vmcb->control.asid = svm->asid; in svm_vcpu_run()
3946 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in svm_vcpu_run()
3948 svm->vmcb->save.cr2 = vcpu->arch.cr2; in svm_vcpu_run()
3950 svm_hv_update_vp_id(svm->vmcb, vcpu); in svm_vcpu_run()
3957 svm_set_dr6(svm, vcpu->arch.dr6); in svm_vcpu_run()
3959 svm_set_dr6(svm, DR6_ACTIVE_LOW); in svm_vcpu_run()
3973 x86_spec_ctrl_set_guest(svm->virt_spec_ctrl); in svm_vcpu_run()
3981 x86_spec_ctrl_restore_host(svm->virt_spec_ctrl); in svm_vcpu_run()
3984 vcpu->arch.cr2 = svm->vmcb->save.cr2; in svm_vcpu_run()
3985 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; in svm_vcpu_run()
3986 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; in svm_vcpu_run()
3987 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; in svm_vcpu_run()
3991 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
3999 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
4004 svm->next_rip = 0; in svm_vcpu_run()
4006 nested_sync_control_from_vmcb02(svm); in svm_vcpu_run()
4009 if (svm->nested.nested_run_pending && in svm_vcpu_run()
4010 svm->vmcb->control.exit_code != SVM_EXIT_ERR) in svm_vcpu_run()
4013 svm->nested.nested_run_pending = 0; in svm_vcpu_run()
4016 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; in svm_vcpu_run()
4017 vmcb_mark_all_clean(svm->vmcb); in svm_vcpu_run()
4020 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) in svm_vcpu_run()
4030 if (unlikely(svm->vmcb->control.exit_code == in svm_vcpu_run()
4045 struct vcpu_svm *svm = to_svm(vcpu); in svm_load_mmu_pgd() local
4049 svm->vmcb->control.nested_cr3 = __sme_set(root_hpa); in svm_load_mmu_pgd()
4050 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); in svm_load_mmu_pgd()
4063 svm->vmcb->save.cr3 = cr3; in svm_load_mmu_pgd()
4064 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_load_mmu_pgd()
4118 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_after_set_cpuid() local
4126 svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && in svm_vcpu_after_set_cpuid()
4129 svm->tsc_scaling_enabled = tsc_scaling && guest_cpuid_has(vcpu, X86_FEATURE_TSCRATEMSR); in svm_vcpu_after_set_cpuid()
4130 svm->lbrv_enabled = lbrv && guest_cpuid_has(vcpu, X86_FEATURE_LBRV); in svm_vcpu_after_set_cpuid()
4132 svm->v_vmload_vmsave_enabled = vls && guest_cpuid_has(vcpu, X86_FEATURE_V_VMSAVE_VMLOAD); in svm_vcpu_after_set_cpuid()
4134 svm->pause_filter_enabled = kvm_cpu_cap_has(X86_FEATURE_PAUSEFILTER) && in svm_vcpu_after_set_cpuid()
4137 svm->pause_threshold_enabled = kvm_cpu_cap_has(X86_FEATURE_PFTHRESHOLD) && in svm_vcpu_after_set_cpuid()
4140 svm->vgif_enabled = vgif && guest_cpuid_has(vcpu, X86_FEATURE_VGIF); in svm_vcpu_after_set_cpuid()
4142 svm_recalc_instruction_intercepts(vcpu, svm); in svm_vcpu_after_set_cpuid()
4228 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_intercept() local
4231 struct vmcb *vmcb = svm->vmcb; in svm_check_intercept()
4256 if (!(vmcb12_is_intercept(&svm->nested.ctl, in svm_check_intercept()
4334 vmexit = nested_svm_exit_handled(svm); in svm_check_intercept()
4363 struct vcpu_svm *svm = to_svm(vcpu); in svm_smi_blocked() local
4366 if (!gif_set(svm)) in svm_smi_blocked()
4374 struct vcpu_svm *svm = to_svm(vcpu); in svm_smi_allowed() local
4375 if (svm->nested.nested_run_pending) in svm_smi_allowed()
4382 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm)) in svm_smi_allowed()
4390 struct vcpu_svm *svm = to_svm(vcpu); in svm_enter_smm() local
4400 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa); in svm_enter_smm()
4402 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_enter_smm()
4403 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_enter_smm()
4404 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_enter_smm()
4406 ret = nested_svm_simple_vmexit(svm, SVM_EXIT_SW); in svm_enter_smm()
4422 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), in svm_enter_smm()
4429 &svm->vmcb01.ptr->save); in svm_enter_smm()
4437 struct vcpu_svm *svm = to_svm(vcpu); in svm_leave_smm() local
4462 if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL) in svm_leave_smm()
4465 if (svm_allocate_nested(svm)) in svm_leave_smm()
4473 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400); in svm_leave_smm()
4479 vmcb_mark_all_dirty(svm->vmcb01.ptr); in svm_leave_smm()
4482 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); in svm_leave_smm()
4483 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); in svm_leave_smm()
4489 svm->nested.nested_run_pending = 1; in svm_leave_smm()
4500 struct vcpu_svm *svm = to_svm(vcpu); in svm_enable_smi_window() local
4502 if (!gif_set(svm)) { in svm_enable_smi_window()
4504 svm_set_intercept(svm, INTERCEPT_STGI); in svm_enable_smi_window()
4650 struct vcpu_svm *svm = to_svm(vcpu); in svm_apic_init_signal_blocked() local
4652 return !gif_set(svm); in svm_apic_init_signal_blocked()