Lines Matching refs:svm
198 static void svm_complete_interrupts(struct vcpu_svm *svm);
268 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_efer() local
282 svm_leave_nested(svm); in svm_set_efer()
283 svm_set_gif(svm, true); in svm_set_efer()
290 if (!is_smm(&svm->vcpu)) in svm_set_efer()
291 svm_free_nested(svm); in svm_set_efer()
294 int ret = svm_allocate_nested(svm); in svm_set_efer()
303 svm->vmcb->save.efer = efer | EFER_SVME; in svm_set_efer()
304 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_set_efer()
316 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_interrupt_shadow() local
319 if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) in svm_get_interrupt_shadow()
326 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_interrupt_shadow() local
329 svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
331 svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK; in svm_set_interrupt_shadow()
337 struct vcpu_svm *svm = to_svm(vcpu); in skip_emulated_instruction() local
339 if (nrips && svm->vmcb->control.next_rip != 0) { in skip_emulated_instruction()
341 svm->next_rip = svm->vmcb->control.next_rip; in skip_emulated_instruction()
344 if (!svm->next_rip) { in skip_emulated_instruction()
348 kvm_rip_write(vcpu, svm->next_rip); in skip_emulated_instruction()
357 struct vcpu_svm *svm = to_svm(vcpu); in svm_queue_exception() local
362 kvm_deliver_exception_payload(&svm->vcpu); in svm_queue_exception()
365 unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
374 (void)skip_emulated_instruction(&svm->vcpu); in svm_queue_exception()
375 rip = kvm_rip_read(&svm->vcpu); in svm_queue_exception()
376 svm->int3_rip = rip + svm->vmcb->save.cs.base; in svm_queue_exception()
377 svm->int3_injected = rip - old_rip; in svm_queue_exception()
380 svm->vmcb->control.event_inj = nr in svm_queue_exception()
384 svm->vmcb->control.event_inj_err = error_code; in svm_queue_exception()
590 struct vcpu_svm *svm = to_svm(vcpu); in set_shadow_msr_intercept() local
598 set_bit(slot, svm->shadow_msr_intercept.read); in set_shadow_msr_intercept()
600 clear_bit(slot, svm->shadow_msr_intercept.read); in set_shadow_msr_intercept()
603 set_bit(slot, svm->shadow_msr_intercept.write); in set_shadow_msr_intercept()
605 clear_bit(slot, svm->shadow_msr_intercept.write); in set_shadow_msr_intercept()
705 struct vcpu_svm *svm = to_svm(vcpu); in svm_msr_filter_changed() local
715 u32 read = test_bit(i, svm->shadow_msr_intercept.read); in svm_msr_filter_changed()
716 u32 write = test_bit(i, svm->shadow_msr_intercept.write); in svm_msr_filter_changed()
718 set_msr_interception_bitmap(vcpu, svm->msrpm, msr, read, write); in svm_msr_filter_changed()
767 struct vcpu_svm *svm = to_svm(vcpu); in svm_enable_lbrv() local
769 svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK; in svm_enable_lbrv()
770 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1); in svm_enable_lbrv()
771 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1); in svm_enable_lbrv()
772 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1); in svm_enable_lbrv()
773 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1); in svm_enable_lbrv()
778 struct vcpu_svm *svm = to_svm(vcpu); in svm_disable_lbrv() local
780 svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK; in svm_disable_lbrv()
781 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0); in svm_disable_lbrv()
782 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0); in svm_disable_lbrv()
783 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 0, 0); in svm_disable_lbrv()
784 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 0, 0); in svm_disable_lbrv()
787 void disable_nmi_singlestep(struct vcpu_svm *svm) in disable_nmi_singlestep() argument
789 svm->nmi_singlestep = false; in disable_nmi_singlestep()
791 if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) { in disable_nmi_singlestep()
793 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in disable_nmi_singlestep()
794 svm->vmcb->save.rflags &= ~X86_EFLAGS_TF; in disable_nmi_singlestep()
795 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in disable_nmi_singlestep()
796 svm->vmcb->save.rflags &= ~X86_EFLAGS_RF; in disable_nmi_singlestep()
802 struct vcpu_svm *svm = to_svm(vcpu); in grow_ple_window() local
803 struct vmcb_control_area *control = &svm->vmcb->control; in grow_ple_window()
812 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in grow_ple_window()
820 struct vcpu_svm *svm = to_svm(vcpu); in shrink_ple_window() local
821 struct vmcb_control_area *control = &svm->vmcb->control; in shrink_ple_window()
830 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in shrink_ple_window()
1067 struct vcpu_svm *svm = to_svm(vcpu); in svm_write_l1_tsc_offset() local
1072 g_tsc_offset = svm->vmcb->control.tsc_offset - in svm_write_l1_tsc_offset()
1073 svm->nested.hsave->control.tsc_offset; in svm_write_l1_tsc_offset()
1074 svm->nested.hsave->control.tsc_offset = offset; in svm_write_l1_tsc_offset()
1078 svm->vmcb->control.tsc_offset - g_tsc_offset, in svm_write_l1_tsc_offset()
1081 svm->vmcb->control.tsc_offset = offset + g_tsc_offset; in svm_write_l1_tsc_offset()
1083 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in svm_write_l1_tsc_offset()
1084 return svm->vmcb->control.tsc_offset; in svm_write_l1_tsc_offset()
1087 static void svm_check_invpcid(struct vcpu_svm *svm) in svm_check_invpcid() argument
1096 svm_set_intercept(svm, INTERCEPT_INVPCID); in svm_check_invpcid()
1098 svm_clr_intercept(svm, INTERCEPT_INVPCID); in svm_check_invpcid()
1102 static void init_vmcb(struct vcpu_svm *svm) in init_vmcb() argument
1104 struct vmcb_control_area *control = &svm->vmcb->control; in init_vmcb()
1105 struct vmcb_save_area *save = &svm->vmcb->save; in init_vmcb()
1107 svm->vcpu.arch.hflags = 0; in init_vmcb()
1109 svm_set_intercept(svm, INTERCEPT_CR0_READ); in init_vmcb()
1110 svm_set_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1111 svm_set_intercept(svm, INTERCEPT_CR4_READ); in init_vmcb()
1112 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); in init_vmcb()
1113 svm_set_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1114 svm_set_intercept(svm, INTERCEPT_CR4_WRITE); in init_vmcb()
1115 if (!kvm_vcpu_apicv_active(&svm->vcpu)) in init_vmcb()
1116 svm_set_intercept(svm, INTERCEPT_CR8_WRITE); in init_vmcb()
1118 set_dr_intercepts(svm); in init_vmcb()
1120 set_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1121 set_exception_intercept(svm, UD_VECTOR); in init_vmcb()
1122 set_exception_intercept(svm, MC_VECTOR); in init_vmcb()
1123 set_exception_intercept(svm, AC_VECTOR); in init_vmcb()
1124 set_exception_intercept(svm, DB_VECTOR); in init_vmcb()
1132 set_exception_intercept(svm, GP_VECTOR); in init_vmcb()
1134 svm_set_intercept(svm, INTERCEPT_INTR); in init_vmcb()
1135 svm_set_intercept(svm, INTERCEPT_NMI); in init_vmcb()
1136 svm_set_intercept(svm, INTERCEPT_SMI); in init_vmcb()
1137 svm_set_intercept(svm, INTERCEPT_SELECTIVE_CR0); in init_vmcb()
1138 svm_set_intercept(svm, INTERCEPT_RDPMC); in init_vmcb()
1139 svm_set_intercept(svm, INTERCEPT_CPUID); in init_vmcb()
1140 svm_set_intercept(svm, INTERCEPT_INVD); in init_vmcb()
1141 svm_set_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1142 svm_set_intercept(svm, INTERCEPT_INVLPGA); in init_vmcb()
1143 svm_set_intercept(svm, INTERCEPT_IOIO_PROT); in init_vmcb()
1144 svm_set_intercept(svm, INTERCEPT_MSR_PROT); in init_vmcb()
1145 svm_set_intercept(svm, INTERCEPT_TASK_SWITCH); in init_vmcb()
1146 svm_set_intercept(svm, INTERCEPT_SHUTDOWN); in init_vmcb()
1147 svm_set_intercept(svm, INTERCEPT_VMRUN); in init_vmcb()
1148 svm_set_intercept(svm, INTERCEPT_VMMCALL); in init_vmcb()
1149 svm_set_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb()
1150 svm_set_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb()
1151 svm_set_intercept(svm, INTERCEPT_STGI); in init_vmcb()
1152 svm_set_intercept(svm, INTERCEPT_CLGI); in init_vmcb()
1153 svm_set_intercept(svm, INTERCEPT_SKINIT); in init_vmcb()
1154 svm_set_intercept(svm, INTERCEPT_WBINVD); in init_vmcb()
1155 svm_set_intercept(svm, INTERCEPT_XSETBV); in init_vmcb()
1156 svm_set_intercept(svm, INTERCEPT_RDPRU); in init_vmcb()
1157 svm_set_intercept(svm, INTERCEPT_RSM); in init_vmcb()
1159 if (!kvm_mwait_in_guest(svm->vcpu.kvm)) { in init_vmcb()
1160 svm_set_intercept(svm, INTERCEPT_MONITOR); in init_vmcb()
1161 svm_set_intercept(svm, INTERCEPT_MWAIT); in init_vmcb()
1164 if (!kvm_hlt_in_guest(svm->vcpu.kvm)) in init_vmcb()
1165 svm_set_intercept(svm, INTERCEPT_HLT); in init_vmcb()
1168 control->msrpm_base_pa = __sme_set(__pa(svm->msrpm)); in init_vmcb()
1190 svm_set_efer(&svm->vcpu, 0); in init_vmcb()
1192 kvm_set_rflags(&svm->vcpu, 2); in init_vmcb()
1194 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip; in init_vmcb()
1200 svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET); in init_vmcb()
1201 kvm_mmu_reset_context(&svm->vcpu); in init_vmcb()
1209 svm_clr_intercept(svm, INTERCEPT_INVLPG); in init_vmcb()
1210 clr_exception_intercept(svm, PF_VECTOR); in init_vmcb()
1211 svm_clr_intercept(svm, INTERCEPT_CR3_READ); in init_vmcb()
1212 svm_clr_intercept(svm, INTERCEPT_CR3_WRITE); in init_vmcb()
1213 save->g_pat = svm->vcpu.arch.pat; in init_vmcb()
1217 svm->asid_generation = 0; in init_vmcb()
1219 svm->nested.vmcb12_gpa = 0; in init_vmcb()
1220 svm->vcpu.arch.hflags = 0; in init_vmcb()
1222 if (!kvm_pause_in_guest(svm->vcpu.kvm)) { in init_vmcb()
1226 svm_set_intercept(svm, INTERCEPT_PAUSE); in init_vmcb()
1228 svm_clr_intercept(svm, INTERCEPT_PAUSE); in init_vmcb()
1231 svm_check_invpcid(svm); in init_vmcb()
1233 if (kvm_vcpu_apicv_active(&svm->vcpu)) in init_vmcb()
1234 avic_init_vmcb(svm); in init_vmcb()
1241 svm_clr_intercept(svm, INTERCEPT_VMLOAD); in init_vmcb()
1242 svm_clr_intercept(svm, INTERCEPT_VMSAVE); in init_vmcb()
1243 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK; in init_vmcb()
1247 svm_clr_intercept(svm, INTERCEPT_STGI); in init_vmcb()
1248 svm_clr_intercept(svm, INTERCEPT_CLGI); in init_vmcb()
1249 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK; in init_vmcb()
1252 if (sev_guest(svm->vcpu.kvm)) { in init_vmcb()
1253 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE; in init_vmcb()
1254 clr_exception_intercept(svm, UD_VECTOR); in init_vmcb()
1257 vmcb_mark_all_dirty(svm->vmcb); in init_vmcb()
1259 enable_gif(svm); in init_vmcb()
1265 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_reset() local
1269 svm->spec_ctrl = 0; in svm_vcpu_reset()
1270 svm->virt_spec_ctrl = 0; in svm_vcpu_reset()
1273 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE | in svm_vcpu_reset()
1275 if (kvm_vcpu_is_reset_bsp(&svm->vcpu)) in svm_vcpu_reset()
1276 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP; in svm_vcpu_reset()
1278 init_vmcb(svm); in svm_vcpu_reset()
1284 avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE); in svm_vcpu_reset()
1289 struct vcpu_svm *svm; in svm_create_vcpu() local
1294 svm = to_svm(vcpu); in svm_create_vcpu()
1301 err = avic_init_vcpu(svm); in svm_create_vcpu()
1309 svm->avic_is_running = true; in svm_create_vcpu()
1311 svm->msrpm = svm_vcpu_alloc_msrpm(); in svm_create_vcpu()
1312 if (!svm->msrpm) { in svm_create_vcpu()
1317 svm_vcpu_init_msrpm(vcpu, svm->msrpm); in svm_create_vcpu()
1319 svm->vmcb = page_address(vmcb_page); in svm_create_vcpu()
1320 svm->vmcb_pa = __sme_set(page_to_pfn(vmcb_page) << PAGE_SHIFT); in svm_create_vcpu()
1321 svm->asid_generation = 0; in svm_create_vcpu()
1322 init_vmcb(svm); in svm_create_vcpu()
1345 struct vcpu_svm *svm = to_svm(vcpu); in svm_free_vcpu() local
1352 svm_clear_current_vmcb(svm->vmcb); in svm_free_vcpu()
1354 svm_free_nested(svm); in svm_free_vcpu()
1356 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); in svm_free_vcpu()
1357 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); in svm_free_vcpu()
1362 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_load() local
1367 svm->asid_generation = 0; in svm_vcpu_load()
1368 vmcb_mark_all_dirty(svm->vmcb); in svm_vcpu_load()
1374 savesegment(fs, svm->host.fs); in svm_vcpu_load()
1375 savesegment(gs, svm->host.gs); in svm_vcpu_load()
1376 svm->host.ldt = kvm_read_ldt(); in svm_vcpu_load()
1379 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); in svm_vcpu_load()
1390 wrmsrl(MSR_TSC_AUX, svm->tsc_aux); in svm_vcpu_load()
1392 if (sd->current_vmcb != svm->vmcb) { in svm_vcpu_load()
1393 sd->current_vmcb = svm->vmcb; in svm_vcpu_load()
1401 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_put() local
1407 kvm_load_ldt(svm->host.ldt); in svm_vcpu_put()
1409 loadsegment(fs, svm->host.fs); in svm_vcpu_put()
1411 load_gs_index(svm->host.gs); in svm_vcpu_put()
1414 loadsegment(gs, svm->host.gs); in svm_vcpu_put()
1418 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]); in svm_vcpu_put()
1423 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_rflags() local
1424 unsigned long rflags = svm->vmcb->save.rflags; in svm_get_rflags()
1426 if (svm->nmi_singlestep) { in svm_get_rflags()
1428 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF)) in svm_get_rflags()
1430 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF)) in svm_get_rflags()
1461 static void svm_set_vintr(struct vcpu_svm *svm) in svm_set_vintr() argument
1466 WARN_ON(kvm_vcpu_apicv_active(&svm->vcpu)); in svm_set_vintr()
1467 svm_set_intercept(svm, INTERCEPT_VINTR); in svm_set_vintr()
1473 control = &svm->vmcb->control; in svm_set_vintr()
1478 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_set_vintr()
1481 static void svm_clear_vintr(struct vcpu_svm *svm) in svm_clear_vintr() argument
1484 svm_clr_intercept(svm, INTERCEPT_VINTR); in svm_clear_vintr()
1487 svm->vmcb->control.int_ctl &= mask; in svm_clear_vintr()
1488 if (is_guest_mode(&svm->vcpu)) { in svm_clear_vintr()
1489 svm->nested.hsave->control.int_ctl &= mask; in svm_clear_vintr()
1491 WARN_ON((svm->vmcb->control.int_ctl & V_TPR_MASK) != in svm_clear_vintr()
1492 (svm->nested.ctl.int_ctl & V_TPR_MASK)); in svm_clear_vintr()
1493 svm->vmcb->control.int_ctl |= svm->nested.ctl.int_ctl & ~mask; in svm_clear_vintr()
1496 vmcb_mark_dirty(svm->vmcb, VMCB_INTR); in svm_clear_vintr()
1602 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_idt() local
1604 dt->size = svm->vmcb->save.idtr.limit; in svm_get_idt()
1605 dt->address = svm->vmcb->save.idtr.base; in svm_get_idt()
1610 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_idt() local
1612 svm->vmcb->save.idtr.limit = dt->size; in svm_set_idt()
1613 svm->vmcb->save.idtr.base = dt->address; in svm_set_idt()
1614 vmcb_mark_dirty(svm->vmcb, VMCB_DT); in svm_set_idt()
1619 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_gdt() local
1621 dt->size = svm->vmcb->save.gdtr.limit; in svm_get_gdt()
1622 dt->address = svm->vmcb->save.gdtr.base; in svm_get_gdt()
1627 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_gdt() local
1629 svm->vmcb->save.gdtr.limit = dt->size; in svm_set_gdt()
1630 svm->vmcb->save.gdtr.base = dt->address; in svm_set_gdt()
1631 vmcb_mark_dirty(svm->vmcb, VMCB_DT); in svm_set_gdt()
1634 static void update_cr0_intercept(struct vcpu_svm *svm) in update_cr0_intercept() argument
1636 ulong gcr0 = svm->vcpu.arch.cr0; in update_cr0_intercept()
1637 u64 *hcr0 = &svm->vmcb->save.cr0; in update_cr0_intercept()
1642 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in update_cr0_intercept()
1645 svm_clr_intercept(svm, INTERCEPT_CR0_READ); in update_cr0_intercept()
1646 svm_clr_intercept(svm, INTERCEPT_CR0_WRITE); in update_cr0_intercept()
1648 svm_set_intercept(svm, INTERCEPT_CR0_READ); in update_cr0_intercept()
1649 svm_set_intercept(svm, INTERCEPT_CR0_WRITE); in update_cr0_intercept()
1655 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_cr0() local
1661 svm->vmcb->save.efer |= EFER_LMA | EFER_LME; in svm_set_cr0()
1666 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME); in svm_set_cr0()
1682 svm->vmcb->save.cr0 = cr0; in svm_set_cr0()
1683 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_set_cr0()
1684 update_cr0_intercept(svm); in svm_set_cr0()
1710 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_segment() local
1733 svm->vmcb->save.cpl = (var->dpl & 3); in svm_set_segment()
1735 vmcb_mark_dirty(svm->vmcb, VMCB_SEG); in svm_set_segment()
1740 struct vcpu_svm *svm = to_svm(vcpu); in update_exception_bitmap() local
1742 clr_exception_intercept(svm, BP_VECTOR); in update_exception_bitmap()
1746 set_exception_intercept(svm, BP_VECTOR); in update_exception_bitmap()
1750 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) in new_asid() argument
1755 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID; in new_asid()
1758 svm->asid_generation = sd->asid_generation; in new_asid()
1759 svm->vmcb->control.asid = sd->next_asid++; in new_asid()
1761 vmcb_mark_dirty(svm->vmcb, VMCB_ASID); in new_asid()
1764 static void svm_set_dr6(struct vcpu_svm *svm, unsigned long value) in svm_set_dr6() argument
1766 struct vmcb *vmcb = svm->vmcb; in svm_set_dr6()
1776 struct vcpu_svm *svm = to_svm(vcpu); in svm_sync_dirty_debug_regs() local
1786 vcpu->arch.dr6 = svm->vmcb->save.dr6; in svm_sync_dirty_debug_regs()
1787 vcpu->arch.dr7 = svm->vmcb->save.dr7; in svm_sync_dirty_debug_regs()
1789 set_dr_intercepts(svm); in svm_sync_dirty_debug_regs()
1794 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_dr7() local
1796 svm->vmcb->save.dr7 = value; in svm_set_dr7()
1797 vmcb_mark_dirty(svm->vmcb, VMCB_DR); in svm_set_dr7()
1800 static int pf_interception(struct vcpu_svm *svm) in pf_interception() argument
1802 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); in pf_interception()
1803 u64 error_code = svm->vmcb->control.exit_info_1; in pf_interception()
1805 return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address, in pf_interception()
1807 svm->vmcb->control.insn_bytes : NULL, in pf_interception()
1808 svm->vmcb->control.insn_len); in pf_interception()
1811 static int npf_interception(struct vcpu_svm *svm) in npf_interception() argument
1813 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2); in npf_interception()
1814 u64 error_code = svm->vmcb->control.exit_info_1; in npf_interception()
1817 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code, in npf_interception()
1819 svm->vmcb->control.insn_bytes : NULL, in npf_interception()
1820 svm->vmcb->control.insn_len); in npf_interception()
1823 static int db_interception(struct vcpu_svm *svm) in db_interception() argument
1825 struct kvm_run *kvm_run = svm->vcpu.run; in db_interception()
1826 struct kvm_vcpu *vcpu = &svm->vcpu; in db_interception()
1828 if (!(svm->vcpu.guest_debug & in db_interception()
1830 !svm->nmi_singlestep) { in db_interception()
1831 u32 payload = (svm->vmcb->save.dr6 ^ DR6_RTM) & ~DR6_FIXED_1; in db_interception()
1832 kvm_queue_exception_p(&svm->vcpu, DB_VECTOR, payload); in db_interception()
1836 if (svm->nmi_singlestep) { in db_interception()
1837 disable_nmi_singlestep(svm); in db_interception()
1842 if (svm->vcpu.guest_debug & in db_interception()
1845 kvm_run->debug.arch.dr6 = svm->vmcb->save.dr6; in db_interception()
1846 kvm_run->debug.arch.dr7 = svm->vmcb->save.dr7; in db_interception()
1848 svm->vmcb->save.cs.base + svm->vmcb->save.rip; in db_interception()
1856 static int bp_interception(struct vcpu_svm *svm) in bp_interception() argument
1858 struct kvm_run *kvm_run = svm->vcpu.run; in bp_interception()
1861 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip; in bp_interception()
1866 static int ud_interception(struct vcpu_svm *svm) in ud_interception() argument
1868 return handle_ud(&svm->vcpu); in ud_interception()
1871 static int ac_interception(struct vcpu_svm *svm) in ac_interception() argument
1873 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0); in ac_interception()
1877 static int gp_interception(struct vcpu_svm *svm) in gp_interception() argument
1879 struct kvm_vcpu *vcpu = &svm->vcpu; in gp_interception()
1880 u32 error_code = svm->vmcb->control.exit_info_1; in gp_interception()
1953 static void svm_handle_mce(struct vcpu_svm *svm) in svm_handle_mce() argument
1962 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu); in svm_handle_mce()
1974 static int mc_interception(struct vcpu_svm *svm) in mc_interception() argument
1979 static int shutdown_interception(struct vcpu_svm *svm) in shutdown_interception() argument
1981 struct kvm_run *kvm_run = svm->vcpu.run; in shutdown_interception()
1987 clear_page(svm->vmcb); in shutdown_interception()
1988 init_vmcb(svm); in shutdown_interception()
1994 static int io_interception(struct vcpu_svm *svm) in io_interception() argument
1996 struct kvm_vcpu *vcpu = &svm->vcpu; in io_interception()
1997 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */ in io_interception()
2001 ++svm->vcpu.stat.io_exits; in io_interception()
2009 svm->next_rip = svm->vmcb->control.exit_info_2; in io_interception()
2011 return kvm_fast_pio(&svm->vcpu, size, port, in); in io_interception()
2014 static int nmi_interception(struct vcpu_svm *svm) in nmi_interception() argument
2019 static int intr_interception(struct vcpu_svm *svm) in intr_interception() argument
2021 ++svm->vcpu.stat.irq_exits; in intr_interception()
2025 static int nop_on_interception(struct vcpu_svm *svm) in nop_on_interception() argument
2030 static int halt_interception(struct vcpu_svm *svm) in halt_interception() argument
2032 return kvm_emulate_halt(&svm->vcpu); in halt_interception()
2035 static int vmmcall_interception(struct vcpu_svm *svm) in vmmcall_interception() argument
2037 return kvm_emulate_hypercall(&svm->vcpu); in vmmcall_interception()
2040 static int vmload_interception(struct vcpu_svm *svm) in vmload_interception() argument
2046 if (nested_svm_check_permissions(svm)) in vmload_interception()
2049 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); in vmload_interception()
2052 kvm_inject_gp(&svm->vcpu, 0); in vmload_interception()
2058 ret = kvm_skip_emulated_instruction(&svm->vcpu); in vmload_interception()
2060 nested_svm_vmloadsave(nested_vmcb, svm->vmcb); in vmload_interception()
2061 kvm_vcpu_unmap(&svm->vcpu, &map, true); in vmload_interception()
2066 static int vmsave_interception(struct vcpu_svm *svm) in vmsave_interception() argument
2072 if (nested_svm_check_permissions(svm)) in vmsave_interception()
2075 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->vmcb->save.rax), &map); in vmsave_interception()
2078 kvm_inject_gp(&svm->vcpu, 0); in vmsave_interception()
2084 ret = kvm_skip_emulated_instruction(&svm->vcpu); in vmsave_interception()
2086 nested_svm_vmloadsave(svm->vmcb, nested_vmcb); in vmsave_interception()
2087 kvm_vcpu_unmap(&svm->vcpu, &map, true); in vmsave_interception()
2092 static int vmrun_interception(struct vcpu_svm *svm) in vmrun_interception() argument
2094 if (nested_svm_check_permissions(svm)) in vmrun_interception()
2097 return nested_svm_vmrun(svm); in vmrun_interception()
2100 void svm_set_gif(struct vcpu_svm *svm, bool value) in svm_set_gif() argument
2109 if (vgif_enabled(svm)) in svm_set_gif()
2110 svm_clr_intercept(svm, INTERCEPT_STGI); in svm_set_gif()
2111 if (svm_is_intercept(svm, INTERCEPT_VINTR)) in svm_set_gif()
2112 svm_clear_vintr(svm); in svm_set_gif()
2114 enable_gif(svm); in svm_set_gif()
2115 if (svm->vcpu.arch.smi_pending || in svm_set_gif()
2116 svm->vcpu.arch.nmi_pending || in svm_set_gif()
2117 kvm_cpu_has_injectable_intr(&svm->vcpu)) in svm_set_gif()
2118 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_set_gif()
2120 disable_gif(svm); in svm_set_gif()
2127 if (!vgif_enabled(svm)) in svm_set_gif()
2128 svm_clear_vintr(svm); in svm_set_gif()
2132 static int stgi_interception(struct vcpu_svm *svm) in stgi_interception() argument
2136 if (nested_svm_check_permissions(svm)) in stgi_interception()
2139 ret = kvm_skip_emulated_instruction(&svm->vcpu); in stgi_interception()
2140 svm_set_gif(svm, true); in stgi_interception()
2144 static int clgi_interception(struct vcpu_svm *svm) in clgi_interception() argument
2148 if (nested_svm_check_permissions(svm)) in clgi_interception()
2151 ret = kvm_skip_emulated_instruction(&svm->vcpu); in clgi_interception()
2152 svm_set_gif(svm, false); in clgi_interception()
2156 static int invlpga_interception(struct vcpu_svm *svm) in invlpga_interception() argument
2158 struct kvm_vcpu *vcpu = &svm->vcpu; in invlpga_interception()
2160 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_rcx_read(&svm->vcpu), in invlpga_interception()
2161 kvm_rax_read(&svm->vcpu)); in invlpga_interception()
2164 kvm_mmu_invlpg(vcpu, kvm_rax_read(&svm->vcpu)); in invlpga_interception()
2166 return kvm_skip_emulated_instruction(&svm->vcpu); in invlpga_interception()
2169 static int skinit_interception(struct vcpu_svm *svm) in skinit_interception() argument
2171 trace_kvm_skinit(svm->vmcb->save.rip, kvm_rax_read(&svm->vcpu)); in skinit_interception()
2173 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in skinit_interception()
2177 static int wbinvd_interception(struct vcpu_svm *svm) in wbinvd_interception() argument
2179 return kvm_emulate_wbinvd(&svm->vcpu); in wbinvd_interception()
2182 static int xsetbv_interception(struct vcpu_svm *svm) in xsetbv_interception() argument
2184 u64 new_bv = kvm_read_edx_eax(&svm->vcpu); in xsetbv_interception()
2185 u32 index = kvm_rcx_read(&svm->vcpu); in xsetbv_interception()
2187 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) { in xsetbv_interception()
2188 return kvm_skip_emulated_instruction(&svm->vcpu); in xsetbv_interception()
2194 static int rdpru_interception(struct vcpu_svm *svm) in rdpru_interception() argument
2196 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in rdpru_interception()
2200 static int task_switch_interception(struct vcpu_svm *svm) in task_switch_interception() argument
2204 int int_type = svm->vmcb->control.exit_int_info & in task_switch_interception()
2206 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK; in task_switch_interception()
2208 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK; in task_switch_interception()
2210 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID; in task_switch_interception()
2214 tss_selector = (u16)svm->vmcb->control.exit_info_1; in task_switch_interception()
2216 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2219 else if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2230 svm->vcpu.arch.nmi_injected = false; in task_switch_interception()
2233 if (svm->vmcb->control.exit_info_2 & in task_switch_interception()
2237 (u32)svm->vmcb->control.exit_info_2; in task_switch_interception()
2239 kvm_clear_exception_queue(&svm->vcpu); in task_switch_interception()
2242 kvm_clear_interrupt_queue(&svm->vcpu); in task_switch_interception()
2253 if (!skip_emulated_instruction(&svm->vcpu)) in task_switch_interception()
2260 return kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason, in task_switch_interception()
2264 static int cpuid_interception(struct vcpu_svm *svm) in cpuid_interception() argument
2266 return kvm_emulate_cpuid(&svm->vcpu); in cpuid_interception()
2269 static int iret_interception(struct vcpu_svm *svm) in iret_interception() argument
2271 ++svm->vcpu.stat.nmi_window_exits; in iret_interception()
2272 svm_clr_intercept(svm, INTERCEPT_IRET); in iret_interception()
2273 svm->vcpu.arch.hflags |= HF_IRET_MASK; in iret_interception()
2274 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu); in iret_interception()
2275 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in iret_interception()
2279 static int invd_interception(struct vcpu_svm *svm) in invd_interception() argument
2282 return kvm_skip_emulated_instruction(&svm->vcpu); in invd_interception()
2285 static int invlpg_interception(struct vcpu_svm *svm) in invlpg_interception() argument
2288 return kvm_emulate_instruction(&svm->vcpu, 0); in invlpg_interception()
2290 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1); in invlpg_interception()
2291 return kvm_skip_emulated_instruction(&svm->vcpu); in invlpg_interception()
2294 static int emulate_on_interception(struct vcpu_svm *svm) in emulate_on_interception() argument
2296 return kvm_emulate_instruction(&svm->vcpu, 0); in emulate_on_interception()
2299 static int rsm_interception(struct vcpu_svm *svm) in rsm_interception() argument
2301 return kvm_emulate_instruction_from_buffer(&svm->vcpu, rsm_ins_bytes, 2); in rsm_interception()
2304 static int rdpmc_interception(struct vcpu_svm *svm) in rdpmc_interception() argument
2309 return emulate_on_interception(svm); in rdpmc_interception()
2311 err = kvm_rdpmc(&svm->vcpu); in rdpmc_interception()
2312 return kvm_complete_insn_gp(&svm->vcpu, err); in rdpmc_interception()
2315 static bool check_selective_cr0_intercepted(struct vcpu_svm *svm, in check_selective_cr0_intercepted() argument
2318 unsigned long cr0 = svm->vcpu.arch.cr0; in check_selective_cr0_intercepted()
2321 if (!is_guest_mode(&svm->vcpu) || in check_selective_cr0_intercepted()
2322 (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_SELECTIVE_CR0)))) in check_selective_cr0_intercepted()
2329 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE; in check_selective_cr0_intercepted()
2330 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE); in check_selective_cr0_intercepted()
2338 static int cr_interception(struct vcpu_svm *svm) in cr_interception() argument
2345 return emulate_on_interception(svm); in cr_interception()
2347 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0)) in cr_interception()
2348 return emulate_on_interception(svm); in cr_interception()
2350 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in cr_interception()
2351 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE) in cr_interception()
2354 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0; in cr_interception()
2359 val = kvm_register_read(&svm->vcpu, reg); in cr_interception()
2363 if (!check_selective_cr0_intercepted(svm, val)) in cr_interception()
2364 err = kvm_set_cr0(&svm->vcpu, val); in cr_interception()
2370 err = kvm_set_cr3(&svm->vcpu, val); in cr_interception()
2373 err = kvm_set_cr4(&svm->vcpu, val); in cr_interception()
2376 err = kvm_set_cr8(&svm->vcpu, val); in cr_interception()
2380 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
2386 val = kvm_read_cr0(&svm->vcpu); in cr_interception()
2389 val = svm->vcpu.arch.cr2; in cr_interception()
2392 val = kvm_read_cr3(&svm->vcpu); in cr_interception()
2395 val = kvm_read_cr4(&svm->vcpu); in cr_interception()
2398 val = kvm_get_cr8(&svm->vcpu); in cr_interception()
2402 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in cr_interception()
2405 kvm_register_write(&svm->vcpu, reg, val); in cr_interception()
2408 return kvm_complete_insn_gp(&svm->vcpu, err); in cr_interception()
2411 static int dr_interception(struct vcpu_svm *svm) in dr_interception() argument
2416 if (svm->vcpu.guest_debug == 0) { in dr_interception()
2422 clr_dr_intercepts(svm); in dr_interception()
2423 svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT; in dr_interception()
2428 return emulate_on_interception(svm); in dr_interception()
2430 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK; in dr_interception()
2431 dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0; in dr_interception()
2434 if (!kvm_require_dr(&svm->vcpu, dr - 16)) in dr_interception()
2436 val = kvm_register_read(&svm->vcpu, reg); in dr_interception()
2437 kvm_set_dr(&svm->vcpu, dr - 16, val); in dr_interception()
2439 if (!kvm_require_dr(&svm->vcpu, dr)) in dr_interception()
2441 kvm_get_dr(&svm->vcpu, dr, &val); in dr_interception()
2442 kvm_register_write(&svm->vcpu, reg, val); in dr_interception()
2445 return kvm_skip_emulated_instruction(&svm->vcpu); in dr_interception()
2448 static int cr8_write_interception(struct vcpu_svm *svm) in cr8_write_interception() argument
2450 struct kvm_run *kvm_run = svm->vcpu.run; in cr8_write_interception()
2453 u8 cr8_prev = kvm_get_cr8(&svm->vcpu); in cr8_write_interception()
2455 r = cr_interception(svm); in cr8_write_interception()
2456 if (lapic_in_kernel(&svm->vcpu)) in cr8_write_interception()
2458 if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) in cr8_write_interception()
2484 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_msr() local
2488 msr_info->data = svm->vmcb->save.star; in svm_get_msr()
2492 msr_info->data = svm->vmcb->save.lstar; in svm_get_msr()
2495 msr_info->data = svm->vmcb->save.cstar; in svm_get_msr()
2498 msr_info->data = svm->vmcb->save.kernel_gs_base; in svm_get_msr()
2501 msr_info->data = svm->vmcb->save.sfmask; in svm_get_msr()
2505 msr_info->data = svm->vmcb->save.sysenter_cs; in svm_get_msr()
2508 msr_info->data = svm->sysenter_eip; in svm_get_msr()
2511 msr_info->data = svm->sysenter_esp; in svm_get_msr()
2516 msr_info->data = svm->tsc_aux; in svm_get_msr()
2524 msr_info->data = svm->vmcb->save.dbgctl; in svm_get_msr()
2527 msr_info->data = svm->vmcb->save.br_from; in svm_get_msr()
2530 msr_info->data = svm->vmcb->save.br_to; in svm_get_msr()
2533 msr_info->data = svm->vmcb->save.last_excp_from; in svm_get_msr()
2536 msr_info->data = svm->vmcb->save.last_excp_to; in svm_get_msr()
2539 msr_info->data = svm->nested.hsave_msr; in svm_get_msr()
2542 msr_info->data = svm->nested.vm_cr_msr; in svm_get_msr()
2552 msr_info->data = svm->spec_ctrl; in svm_get_msr()
2559 msr_info->data = svm->virt_spec_ctrl; in svm_get_msr()
2579 msr_info->data = svm->msr_decfg; in svm_get_msr()
2587 static int rdmsr_interception(struct vcpu_svm *svm) in rdmsr_interception() argument
2589 return kvm_emulate_rdmsr(&svm->vcpu); in rdmsr_interception()
2594 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_vm_cr() local
2602 if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK) in svm_set_vm_cr()
2605 svm->nested.vm_cr_msr &= ~chg_mask; in svm_set_vm_cr()
2606 svm->nested.vm_cr_msr |= (data & chg_mask); in svm_set_vm_cr()
2608 svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK; in svm_set_vm_cr()
2619 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_msr() local
2628 svm->vmcb->save.g_pat = data; in svm_set_msr()
2629 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); in svm_set_msr()
2642 svm->spec_ctrl = data; in svm_set_msr()
2657 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1); in svm_set_msr()
2672 set_msr_interception(vcpu, svm->msrpm, MSR_IA32_PRED_CMD, 0, 1); in svm_set_msr()
2682 svm->virt_spec_ctrl = data; in svm_set_msr()
2685 svm->vmcb->save.star = data; in svm_set_msr()
2689 svm->vmcb->save.lstar = data; in svm_set_msr()
2692 svm->vmcb->save.cstar = data; in svm_set_msr()
2695 svm->vmcb->save.kernel_gs_base = data; in svm_set_msr()
2698 svm->vmcb->save.sfmask = data; in svm_set_msr()
2702 svm->vmcb->save.sysenter_cs = data; in svm_set_msr()
2705 svm->sysenter_eip = data; in svm_set_msr()
2706 svm->vmcb->save.sysenter_eip = data; in svm_set_msr()
2709 svm->sysenter_esp = data; in svm_set_msr()
2710 svm->vmcb->save.sysenter_esp = data; in svm_set_msr()
2721 svm->tsc_aux = data; in svm_set_msr()
2722 wrmsrl(MSR_TSC_AUX, svm->tsc_aux); in svm_set_msr()
2733 svm->vmcb->save.dbgctl = data; in svm_set_msr()
2734 vmcb_mark_dirty(svm->vmcb, VMCB_LBR); in svm_set_msr()
2741 svm->nested.hsave_msr = data; in svm_set_msr()
2763 svm->msr_decfg = data; in svm_set_msr()
2776 static int wrmsr_interception(struct vcpu_svm *svm) in wrmsr_interception() argument
2778 return kvm_emulate_wrmsr(&svm->vcpu); in wrmsr_interception()
2781 static int msr_interception(struct vcpu_svm *svm) in msr_interception() argument
2783 if (svm->vmcb->control.exit_info_1) in msr_interception()
2784 return wrmsr_interception(svm); in msr_interception()
2786 return rdmsr_interception(svm); in msr_interception()
2789 static int interrupt_window_interception(struct vcpu_svm *svm) in interrupt_window_interception() argument
2791 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in interrupt_window_interception()
2792 svm_clear_vintr(svm); in interrupt_window_interception()
2799 svm_toggle_avic_for_irq_window(&svm->vcpu, true); in interrupt_window_interception()
2801 ++svm->vcpu.stat.irq_window_exits; in interrupt_window_interception()
2805 static int pause_interception(struct vcpu_svm *svm) in pause_interception() argument
2807 struct kvm_vcpu *vcpu = &svm->vcpu; in pause_interception()
2817 static int nop_interception(struct vcpu_svm *svm) in nop_interception() argument
2819 return kvm_skip_emulated_instruction(&(svm->vcpu)); in nop_interception()
2822 static int monitor_interception(struct vcpu_svm *svm) in monitor_interception() argument
2825 return nop_interception(svm); in monitor_interception()
2828 static int mwait_interception(struct vcpu_svm *svm) in mwait_interception() argument
2831 return nop_interception(svm); in mwait_interception()
2834 static int invpcid_interception(struct vcpu_svm *svm) in invpcid_interception() argument
2836 struct kvm_vcpu *vcpu = &svm->vcpu; in invpcid_interception()
2850 type = svm->vmcb->control.exit_info_2; in invpcid_interception()
2851 gva = svm->vmcb->control.exit_info_1; in invpcid_interception()
2861 static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
2932 struct vcpu_svm *svm = to_svm(vcpu); in dump_vmcb() local
2933 struct vmcb_control_area *control = &svm->vmcb->control; in dump_vmcb()
2934 struct vmcb_save_area *save = &svm->vmcb->save; in dump_vmcb()
3065 struct vcpu_svm *svm = to_svm(vcpu); in handle_exit() local
3067 u32 exit_code = svm->vmcb->control.exit_code; in handle_exit()
3071 if (!svm_is_intercept(svm, INTERCEPT_CR0_WRITE)) in handle_exit()
3072 vcpu->arch.cr0 = svm->vmcb->save.cr0; in handle_exit()
3074 vcpu->arch.cr3 = svm->vmcb->save.cr3; in handle_exit()
3081 vmexit = nested_svm_exit_special(svm); in handle_exit()
3084 vmexit = nested_svm_exit_handled(svm); in handle_exit()
3090 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) { in handle_exit()
3093 = svm->vmcb->control.exit_code; in handle_exit()
3099 if (is_external_interrupt(svm->vmcb->control.exit_int_info) && in handle_exit()
3105 __func__, svm->vmcb->control.exit_int_info, in handle_exit()
3126 return msr_interception(svm); in handle_exit()
3128 return interrupt_window_interception(svm); in handle_exit()
3130 return intr_interception(svm); in handle_exit()
3132 return halt_interception(svm); in handle_exit()
3134 return npf_interception(svm); in handle_exit()
3136 return svm_exit_handlers[exit_code](svm); in handle_exit()
3147 static void pre_svm_run(struct vcpu_svm *svm) in pre_svm_run() argument
3149 struct svm_cpu_data *sd = per_cpu(svm_data, svm->vcpu.cpu); in pre_svm_run()
3151 if (sev_guest(svm->vcpu.kvm)) in pre_svm_run()
3152 return pre_sev_run(svm, svm->vcpu.cpu); in pre_svm_run()
3155 if (svm->asid_generation != sd->asid_generation) in pre_svm_run()
3156 new_asid(svm, sd); in pre_svm_run()
3161 struct vcpu_svm *svm = to_svm(vcpu); in svm_inject_nmi() local
3163 svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI; in svm_inject_nmi()
3165 svm_set_intercept(svm, INTERCEPT_IRET); in svm_inject_nmi()
3171 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_irq() local
3173 BUG_ON(!(gif_set(svm))); in svm_set_irq()
3178 svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr | in svm_set_irq()
3184 struct vcpu_svm *svm = to_svm(vcpu); in update_cr8_intercept() local
3189 svm_clr_intercept(svm, INTERCEPT_CR8_WRITE); in update_cr8_intercept()
3195 svm_set_intercept(svm, INTERCEPT_CR8_WRITE); in update_cr8_intercept()
3200 struct vcpu_svm *svm = to_svm(vcpu); in svm_nmi_blocked() local
3201 struct vmcb *vmcb = svm->vmcb; in svm_nmi_blocked()
3204 if (!gif_set(svm)) in svm_nmi_blocked()
3207 if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) in svm_nmi_blocked()
3211 (svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_nmi_blocked()
3218 struct vcpu_svm *svm = to_svm(vcpu); in svm_nmi_allowed() local
3219 if (svm->nested.nested_run_pending) in svm_nmi_allowed()
3223 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(svm)) in svm_nmi_allowed()
3231 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_nmi_mask() local
3233 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK); in svm_get_nmi_mask()
3238 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nmi_mask() local
3241 svm->vcpu.arch.hflags |= HF_NMI_MASK; in svm_set_nmi_mask()
3242 svm_set_intercept(svm, INTERCEPT_IRET); in svm_set_nmi_mask()
3244 svm->vcpu.arch.hflags &= ~HF_NMI_MASK; in svm_set_nmi_mask()
3245 svm_clr_intercept(svm, INTERCEPT_IRET); in svm_set_nmi_mask()
3251 struct vcpu_svm *svm = to_svm(vcpu); in svm_interrupt_blocked() local
3252 struct vmcb *vmcb = svm->vmcb; in svm_interrupt_blocked()
3254 if (!gif_set(svm)) in svm_interrupt_blocked()
3259 if ((svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) in svm_interrupt_blocked()
3260 ? !(svm->nested.hsave->save.rflags & X86_EFLAGS_IF) in svm_interrupt_blocked()
3265 if (nested_exit_on_intr(svm)) in svm_interrupt_blocked()
3277 struct vcpu_svm *svm = to_svm(vcpu); in svm_interrupt_allowed() local
3278 if (svm->nested.nested_run_pending) in svm_interrupt_allowed()
3285 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(svm)) in svm_interrupt_allowed()
3293 struct vcpu_svm *svm = to_svm(vcpu); in enable_irq_window() local
3303 if (vgif_enabled(svm) || gif_set(svm)) { in enable_irq_window()
3311 svm_set_vintr(svm); in enable_irq_window()
3317 struct vcpu_svm *svm = to_svm(vcpu); in enable_nmi_window() local
3319 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) in enable_nmi_window()
3323 if (!gif_set(svm)) { in enable_nmi_window()
3324 if (vgif_enabled(svm)) in enable_nmi_window()
3325 svm_set_intercept(svm, INTERCEPT_STGI); in enable_nmi_window()
3333 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu); in enable_nmi_window()
3334 svm->nmi_singlestep = true; in enable_nmi_window()
3335 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); in enable_nmi_window()
3350 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb() local
3360 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; in svm_flush_tlb()
3362 svm->asid_generation--; in svm_flush_tlb()
3367 struct vcpu_svm *svm = to_svm(vcpu); in svm_flush_tlb_gva() local
3369 invlpga(gva, svm->vmcb->control.asid); in svm_flush_tlb_gva()
3378 struct vcpu_svm *svm = to_svm(vcpu); in sync_cr8_to_lapic() local
3383 if (!svm_is_intercept(svm, INTERCEPT_CR8_WRITE)) { in sync_cr8_to_lapic()
3384 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK; in sync_cr8_to_lapic()
3391 struct vcpu_svm *svm = to_svm(vcpu); in sync_lapic_to_cr8() local
3399 svm->vmcb->control.int_ctl &= ~V_TPR_MASK; in sync_lapic_to_cr8()
3400 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK; in sync_lapic_to_cr8()
3403 static void svm_complete_interrupts(struct vcpu_svm *svm) in svm_complete_interrupts() argument
3407 u32 exitintinfo = svm->vmcb->control.exit_int_info; in svm_complete_interrupts()
3408 unsigned int3_injected = svm->int3_injected; in svm_complete_interrupts()
3410 svm->int3_injected = 0; in svm_complete_interrupts()
3416 if ((svm->vcpu.arch.hflags & HF_IRET_MASK) in svm_complete_interrupts()
3417 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) { in svm_complete_interrupts()
3418 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK); in svm_complete_interrupts()
3419 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
3422 svm->vcpu.arch.nmi_injected = false; in svm_complete_interrupts()
3423 kvm_clear_exception_queue(&svm->vcpu); in svm_complete_interrupts()
3424 kvm_clear_interrupt_queue(&svm->vcpu); in svm_complete_interrupts()
3429 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu); in svm_complete_interrupts()
3436 svm->vcpu.arch.nmi_injected = true; in svm_complete_interrupts()
3446 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip)) in svm_complete_interrupts()
3447 kvm_rip_write(&svm->vcpu, in svm_complete_interrupts()
3448 kvm_rip_read(&svm->vcpu) - in svm_complete_interrupts()
3453 u32 err = svm->vmcb->control.exit_int_info_err; in svm_complete_interrupts()
3454 kvm_requeue_exception_e(&svm->vcpu, vector, err); in svm_complete_interrupts()
3457 kvm_requeue_exception(&svm->vcpu, vector); in svm_complete_interrupts()
3460 kvm_queue_interrupt(&svm->vcpu, vector, false); in svm_complete_interrupts()
3469 struct vcpu_svm *svm = to_svm(vcpu); in svm_cancel_injection() local
3470 struct vmcb_control_area *control = &svm->vmcb->control; in svm_cancel_injection()
3475 svm_complete_interrupts(svm); in svm_cancel_injection()
3490 struct vcpu_svm *svm) in svm_vcpu_enter_exit() argument
3512 __svm_vcpu_run(svm->vmcb_pa, (unsigned long *)&svm->vcpu.arch.regs); in svm_vcpu_enter_exit()
3515 native_wrmsrl(MSR_GS_BASE, svm->host.gs_base); in svm_vcpu_enter_exit()
3517 loadsegment(fs, svm->host.fs); in svm_vcpu_enter_exit()
3519 loadsegment(gs, svm->host.gs); in svm_vcpu_enter_exit()
3545 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_run() local
3547 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_vcpu_run()
3548 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_vcpu_run()
3549 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_vcpu_run()
3557 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) { in svm_vcpu_run()
3563 disable_nmi_singlestep(svm); in svm_vcpu_run()
3567 pre_svm_run(svm); in svm_vcpu_run()
3571 svm->vmcb->save.cr2 = vcpu->arch.cr2; in svm_vcpu_run()
3577 if (unlikely(svm->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) in svm_vcpu_run()
3578 svm_set_dr6(svm, vcpu->arch.dr6); in svm_vcpu_run()
3580 svm_set_dr6(svm, DR6_FIXED_1 | DR6_RTM); in svm_vcpu_run()
3593 x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl); in svm_vcpu_run()
3595 svm_vcpu_enter_exit(vcpu, svm); in svm_vcpu_run()
3613 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); in svm_vcpu_run()
3617 x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl); in svm_vcpu_run()
3619 vcpu->arch.cr2 = svm->vmcb->save.cr2; in svm_vcpu_run()
3620 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax; in svm_vcpu_run()
3621 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp; in svm_vcpu_run()
3622 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip; in svm_vcpu_run()
3624 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
3625 kvm_before_interrupt(&svm->vcpu); in svm_vcpu_run()
3632 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI)) in svm_vcpu_run()
3633 kvm_after_interrupt(&svm->vcpu); in svm_vcpu_run()
3637 svm->next_rip = 0; in svm_vcpu_run()
3638 if (is_guest_mode(&svm->vcpu)) { in svm_vcpu_run()
3639 sync_nested_vmcb_control(svm); in svm_vcpu_run()
3640 svm->nested.nested_run_pending = 0; in svm_vcpu_run()
3643 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING; in svm_vcpu_run()
3644 vmcb_mark_all_clean(svm->vmcb); in svm_vcpu_run()
3647 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) in svm_vcpu_run()
3648 svm->vcpu.arch.apf.host_apf_flags = in svm_vcpu_run()
3660 if (unlikely(svm->vmcb->control.exit_code == in svm_vcpu_run()
3662 svm_handle_mce(svm); in svm_vcpu_run()
3664 svm_complete_interrupts(svm); in svm_vcpu_run()
3675 struct vcpu_svm *svm = to_svm(vcpu); in svm_load_mmu_pgd() local
3680 svm->vmcb->control.nested_cr3 = cr3; in svm_load_mmu_pgd()
3681 vmcb_mark_dirty(svm->vmcb, VMCB_NPT); in svm_load_mmu_pgd()
3689 svm->vmcb->save.cr3 = cr3; in svm_load_mmu_pgd()
3690 vmcb_mark_dirty(svm->vmcb, VMCB_CR); in svm_load_mmu_pgd()
3745 struct vcpu_svm *svm = to_svm(vcpu); in svm_vcpu_after_set_cpuid() local
3753 svm->nrips_enabled = kvm_cpu_cap_has(X86_FEATURE_NRIPS) && in svm_vcpu_after_set_cpuid()
3754 guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS); in svm_vcpu_after_set_cpuid()
3757 svm_check_invpcid(svm); in svm_vcpu_after_set_cpuid()
3860 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_intercept() local
3863 struct vmcb *vmcb = svm->vmcb; in svm_check_intercept()
3888 if (!(vmcb_is_intercept(&svm->nested.ctl, in svm_check_intercept()
3966 vmexit = nested_svm_exit_handled(svm); in svm_check_intercept()
3993 struct vcpu_svm *svm = to_svm(vcpu); in svm_smi_blocked() local
3996 if (!gif_set(svm)) in svm_smi_blocked()
4004 struct vcpu_svm *svm = to_svm(vcpu); in svm_smi_allowed() local
4005 if (svm->nested.nested_run_pending) in svm_smi_allowed()
4009 if (for_injection && is_guest_mode(vcpu) && nested_exit_on_smi(svm)) in svm_smi_allowed()
4017 struct vcpu_svm *svm = to_svm(vcpu); in svm_pre_enter_smm() local
4024 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa); in svm_pre_enter_smm()
4026 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; in svm_pre_enter_smm()
4027 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; in svm_pre_enter_smm()
4028 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; in svm_pre_enter_smm()
4030 ret = nested_svm_vmexit(svm); in svm_pre_enter_smm()
4039 struct vcpu_svm *svm = to_svm(vcpu); in svm_pre_leave_smm() local
4055 if (kvm_vcpu_map(&svm->vcpu, in svm_pre_leave_smm()
4059 if (svm_allocate_nested(svm)) in svm_pre_leave_smm()
4062 ret = enter_svm_guest_mode(svm, vmcb12_gpa, map.hva); in svm_pre_leave_smm()
4063 kvm_vcpu_unmap(&svm->vcpu, &map, true); in svm_pre_leave_smm()
4072 struct vcpu_svm *svm = to_svm(vcpu); in enable_smi_window() local
4074 if (!gif_set(svm)) { in enable_smi_window()
4075 if (vgif_enabled(svm)) in enable_smi_window()
4076 svm_set_intercept(svm, INTERCEPT_STGI); in enable_smi_window()
4155 struct vcpu_svm *svm = to_svm(vcpu); in svm_apic_init_signal_blocked() local
4164 return !gif_set(svm) || in svm_apic_init_signal_blocked()
4165 (vmcb_is_intercept(&svm->vmcb->control, INTERCEPT_INIT)); in svm_apic_init_signal_blocked()