Lines matching refs:svm (arch/x86/kvm/svm/nested.c)
38 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_inject_npf_exit() local
39 struct vmcb *vmcb = svm->vmcb; in nested_svm_inject_npf_exit()
55 nested_svm_vmexit(svm); in nested_svm_inject_npf_exit()
60 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_pdptr() local
61 u64 cr3 = svm->nested.ctl.nested_cr3; in nested_svm_get_tdp_pdptr()
74 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_cr3() local
76 return svm->nested.ctl.nested_cr3; in nested_svm_get_tdp_cr3()
81 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_init_mmu_context() local
92 kvm_init_shadow_npt_mmu(vcpu, X86_CR0_PG, svm->vmcb01.ptr->save.cr4, in nested_svm_init_mmu_context()
93 svm->vmcb01.ptr->save.efer, in nested_svm_init_mmu_context()
94 svm->nested.ctl.nested_cr3); in nested_svm_init_mmu_context()
107 static bool nested_vmcb_needs_vls_intercept(struct vcpu_svm *svm) in nested_vmcb_needs_vls_intercept() argument
109 if (!svm->v_vmload_vmsave_enabled) in nested_vmcb_needs_vls_intercept()
112 if (!nested_npt_enabled(svm)) in nested_vmcb_needs_vls_intercept()
115 if (!(svm->nested.ctl.virt_ext & VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK)) in nested_vmcb_needs_vls_intercept()
121 void recalc_intercepts(struct vcpu_svm *svm) in recalc_intercepts() argument
127 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in recalc_intercepts()
129 if (!is_guest_mode(&svm->vcpu)) in recalc_intercepts()
132 c = &svm->vmcb->control; in recalc_intercepts()
133 h = &svm->vmcb01.ptr->control; in recalc_intercepts()
134 g = &svm->nested.ctl; in recalc_intercepts()
162 if (nested_vmcb_needs_vls_intercept(svm)) { in recalc_intercepts()
180 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) in nested_svm_vmrun_msrpm() argument
183 (struct hv_enlightenments *)svm->nested.ctl.reserved_sw; in nested_svm_vmrun_msrpm()
194 if (!svm->nested.force_msr_bitmap_recalc && in nested_svm_vmrun_msrpm()
195 kvm_hv_hypercall_enabled(&svm->vcpu) && in nested_svm_vmrun_msrpm()
197 (svm->nested.ctl.clean & BIT(VMCB_HV_NESTED_ENLIGHTENMENTS))) in nested_svm_vmrun_msrpm()
200 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) in nested_svm_vmrun_msrpm()
216 offset = svm->nested.ctl.msrpm_base_pa + (p * 4); in nested_svm_vmrun_msrpm()
218 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4)) in nested_svm_vmrun_msrpm()
221 svm->nested.msrpm[p] = svm->msrpm[p] | value; in nested_svm_vmrun_msrpm()
224 svm->nested.force_msr_bitmap_recalc = false; in nested_svm_vmrun_msrpm()
227 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm)); in nested_svm_vmrun_msrpm()
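The nested_svm_vmrun_msrpm() hits above show how L0 builds the MSR permission bitmap it actually runs L2 with: each 32-bit word of L1's bitmap is read from guest memory at msrpm_base_pa and OR-ed into L0's own word, so an MSR access exits if either level wants the intercept, and the merged map is then installed in vmcb02 (line 227). Below is a minimal self-contained sketch of that merge; read_guest_u32() is a hypothetical stand-in for kvm_vcpu_read_guest(), and the flat walk over the whole two-page bitmap is a simplification (the real loop visits only the offsets cached in msrpm_offsets[]).

/*
 * Sketch of the bitmap merge, not the kernel code itself.
 * read_guest_u32() is a hypothetical stand-in for kvm_vcpu_read_guest().
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MSRPM_U32_WORDS (2 * 4096 / 4)  /* MSR permission map: two 4 KiB pages */

static uint32_t fake_l1_msrpm[MSRPM_U32_WORDS];  /* toy guest memory */

static bool read_guest_u32(uint64_t gpa, uint32_t *val)
{
        if (gpa / 4 >= MSRPM_U32_WORDS)
                return false;                    /* would fault */
        *val = fake_l1_msrpm[gpa / 4];           /* pretend base GPA is 0 */
        return true;
}

/* A set bit in either bitmap means "intercept this MSR access". */
static bool merge_msrpm(uint32_t *merged, const uint32_t *l0_msrpm,
                        uint64_t l1_msrpm_base_gpa)
{
        for (size_t p = 0; p < MSRPM_U32_WORDS; p++) {
                uint32_t l1_word;

                if (!read_guest_u32(l1_msrpm_base_gpa + p * 4, &l1_word))
                        return false;            /* fault reading L1's bitmap */
                merged[p] = l0_msrpm[p] | l1_word;
        }
        return true;
}

int main(void)
{
        static uint32_t l0[MSRPM_U32_WORDS], merged[MSRPM_U32_WORDS];

        l0[0] = 0x1;               /* L0 intercepts one access... */
        fake_l1_msrpm[0] = 0x2;    /* ...L1 intercepts another */
        if (merge_msrpm(merged, l0, 0))
                printf("word 0: %#x\n", merged[0]);  /* prints 0x3 */
        return 0;
}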
318 struct vcpu_svm *svm = to_svm(vcpu); in nested_vmcb_check_save() local
319 struct vmcb_save_area_cached *save = &svm->nested.save; in nested_vmcb_check_save()
326 struct vcpu_svm *svm = to_svm(vcpu); in nested_vmcb_check_controls() local
327 struct vmcb_ctrl_area_cached *ctl = &svm->nested.ctl; in nested_vmcb_check_controls()
377 void nested_copy_vmcb_control_to_cache(struct vcpu_svm *svm, in nested_copy_vmcb_control_to_cache() argument
380 __nested_copy_vmcb_control_to_cache(&svm->vcpu, &svm->nested.ctl, control); in nested_copy_vmcb_control_to_cache()
399 void nested_copy_vmcb_save_to_cache(struct vcpu_svm *svm, in nested_copy_vmcb_save_to_cache() argument
402 __nested_copy_vmcb_save_to_cache(&svm->nested.save, save); in nested_copy_vmcb_save_to_cache()
409 void nested_sync_control_from_vmcb02(struct vcpu_svm *svm) in nested_sync_control_from_vmcb02() argument
412 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj; in nested_sync_control_from_vmcb02()
413 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err; in nested_sync_control_from_vmcb02()
417 if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) && in nested_sync_control_from_vmcb02()
418 svm_is_intercept(svm, INTERCEPT_VINTR)) { in nested_sync_control_from_vmcb02()
430 if (nested_vgif_enabled(svm)) in nested_sync_control_from_vmcb02()
433 svm->nested.ctl.int_ctl &= ~mask; in nested_sync_control_from_vmcb02()
434 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask; in nested_sync_control_from_vmcb02()
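The nested_sync_control_from_vmcb02() hits above copy back into the cached vmcb12 control only the int_ctl bits L1 is entitled to see: V_TPR and (usually) V_IRQ, plus V_GIF when nested vGIF is enabled. V_IRQ is withheld when V_INTR_MASKING was off and the VINTR intercept is set, because in that case the virtual interrupt in vmcb02 was planted by L0 to detect an interrupt window, not by L1. A sketch of that masked read-modify-write follows; the bit positions are illustrative placeholders, the real encodings live in arch/x86/include/asm/svm.h.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions only; see asm/svm.h for the real layout. */
#define V_TPR_MASK              0x0fu
#define V_IRQ_MASK              (1u << 8)
#define V_GIF_MASK              (1u << 9)
#define V_INTR_MASKING_MASK     (1u << 24)

static uint32_t sync_int_ctl(uint32_t cached12_int_ctl, uint32_t vmcb02_int_ctl,
                             bool vintr_intercept_set, bool nested_vgif)
{
        uint32_t mask = V_TPR_MASK | V_IRQ_MASK;

        /* V_IRQ here belongs to L0's window-request trick, not to L1. */
        if (!(cached12_int_ctl & V_INTR_MASKING_MASK) && vintr_intercept_set)
                mask &= ~V_IRQ_MASK;

        if (nested_vgif)
                mask |= V_GIF_MASK;

        /* Keep everything outside the mask, refresh everything inside it. */
        return (cached12_int_ctl & ~mask) | (vmcb02_int_ctl & mask);
}

int main(void)
{
        uint32_t out = sync_int_ctl(V_INTR_MASKING_MASK,
                                    V_IRQ_MASK | 0x3, false, false);

        printf("%#x\n", out);  /* V_INTR_MASKING kept, V_IRQ and TPR synced */
        return 0;
}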
441 static void nested_save_pending_event_to_vmcb12(struct vcpu_svm *svm, in nested_save_pending_event_to_vmcb12() argument
444 struct kvm_vcpu *vcpu = &svm->vcpu; in nested_save_pending_event_to_vmcb12()
518 void nested_vmcb02_compute_g_pat(struct vcpu_svm *svm) in nested_vmcb02_compute_g_pat() argument
520 if (!svm->nested.vmcb02.ptr) in nested_vmcb02_compute_g_pat()
524 svm->nested.vmcb02.ptr->save.g_pat = svm->vmcb01.ptr->save.g_pat; in nested_vmcb02_compute_g_pat()
527 static void nested_vmcb02_prepare_save(struct vcpu_svm *svm, struct vmcb *vmcb12) in nested_vmcb02_prepare_save() argument
530 struct vmcb *vmcb01 = svm->vmcb01.ptr; in nested_vmcb02_prepare_save()
531 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr; in nested_vmcb02_prepare_save()
533 nested_vmcb02_compute_g_pat(svm); in nested_vmcb02_prepare_save()
536 if (svm->nested.vmcb12_gpa != svm->nested.last_vmcb12_gpa) { in nested_vmcb02_prepare_save()
538 svm->nested.last_vmcb12_gpa = svm->nested.vmcb12_gpa; in nested_vmcb02_prepare_save()
539 svm->nested.force_msr_bitmap_recalc = true; in nested_vmcb02_prepare_save()
557 kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags | X86_EFLAGS_FIXED); in nested_vmcb02_prepare_save()
559 svm_set_efer(&svm->vcpu, svm->nested.save.efer); in nested_vmcb02_prepare_save()
561 svm_set_cr0(&svm->vcpu, svm->nested.save.cr0); in nested_vmcb02_prepare_save()
562 svm_set_cr4(&svm->vcpu, svm->nested.save.cr4); in nested_vmcb02_prepare_save()
564 svm->vcpu.arch.cr2 = vmcb12->save.cr2; in nested_vmcb02_prepare_save()
566 kvm_rax_write(&svm->vcpu, vmcb12->save.rax); in nested_vmcb02_prepare_save()
567 kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp); in nested_vmcb02_prepare_save()
568 kvm_rip_write(&svm->vcpu, vmcb12->save.rip); in nested_vmcb02_prepare_save()
577 vmcb02->save.dr7 = svm->nested.save.dr7 | DR7_FIXED_1; in nested_vmcb02_prepare_save()
578 svm->vcpu.arch.dr6 = svm->nested.save.dr6 | DR6_ACTIVE_LOW; in nested_vmcb02_prepare_save()
582 if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) { in nested_vmcb02_prepare_save()
589 svm_update_lbrv(&svm->vcpu); in nested_vmcb02_prepare_save()
620 static void nested_vmcb02_prepare_control(struct vcpu_svm *svm, in nested_vmcb02_prepare_control() argument
627 struct kvm_vcpu *vcpu = &svm->vcpu; in nested_vmcb02_prepare_control()
628 struct vmcb *vmcb01 = svm->vmcb01.ptr; in nested_vmcb02_prepare_control()
629 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr; in nested_vmcb02_prepare_control()
638 if (svm->vgif_enabled && (svm->nested.ctl.int_ctl & V_GIF_ENABLE_MASK)) in nested_vmcb02_prepare_control()
654 if (nested_npt_enabled(svm)) in nested_vmcb02_prepare_control()
659 svm->nested.ctl.tsc_offset, in nested_vmcb02_prepare_control()
660 svm->tsc_ratio_msr); in nested_vmcb02_prepare_control()
664 if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) { in nested_vmcb02_prepare_control()
665 WARN_ON(!svm->tsc_scaling_enabled); in nested_vmcb02_prepare_control()
670 (svm->nested.ctl.int_ctl & int_ctl_vmcb12_bits) | in nested_vmcb02_prepare_control()
673 vmcb02->control.int_vector = svm->nested.ctl.int_vector; in nested_vmcb02_prepare_control()
674 vmcb02->control.int_state = svm->nested.ctl.int_state; in nested_vmcb02_prepare_control()
675 vmcb02->control.event_inj = svm->nested.ctl.event_inj; in nested_vmcb02_prepare_control()
676 vmcb02->control.event_inj_err = svm->nested.ctl.event_inj_err; in nested_vmcb02_prepare_control()
686 if (svm->nrips_enabled) in nested_vmcb02_prepare_control()
687 vmcb02->control.next_rip = svm->nested.ctl.next_rip; in nested_vmcb02_prepare_control()
691 svm->nmi_l1_to_l2 = is_evtinj_nmi(vmcb02->control.event_inj); in nested_vmcb02_prepare_control()
693 svm->soft_int_injected = true; in nested_vmcb02_prepare_control()
694 svm->soft_int_csbase = vmcb12_csbase; in nested_vmcb02_prepare_control()
695 svm->soft_int_old_rip = vmcb12_rip; in nested_vmcb02_prepare_control()
696 if (svm->nrips_enabled) in nested_vmcb02_prepare_control()
697 svm->soft_int_next_rip = svm->nested.ctl.next_rip; in nested_vmcb02_prepare_control()
699 svm->soft_int_next_rip = vmcb12_rip; in nested_vmcb02_prepare_control()
704 if (svm->lbrv_enabled) in nested_vmcb02_prepare_control()
706 (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK); in nested_vmcb02_prepare_control()
708 if (!nested_vmcb_needs_vls_intercept(svm)) in nested_vmcb02_prepare_control()
711 pause_count12 = svm->pause_filter_enabled ? svm->nested.ctl.pause_filter_count : 0; in nested_vmcb02_prepare_control()
712 pause_thresh12 = svm->pause_threshold_enabled ? svm->nested.ctl.pause_filter_thresh : 0; in nested_vmcb02_prepare_control()
713 if (kvm_pause_in_guest(svm->vcpu.kvm)) { in nested_vmcb02_prepare_control()
724 if (vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_PAUSE)) { in nested_vmcb02_prepare_control()
741 recalc_intercepts(svm); in nested_vmcb02_prepare_control()
759 struct vcpu_svm *svm = to_svm(vcpu); in enter_svm_guest_mode() local
762 trace_kvm_nested_vmenter(svm->vmcb->save.rip, in enter_svm_guest_mode()
780 svm->nested.vmcb12_gpa = vmcb12_gpa; in enter_svm_guest_mode()
782 WARN_ON(svm->vmcb == svm->nested.vmcb02.ptr); in enter_svm_guest_mode()
784 nested_svm_copy_common_state(svm->vmcb01.ptr, svm->nested.vmcb02.ptr); in enter_svm_guest_mode()
786 svm_switch_vmcb(svm, &svm->nested.vmcb02); in enter_svm_guest_mode()
787 nested_vmcb02_prepare_control(svm, vmcb12->save.rip, vmcb12->save.cs.base); in enter_svm_guest_mode()
788 nested_vmcb02_prepare_save(svm, vmcb12); in enter_svm_guest_mode()
790 ret = nested_svm_load_cr3(&svm->vcpu, svm->nested.save.cr3, in enter_svm_guest_mode()
791 nested_npt_enabled(svm), from_vmrun); in enter_svm_guest_mode()
798 svm_set_gif(svm, true); in enter_svm_guest_mode()
808 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_vmrun() local
813 struct vmcb *vmcb01 = svm->vmcb01.ptr; in nested_svm_vmrun()
815 if (!svm->nested.hsave_msr) { in nested_svm_vmrun()
825 vmcb12_gpa = svm->vmcb->save.rax; in nested_svm_vmrun()
838 if (WARN_ON_ONCE(!svm->nested.initialized)) in nested_svm_vmrun()
841 nested_copy_vmcb_control_to_cache(svm, &vmcb12->control); in nested_svm_vmrun()
842 nested_copy_vmcb_save_to_cache(svm, &vmcb12->save); in nested_svm_vmrun()
866 svm->nested.nested_run_pending = 1; in nested_svm_vmrun()
871 if (nested_svm_vmrun_msrpm(svm)) in nested_svm_vmrun()
875 svm->nested.nested_run_pending = 0; in nested_svm_vmrun()
876 svm->nmi_l1_to_l2 = false; in nested_svm_vmrun()
877 svm->soft_int_injected = false; in nested_svm_vmrun()
879 svm->vmcb->control.exit_code = SVM_EXIT_ERR; in nested_svm_vmrun()
880 svm->vmcb->control.exit_code_hi = 0; in nested_svm_vmrun()
881 svm->vmcb->control.exit_info_1 = 0; in nested_svm_vmrun()
882 svm->vmcb->control.exit_info_2 = 0; in nested_svm_vmrun()
884 nested_svm_vmexit(svm); in nested_svm_vmrun()
929 int nested_svm_vmexit(struct vcpu_svm *svm) in nested_svm_vmexit() argument
931 struct kvm_vcpu *vcpu = &svm->vcpu; in nested_svm_vmexit()
932 struct vmcb *vmcb01 = svm->vmcb01.ptr; in nested_svm_vmexit()
933 struct vmcb *vmcb02 = svm->nested.vmcb02.ptr; in nested_svm_vmexit()
938 rc = kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map); in nested_svm_vmexit()
949 svm->nested.vmcb12_gpa = 0; in nested_svm_vmexit()
950 WARN_ON_ONCE(svm->nested.nested_run_pending); in nested_svm_vmexit()
955 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE; in nested_svm_vmexit()
965 vmcb12->save.efer = svm->vcpu.arch.efer; in nested_svm_vmexit()
969 vmcb12->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmexit()
975 vmcb12->save.dr6 = svm->vcpu.arch.dr6; in nested_svm_vmexit()
985 nested_save_pending_event_to_vmcb12(svm, vmcb12); in nested_svm_vmexit()
987 if (svm->nrips_enabled) in nested_svm_vmexit()
990 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl; in nested_svm_vmexit()
991 vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl; in nested_svm_vmexit()
992 vmcb12->control.event_inj = svm->nested.ctl.event_inj; in nested_svm_vmexit()
993 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err; in nested_svm_vmexit()
1001 nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr); in nested_svm_vmexit()
1003 svm_switch_vmcb(svm, &svm->vmcb01); in nested_svm_vmexit()
1005 if (unlikely(svm->lbrv_enabled && (svm->nested.ctl.virt_ext & LBR_CTL_ENABLE_MASK))) { in nested_svm_vmexit()
1017 svm_set_gif(svm, false); in nested_svm_vmexit()
1020 svm->vcpu.arch.tsc_offset = svm->vcpu.arch.l1_tsc_offset; in nested_svm_vmexit()
1021 if (vmcb01->control.tsc_offset != svm->vcpu.arch.tsc_offset) { in nested_svm_vmexit()
1022 vmcb01->control.tsc_offset = svm->vcpu.arch.tsc_offset; in nested_svm_vmexit()
1026 if (svm->tsc_ratio_msr != kvm_caps.default_tsc_scaling_ratio) { in nested_svm_vmexit()
1027 WARN_ON(!svm->tsc_scaling_enabled); in nested_svm_vmexit()
1032 svm->nested.ctl.nested_cr3 = 0; in nested_svm_vmexit()
1045 svm->vcpu.arch.dr7 = DR7_FIXED_1; in nested_svm_vmexit()
1046 kvm_update_dr7(&svm->vcpu); in nested_svm_vmexit()
1069 svm->vcpu.arch.nmi_injected = false; in nested_svm_vmexit()
1080 kvm_queue_exception(&(svm->vcpu), DB_VECTOR); in nested_svm_vmexit()
1094 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_triple_fault() local
1096 if (!vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_SHUTDOWN)) in nested_svm_triple_fault()
1103 int svm_allocate_nested(struct vcpu_svm *svm) in svm_allocate_nested() argument
1107 if (svm->nested.initialized) in svm_allocate_nested()
1113 svm->nested.vmcb02.ptr = page_address(vmcb02_page); in svm_allocate_nested()
1114 svm->nested.vmcb02.pa = __sme_set(page_to_pfn(vmcb02_page) << PAGE_SHIFT); in svm_allocate_nested()
1116 svm->nested.msrpm = svm_vcpu_alloc_msrpm(); in svm_allocate_nested()
1117 if (!svm->nested.msrpm) in svm_allocate_nested()
1119 svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm); in svm_allocate_nested()
1121 svm->nested.initialized = true; in svm_allocate_nested()
1129 void svm_free_nested(struct vcpu_svm *svm) in svm_free_nested() argument
1131 if (!svm->nested.initialized) in svm_free_nested()
1134 if (WARN_ON_ONCE(svm->vmcb != svm->vmcb01.ptr)) in svm_free_nested()
1135 svm_switch_vmcb(svm, &svm->vmcb01); in svm_free_nested()
1137 svm_vcpu_free_msrpm(svm->nested.msrpm); in svm_free_nested()
1138 svm->nested.msrpm = NULL; in svm_free_nested()
1140 __free_page(virt_to_page(svm->nested.vmcb02.ptr)); in svm_free_nested()
1141 svm->nested.vmcb02.ptr = NULL; in svm_free_nested()
1150 svm->nested.last_vmcb12_gpa = INVALID_GPA; in svm_free_nested()
1152 svm->nested.initialized = false; in svm_free_nested()
1157 struct vcpu_svm *svm = to_svm(vcpu); in svm_leave_nested() local
1160 svm->nested.nested_run_pending = 0; in svm_leave_nested()
1161 svm->nested.vmcb12_gpa = INVALID_GPA; in svm_leave_nested()
1165 svm_switch_vmcb(svm, &svm->vmcb01); in svm_leave_nested()
1168 vmcb_mark_all_dirty(svm->vmcb); in svm_leave_nested()
1174 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) in nested_svm_exit_handled_msr() argument
1179 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) in nested_svm_exit_handled_msr()
1182 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in nested_svm_exit_handled_msr()
1184 write = svm->vmcb->control.exit_info_1 & 1; in nested_svm_exit_handled_msr()
1193 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4)) in nested_svm_exit_handled_msr()
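When an MSR access exits while L2 is running, the nested_svm_exit_handled_msr() lines above decide whether to reflect the exit to L1 by consulting L1's own bitmap: each MSR owns two adjacent bits in its 32-bit word (read intercept, then write intercept), which is where the `& 1` on exit_info_1 (line 1184) comes from. A sketch of just the bit test, taking the already-read bitmap word as input; locating the right word (svm_msrpm_offset() in the kernel) is omitted.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Sketch: 16 MSRs per 32-bit bitmap word, 2 bits per MSR. */
static bool l1_wants_msr_intercept(uint32_t bitmap_word, uint32_t msr, bool write)
{
        uint32_t bit = 2 * (msr & 0xf) + (write ? 1 : 0);

        return bitmap_word & (1u << bit);
}

int main(void)
{
        /* Word with the write-intercept bit set for an MSR with low nibble 1. */
        uint32_t word = 1u << (2 * 1 + 1);

        printf("read : %d\n", l1_wants_msr_intercept(word, 0xC0000081, false));
        printf("write: %d\n", l1_wants_msr_intercept(word, 0xC0000081, true));
        return 0;
}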
1199 static int nested_svm_intercept_ioio(struct vcpu_svm *svm) in nested_svm_intercept_ioio() argument
1206 if (!(vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT))) in nested_svm_intercept_ioio()
1209 port = svm->vmcb->control.exit_info_1 >> 16; in nested_svm_intercept_ioio()
1210 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> in nested_svm_intercept_ioio()
1212 gpa = svm->nested.ctl.iopm_base_pa + (port / 8); in nested_svm_intercept_ioio()
1218 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len)) in nested_svm_intercept_ioio()
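The nested_svm_intercept_ioio() path is analogous but uses one bit per port, so an access of `size` bytes starting at `port` may straddle a byte boundary; that is why the code above reads one or two bytes (iopm_len) from the guest's I/O permission map before testing the mask. A standalone sketch, with the bitmap passed in directly instead of being read from guest memory at iopm_base_pa, and the mask written as ((1 << size) - 1) rather than the kernel's equivalent 0xf >> (4 - size):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One intercept bit per port; size is 1, 2 or 4 bytes. */
static bool l1_wants_io_intercept(const uint8_t *iopm, uint16_t port,
                                  unsigned size)
{
        unsigned start_bit = port % 8;
        uint16_t mask = ((1u << size) - 1) << start_bit;
        uint16_t val = iopm[port / 8];

        if (start_bit + size > 8)             /* access straddles a byte */
                val |= (uint16_t)iopm[port / 8 + 1] << 8;

        return val & mask;
}

int main(void)
{
        static uint8_t iopm[8192 + 1];        /* 64K ports + spill for straddle */

        iopm[0x70 / 8] = 1 << (0x70 % 8);     /* intercept port 0x70 */
        printf("%d\n", l1_wants_io_intercept(iopm, 0x70, 1));  /* 1 */
        printf("%d\n", l1_wants_io_intercept(iopm, 0x71, 1));  /* 0 */
        return 0;
}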
1224 static int nested_svm_intercept(struct vcpu_svm *svm) in nested_svm_intercept() argument
1226 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_intercept()
1231 vmexit = nested_svm_exit_handled_msr(svm); in nested_svm_intercept()
1234 vmexit = nested_svm_intercept_ioio(svm); in nested_svm_intercept()
1237 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
1242 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
1260 if (vmcb12_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
1268 int nested_svm_exit_handled(struct vcpu_svm *svm) in nested_svm_exit_handled() argument
1272 vmexit = nested_svm_intercept(svm); in nested_svm_exit_handled()
1275 nested_svm_vmexit(svm); in nested_svm_exit_handled()
1298 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_is_exception_vmexit() local
1300 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(vector)); in nested_svm_is_exception_vmexit()
1306 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_inject_exception_vmexit() local
1307 struct vmcb *vmcb = svm->vmcb; in nested_svm_inject_exception_vmexit()
1336 nested_svm_vmexit(svm); in nested_svm_inject_exception_vmexit()
1339 static inline bool nested_exit_on_init(struct vcpu_svm *svm) in nested_exit_on_init() argument
1341 return vmcb12_is_intercept(&svm->nested.ctl, INTERCEPT_INIT); in nested_exit_on_init()
1347 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_nested_events() local
1353 bool block_nested_exceptions = svm->nested.nested_run_pending; in svm_check_nested_events()
1367 if (!nested_exit_on_init(svm)) in svm_check_nested_events()
1369 nested_svm_simple_vmexit(svm, SVM_EXIT_INIT); in svm_check_nested_events()
1389 if (!nested_exit_on_smi(svm)) in svm_check_nested_events()
1391 nested_svm_simple_vmexit(svm, SVM_EXIT_SMI); in svm_check_nested_events()
1398 if (!nested_exit_on_nmi(svm)) in svm_check_nested_events()
1400 nested_svm_simple_vmexit(svm, SVM_EXIT_NMI); in svm_check_nested_events()
1407 if (!nested_exit_on_intr(svm)) in svm_check_nested_events()
1409 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); in svm_check_nested_events()
1410 nested_svm_simple_vmexit(svm, SVM_EXIT_INTR); in svm_check_nested_events()
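The svm_check_nested_events() hits above walk pending events in architectural priority order (INIT first, then SMI, NMI, external interrupt); the highest-priority pending event either becomes a synthetic #VMEXIT, when the matching nested_exit_on_*() predicate finds the intercept in the cached vmcb12 controls, or is delivered to L2 directly. A condensed, hypothetical sketch of that dispatch shape (the real function also handles exceptions, #MC, and the nested_run_pending bookkeeping):

#include <stdio.h>

/* Hypothetical condensation of the priority walk; not the kernel function. */
enum evt { EVT_INIT, EVT_SMI, EVT_NMI, EVT_INTR, EVT_NONE };

static const char * const evt_name[] = { "INIT", "SMI", "NMI", "INTR" };

static enum evt next_event(unsigned pending, unsigned l1_intercepts, int *to_l1)
{
        for (enum evt e = EVT_INIT; e <= EVT_INTR; e++) {
                if (!(pending & (1u << e)))
                        continue;
                /* Highest-priority pending event wins; the intercept bit
                 * only decides where it goes. */
                *to_l1 = !!(l1_intercepts & (1u << e));
                return e;
        }
        return EVT_NONE;
}

int main(void)
{
        int to_l1 = 0;
        /* NMI and INTR pending; L1 intercepts only INTR, so the NMI (higher
         * priority) is handled first and goes straight to L2. */
        enum evt e = next_event((1u << EVT_NMI) | (1u << EVT_INTR),
                                1u << EVT_INTR, &to_l1);

        if (e != EVT_NONE)
                printf("%s -> %s\n", evt_name[e],
                       to_l1 ? "nested #VMEXIT" : "delivered to L2");
        return 0;
}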
1417 int nested_svm_exit_special(struct vcpu_svm *svm) in nested_svm_exit_special() argument
1419 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_exit_special()
1429 if (svm->vmcb01.ptr->control.intercepts[INTERCEPT_EXCEPTION] & in nested_svm_exit_special()
1433 svm->vcpu.arch.apf.host_apf_flags) in nested_svm_exit_special()
1447 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_update_tsc_ratio_msr() local
1451 svm->tsc_ratio_msr); in nested_svm_update_tsc_ratio_msr()
1495 struct vcpu_svm *svm; in svm_get_nested_state() local
1504 &user_kvm_nested_state->data.svm[0]; in svm_get_nested_state()
1509 svm = to_svm(vcpu); in svm_get_nested_state()
1516 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa; in svm_get_nested_state()
1520 if (svm->nested.nested_run_pending) in svm_get_nested_state()
1524 if (gif_set(svm)) in svm_get_nested_state()
1544 nested_copy_vmcb_cache_to_control(ctl, &svm->nested.ctl); in svm_get_nested_state()
1551 if (copy_to_user(&user_vmcb->save, &svm->vmcb01.ptr->save, in svm_get_nested_state()
1562 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nested_state() local
1564 &user_kvm_nested_state->data.svm[0]; in svm_set_nested_state()
1599 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); in svm_set_nested_state()
1603 if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa)) in svm_set_nested_state()
1655 svm->nested.vmcb02.ptr->save = svm->vmcb01.ptr->save; in svm_set_nested_state()
1657 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); in svm_set_nested_state()
1659 svm->nested.nested_run_pending = in svm_set_nested_state()
1662 svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; in svm_set_nested_state()
1664 svm_copy_vmrun_state(&svm->vmcb01.ptr->save, save); in svm_set_nested_state()
1665 nested_copy_vmcb_control_to_cache(svm, ctl); in svm_set_nested_state()
1667 svm_switch_vmcb(svm, &svm->nested.vmcb02); in svm_set_nested_state()
1668 nested_vmcb02_prepare_control(svm, svm->vmcb->save.rip, svm->vmcb->save.cs.base); in svm_set_nested_state()
1677 ret = nested_svm_load_cr3(&svm->vcpu, vcpu->arch.cr3, in svm_set_nested_state()
1678 nested_npt_enabled(svm), false); in svm_set_nested_state()
1682 svm->nested.force_msr_bitmap_recalc = true; in svm_set_nested_state()
1695 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_nested_state_pages() local
1701 !nested_npt_enabled(svm) && is_pae_paging(vcpu)) in svm_get_nested_state_pages()
1710 if (!nested_svm_vmrun_msrpm(svm)) { in svm_get_nested_state_pages()