Lines matching refs: svm (arch/x86/kvm/svm/nested.c, KVM nested SVM)
35 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_inject_npf_exit() local
37 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) { in nested_svm_inject_npf_exit()
42 svm->vmcb->control.exit_code = SVM_EXIT_NPF; in nested_svm_inject_npf_exit()
43 svm->vmcb->control.exit_code_hi = 0; in nested_svm_inject_npf_exit()
44 svm->vmcb->control.exit_info_1 = (1ULL << 32); in nested_svm_inject_npf_exit()
45 svm->vmcb->control.exit_info_2 = fault->address; in nested_svm_inject_npf_exit()
48 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL; in nested_svm_inject_npf_exit()
49 svm->vmcb->control.exit_info_1 |= fault->error_code; in nested_svm_inject_npf_exit()
51 nested_svm_vmexit(svm); in nested_svm_inject_npf_exit()
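
A standalone sketch (not kernel code) of the bit layout the nested_svm_inject_npf_exit() lines above rely on: the page-fault error code goes in the low 32 bits of exit_info_1, a synthetic cause flag in the high 32 bits, and the faulting address in exit_info_2. The struct and function names here are made up for illustration.

	#include <stdint.h>
	#include <stdio.h>

	struct npf_exit {
		uint64_t exit_info_1;
		uint64_t exit_info_2;
	};

	static void inject_npf(struct npf_exit *e, uint64_t addr, uint32_t error_code)
	{
		e->exit_info_1 = 1ULL << 32;		/* placeholder "cause" flag in the high word */
		e->exit_info_2 = addr;			/* faulting guest-physical address */
		e->exit_info_1 &= ~0xffffffffULL;	/* clear the low word... */
		e->exit_info_1 |= error_code;		/* ...and install the error code */
	}

	int main(void)
	{
		struct npf_exit e;

		inject_npf(&e, 0xdeadbeef000ULL, 0x6);
		printf("exit_info_1=%#llx exit_info_2=%#llx\n",
		       (unsigned long long)e.exit_info_1,
		       (unsigned long long)e.exit_info_2);
		return 0;
	}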
56 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_pdptr() local
57 u64 cr3 = svm->nested.ctl.nested_cr3; in nested_svm_get_tdp_pdptr()
70 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_get_tdp_cr3() local
72 return svm->nested.ctl.nested_cr3; in nested_svm_get_tdp_cr3()
77 struct vcpu_svm *svm = to_svm(vcpu); in nested_svm_init_mmu_context() local
78 struct vmcb *hsave = svm->nested.hsave; in nested_svm_init_mmu_context()
84 svm->nested.ctl.nested_cr3); in nested_svm_init_mmu_context()
98 void recalc_intercepts(struct vcpu_svm *svm) in recalc_intercepts() argument
103 vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS); in recalc_intercepts()
105 if (!is_guest_mode(&svm->vcpu)) in recalc_intercepts()
108 c = &svm->vmcb->control; in recalc_intercepts()
109 h = &svm->nested.hsave->control; in recalc_intercepts()
110 g = &svm->nested.ctl; in recalc_intercepts()
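
The recalc_intercepts() lines above fetch three intercept vectors: c (the active VMCB), h (host state) and g (the cached vmcb12 controls). A minimal model of the merge they feed, assuming a made-up vector length: while L2 runs, the active vector must be the union of L0's and L1's intercepts so neither level misses an exit it depends on.

	#include <stdint.h>

	#define MAX_VEC 16		/* hypothetical vector length */

	static void merge_intercepts(uint32_t *c, const uint32_t *h,
				     const uint32_t *g)
	{
		for (int i = 0; i < MAX_VEC; i++)
			c[i] = h[i] | g[i];	/* union of L0 and L1 intercepts */
	}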
166 static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm) in nested_svm_vmrun_msrpm() argument
175 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) in nested_svm_vmrun_msrpm()
186 offset = svm->nested.ctl.msrpm_base_pa + (p * 4); in nested_svm_vmrun_msrpm()
188 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4)) in nested_svm_vmrun_msrpm()
191 svm->nested.msrpm[p] = svm->msrpm[p] | value; in nested_svm_vmrun_msrpm()
194 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm)); in nested_svm_vmrun_msrpm()
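
A userspace sketch of the merge in nested_svm_vmrun_msrpm(): for each tracked 32-bit chunk p, the bitmap used while L2 runs is L0's chunk OR'ed with the chunk L1 published at msrpm_base_pa + p * 4 (a set bit means "intercept", so the union intercepts whenever either level wants to). In the kernel the L1 chunk comes from kvm_vcpu_read_guest() and a failed read aborts VMRUN; here it is passed in directly.

	#include <stdint.h>

	/* merged[], l0[] and l1[] are illustrative stand-ins for the kernel's
	 * svm->nested.msrpm, svm->msrpm and the bitmap in L1 guest memory. */
	static void merge_msrpm(uint32_t *merged, const uint32_t *l0,
				const uint32_t *l1, const uint32_t *offsets, int n)
	{
		for (int i = 0; i < n; i++) {
			uint32_t p = offsets[i];

			merged[p] = l0[p] | l1[p];	/* set bit => intercept the MSR */
		}
	}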
201 struct vcpu_svm *svm = to_svm(vcpu); in svm_get_nested_state_pages() local
202 if (!nested_svm_vmrun_msrpm(svm)) { in svm_get_nested_state_pages()
228 static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12) in nested_vmcb_checks() argument
257 if (kvm_valid_cr4(&svm->vcpu, vmcb12->save.cr4)) in nested_vmcb_checks()
263 static void load_nested_vmcb_control(struct vcpu_svm *svm, in load_nested_vmcb_control() argument
266 copy_vmcb_control_area(&svm->nested.ctl, control); in load_nested_vmcb_control()
269 svm->nested.ctl.asid = control->asid; in load_nested_vmcb_control()
270 svm->nested.ctl.msrpm_base_pa &= ~0x0fffULL; in load_nested_vmcb_control()
271 svm->nested.ctl.iopm_base_pa &= ~0x0fffULL; in load_nested_vmcb_control()
278 void sync_nested_vmcb_control(struct vcpu_svm *svm) in sync_nested_vmcb_control() argument
281 svm->nested.ctl.event_inj = svm->vmcb->control.event_inj; in sync_nested_vmcb_control()
282 svm->nested.ctl.event_inj_err = svm->vmcb->control.event_inj_err; in sync_nested_vmcb_control()
286 if (!(svm->nested.ctl.int_ctl & V_INTR_MASKING_MASK) && in sync_nested_vmcb_control()
287 svm_is_intercept(svm, INTERCEPT_VINTR)) { in sync_nested_vmcb_control()
298 svm->nested.ctl.int_ctl &= ~mask; in sync_nested_vmcb_control()
299 svm->nested.ctl.int_ctl |= svm->vmcb->control.int_ctl & mask; in sync_nested_vmcb_control()
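
A compact model of the int_ctl write-back in sync_nested_vmcb_control() (the bit constants are SVM's real positions, but the function itself is only a sketch): only guest-owned bits (V_IRQ, V_TPR) are copied from the active VMCB back into the cached vmcb12 controls, and V_IRQ is withheld when it was merely L0's tool for catching an interrupt window rather than something L1 set.

	#include <stdbool.h>
	#include <stdint.h>

	#define V_TPR_MASK		0x0fu
	#define V_IRQ_MASK		(1u << 8)
	#define V_INTR_MASKING_MASK	(1u << 24)

	static uint32_t sync_int_ctl(uint32_t cached, uint32_t active,
				     bool vintr_for_l0_window)
	{
		uint32_t mask = V_IRQ_MASK | V_TPR_MASK;

		if (!(cached & V_INTR_MASKING_MASK) && vintr_for_l0_window)
			mask &= ~V_IRQ_MASK;	/* V_IRQ belongs to L0, don't leak it to L1 */

		return (cached & ~mask) | (active & mask);
	}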
306 static void nested_vmcb_save_pending_event(struct vcpu_svm *svm, in nested_vmcb_save_pending_event() argument
309 struct kvm_vcpu *vcpu = &svm->vcpu; in nested_vmcb_save_pending_event()
339 static inline bool nested_npt_enabled(struct vcpu_svm *svm) in nested_npt_enabled() argument
341 return svm->nested.ctl.nested_ctl & SVM_NESTED_CTL_NP_ENABLE; in nested_npt_enabled()
375 static void nested_prepare_vmcb_save(struct vcpu_svm *svm, struct vmcb *vmcb12) in nested_prepare_vmcb_save() argument
378 svm->vmcb->save.es = vmcb12->save.es; in nested_prepare_vmcb_save()
379 svm->vmcb->save.cs = vmcb12->save.cs; in nested_prepare_vmcb_save()
380 svm->vmcb->save.ss = vmcb12->save.ss; in nested_prepare_vmcb_save()
381 svm->vmcb->save.ds = vmcb12->save.ds; in nested_prepare_vmcb_save()
382 svm->vmcb->save.gdtr = vmcb12->save.gdtr; in nested_prepare_vmcb_save()
383 svm->vmcb->save.idtr = vmcb12->save.idtr; in nested_prepare_vmcb_save()
384 kvm_set_rflags(&svm->vcpu, vmcb12->save.rflags); in nested_prepare_vmcb_save()
385 svm_set_efer(&svm->vcpu, vmcb12->save.efer); in nested_prepare_vmcb_save()
386 svm_set_cr0(&svm->vcpu, vmcb12->save.cr0); in nested_prepare_vmcb_save()
387 svm_set_cr4(&svm->vcpu, vmcb12->save.cr4); in nested_prepare_vmcb_save()
388 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = vmcb12->save.cr2; in nested_prepare_vmcb_save()
389 kvm_rax_write(&svm->vcpu, vmcb12->save.rax); in nested_prepare_vmcb_save()
390 kvm_rsp_write(&svm->vcpu, vmcb12->save.rsp); in nested_prepare_vmcb_save()
391 kvm_rip_write(&svm->vcpu, vmcb12->save.rip); in nested_prepare_vmcb_save()
394 svm->vmcb->save.rax = vmcb12->save.rax; in nested_prepare_vmcb_save()
395 svm->vmcb->save.rsp = vmcb12->save.rsp; in nested_prepare_vmcb_save()
396 svm->vmcb->save.rip = vmcb12->save.rip; in nested_prepare_vmcb_save()
397 svm->vmcb->save.dr7 = vmcb12->save.dr7; in nested_prepare_vmcb_save()
398 svm->vcpu.arch.dr6 = vmcb12->save.dr6; in nested_prepare_vmcb_save()
399 svm->vmcb->save.cpl = vmcb12->save.cpl; in nested_prepare_vmcb_save()
402 static void nested_prepare_vmcb_control(struct vcpu_svm *svm) in nested_prepare_vmcb_control() argument
406 if (nested_npt_enabled(svm)) in nested_prepare_vmcb_control()
407 nested_svm_init_mmu_context(&svm->vcpu); in nested_prepare_vmcb_control()
409 svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset = in nested_prepare_vmcb_control()
410 svm->vcpu.arch.l1_tsc_offset + svm->nested.ctl.tsc_offset; in nested_prepare_vmcb_control()
412 svm->vmcb->control.int_ctl = in nested_prepare_vmcb_control()
413 (svm->nested.ctl.int_ctl & ~mask) | in nested_prepare_vmcb_control()
414 (svm->nested.hsave->control.int_ctl & mask); in nested_prepare_vmcb_control()
416 svm->vmcb->control.virt_ext = svm->nested.ctl.virt_ext; in nested_prepare_vmcb_control()
417 svm->vmcb->control.int_vector = svm->nested.ctl.int_vector; in nested_prepare_vmcb_control()
418 svm->vmcb->control.int_state = svm->nested.ctl.int_state; in nested_prepare_vmcb_control()
419 svm->vmcb->control.event_inj = svm->nested.ctl.event_inj; in nested_prepare_vmcb_control()
420 svm->vmcb->control.event_inj_err = svm->nested.ctl.event_inj_err; in nested_prepare_vmcb_control()
422 svm->vmcb->control.pause_filter_count = svm->nested.ctl.pause_filter_count; in nested_prepare_vmcb_control()
423 svm->vmcb->control.pause_filter_thresh = svm->nested.ctl.pause_filter_thresh; in nested_prepare_vmcb_control()
426 enter_guest_mode(&svm->vcpu); in nested_prepare_vmcb_control()
432 recalc_intercepts(svm); in nested_prepare_vmcb_control()
434 vmcb_mark_all_dirty(svm->vmcb); in nested_prepare_vmcb_control()
437 int enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb12_gpa, in enter_svm_guest_mode() argument
442 svm->nested.vmcb12_gpa = vmcb12_gpa; in enter_svm_guest_mode()
443 load_nested_vmcb_control(svm, &vmcb12->control); in enter_svm_guest_mode()
444 nested_prepare_vmcb_save(svm, vmcb12); in enter_svm_guest_mode()
445 nested_prepare_vmcb_control(svm); in enter_svm_guest_mode()
447 ret = nested_svm_load_cr3(&svm->vcpu, vmcb12->save.cr3, in enter_svm_guest_mode()
448 nested_npt_enabled(svm)); in enter_svm_guest_mode()
452 svm_set_gif(svm, true); in enter_svm_guest_mode()
457 int nested_svm_vmrun(struct vcpu_svm *svm) in nested_svm_vmrun() argument
461 struct vmcb *hsave = svm->nested.hsave; in nested_svm_vmrun()
462 struct vmcb *vmcb = svm->vmcb; in nested_svm_vmrun()
466 if (is_smm(&svm->vcpu)) { in nested_svm_vmrun()
467 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in nested_svm_vmrun()
471 vmcb12_gpa = svm->vmcb->save.rax; in nested_svm_vmrun()
472 ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map); in nested_svm_vmrun()
474 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_vmrun()
477 return kvm_skip_emulated_instruction(&svm->vcpu); in nested_svm_vmrun()
480 ret = kvm_skip_emulated_instruction(&svm->vcpu); in nested_svm_vmrun()
484 if (WARN_ON_ONCE(!svm->nested.initialized)) in nested_svm_vmrun()
487 if (!nested_vmcb_checks(svm, vmcb12)) { in nested_svm_vmrun()
495 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa, in nested_svm_vmrun()
509 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmrun()
510 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmrun()
522 hsave->save.efer = svm->vcpu.arch.efer; in nested_svm_vmrun()
523 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmrun()
524 hsave->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmrun()
525 hsave->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmrun()
526 hsave->save.rip = kvm_rip_read(&svm->vcpu); in nested_svm_vmrun()
532 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmrun()
536 svm->nested.nested_run_pending = 1; in nested_svm_vmrun()
538 if (enter_svm_guest_mode(svm, vmcb12_gpa, vmcb12)) in nested_svm_vmrun()
541 if (nested_svm_vmrun_msrpm(svm)) in nested_svm_vmrun()
545 svm->nested.nested_run_pending = 0; in nested_svm_vmrun()
547 svm->vmcb->control.exit_code = SVM_EXIT_ERR; in nested_svm_vmrun()
548 svm->vmcb->control.exit_code_hi = 0; in nested_svm_vmrun()
549 svm->vmcb->control.exit_info_1 = 0; in nested_svm_vmrun()
550 svm->vmcb->control.exit_info_2 = 0; in nested_svm_vmrun()
552 nested_svm_vmexit(svm); in nested_svm_vmrun()
555 kvm_vcpu_unmap(&svm->vcpu, &map, true); in nested_svm_vmrun()
576 int nested_svm_vmexit(struct vcpu_svm *svm) in nested_svm_vmexit() argument
580 struct vmcb *hsave = svm->nested.hsave; in nested_svm_vmexit()
581 struct vmcb *vmcb = svm->vmcb; in nested_svm_vmexit()
584 rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(svm->nested.vmcb12_gpa), &map); in nested_svm_vmexit()
587 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_vmexit()
594 leave_guest_mode(&svm->vcpu); in nested_svm_vmexit()
595 svm->nested.vmcb12_gpa = 0; in nested_svm_vmexit()
596 WARN_ON_ONCE(svm->nested.nested_run_pending); in nested_svm_vmexit()
599 svm->vcpu.arch.mp_state = KVM_MP_STATE_RUNNABLE; in nested_svm_vmexit()
609 vmcb12->save.efer = svm->vcpu.arch.efer; in nested_svm_vmexit()
610 vmcb12->save.cr0 = kvm_read_cr0(&svm->vcpu); in nested_svm_vmexit()
611 vmcb12->save.cr3 = kvm_read_cr3(&svm->vcpu); in nested_svm_vmexit()
613 vmcb12->save.cr4 = svm->vcpu.arch.cr4; in nested_svm_vmexit()
614 vmcb12->save.rflags = kvm_get_rflags(&svm->vcpu); in nested_svm_vmexit()
615 vmcb12->save.rip = kvm_rip_read(&svm->vcpu); in nested_svm_vmexit()
616 vmcb12->save.rsp = kvm_rsp_read(&svm->vcpu); in nested_svm_vmexit()
617 vmcb12->save.rax = kvm_rax_read(&svm->vcpu); in nested_svm_vmexit()
619 vmcb12->save.dr6 = svm->vcpu.arch.dr6; in nested_svm_vmexit()
629 nested_vmcb_save_pending_event(svm, vmcb12); in nested_svm_vmexit()
631 if (svm->nrips_enabled) in nested_svm_vmexit()
634 vmcb12->control.int_ctl = svm->nested.ctl.int_ctl; in nested_svm_vmexit()
635 vmcb12->control.tlb_ctl = svm->nested.ctl.tlb_ctl; in nested_svm_vmexit()
636 vmcb12->control.event_inj = svm->nested.ctl.event_inj; in nested_svm_vmexit()
637 vmcb12->control.event_inj_err = svm->nested.ctl.event_inj_err; in nested_svm_vmexit()
640 svm->vmcb->control.pause_filter_count; in nested_svm_vmexit()
642 svm->vmcb->control.pause_filter_thresh; in nested_svm_vmexit()
648 svm_set_gif(svm, false); in nested_svm_vmexit()
650 svm->vmcb->control.tsc_offset = svm->vcpu.arch.tsc_offset = in nested_svm_vmexit()
651 svm->vcpu.arch.l1_tsc_offset; in nested_svm_vmexit()
653 svm->nested.ctl.nested_cr3 = 0; in nested_svm_vmexit()
656 svm->vmcb->save.es = hsave->save.es; in nested_svm_vmexit()
657 svm->vmcb->save.cs = hsave->save.cs; in nested_svm_vmexit()
658 svm->vmcb->save.ss = hsave->save.ss; in nested_svm_vmexit()
659 svm->vmcb->save.ds = hsave->save.ds; in nested_svm_vmexit()
660 svm->vmcb->save.gdtr = hsave->save.gdtr; in nested_svm_vmexit()
661 svm->vmcb->save.idtr = hsave->save.idtr; in nested_svm_vmexit()
662 kvm_set_rflags(&svm->vcpu, hsave->save.rflags); in nested_svm_vmexit()
663 svm_set_efer(&svm->vcpu, hsave->save.efer); in nested_svm_vmexit()
664 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE); in nested_svm_vmexit()
665 svm_set_cr4(&svm->vcpu, hsave->save.cr4); in nested_svm_vmexit()
666 kvm_rax_write(&svm->vcpu, hsave->save.rax); in nested_svm_vmexit()
667 kvm_rsp_write(&svm->vcpu, hsave->save.rsp); in nested_svm_vmexit()
668 kvm_rip_write(&svm->vcpu, hsave->save.rip); in nested_svm_vmexit()
669 svm->vmcb->save.dr7 = 0; in nested_svm_vmexit()
670 svm->vmcb->save.cpl = 0; in nested_svm_vmexit()
671 svm->vmcb->control.exit_int_info = 0; in nested_svm_vmexit()
673 vmcb_mark_all_dirty(svm->vmcb); in nested_svm_vmexit()
682 kvm_vcpu_unmap(&svm->vcpu, &map, true); in nested_svm_vmexit()
684 nested_svm_uninit_mmu_context(&svm->vcpu); in nested_svm_vmexit()
686 rc = nested_svm_load_cr3(&svm->vcpu, hsave->save.cr3, false); in nested_svm_vmexit()
691 svm->vmcb->save.cr3 = hsave->save.cr3; in nested_svm_vmexit()
697 svm->vcpu.arch.nmi_injected = false; in nested_svm_vmexit()
698 kvm_clear_exception_queue(&svm->vcpu); in nested_svm_vmexit()
699 kvm_clear_interrupt_queue(&svm->vcpu); in nested_svm_vmexit()
704 int svm_allocate_nested(struct vcpu_svm *svm) in svm_allocate_nested() argument
708 if (svm->nested.initialized) in svm_allocate_nested()
714 svm->nested.hsave = page_address(hsave_page); in svm_allocate_nested()
716 svm->nested.msrpm = svm_vcpu_alloc_msrpm(); in svm_allocate_nested()
717 if (!svm->nested.msrpm) in svm_allocate_nested()
719 svm_vcpu_init_msrpm(&svm->vcpu, svm->nested.msrpm); in svm_allocate_nested()
721 svm->nested.initialized = true; in svm_allocate_nested()
729 void svm_free_nested(struct vcpu_svm *svm) in svm_free_nested() argument
731 if (!svm->nested.initialized) in svm_free_nested()
734 svm_vcpu_free_msrpm(svm->nested.msrpm); in svm_free_nested()
735 svm->nested.msrpm = NULL; in svm_free_nested()
737 __free_page(virt_to_page(svm->nested.hsave)); in svm_free_nested()
738 svm->nested.hsave = NULL; in svm_free_nested()
740 svm->nested.initialized = false; in svm_free_nested()
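
A userspace model of the svm_allocate_nested()/svm_free_nested() pairing above (sizes and names are illustrative): the initialized flag makes both calls idempotent, allocation failures unwind completely, and freed pointers are NULLed so a stale hsave or msrpm can never be dereferenced after teardown.

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdlib.h>

	struct nested_state {
		void *hsave;		/* stand-in for the hsave page */
		uint32_t *msrpm;	/* stand-in for the merged MSR bitmap */
		bool initialized;
	};

	static int nested_alloc(struct nested_state *n)
	{
		if (n->initialized)
			return 0;
		n->hsave = calloc(1, 4096);
		n->msrpm = calloc(2 * 4096 / sizeof(uint32_t), sizeof(uint32_t));
		if (!n->hsave || !n->msrpm) {
			free(n->hsave);
			free(n->msrpm);
			n->hsave = NULL;
			n->msrpm = NULL;
			return -1;	/* the kernel returns -ENOMEM here */
		}
		n->initialized = true;
		return 0;
	}

	static void nested_free(struct nested_state *n)
	{
		if (!n->initialized)
			return;
		free(n->msrpm);
		n->msrpm = NULL;
		free(n->hsave);
		n->hsave = NULL;
		n->initialized = false;
	}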
746 void svm_leave_nested(struct vcpu_svm *svm) in svm_leave_nested() argument
748 if (is_guest_mode(&svm->vcpu)) { in svm_leave_nested()
749 struct vmcb *hsave = svm->nested.hsave; in svm_leave_nested()
750 struct vmcb *vmcb = svm->vmcb; in svm_leave_nested()
752 svm->nested.nested_run_pending = 0; in svm_leave_nested()
753 leave_guest_mode(&svm->vcpu); in svm_leave_nested()
755 nested_svm_uninit_mmu_context(&svm->vcpu); in svm_leave_nested()
758 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, &svm->vcpu); in svm_leave_nested()
761 static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) in nested_svm_exit_handled_msr() argument
766 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_MSR_PROT))) in nested_svm_exit_handled_msr()
769 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX]; in nested_svm_exit_handled_msr()
771 write = svm->vmcb->control.exit_info_1 & 1; in nested_svm_exit_handled_msr()
780 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.ctl.msrpm_base_pa + offset, &value, 4)) in nested_svm_exit_handled_msr()
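
The bit math behind nested_svm_exit_handled_msr(), as a sketch: each MSR owns two adjacent bits in the permission map (read, then write), sixteen MSRs per 32-bit chunk, and the chunk index times four gives the byte offset the kernel reads from L1 memory at msrpm_base_pa. Here the chunk is passed in directly rather than read from guest memory.

	#include <stdbool.h>
	#include <stdint.h>

	static bool l1_intercepts_msr(uint32_t chunk, uint32_t msr, bool write)
	{
		uint32_t mask = 1u << (2 * (msr & 0xf) + (write ? 1 : 0));

		return chunk & mask;	/* true => #VMEXIT to L1, false => handle in L0 */
	}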
786 static int nested_svm_intercept_ioio(struct vcpu_svm *svm) in nested_svm_intercept_ioio() argument
793 if (!(vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_IOIO_PROT))) in nested_svm_intercept_ioio()
796 port = svm->vmcb->control.exit_info_1 >> 16; in nested_svm_intercept_ioio()
797 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> in nested_svm_intercept_ioio()
799 gpa = svm->nested.ctl.iopm_base_pa + (port / 8); in nested_svm_intercept_ioio()
805 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len)) in nested_svm_intercept_ioio()
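
A standalone model of the IOPM probe in nested_svm_intercept_ioio(): one bit per port, and since an access of size bytes can straddle a byte boundary, the kernel reads one or two bytes at iopm_base_pa + port / 8. Any set bit under the mask reflects the exit to L1. Here iopm[] stands in for the bitmap in L1 guest memory.

	#include <stdbool.h>
	#include <stdint.h>

	static bool l1_intercepts_io(const uint8_t *iopm, uint16_t port, int size)
	{
		int start_bit = port % 8;
		uint16_t val = iopm[port / 8];

		if (start_bit + size > 8)	/* access straddles a byte boundary */
			val |= (uint16_t)iopm[port / 8 + 1] << 8;

		uint16_t mask = (0xf >> (4 - size)) << start_bit;

		return val & mask;	/* any set bit => #VMEXIT to L1 */
	}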
811 static int nested_svm_intercept(struct vcpu_svm *svm) in nested_svm_intercept() argument
813 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_intercept()
818 vmexit = nested_svm_exit_handled_msr(svm); in nested_svm_intercept()
821 vmexit = nested_svm_intercept_ioio(svm); in nested_svm_intercept()
824 if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
829 if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
847 if (vmcb_is_intercept(&svm->nested.ctl, exit_code)) in nested_svm_intercept()
855 int nested_svm_exit_handled(struct vcpu_svm *svm) in nested_svm_exit_handled() argument
859 vmexit = nested_svm_intercept(svm); in nested_svm_exit_handled()
862 nested_svm_vmexit(svm); in nested_svm_exit_handled()
867 int nested_svm_check_permissions(struct vcpu_svm *svm) in nested_svm_check_permissions() argument
869 if (!(svm->vcpu.arch.efer & EFER_SVME) || in nested_svm_check_permissions()
870 !is_paging(&svm->vcpu)) { in nested_svm_check_permissions()
871 kvm_queue_exception(&svm->vcpu, UD_VECTOR); in nested_svm_check_permissions()
875 if (svm->vmcb->save.cpl) { in nested_svm_check_permissions()
876 kvm_inject_gp(&svm->vcpu, 0); in nested_svm_check_permissions()
883 static bool nested_exit_on_exception(struct vcpu_svm *svm) in nested_exit_on_exception() argument
885 unsigned int nr = svm->vcpu.arch.exception.nr; in nested_exit_on_exception()
887 return (svm->nested.ctl.intercepts[INTERCEPT_EXCEPTION] & BIT(nr)); in nested_exit_on_exception()
890 static void nested_svm_inject_exception_vmexit(struct vcpu_svm *svm) in nested_svm_inject_exception_vmexit() argument
892 unsigned int nr = svm->vcpu.arch.exception.nr; in nested_svm_inject_exception_vmexit()
894 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr; in nested_svm_inject_exception_vmexit()
895 svm->vmcb->control.exit_code_hi = 0; in nested_svm_inject_exception_vmexit()
897 if (svm->vcpu.arch.exception.has_error_code) in nested_svm_inject_exception_vmexit()
898 svm->vmcb->control.exit_info_1 = svm->vcpu.arch.exception.error_code; in nested_svm_inject_exception_vmexit()
905 if (svm->vcpu.arch.exception.nested_apf) in nested_svm_inject_exception_vmexit()
906 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token; in nested_svm_inject_exception_vmexit()
907 else if (svm->vcpu.arch.exception.has_payload) in nested_svm_inject_exception_vmexit()
908 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.exception.payload; in nested_svm_inject_exception_vmexit()
910 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2; in nested_svm_inject_exception_vmexit()
913 kvm_deliver_exception_payload(&svm->vcpu); in nested_svm_inject_exception_vmexit()
914 if (svm->vcpu.arch.dr7 & DR7_GD) { in nested_svm_inject_exception_vmexit()
915 svm->vcpu.arch.dr7 &= ~DR7_GD; in nested_svm_inject_exception_vmexit()
916 kvm_update_dr7(&svm->vcpu); in nested_svm_inject_exception_vmexit()
919 WARN_ON(svm->vcpu.arch.exception.has_payload); in nested_svm_inject_exception_vmexit()
921 nested_svm_vmexit(svm); in nested_svm_inject_exception_vmexit()
924 static void nested_svm_smi(struct vcpu_svm *svm) in nested_svm_smi() argument
926 svm->vmcb->control.exit_code = SVM_EXIT_SMI; in nested_svm_smi()
927 svm->vmcb->control.exit_info_1 = 0; in nested_svm_smi()
928 svm->vmcb->control.exit_info_2 = 0; in nested_svm_smi()
930 nested_svm_vmexit(svm); in nested_svm_smi()
933 static void nested_svm_nmi(struct vcpu_svm *svm) in nested_svm_nmi() argument
935 svm->vmcb->control.exit_code = SVM_EXIT_NMI; in nested_svm_nmi()
936 svm->vmcb->control.exit_info_1 = 0; in nested_svm_nmi()
937 svm->vmcb->control.exit_info_2 = 0; in nested_svm_nmi()
939 nested_svm_vmexit(svm); in nested_svm_nmi()
942 static void nested_svm_intr(struct vcpu_svm *svm) in nested_svm_intr() argument
944 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip); in nested_svm_intr()
946 svm->vmcb->control.exit_code = SVM_EXIT_INTR; in nested_svm_intr()
947 svm->vmcb->control.exit_info_1 = 0; in nested_svm_intr()
948 svm->vmcb->control.exit_info_2 = 0; in nested_svm_intr()
950 nested_svm_vmexit(svm); in nested_svm_intr()
953 static inline bool nested_exit_on_init(struct vcpu_svm *svm) in nested_exit_on_init() argument
955 return vmcb_is_intercept(&svm->nested.ctl, INTERCEPT_INIT); in nested_exit_on_init()
958 static void nested_svm_init(struct vcpu_svm *svm) in nested_svm_init() argument
960 svm->vmcb->control.exit_code = SVM_EXIT_INIT; in nested_svm_init()
961 svm->vmcb->control.exit_info_1 = 0; in nested_svm_init()
962 svm->vmcb->control.exit_info_2 = 0; in nested_svm_init()
964 nested_svm_vmexit(svm); in nested_svm_init()
970 struct vcpu_svm *svm = to_svm(vcpu); in svm_check_nested_events() local
972 kvm_event_needs_reinjection(vcpu) || svm->nested.nested_run_pending; in svm_check_nested_events()
979 if (!nested_exit_on_init(svm)) in svm_check_nested_events()
981 nested_svm_init(svm); in svm_check_nested_events()
988 if (!nested_exit_on_exception(svm)) in svm_check_nested_events()
990 nested_svm_inject_exception_vmexit(svm); in svm_check_nested_events()
997 if (!nested_exit_on_smi(svm)) in svm_check_nested_events()
999 nested_svm_smi(svm); in svm_check_nested_events()
1006 if (!nested_exit_on_nmi(svm)) in svm_check_nested_events()
1008 nested_svm_nmi(svm); in svm_check_nested_events()
1015 if (!nested_exit_on_intr(svm)) in svm_check_nested_events()
1017 nested_svm_intr(svm); in svm_check_nested_events()
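
A schematic of the priority order that svm_check_nested_events() walks above, highest first: INIT, then exceptions, SMI, NMI, and finally external interrupts; each is reflected to L1 only when the corresponding nested_exit_on_*() intercept is set. This is a sketch of the ordering only, not the kernel's actual control flow (which also honors nested_run_pending and reinjection blocking).

	#include <stdbool.h>

	enum nested_event { EV_INIT, EV_EXCEPTION, EV_SMI, EV_NMI, EV_INTR, EV_NONE };

	static enum nested_event next_event(bool init, bool excp, bool smi,
					    bool nmi, bool intr)
	{
		if (init)
			return EV_INIT;
		if (excp)
			return EV_EXCEPTION;
		if (smi)
			return EV_SMI;
		if (nmi)
			return EV_NMI;
		if (intr)
			return EV_INTR;
		return EV_NONE;
	}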
1024 int nested_svm_exit_special(struct vcpu_svm *svm) in nested_svm_exit_special() argument
1026 u32 exit_code = svm->vmcb->control.exit_code; in nested_svm_exit_special()
1036 if (get_host_vmcb(svm)->control.intercepts[INTERCEPT_EXCEPTION] & in nested_svm_exit_special()
1040 svm->vcpu.arch.apf.host_apf_flags) in nested_svm_exit_special()
1056 struct vcpu_svm *svm; in svm_get_nested_state() local
1063 &user_kvm_nested_state->data.svm[0]; in svm_get_nested_state()
1068 svm = to_svm(vcpu); in svm_get_nested_state()
1075 kvm_state.hdr.svm.vmcb_pa = svm->nested.vmcb12_gpa; in svm_get_nested_state()
1079 if (svm->nested.nested_run_pending) in svm_get_nested_state()
1083 if (gif_set(svm)) in svm_get_nested_state()
1098 if (copy_to_user(&user_vmcb->control, &svm->nested.ctl, in svm_get_nested_state()
1101 if (copy_to_user(&user_vmcb->save, &svm->nested.hsave->save, in svm_get_nested_state()
1113 struct vcpu_svm *svm = to_svm(vcpu); in svm_set_nested_state() local
1114 struct vmcb *hsave = svm->nested.hsave; in svm_set_nested_state()
1116 &user_kvm_nested_state->data.svm[0]; in svm_set_nested_state()
1148 svm_leave_nested(svm); in svm_set_nested_state()
1149 svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET)); in svm_set_nested_state()
1153 if (!page_address_valid(vcpu, kvm_state->hdr.svm.vmcb_pa)) in svm_set_nested_state()
1196 copy_vmcb_control_area(&hsave->control, &svm->vmcb->control); in svm_set_nested_state()
1199 svm->nested.vmcb12_gpa = kvm_state->hdr.svm.vmcb_pa; in svm_set_nested_state()
1200 load_nested_vmcb_control(svm, ctl); in svm_set_nested_state()
1201 nested_prepare_vmcb_control(svm); in svm_set_nested_state()