Lines matching refs: to_vmx
468 tmp_eptp = to_vmx(vcpu)->ept_pointer; in check_ept_pointer_match()
469 } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) { in check_ept_pointer_match()
491 u64 ept_pointer = to_vmx(vcpu)->ept_pointer; in __hv_remote_flush_tlb_with_range()
519 if (VALID_PAGE(to_vmx(vcpu)->ept_pointer)) in hv_remote_flush_tlb_with_range()
551 evmcs = (struct hv_enlightened_vmcs *)to_vmx(vcpu)->loaded_vmcs->vmcs; in hv_enable_direct_tlbflush()
792 if (to_vmx(vcpu)->rmode.vm86_active) in update_exception_bitmap()
819 msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap; in msr_write_intercepted()
1132 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_prepare_switch_to_guest()
1314 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load_vmcs()
1381 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load()
1409 vmx_prepare_switch_to_host(to_vmx(vcpu)); in vmx_vcpu_put()
1426 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_rflags()
1428 save_rflags = to_vmx(vcpu)->rmode.save_rflags; in vmx_get_rflags()
1431 to_vmx(vcpu)->rflags = rflags; in vmx_get_rflags()
1433 return to_vmx(vcpu)->rflags; in vmx_get_rflags()
1441 to_vmx(vcpu)->rflags = rflags; in vmx_set_rflags()
1442 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_set_rflags()
1443 to_vmx(vcpu)->rmode.save_rflags = rflags; in vmx_set_rflags()
1448 if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM) in vmx_set_rflags()
1449 to_vmx(vcpu)->emulation_required = emulation_required(vcpu); in vmx_set_rflags()
1483 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_rtit_ctl_check()
1566 to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) { in skip_emulated_instruction()
1596 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_queue_exception()
1741 uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; in vmx_feature_control_msr_valid()
1767 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_msr()
1796 msr_info->data = to_vmx(vcpu)->spec_ctrl; in vmx_get_msr()
1907 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_msr()
2051 !(to_vmx(vcpu)->msr_ia32_feature_control & in vmx_set_msr()
2059 (to_vmx(vcpu)->msr_ia32_feature_control & in vmx_set_msr()
2674 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_pmode()
2747 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_rmode()
2795 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_efer()
2803 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
2806 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
2819 vmx_segment_cache_clear(to_vmx(vcpu)); in enter_lmode()
2834 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in exit_lmode()
2842 int vpid = to_vmx(vcpu)->vpid; in vmx_flush_tlb_gva()
2914 struct vcpu_vmx *vmx = to_vmx(vcpu); in ept_update_paging_mode_cr0()
2938 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr0()
3008 to_vmx(vcpu)->ept_pointer = eptp; in vmx_set_cr3()
3027 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr4()
3102 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_segment()
3140 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_segment_base()
3144 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); in vmx_get_segment_base()
3149 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_cpl()
3181 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_segment()
3221 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); in vmx_get_cs_db_l_bits()
3683 (secondary_exec_controls_get(to_vmx(vcpu)) & in vmx_msr_bitmap_mode()
3720 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_msr_bitmap()
3763 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_guest_apic_has_interrupt()
3823 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_nested_posted_interrupt()
3849 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_posted_interrupt()
3959 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_refresh_apicv_exec_ctrl()
4272 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_reset()
4371 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING); in enable_irq_window()
4382 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING); in enable_nmi_window()
4387 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_irq()
4415 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_nmi()
4446 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_nmi_mask()
4460 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nmi_mask()
4480 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_nmi_allowed()
4484 to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) in vmx_nmi_allowed()
4494 return (!to_vmx(vcpu)->nested.nested_run_pending && in vmx_interrupt_allowed()
4529 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = in rmode_exception()
4607 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_exception_nmi()
4781 if (to_vmx(vcpu)->nested.vmxon && in handle_set_cr0()
4930 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); in handle_dr()
4974 exec_controls_setbit(to_vmx(vcpu), CPU_BASED_MOV_DR_EXITING); in vmx_sync_dirty_debug_regs()
5005 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_INTR_PENDING); in handle_interrupt_window()
5103 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_task_switch()
5172 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_ept_violation()
5223 exec_controls_clearbit(to_vmx(vcpu), CPU_BASED_VIRTUAL_NMI_PENDING); in handle_nmi_window()
5232 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invalid_guest_state()
5286 struct vcpu_vmx *vmx = to_vmx(vcpu); in grow_ple_window()
5302 struct vcpu_vmx *vmx = to_vmx(vcpu); in shrink_ple_window()
5507 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_pml_full()
5522 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_preemption_timer()
5629 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_flush_pml_buffer()
5857 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit()
6045 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_virtual_apic_mode()
6146 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_sync_pir_to_irr()
6203 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_apicv_post_state_restore()
6277 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit_irqoff()
6460 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_hv_timer()
6496 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_run()
6684 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_free_vcpu()
6966 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_cr_fixed1_bits_update()
7005 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_entry_exit_ctls_update()
7022 struct vcpu_vmx *vmx = to_vmx(vcpu); in update_intel_pt_cfg()
7091 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_cpuid_update()
7099 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= in vmx_cpuid_update()
7102 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= in vmx_cpuid_update()
7123 to_vmx(vcpu)->req_immediate_exit = true; in vmx_request_immediate_exit()
7178 vmx = to_vmx(vcpu); in vmx_set_hv_timer()
7213 to_vmx(vcpu)->hv_deadline_tsc = -1; in vmx_cancel_hv_timer()
7244 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_write_pml_buffer()
7512 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= in vmx_setup_mce()
7515 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= in vmx_setup_mce()
7522 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_smi_allowed()
7529 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_pre_enter_smm()
7543 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_pre_leave_smm()
7573 return to_vmx(vcpu)->nested.vmxon; in vmx_apic_init_signal_blocked()
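
Every reference above resolves through the same small helper: to_vmx() converts the arch-generic struct kvm_vcpu pointer that the KVM core hands to vendor callbacks into the VMX-specific struct vcpu_vmx that embeds it. A minimal sketch follows, assuming the usual container_of() pattern (the real definition lives in the VMX header, and the full layout of struct vcpu_vmx is elided here):

	/*
	 * Sketch of the helper these references resolve to. struct vcpu_vmx
	 * embeds the generic struct kvm_vcpu, so container_of() recovers the
	 * VMX-specific wrapper from the generic pointer passed to callbacks.
	 */
	struct vcpu_vmx {
		struct kvm_vcpu vcpu;	/* embedded generic vCPU state */
		/* ... VMX-specific fields: rmode, ept_pointer, loaded_vmcs, ... */
	};

	static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
	{
		return container_of(vcpu, struct vcpu_vmx, vcpu);
	}

Both usage patterns in the listing follow from this: functions that touch several VMX fields cache the result once ("struct vcpu_vmx *vmx = to_vmx(vcpu);"), while single accesses simply dereference to_vmx(vcpu)-> inline.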