Lines matching refs:to_vmx
1062 static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu) in to_vmx() function
1069 return &(to_vmx(vcpu)->pi_desc); in vcpu_to_pi_desc()
1262 return to_vmx(vcpu)->nested.cached_vmcs12; in get_vmcs12()
1267 return to_vmx(vcpu)->nested.cached_shadow_vmcs12; in get_shadow_vmcs12()
1550 tmp_eptp = to_vmx(vcpu)->ept_pointer; in check_ept_pointer_match()
1551 } else if (tmp_eptp != to_vmx(vcpu)->ept_pointer) { in check_ept_pointer_match()
1580 to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK); in vmx_hv_remote_flush_tlb()
1941 return vmx_misc_cr3_count(to_vmx(vcpu)->nested.msrs.misc_low); in nested_cpu_vmx_misc_cr3_count()
1951 return to_vmx(vcpu)->nested.msrs.misc_low & in nested_cpu_has_vmwrite_any_field()
1957 return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS; in nested_cpu_has_zero_length_injection()
1962 return to_vmx(vcpu)->nested.msrs.procbased_ctls_high & in nested_cpu_supports_monitor_trap_flag()
1968 return to_vmx(vcpu)->nested.msrs.secondary_ctls_high & in nested_cpu_has_vmx_shadow_vmcs()
2584 if (to_vmx(vcpu)->rmode.vm86_active) in update_exception_bitmap()
2611 msr_bitmap = to_vmx(vcpu)->loaded_vmcs->msr_bitmap; in msr_write_intercepted()
2634 msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap; in msr_write_intercepted_l01()
2865 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_prepare_switch_to_guest()
3060 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load()
3143 vmx_prepare_switch_to_host(to_vmx(vcpu)); in vmx_vcpu_put()
3176 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_rflags()
3178 save_rflags = to_vmx(vcpu)->rmode.save_rflags; in vmx_get_rflags()
3181 to_vmx(vcpu)->rflags = rflags; in vmx_get_rflags()
3183 return to_vmx(vcpu)->rflags; in vmx_get_rflags()
3191 to_vmx(vcpu)->rflags = rflags; in vmx_set_rflags()
3192 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_set_rflags()
3193 to_vmx(vcpu)->rmode.save_rflags = rflags; in vmx_set_rflags()
3198 if ((old_rflags ^ to_vmx(vcpu)->rflags) & X86_EFLAGS_VM) in vmx_set_rflags()
3199 to_vmx(vcpu)->emulation_required = emulation_required(vcpu); in vmx_set_rflags()
3323 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_queue_exception()
3903 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_vmx_msr()
4041 uint64_t valid_bits = to_vmx(vcpu)->msr_ia32_feature_control_valid_bits; in vmx_feature_control_msr_valid()
4067 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_msr()
4089 msr_info->data = to_vmx(vcpu)->spec_ctrl; in vmx_get_msr()
4095 msr_info->data = to_vmx(vcpu)->arch_capabilities; in vmx_get_msr()
4159 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_msr()
4278 !(to_vmx(vcpu)->msr_ia32_feature_control & in vmx_set_msr()
4286 (to_vmx(vcpu)->msr_ia32_feature_control & in vmx_set_msr()
4945 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_pmode()
5018 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_rmode()
5066 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_efer()
5074 vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
5077 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in vmx_set_efer()
5090 vmx_segment_cache_clear(to_vmx(vcpu)); in enter_lmode()
5105 vm_entry_controls_clearbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE); in exit_lmode()
5125 __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid, invalidate_gpa); in vmx_flush_tlb()
5130 int vpid = to_vmx(vcpu)->vpid; in vmx_flush_tlb_gva()
5200 u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0; in nested_guest_cr0_valid()
5201 u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1; in nested_guest_cr0_valid()
5204 if (to_vmx(vcpu)->nested.msrs.secondary_ctls_high & in nested_guest_cr0_valid()
5214 u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr0_fixed0; in nested_host_cr0_valid()
5215 u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr0_fixed1; in nested_host_cr0_valid()
5222 u64 fixed0 = to_vmx(vcpu)->nested.msrs.cr4_fixed0; in nested_cr4_valid()
5223 u64 fixed1 = to_vmx(vcpu)->nested.msrs.cr4_fixed1; in nested_cr4_valid()
5264 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr0()
5334 to_vmx(vcpu)->ept_pointer = eptp; in vmx_set_cr3()
5363 else if (to_vmx(vcpu)->rmode.vm86_active) in vmx_set_cr4()
5391 if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4)) in vmx_set_cr4()
5429 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_segment()
5467 if (to_vmx(vcpu)->rmode.vm86_active) { in vmx_get_segment_base()
5471 return vmx_read_guest_seg_base(to_vmx(vcpu), seg); in vmx_get_segment_base()
5476 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_cpl()
5509 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_segment()
5549 u32 ar = vmx_read_guest_seg_ar(to_vmx(vcpu), VCPU_SREG_CS); in vmx_get_cs_db_l_bits()
6096 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_msr_bitmap()
6139 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_complete_nested_posted_interrupt()
6176 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_guest_apic_has_interrupt()
6237 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_nested_posted_interrupt()
6263 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_posted_interrupt()
6372 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_refresh_apicv_exec_ctrl()
6680 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_reset()
6819 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_irq()
6848 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_nmi()
6880 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_nmi_mask()
6894 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nmi_mask()
6914 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_nmi_allowed()
6918 to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked) in vmx_nmi_allowed()
6928 return (!to_vmx(vcpu)->nested.nested_run_pending && in vmx_interrupt_allowed()
6963 to_vmx(vcpu)->vcpu.arch.event_exit_inst_len = in rmode_exception()
7041 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_exception()
7215 if (to_vmx(vcpu)->nested.vmxon && in handle_set_cr0()
7584 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_task_switch()
7661 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_ept_violation()
7736 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invalid_guest_state()
7799 struct vcpu_vmx *vmx = to_vmx(vcpu); in grow_ple_window()
7814 struct vcpu_vmx *vmx = to_vmx(vcpu); in shrink_ple_window()
8091 if (to_vmx(vcpu)->nested.current_vmptr == -1ull) { in nested_vmx_failValid()
8250 struct vcpu_vmx *vmx = to_vmx(vcpu); in alloc_shadow_vmcs()
8271 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_vmx_operation()
8324 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmon()
8407 if (!to_vmx(vcpu)->nested.vmxon) { in nested_vmx_check_permission()
8494 free_nested(to_vmx(vcpu)); in handle_vmoff()
8502 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmclear()
8692 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmcs12()
8762 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmwrite()
8869 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmptrld()
8928 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; in handle_vmptrst()
8950 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invept()
9012 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invvpid()
9208 if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) && in handle_pml_full()
9223 if (!to_vmx(vcpu)->req_immediate_exit) in handle_preemption_timer()
9230 struct vcpu_vmx *vmx = to_vmx(vcpu); in valid_ept_address()
9311 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmfunc()
9617 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_exit_reflected()
9832 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_flush_pml_buffer()
10043 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit()
10236 to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true; in vmx_set_virtual_apic_mode()
10325 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_sync_pir_to_irr()
10382 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_apicv_post_state_restore()
10427 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_external_intr()
10631 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_hv_timer()
10661 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_run()
10950 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_switch_vmcs()
10969 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_free_vcpu_nested()
10979 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_free_vcpu()
11221 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_cr_fixed1_bits_update()
11260 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_entry_exit_ctls_update()
11277 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_cpuid_update()
11285 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= in vmx_cpuid_update()
11288 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= in vmx_cpuid_update()
11307 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_ept_inject_page_fault()
11344 to_vmx(vcpu)->nested.msrs.ept_caps & in nested_ept_init_mmu_context()
11381 !to_vmx(vcpu)->nested.nested_run_pending) { in vmx_inject_page_fault_nested()
11398 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_get_vmcs12_pages()
11488 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_start_preemption_timer()
11556 unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap; in nested_vmx_prepare_msr_bitmap()
11662 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_flush_cached_shadow_vmcs12()
11965 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmcs02_full()
12105 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmcs02()
12385 struct vcpu_vmx *vmx = to_vmx(vcpu); in check_vmentry_prereqs()
12570 if (to_vmx(vcpu)->nested.nested_run_pending && in check_vmentry_postreqs()
12609 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_vmx_non_root_mode()
12699 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_run()
12895 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_check_nested_events()
12945 to_vmx(vcpu)->req_immediate_exit = true; in vmx_request_immediate_exit()
12951 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); in vmx_get_preemption_timer_value()
13028 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); in sync_vmcs12()
13054 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); in sync_vmcs12()
13197 !(nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02)) { in load_vmcs12_host_state()
13287 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_vmexit()
13439 to_vmx(vcpu)->nested.nested_run_pending = 0; in vmx_leave_nested()
13442 free_nested(to_vmx(vcpu)); in vmx_leave_nested()
13461 to_vmx(vcpu)->nested.sync_shadow_vmcs = true; in nested_vmx_entry_failure()
13513 vmx = to_vmx(vcpu); in vmx_set_hv_timer()
13547 to_vmx(vcpu)->hv_deadline_tsc = -1; in vmx_cancel_hv_timer()
13578 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_write_pml_buffer()
13846 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits |= in vmx_setup_mce()
13849 to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &= in vmx_setup_mce()
13856 if (to_vmx(vcpu)->nested.nested_run_pending) in vmx_smi_allowed()
13863 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_pre_enter_smm()
13877 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_pre_leave_smm()
13919 vmx = to_vmx(vcpu); in vmx_get_nested_state()
13988 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nested_state()
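
Every call site above funnels through the same accessor: to_vmx(), defined at line 1062, converts the arch-neutral struct kvm_vcpu pointer back into the VMX-specific struct vcpu_vmx that embeds it, using the kernel's container_of pattern. The sketch below is a minimal, self-contained illustration of that pattern; the struct bodies are simplified stand-ins rather than the kernel's real layouts, and only the accessor shape mirrors the listing (to_vmx() at 1062, vcpu_to_pi_desc() at 1069).

	/*
	 * Minimal sketch of the container_of pattern behind to_vmx().
	 * The struct contents are placeholders; only the embedding of
	 * struct kvm_vcpu inside struct vcpu_vmx matters here.
	 */
	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct kvm_vcpu {
		int vcpu_id;			/* stand-in for the real fields */
	};

	struct pi_desc {
		unsigned int pir[8];		/* stand-in posted-interrupt descriptor */
	};

	struct vcpu_vmx {
		struct kvm_vcpu vcpu;		/* embedded arch-neutral vcpu */
		struct pi_desc pi_desc;
		unsigned long rflags;
	};

	/* Same shape as the helper defined at line 1062 of the listing. */
	static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
	{
		return container_of(vcpu, struct vcpu_vmx, vcpu);
	}

	/* Typical call site, mirroring vcpu_to_pi_desc() at line 1069. */
	static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
	{
		return &(to_vmx(vcpu)->pi_desc);
	}

	int main(void)
	{
		struct vcpu_vmx vmx = { .vcpu = { .vcpu_id = 0 } };
		struct kvm_vcpu *vcpu = &vmx.vcpu;

		/* The inner pointer round-trips back to the enclosing vcpu_vmx. */
		printf("same object: %d\n", to_vmx(vcpu) == &vmx);
		printf("pi_desc resolved: %d\n",
		       vcpu_to_pi_desc(vcpu) == &vmx.pi_desc);
		return 0;
	}

Because the arch-neutral KVM core only ever passes struct kvm_vcpu pointers, every VMX path that needs vendor-specific state (rflags cache, nested state, MSR bitmaps, posted-interrupt descriptor, and so on in the listing above) performs this one pointer adjustment first.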