Lines Matching refs:vmx

Each entry gives the kernel source line number, the matching line, and the enclosing function; a trailing "local" marks vmx as a function-local variable, "argument" as a function parameter.

190 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_fail() local
196 if (vmx->nested.current_vmptr == INVALID_GPA && in nested_vmx_fail()
197 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in nested_vmx_fail()
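
The nested_vmx_fail() excerpts above (source lines 190-197) implement the VMfail convention: with no current vmcs12 and no valid enlightened VMCS, a failing VMX instruction must report VMfailInvalid; otherwise VMfailValid with an error number. A minimal sketch of the whole function, reconstructed around the listed lines (nested_vmx_failInvalid()/nested_vmx_failValid() are the pre-existing helpers these lines feed into; treat the exact signatures as assumptions):

    static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
    {
            struct vcpu_vmx *vmx = to_vmx(vcpu);

            /* No vmcs12 and no evmcs current: VMfailInvalid. */
            if (vmx->nested.current_vmptr == INVALID_GPA &&
                !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
                    return nested_vmx_failInvalid(vcpu);

            /* Otherwise VMfailValid, writing the VM-instruction error field. */
            return nested_vmx_failValid(vcpu, vm_instruction_error);
    }
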
220 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) in vmx_disable_shadow_vmcs() argument
222 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); in vmx_disable_shadow_vmcs()
224 vmx->nested.need_vmcs12_to_shadow_sync = false; in vmx_disable_shadow_vmcs()
230 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_release_evmcs() local
232 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { in nested_release_evmcs()
233 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true); in nested_release_evmcs()
234 vmx->nested.hv_evmcs = NULL; in nested_release_evmcs()
237 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; in nested_release_evmcs()
246 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx, in vmx_sync_vmcs_host_state() argument
251 if (unlikely(!vmx->guest_state_loaded)) in vmx_sync_vmcs_host_state()
255 dest = &vmx->loaded_vmcs->host_state; in vmx_sync_vmcs_host_state()
267 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_switch_vmcs() local
271 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs)) in vmx_switch_vmcs()
275 prev = vmx->loaded_vmcs; in vmx_switch_vmcs()
276 vmx->loaded_vmcs = vmcs; in vmx_switch_vmcs()
278 vmx_sync_vmcs_host_state(vmx, prev); in vmx_switch_vmcs()
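
The vmx_switch_vmcs() excerpts (source lines 267-278) reduce to the sketch below; the get_cpu()/put_cpu() bracket and the vmx_vcpu_load_vmcs() reload are reconstructed context rather than lines shown in the listing:

    static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
    {
            struct vcpu_vmx *vmx = to_vmx(vcpu);
            struct loaded_vmcs *prev;
            int cpu;

            /* Switching to the VMCS that is already loaded would be a bug. */
            if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
                    return;

            cpu = get_cpu();
            prev = vmx->loaded_vmcs;
            vmx->loaded_vmcs = vmcs;
            vmx_vcpu_load_vmcs(vcpu, cpu, prev);    /* VMPTRLD the new VMCS */
            vmx_sync_vmcs_host_state(vmx, prev);    /* carry host state across */
            put_cpu();

            /* (The full function also invalidates cached guest registers.) */
    }
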
296 struct vcpu_vmx *vmx = to_vmx(vcpu); in free_nested() local
298 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01)) in free_nested()
299 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in free_nested()
301 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) in free_nested()
306 vmx->nested.vmxon = false; in free_nested()
307 vmx->nested.smm.vmxon = false; in free_nested()
308 vmx->nested.vmxon_ptr = INVALID_GPA; in free_nested()
309 free_vpid(vmx->nested.vpid02); in free_nested()
310 vmx->nested.posted_intr_nv = -1; in free_nested()
311 vmx->nested.current_vmptr = INVALID_GPA; in free_nested()
313 vmx_disable_shadow_vmcs(vmx); in free_nested()
314 vmcs_clear(vmx->vmcs01.shadow_vmcs); in free_nested()
315 free_vmcs(vmx->vmcs01.shadow_vmcs); in free_nested()
316 vmx->vmcs01.shadow_vmcs = NULL; in free_nested()
318 kfree(vmx->nested.cached_vmcs12); in free_nested()
319 vmx->nested.cached_vmcs12 = NULL; in free_nested()
320 kfree(vmx->nested.cached_shadow_vmcs12); in free_nested()
321 vmx->nested.cached_shadow_vmcs12 = NULL; in free_nested()
327 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false); in free_nested()
328 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); in free_nested()
329 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); in free_nested()
330 vmx->nested.pi_desc = NULL; in free_nested()
336 free_loaded_vmcs(&vmx->nested.vmcs02); in free_nested()
382 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_ept_inject_page_fault() local
386 if (vmx->nested.pml_full) { in nested_ept_inject_page_fault()
388 vmx->nested.pml_full = false; in nested_ept_inject_page_fault()
413 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_ept_new_eptp() local
414 bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT; in nested_ept_new_eptp()
415 int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps); in nested_ept_new_eptp()
538 void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx, \
542 if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \
551 static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx, in nested_vmx_set_intercept_for_msr() argument
557 nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1, in nested_vmx_set_intercept_for_msr()
560 nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1, in nested_vmx_set_intercept_for_msr()
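
Source lines 538-560 belong to a helper-generating macro plus its dispatcher: an MSR stays intercepted in vmcs02 unless both L0's vmcs01 bitmap and L1's bitmap pass it through. A sketch of the pattern, reconstructed from the listed fragments (the vmx_{test,set,clear}_msr_bitmap_{read,write} accessors are the existing bitmap helpers; MSR_TYPE_R/MSR_TYPE_W are the usual type flags):

    #define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw)                                \
    static inline                                                              \
    void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx,             \
                                             unsigned long *msr_bitmap_l1,     \
                                             unsigned long *msr_bitmap_l0,     \
                                             u32 msr)                          \
    {                                                                          \
            if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) ||       \
                vmx_test_msr_bitmap_##rw(msr_bitmap_l1, msr))                  \
                    vmx_set_msr_bitmap_##rw(msr_bitmap_l0, msr);               \
            else                                                               \
                    vmx_clear_msr_bitmap_##rw(msr_bitmap_l0, msr);             \
    }
    BUILD_NVMX_MSR_INTERCEPT_HELPER(read)
    BUILD_NVMX_MSR_INTERCEPT_HELPER(write)

    static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
                                                        unsigned long *msr_bitmap_l1,
                                                        unsigned long *msr_bitmap_l0,
                                                        u32 msr, int types)
    {
            if (types & MSR_TYPE_R)
                    nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1,
                                                      msr_bitmap_l0, msr);
            if (types & MSR_TYPE_W)
                    nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1,
                                                       msr_bitmap_l0, msr);
    }
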
571 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_prepare_msr_bitmap() local
574 unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap; in nested_vmx_prepare_msr_bitmap()
575 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; in nested_vmx_prepare_msr_bitmap()
576 struct kvm_host_map *map = &vmx->nested.msr_bitmap_map; in nested_vmx_prepare_msr_bitmap()
591 if (!vmx->nested.force_msr_bitmap_recalc && evmcs && in nested_vmx_prepare_msr_bitmap()
645 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, in nested_vmx_prepare_msr_bitmap()
648 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, in nested_vmx_prepare_msr_bitmap()
651 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, in nested_vmx_prepare_msr_bitmap()
654 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, in nested_vmx_prepare_msr_bitmap()
657 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, in nested_vmx_prepare_msr_bitmap()
660 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0, in nested_vmx_prepare_msr_bitmap()
663 kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false); in nested_vmx_prepare_msr_bitmap()
665 vmx->nested.force_msr_bitmap_recalc = false; in nested_vmx_prepare_msr_bitmap()
673 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_cache_shadow_vmcs12() local
674 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; in nested_cache_shadow_vmcs12()
685 kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu), in nested_cache_shadow_vmcs12()
692 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_flush_cached_shadow_vmcs12() local
693 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; in nested_flush_cached_shadow_vmcs12()
704 kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu), in nested_flush_cached_shadow_vmcs12()
891 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_max_atomic_switch_msrs() local
892 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, in nested_vmx_max_atomic_switch_msrs()
893 vmx->nested.msrs.misc_high); in nested_vmx_max_atomic_switch_msrs()
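
Source lines 891-893 assemble the 64-bit VMX_MISC value so the MSR-list limit can be derived from it. Per the SDM, VMX_MISC bits 27:25 encode N and the maximum MSR count per VM-entry/VM-exit MSR list is 512 * (N + 1); a sketch of the remainder of the function under that reading (vmx_misc_max_msr() and VMX_MISC_MSR_LIST_MULTIPLIER are the existing helper/constant this maps onto):

    static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
    {
            struct vcpu_vmx *vmx = to_vmx(vcpu);
            u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
                                           vmx->nested.msrs.misc_high);

            /* N == 0 (the common case) yields 512 MSRs per list. */
            return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
    }
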
947 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_get_vmexit_msr_value() local
955 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest, in nested_vmx_get_vmexit_msr_value()
959 u64 val = vmx->msr_autostore.guest.val[i].value; in nested_vmx_get_vmexit_msr_value()
1045 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmx_msr_autostore_list() local
1046 struct vmx_msrs *autostore = &vmx->msr_autostore.guest; in prepare_vmx_msr_autostore_list()
1140 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_transition_tlb_flush() local
1180 if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) { in nested_vmx_transition_tlb_flush()
1181 vmx->nested.last_vpid = vmcs12->virtual_processor_id; in nested_vmx_transition_tlb_flush()
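
Source lines 1180-1181 track the last VPID that L1 programmed into vmcs12. A sketch of the branch they sit in (the KVM_REQ_TLB_FLUSH_GUEST request is reconstructed context): when L1 changes the VPID on VM-Enter it expects fresh TLB tags, which KVM emulates with a guest TLB flush.

    if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
            vmx->nested.last_vpid = vmcs12->virtual_processor_id;
            /* New VPID from L1's point of view => emulate the fresh TLB tag. */
            kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
    }
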
1204 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) in vmx_restore_vmx_basic() argument
1230 vmx->nested.msrs.basic = data; in vmx_restore_vmx_basic()
1264 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) in vmx_restore_control_msr() argument
1281 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp); in vmx_restore_control_msr()
1287 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) in vmx_restore_vmx_misc() argument
1301 if ((vmx->nested.msrs.pinbased_ctls_high & in vmx_restore_vmx_misc()
1316 vmx->nested.msrs.misc_low = data; in vmx_restore_vmx_misc()
1317 vmx->nested.msrs.misc_high = data >> 32; in vmx_restore_vmx_misc()
1322 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) in vmx_restore_vmx_ept_vpid_cap() argument
1331 vmx->nested.msrs.ept_caps = data; in vmx_restore_vmx_ept_vpid_cap()
1332 vmx->nested.msrs.vpid_caps = data >> 32; in vmx_restore_vmx_ept_vpid_cap()
1348 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) in vmx_restore_fixed0_msr() argument
1359 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data; in vmx_restore_fixed0_msr()
1370 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_vmx_msr() local
1376 if (vmx->nested.vmxon) in vmx_set_vmx_msr()
1381 return vmx_restore_vmx_basic(vmx, data); in vmx_set_vmx_msr()
1401 return vmx_restore_control_msr(vmx, msr_index, data); in vmx_set_vmx_msr()
1403 return vmx_restore_vmx_misc(vmx, data); in vmx_set_vmx_msr()
1406 return vmx_restore_fixed0_msr(vmx, msr_index, data); in vmx_set_vmx_msr()
1415 return vmx_restore_vmx_ept_vpid_cap(vmx, data); in vmx_set_vmx_msr()
1417 vmx->nested.msrs.vmcs_enum = data; in vmx_set_vmx_msr()
1422 vmx->nested.msrs.vmfunc_controls = data; in vmx_set_vmx_msr()
1518 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) in copy_shadow_to_vmcs12() argument
1520 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; in copy_shadow_to_vmcs12()
1521 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); in copy_shadow_to_vmcs12()
1540 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_shadow_to_vmcs12()
1545 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) in copy_vmcs12_to_shadow() argument
1555 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; in copy_vmcs12_to_shadow()
1556 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); in copy_vmcs12_to_shadow()
1576 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_vmcs12_to_shadow()
1579 static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields) in copy_enlightened_to_vmcs12() argument
1581 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_enlightened_to_vmcs12()
1582 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; in copy_enlightened_to_vmcs12()
1583 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(&vmx->vcpu); in copy_enlightened_to_vmcs12()
1823 static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) in copy_vmcs12_to_enlightened() argument
1825 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_vmcs12_to_enlightened()
1826 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; in copy_vmcs12_to_enlightened()
2004 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_handle_enlightened_vmptrld() local
2017 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { in nested_vmx_handle_enlightened_vmptrld()
2018 vmx->nested.current_vmptr = INVALID_GPA; in nested_vmx_handle_enlightened_vmptrld()
2023 &vmx->nested.hv_evmcs_map)) in nested_vmx_handle_enlightened_vmptrld()
2026 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva; in nested_vmx_handle_enlightened_vmptrld()
2050 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && in nested_vmx_handle_enlightened_vmptrld()
2051 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { in nested_vmx_handle_enlightened_vmptrld()
2056 vmx->nested.hv_evmcs_vmptr = evmcs_gpa; in nested_vmx_handle_enlightened_vmptrld()
2078 vmx->nested.hv_evmcs->hv_clean_fields &= in nested_vmx_handle_enlightened_vmptrld()
2081 vmx->nested.force_msr_bitmap_recalc = true; in nested_vmx_handle_enlightened_vmptrld()
2089 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_sync_vmcs12_to_shadow() local
2091 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in nested_sync_vmcs12_to_shadow()
2092 copy_vmcs12_to_enlightened(vmx); in nested_sync_vmcs12_to_shadow()
2094 copy_vmcs12_to_shadow(vmx); in nested_sync_vmcs12_to_shadow()
2096 vmx->nested.need_vmcs12_to_shadow_sync = false; in nested_sync_vmcs12_to_shadow()
2101 struct vcpu_vmx *vmx = in vmx_preemption_timer_fn() local
2104 vmx->nested.preemption_timer_expired = true; in vmx_preemption_timer_fn()
2105 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); in vmx_preemption_timer_fn()
2106 kvm_vcpu_kick(&vmx->vcpu); in vmx_preemption_timer_fn()
2113 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_calc_preemption_timer_value() local
2119 if (!vmx->nested.has_preemption_timer_deadline) { in vmx_calc_preemption_timer_value()
2120 vmx->nested.preemption_timer_deadline = in vmx_calc_preemption_timer_value()
2122 vmx->nested.has_preemption_timer_deadline = true; in vmx_calc_preemption_timer_value()
2124 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; in vmx_calc_preemption_timer_value()
2130 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_start_preemption_timer() local
2137 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); in vmx_start_preemption_timer()
2147 hrtimer_start(&vmx->nested.preemption_timer, in vmx_start_preemption_timer()
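
Source lines 2130-2149 start the emulated VMX preemption timer. A sketch reconstructed around the listed lines: the timer value counts in units of 2^rate TSC cycles (KVM emulates the architected rate VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE), so it is shifted, converted to nanoseconds via the vCPU's TSC frequency, and armed as an absolute pinned hrtimer; a value of zero fires immediately.

    static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
                                           u64 preemption_timeout)
    {
            struct vcpu_vmx *vmx = to_vmx(vcpu);

            /* A zero timer value architecturally exits before any L2 insn. */
            if (preemption_timeout == 0) {
                    vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
                    return;
            }

            if (vcpu->arch.virtual_tsc_khz == 0)
                    return;

            /* ticks * 2^rate TSC cycles -> nanoseconds */
            preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
            preemption_timeout *= 1000000;
            do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);

            hrtimer_start(&vmx->nested.preemption_timer,
                          ktime_add_ns(ktime_get(), preemption_timeout),
                          HRTIMER_MODE_ABS_PINNED);
    }
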
2152 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) in nested_vmx_calc_efer() argument
2154 if (vmx->nested.nested_run_pending && in nested_vmx_calc_efer()
2158 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); in nested_vmx_calc_efer()
2160 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); in nested_vmx_calc_efer()
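
Source lines 2152-2160 are nested_vmx_calc_efer() minus its middle condition; the full three-way split, with the missing VM_ENTRY_IA32E_MODE test reconstructed:

    static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
    {
            /* vmcs12 loads EFER on entry: take L1's value verbatim. */
            if (vmx->nested.nested_run_pending &&
                (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
                    return vmcs12->guest_ia32_efer;
            /* Otherwise derive LMA/LME from the "IA-32e mode guest" control. */
            else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
                    return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
            else
                    return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
    }
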
2163 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) in prepare_vmcs02_constant_state() argument
2165 struct kvm *kvm = vmx->vcpu.kvm; in prepare_vmcs02_constant_state()
2173 if (vmx->nested.vmcs02_initialized) in prepare_vmcs02_constant_state()
2175 vmx->nested.vmcs02_initialized = true; in prepare_vmcs02_constant_state()
2184 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); in prepare_vmcs02_constant_state()
2194 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); in prepare_vmcs02_constant_state()
2217 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); in prepare_vmcs02_constant_state()
2218 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); in prepare_vmcs02_constant_state()
2219 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); in prepare_vmcs02_constant_state()
2221 vmx_set_constant_host_state(vmx); in prepare_vmcs02_constant_state()
2224 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, in prepare_vmcs02_early_rare() argument
2227 prepare_vmcs02_constant_state(vmx); in prepare_vmcs02_early_rare()
2232 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) in prepare_vmcs02_early_rare()
2233 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); in prepare_vmcs02_early_rare()
2235 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in prepare_vmcs02_early_rare()
2239 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01, in prepare_vmcs02_early() argument
2243 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); in prepare_vmcs02_early()
2245 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in prepare_vmcs02_early()
2246 prepare_vmcs02_early_rare(vmx, vmcs12); in prepare_vmcs02_early()
2256 vmx->nested.pi_pending = false; in prepare_vmcs02_early()
2258 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; in prepare_vmcs02_early()
2261 pin_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2272 vmx->nested.l1_tpr_threshold = -1; in prepare_vmcs02_early()
2295 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS; in prepare_vmcs02_early()
2297 exec_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2342 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12); in prepare_vmcs02_early()
2344 secondary_exec_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2369 vm_entry_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2383 vm_exit_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2388 if (vmx->nested.nested_run_pending) { in prepare_vmcs02_early()
2397 vmx->loaded_vmcs->nmi_known_unmasked = in prepare_vmcs02_early()
2404 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) in prepare_vmcs02_rare() argument
2406 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; in prepare_vmcs02_rare()
2447 vmx->segment_cache.bitmask = 0; in prepare_vmcs02_rare()
2469 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && in prepare_vmcs02_rare()
2490 if (vmx_need_pf_intercept(&vmx->vcpu)) { in prepare_vmcs02_rare()
2513 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC); in prepare_vmcs02_rare()
2515 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr); in prepare_vmcs02_rare()
2516 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in prepare_vmcs02_rare()
2517 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in prepare_vmcs02_rare()
2519 set_cr4_guest_host_mask(vmx); in prepare_vmcs02_rare()
2537 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmcs02() local
2540 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { in prepare_vmcs02()
2541 prepare_vmcs02_rare(vmx, vmcs12); in prepare_vmcs02()
2542 vmx->nested.dirty_vmcs12 = false; in prepare_vmcs02()
2544 load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) || in prepare_vmcs02()
2545 !(vmx->nested.hv_evmcs->hv_clean_fields & in prepare_vmcs02()
2549 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2555 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl); in prepare_vmcs02()
2557 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || in prepare_vmcs02()
2559 vmcs_write64(GUEST_BNDCFGS, vmx->nested.pre_vmenter_bndcfgs); in prepare_vmcs02()
2570 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2575 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in prepare_vmcs02()
2607 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); in prepare_vmcs02()
2667 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in prepare_vmcs02()
2668 vmx->nested.hv_evmcs->hv_clean_fields |= in prepare_vmcs02()
2689 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_eptp() local
2694 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) in nested_vmx_check_eptp()
2698 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) in nested_vmx_check_eptp()
2708 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) in nested_vmx_check_eptp()
2712 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) in nested_vmx_check_eptp()
2725 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) in nested_vmx_check_eptp()
2738 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_check_vm_execution_controls() local
2741 vmx->nested.msrs.pinbased_ctls_low, in nested_check_vm_execution_controls()
2742 vmx->nested.msrs.pinbased_ctls_high)) || in nested_check_vm_execution_controls()
2744 vmx->nested.msrs.procbased_ctls_low, in nested_check_vm_execution_controls()
2745 vmx->nested.msrs.procbased_ctls_high))) in nested_check_vm_execution_controls()
2750 vmx->nested.msrs.secondary_ctls_low, in nested_check_vm_execution_controls()
2751 vmx->nested.msrs.secondary_ctls_high))) in nested_check_vm_execution_controls()
2778 ~vmx->nested.msrs.vmfunc_controls)) in nested_check_vm_execution_controls()
2797 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_check_vm_exit_controls() local
2800 vmx->nested.msrs.exit_ctls_low, in nested_check_vm_exit_controls()
2801 vmx->nested.msrs.exit_ctls_high)) || in nested_check_vm_exit_controls()
2814 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_check_vm_entry_controls() local
2817 vmx->nested.msrs.entry_ctls_low, in nested_check_vm_entry_controls()
2818 vmx->nested.msrs.entry_ctls_high))) in nested_check_vm_entry_controls()
2980 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmcs_link_ptr() local
2981 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; in nested_vmx_check_vmcs_link_ptr()
3088 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmentry_hw() local
3095 if (vmx->msr_autoload.host.nr) in nested_vmx_check_vmentry_hw()
3097 if (vmx->msr_autoload.guest.nr) in nested_vmx_check_vmentry_hw()
3113 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { in nested_vmx_check_vmentry_hw()
3115 vmx->loaded_vmcs->host_state.cr3 = cr3; in nested_vmx_check_vmentry_hw()
3119 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { in nested_vmx_check_vmentry_hw()
3121 vmx->loaded_vmcs->host_state.cr4 = cr4; in nested_vmx_check_vmentry_hw()
3124 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, in nested_vmx_check_vmentry_hw()
3125 __vmx_vcpu_run_flags(vmx)); in nested_vmx_check_vmentry_hw()
3127 if (vmx->msr_autoload.host.nr) in nested_vmx_check_vmentry_hw()
3128 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in nested_vmx_check_vmentry_hw()
3129 if (vmx->msr_autoload.guest.nr) in nested_vmx_check_vmentry_hw()
3130 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in nested_vmx_check_vmentry_hw()
3166 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_get_evmcs_page() local
3174 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) { in nested_get_evmcs_page()
3186 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_get_evmcs_page()
3195 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_get_vmcs12_pages() local
3211 map = &vmx->nested.apic_access_page_map; in nested_get_vmcs12_pages()
3227 map = &vmx->nested.virtual_apic_map; in nested_get_vmcs12_pages()
3242 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW); in nested_get_vmcs12_pages()
3253 map = &vmx->nested.pi_desc_map; in nested_get_vmcs12_pages()
3256 vmx->nested.pi_desc = in nested_get_vmcs12_pages()
3268 vmx->nested.pi_desc = NULL; in nested_get_vmcs12_pages()
3269 pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR); in nested_get_vmcs12_pages()
3273 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS); in nested_get_vmcs12_pages()
3275 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS); in nested_get_vmcs12_pages()
3308 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_write_pml_buffer() local
3314 if (WARN_ON_ONCE(vmx->nested.pml_full)) in nested_vmx_write_pml_buffer()
3326 vmx->nested.pml_full = true; in nested_vmx_write_pml_buffer()
3388 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_enter_non_root_mode() local
3399 vmx->nested.current_vmptr, in nested_vmx_enter_non_root_mode()
3410 evaluate_pending_interrupts = exec_controls_get(vmx) & in nested_vmx_enter_non_root_mode()
3417 if (!vmx->nested.nested_run_pending || in nested_vmx_enter_non_root_mode()
3419 vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); in nested_vmx_enter_non_root_mode()
3421 (!vmx->nested.nested_run_pending || in nested_vmx_enter_non_root_mode()
3423 vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS); in nested_vmx_enter_non_root_mode()
3444 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); in nested_vmx_enter_non_root_mode()
3446 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12); in nested_vmx_enter_non_root_mode()
3450 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3455 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3509 vmx->nested.preemption_timer_expired = false; in nested_vmx_enter_non_root_mode()
3534 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3541 if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in nested_vmx_enter_non_root_mode()
3542 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_enter_non_root_mode()
3554 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_run() local
3572 if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) && in nested_vmx_run()
3573 vmx->nested.current_vmptr == INVALID_GPA)) in nested_vmx_run()
3587 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { in nested_vmx_run()
3588 copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields); in nested_vmx_run()
3592 copy_shadow_to_vmcs12(vmx); in nested_vmx_run()
3626 vmx->nested.nested_run_pending = 1; in nested_vmx_run()
3627 vmx->nested.has_preemption_timer_deadline = false; in nested_vmx_run()
3634 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) { in nested_vmx_run()
3635 vmx->nested.pi_pending = true; in nested_vmx_run()
3637 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv); in nested_vmx_run()
3641 vmx->vcpu.arch.l1tf_flush_l1d = true; in nested_vmx_run()
3666 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3671 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3681 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3817 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_complete_nested_posted_interrupt() local
3822 if (!vmx->nested.pi_pending) in vmx_complete_nested_posted_interrupt()
3825 if (!vmx->nested.pi_desc) in vmx_complete_nested_posted_interrupt()
3828 vmx->nested.pi_pending = false; in vmx_complete_nested_posted_interrupt()
3830 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) in vmx_complete_nested_posted_interrupt()
3833 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); in vmx_complete_nested_posted_interrupt()
3835 vapic_page = vmx->nested.virtual_apic_map.hva; in vmx_complete_nested_posted_interrupt()
3839 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, in vmx_complete_nested_posted_interrupt()
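
Source lines 3817-3839 drain a pending vmcs12 posted interrupt into L2's virtual APIC. A sketch of the core sequence (the mmio_needed error path's body and the final status/RVI update are elided; variable declarations reconstructed):

            if (!vmx->nested.pi_pending)
                    return 0;                       /* nothing posted */

            if (!vmx->nested.pi_desc)
                    goto mmio_needed;               /* descriptor page unmapped */

            vmx->nested.pi_pending = false;
            if (!pi_test_and_clear_on(vmx->nested.pi_desc))
                    return 0;                       /* notification bit not set */

            /* Highest vector in the 256-bit posted-interrupt request field. */
            max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
            if (max_irr != 256) {
                    vapic_page = vmx->nested.virtual_apic_map.hva;
                    if (!vapic_page)
                            goto mmio_needed;
                    /* Fold PIR bits into the virtual APIC page's IRR. */
                    __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
                                          vapic_page, &max_irr);
            }
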
4057 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_check_nested_events() local
4063 bool block_nested_exceptions = vmx->nested.nested_run_pending; in vmx_check_nested_events()
4083 vmx->nested.mtf_pending = false; in vmx_check_nested_events()
4127 if (vmx->nested.mtf_pending) { in vmx_check_nested_events()
4257 struct vcpu_vmx *vmx = to_vmx(vcpu); in sync_vmcs02_to_vmcs12_rare() local
4296 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; in sync_vmcs02_to_vmcs12_rare()
4302 struct vcpu_vmx *vmx = to_vmx(vcpu); in copy_vmcs02_to_vmcs12_rare() local
4305 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) in copy_vmcs02_to_vmcs12_rare()
4309 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); in copy_vmcs02_to_vmcs12_rare()
4312 vmx->loaded_vmcs = &vmx->nested.vmcs02; in copy_vmcs02_to_vmcs12_rare()
4313 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01); in copy_vmcs02_to_vmcs12_rare()
4317 vmx->loaded_vmcs = &vmx->vmcs01; in copy_vmcs02_to_vmcs12_rare()
4318 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02); in copy_vmcs02_to_vmcs12_rare()
4330 struct vcpu_vmx *vmx = to_vmx(vcpu); in sync_vmcs02_to_vmcs12() local
4332 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in sync_vmcs02_to_vmcs12()
4335 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = in sync_vmcs02_to_vmcs12()
4336 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr); in sync_vmcs02_to_vmcs12()
4360 !vmx->nested.nested_run_pending) in sync_vmcs02_to_vmcs12()
4590 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) in nested_vmx_get_vmcs01_guest_efer() argument
4595 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) in nested_vmx_get_vmcs01_guest_efer()
4601 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { in nested_vmx_get_vmcs01_guest_efer()
4602 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) in nested_vmx_get_vmcs01_guest_efer()
4603 return vmx->msr_autoload.guest.val[i].value; in nested_vmx_get_vmcs01_guest_efer()
4606 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER); in nested_vmx_get_vmcs01_guest_efer()
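
Source lines 4590-4606 recover L1's EFER when vmcs01 does not load it on VM-Enter; the full fallback chain as a sketch (the cpu_has_load_ia32_efer()/host_efer step is reconstructed context):

    static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
    {
            struct vmx_uret_msr *efer_msr;
            unsigned int i;

            /* 1. vmcs01 loads EFER on VM-Enter: read it from the VMCS. */
            if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
                    return vmcs_read64(GUEST_IA32_EFER);

            /* 2. EFER handled via the dedicated VM-Exit load-host control. */
            if (cpu_has_load_ia32_efer())
                    return host_efer;

            /* 3. Else it may sit in the VM-entry MSR-load (autoload) list. */
            for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
                    if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
                            return vmx->msr_autoload.guest.val[i].value;
            }

            /* 4. Or in the user-return MSR slots; else fall back to host. */
            efer_msr = vmx_find_uret_msr(vmx, MSR_EFER);
            if (efer_msr)
                    return efer_msr->data;

            return host_efer;
    }
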
4616 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_restore_host_state() local
4640 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); in nested_vmx_restore_host_state()
4726 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_vmexit() local
4730 vmx->nested.mtf_pending = false; in nested_vmx_vmexit()
4733 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_vmexit()
4767 if (likely(!vmx->fail)) { in nested_vmx_vmexit()
4807 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_vmexit()
4821 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in nested_vmx_vmexit()
4822 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in nested_vmx_vmexit()
4827 if (vmx->nested.l1_tpr_threshold != -1) in nested_vmx_vmexit()
4828 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); in nested_vmx_vmexit()
4830 if (vmx->nested.change_vmcs01_virtual_apic_mode) { in nested_vmx_vmexit()
4831 vmx->nested.change_vmcs01_virtual_apic_mode = false; in nested_vmx_vmexit()
4835 if (vmx->nested.update_vmcs01_cpu_dirty_logging) { in nested_vmx_vmexit()
4836 vmx->nested.update_vmcs01_cpu_dirty_logging = false; in nested_vmx_vmexit()
4841 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false); in nested_vmx_vmexit()
4842 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); in nested_vmx_vmexit()
4843 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); in nested_vmx_vmexit()
4844 vmx->nested.pi_desc = NULL; in nested_vmx_vmexit()
4846 if (vmx->nested.reload_vmcs01_apic_access_page) { in nested_vmx_vmexit()
4847 vmx->nested.reload_vmcs01_apic_access_page = false; in nested_vmx_vmexit()
4851 if (vmx->nested.update_vmcs01_apicv_status) { in nested_vmx_vmexit()
4852 vmx->nested.update_vmcs01_apicv_status = false; in nested_vmx_vmexit()
4857 (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))) in nested_vmx_vmexit()
4858 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_vmexit()
4863 if (likely(!vmx->fail)) { in nested_vmx_vmexit()
4902 vmx->fail = 0; in nested_vmx_vmexit()
5071 struct vcpu_vmx *vmx = to_vmx(vcpu); in alloc_shadow_vmcs() local
5072 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; in alloc_shadow_vmcs()
5082 if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs)) in alloc_shadow_vmcs()
5094 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_vmx_operation() local
5097 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
5101 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
5102 if (!vmx->nested.cached_vmcs12) in enter_vmx_operation()
5105 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA; in enter_vmx_operation()
5106 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
5107 if (!vmx->nested.cached_shadow_vmcs12) in enter_vmx_operation()
5113 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, in enter_vmx_operation()
5115 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; in enter_vmx_operation()
5117 vmx->nested.vpid02 = allocate_vpid(); in enter_vmx_operation()
5119 vmx->nested.vmcs02_initialized = false; in enter_vmx_operation()
5120 vmx->nested.vmxon = true; in enter_vmx_operation()
5123 vmx->pt_desc.guest.ctl = 0; in enter_vmx_operation()
5130 kfree(vmx->nested.cached_shadow_vmcs12); in enter_vmx_operation()
5133 kfree(vmx->nested.cached_vmcs12); in enter_vmx_operation()
5136 free_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
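
Source lines 5130-5136 are the tail of enter_vmx_operation()'s error unwind; the implied goto ladder (label names reconstructed, an assumption) releases resources in reverse order of allocation, so a failure at any step frees exactly what was already set up:

    out_shadow_vmcs:
            kfree(vmx->nested.cached_shadow_vmcs12);

    out_cached_shadow_vmcs12:
            kfree(vmx->nested.cached_vmcs12);

    out_cached_vmcs12:
            free_loaded_vmcs(&vmx->nested.vmcs02);

    out_vmcs02:
            return -ENOMEM;
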
5148 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmxon() local
5188 if (vmx->nested.vmxon) in handle_vmxon()
5202 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) in handle_vmxon()
5226 vmx->nested.vmxon_ptr = vmptr; in handle_vmxon()
5236 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_release_vmcs12() local
5238 if (vmx->nested.current_vmptr == INVALID_GPA) in nested_release_vmcs12()
5246 copy_shadow_to_vmcs12(vmx); in nested_release_vmcs12()
5247 vmx_disable_shadow_vmcs(vmx); in nested_release_vmcs12()
5249 vmx->nested.posted_intr_nv = -1; in nested_release_vmcs12()
5253 vmx->nested.current_vmptr >> PAGE_SHIFT, in nested_release_vmcs12()
5254 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); in nested_release_vmcs12()
5258 vmx->nested.current_vmptr = INVALID_GPA; in nested_release_vmcs12()
5278 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmclear() local
5292 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmclear()
5307 if (vmptr == vmx->nested.current_vmptr) in handle_vmclear()
5323 } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) { in handle_vmclear()
5349 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmread() local
5363 if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { in handle_vmread()
5368 if (vmx->nested.current_vmptr == INVALID_GPA || in handle_vmread()
5401 value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset); in handle_vmread()
5455 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmwrite() local
5478 if (vmx->nested.current_vmptr == INVALID_GPA || in handle_vmwrite()
5542 vmcs_load(vmx->vmcs01.shadow_vmcs); in handle_vmwrite()
5546 vmcs_clear(vmx->vmcs01.shadow_vmcs); in handle_vmwrite()
5547 vmcs_load(vmx->loaded_vmcs->vmcs); in handle_vmwrite()
5550 vmx->nested.dirty_vmcs12 = true; in handle_vmwrite()
5556 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) in set_current_vmptr() argument
5558 vmx->nested.current_vmptr = vmptr; in set_current_vmptr()
5560 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); in set_current_vmptr()
5562 __pa(vmx->vmcs01.shadow_vmcs)); in set_current_vmptr()
5563 vmx->nested.need_vmcs12_to_shadow_sync = true; in set_current_vmptr()
5565 vmx->nested.dirty_vmcs12 = true; in set_current_vmptr()
5566 vmx->nested.force_msr_bitmap_recalc = true; in set_current_vmptr()
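
Source lines 5556-5566 show set_current_vmptr() nearly whole; a sketch with the enable_shadow_vmcs guard that the listing omits restored:

    static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
    {
            vmx->nested.current_vmptr = vmptr;
            if (enable_shadow_vmcs) {
                    secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
                    vmcs_write64(VMCS_LINK_POINTER,
                                 __pa(vmx->vmcs01.shadow_vmcs));
                    vmx->nested.need_vmcs12_to_shadow_sync = true;
            }
            /* Force vmcs02 and the merged MSR bitmap to be rebuilt. */
            vmx->nested.dirty_vmcs12 = true;
            vmx->nested.force_msr_bitmap_recalc = true;
    }
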
5572 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmptrld() local
5585 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmptrld()
5589 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in handle_vmptrld()
5592 if (vmx->nested.current_vmptr != vmptr) { in handle_vmptrld()
5593 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache; in handle_vmptrld()
5627 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12, in handle_vmptrld()
5633 set_current_vmptr(vmx, vmptr); in handle_vmptrld()
5670 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invept() local
5681 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invept()
5683 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { in handle_invept()
5695 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; in handle_invept()
5750 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invvpid() local
5762 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invvpid()
5764 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { in handle_invvpid()
5776 types = (vmx->nested.msrs.vpid_caps & in handle_invvpid()
5872 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmfunc() local
5915 nested_vmx_vmexit(vcpu, vmx->exit_reason.full, in handle_vmfunc()
6355 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_reflect_vmexit() local
6356 union vmx_exit_reason exit_reason = vmx->exit_reason; in nested_vmx_reflect_vmexit()
6360 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_reflect_vmexit()
6366 if (unlikely(vmx->fail)) { in nested_vmx_reflect_vmexit()
6409 struct vcpu_vmx *vmx; in vmx_get_nested_state() local
6415 .hdr.vmx.flags = 0, in vmx_get_nested_state()
6416 .hdr.vmx.vmxon_pa = INVALID_GPA, in vmx_get_nested_state()
6417 .hdr.vmx.vmcs12_pa = INVALID_GPA, in vmx_get_nested_state()
6418 .hdr.vmx.preemption_timer_deadline = 0, in vmx_get_nested_state()
6421 &user_kvm_nested_state->data.vmx[0]; in vmx_get_nested_state()
6426 vmx = to_vmx(vcpu); in vmx_get_nested_state()
6430 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { in vmx_get_nested_state()
6431 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; in vmx_get_nested_state()
6432 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; in vmx_get_nested_state()
6438 if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID) in vmx_get_nested_state()
6447 if (vmx->nested.smm.vmxon) in vmx_get_nested_state()
6448 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; in vmx_get_nested_state()
6450 if (vmx->nested.smm.guest_mode) in vmx_get_nested_state()
6451 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; in vmx_get_nested_state()
6456 if (vmx->nested.nested_run_pending) in vmx_get_nested_state()
6459 if (vmx->nested.mtf_pending) in vmx_get_nested_state()
6463 vmx->nested.has_preemption_timer_deadline) { in vmx_get_nested_state()
6464 kvm_state.hdr.vmx.flags |= in vmx_get_nested_state()
6466 kvm_state.hdr.vmx.preemption_timer_deadline = in vmx_get_nested_state()
6467 vmx->nested.preemption_timer_deadline; in vmx_get_nested_state()
6493 if (!vmx->nested.need_vmcs12_to_shadow_sync) { in vmx_get_nested_state()
6494 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in vmx_get_nested_state()
6502 copy_enlightened_to_vmcs12(vmx, 0); in vmx_get_nested_state()
6504 copy_shadow_to_vmcs12(vmx); in vmx_get_nested_state()
6541 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nested_state() local
6545 &user_kvm_nested_state->data.vmx[0]; in vmx_set_nested_state()
6551 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) { in vmx_set_nested_state()
6552 if (kvm_state->hdr.vmx.smm.flags) in vmx_set_nested_state()
6555 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) in vmx_set_nested_state()
6573 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) in vmx_set_nested_state()
6577 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && in vmx_set_nested_state()
6581 if (kvm_state->hdr.vmx.smm.flags & in vmx_set_nested_state()
6585 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) in vmx_set_nested_state()
6596 : kvm_state->hdr.vmx.smm.flags) in vmx_set_nested_state()
6599 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && in vmx_set_nested_state()
6600 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) in vmx_set_nested_state()
6605 !vmx->nested.enlightened_vmcs_enabled)) in vmx_set_nested_state()
6610 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) in vmx_set_nested_state()
6613 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; in vmx_set_nested_state()
6623 (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)) in vmx_set_nested_state()
6629 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) { in vmx_set_nested_state()
6630 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || in vmx_set_nested_state()
6631 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) in vmx_set_nested_state()
6634 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa); in vmx_set_nested_state()
6642 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING; in vmx_set_nested_state()
6648 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { in vmx_set_nested_state()
6649 vmx->nested.smm.vmxon = true; in vmx_set_nested_state()
6650 vmx->nested.vmxon = false; in vmx_set_nested_state()
6652 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) in vmx_set_nested_state()
6653 vmx->nested.smm.guest_mode = true; in vmx_set_nested_state()
6666 vmx->nested.nested_run_pending = in vmx_set_nested_state()
6669 vmx->nested.mtf_pending = in vmx_set_nested_state()
6694 vmx->nested.has_preemption_timer_deadline = false; in vmx_set_nested_state()
6695 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) { in vmx_set_nested_state()
6696 vmx->nested.has_preemption_timer_deadline = true; in vmx_set_nested_state()
6697 vmx->nested.preemption_timer_deadline = in vmx_set_nested_state()
6698 kvm_state->hdr.vmx.preemption_timer_deadline; in vmx_set_nested_state()
6706 vmx->nested.dirty_vmcs12 = true; in vmx_set_nested_state()
6707 vmx->nested.force_msr_bitmap_recalc = true; in vmx_set_nested_state()
6712 if (vmx->nested.mtf_pending) in vmx_set_nested_state()
6718 vmx->nested.nested_run_pending = 0; in vmx_set_nested_state()