Lines Matching full:nested (KVM nested VMX, arch/x86/kvm/vmx/nested.c)

13 #include "nested.h"
181 if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID) in nested_vmx_failValid()
182 to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_failValid()
195 if (vmx->nested.current_vmptr == INVALID_GPA && in nested_vmx_fail()
196 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in nested_vmx_fail()
206 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator); in nested_vmx_abort()
223 vmx->nested.need_vmcs12_to_shadow_sync = false; in vmx_disable_shadow_vmcs()
230 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { in nested_release_evmcs()
231 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true); in nested_release_evmcs()
232 vmx->nested.hv_evmcs = NULL; in nested_release_evmcs()
235 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID; in nested_release_evmcs()
283 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
293 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) in free_nested()
298 vmx->nested.vmxon = false; in free_nested()
299 vmx->nested.smm.vmxon = false; in free_nested()
300 vmx->nested.vmxon_ptr = INVALID_GPA; in free_nested()
301 free_vpid(vmx->nested.vpid02); in free_nested()
302 vmx->nested.posted_intr_nv = -1; in free_nested()
303 vmx->nested.current_vmptr = INVALID_GPA; in free_nested()
310 kfree(vmx->nested.cached_vmcs12); in free_nested()
311 vmx->nested.cached_vmcs12 = NULL; in free_nested()
312 kfree(vmx->nested.cached_shadow_vmcs12); in free_nested()
313 vmx->nested.cached_shadow_vmcs12 = NULL; in free_nested()
319 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false); in free_nested()
320 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); in free_nested()
321 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); in free_nested()
322 vmx->nested.pi_desc = NULL; in free_nested()
328 free_loaded_vmcs(&vmx->nested.vmcs02); in free_nested()
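
The free_nested() matches above tear down most of the per-vCPU nested state that the rest of this listing keeps touching. For orientation, a heavily abridged sketch of those vmx->nested fields, reconstructed only from the names seen in these matches (types are approximations; the authoritative definition is struct nested_vmx in arch/x86/kvm/vmx/vmx.h and has many more members):

    /* Abridged orientation sketch, not the real kernel definition. */
    struct nested_vmx_sketch {
            bool vmxon;                          /* L1 executed VMXON */
            gpa_t vmxon_ptr;                     /* VMXON region supplied by L1 */
            gpa_t current_vmptr;                 /* guest address of the current vmcs12 */
            struct vmcs12 *cached_vmcs12;        /* host-side copy of the current vmcs12 */
            struct vmcs12 *cached_shadow_vmcs12;
            struct loaded_vmcs vmcs02;           /* hardware VMCS used to run L2 */
            u16 vpid02;                          /* VPID tagging L2 translations */
            int posted_intr_nv;                  /* posted-interrupt notification vector */
            struct kvm_host_map apic_access_page_map;
            struct kvm_host_map virtual_apic_map;
            struct kvm_host_map pi_desc_map;
            struct pi_desc *pi_desc;
            gpa_t hv_evmcs_vmptr;                /* enlightened VMCS pointer (Hyper-V) */
            bool nested_run_pending;
            bool dirty_vmcs12;
            bool need_vmcs12_to_shadow_sync;
    };
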
375 if (vmx->nested.pml_full) { in nested_ept_inject_page_fault()
377 vmx->nested.pml_full = false; in nested_ept_inject_page_fault()
403 bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT; in nested_ept_new_eptp()
404 int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps); in nested_ept_new_eptp()
563 unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap; in nested_vmx_prepare_msr_bitmap()
564 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; in nested_vmx_prepare_msr_bitmap()
565 struct kvm_host_map *map = &vmx->nested.msr_bitmap_map; in nested_vmx_prepare_msr_bitmap()
575 * - Nested hypervisor (L1) is attempting to launch the same L2 as in nested_vmx_prepare_msr_bitmap()
577 * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature in nested_vmx_prepare_msr_bitmap()
580 if (!vmx->nested.force_msr_bitmap_recalc && evmcs && in nested_vmx_prepare_msr_bitmap()
649 kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false); in nested_vmx_prepare_msr_bitmap()
651 vmx->nested.force_msr_bitmap_recalc = false; in nested_vmx_prepare_msr_bitmap()
660 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; in nested_cache_shadow_vmcs12()
679 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; in nested_flush_cached_shadow_vmcs12()
695 * In nested virtualization, check if L1 has set
878 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, in nested_vmx_max_atomic_switch_msrs()
879 vmx->nested.msrs.misc_high); in nested_vmx_max_atomic_switch_msrs()
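
vmx_control_msr() above simply recombines the split low/high halves of a VMX capability MSR into one 64-bit value; the inverse split shows up further down in vmx_restore_vmx_misc() (misc_low = data; misc_high = data >> 32). A minimal stand-alone illustration of that packing (helper names below are made up; only the semantics are taken from the matches):

    #include <stdint.h>

    /* Recombine the two 32-bit halves of a VMX capability MSR. */
    static inline uint64_t pack_control_msr(uint32_t low, uint32_t high)
    {
            return (uint64_t)low | ((uint64_t)high << 32);
    }

    /* The inverse split used on restore: low = data, high = data >> 32. */
    static inline void split_control_msr(uint64_t data, uint32_t *low, uint32_t *high)
    {
            *low  = (uint32_t)data;
            *high = (uint32_t)(data >> 32);
    }
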
885 * Load guest's/host's msr at nested entry/exit.
1065 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
1112 * while L2 entries are tagged with vmx->nested.vpid02).
1119 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02); in nested_has_guest_tlb_tag()
1135 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use. in nested_vmx_transition_tlb_flush()
1157 if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) { in nested_vmx_transition_tlb_flush()
1158 vmx->nested.last_vpid = vmcs12->virtual_processor_id; in nested_vmx_transition_tlb_flush()
1188 u64 vmx_basic = vmcs_config.nested.basic; in vmx_restore_vmx_basic()
1207 vmx->nested.msrs.basic = data; in vmx_restore_vmx_basic()
1246 vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp); in vmx_restore_control_msr()
1258 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp); in vmx_restore_control_msr()
1272 u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low, in vmx_restore_vmx_misc()
1273 vmcs_config.nested.misc_high); in vmx_restore_vmx_misc()
1278 if ((vmx->nested.msrs.pinbased_ctls_high & in vmx_restore_vmx_misc()
1293 vmx->nested.msrs.misc_low = data; in vmx_restore_vmx_misc()
1294 vmx->nested.msrs.misc_high = data >> 32; in vmx_restore_vmx_misc()
1301 u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps, in vmx_restore_vmx_ept_vpid_cap()
1302 vmcs_config.nested.vpid_caps); in vmx_restore_vmx_ept_vpid_cap()
1308 vmx->nested.msrs.ept_caps = data; in vmx_restore_vmx_ept_vpid_cap()
1309 vmx->nested.msrs.vpid_caps = data >> 32; in vmx_restore_vmx_ept_vpid_cap()
1327 const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index); in vmx_restore_fixed0_msr()
1336 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data; in vmx_restore_fixed0_msr()
1353 if (vmx->nested.vmxon) in vmx_set_vmx_msr()
1394 vmx->nested.msrs.vmcs_enum = data; in vmx_set_vmx_msr()
1397 if (data & ~vmcs_config.nested.vmfunc_controls) in vmx_set_vmx_msr()
1399 vmx->nested.msrs.vmfunc_controls = data; in vmx_set_vmx_msr()
1558 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_enlightened_to_vmcs12()
1559 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; in copy_enlightened_to_vmcs12()
1794 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_vmcs12_to_enlightened()
1795 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; in copy_vmcs12_to_enlightened()
1967 * This is an equivalent of the nested hypervisor executing the vmptrld
1985 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { in nested_vmx_handle_enlightened_vmptrld()
1986 vmx->nested.current_vmptr = INVALID_GPA; in nested_vmx_handle_enlightened_vmptrld()
1991 &vmx->nested.hv_evmcs_map)) in nested_vmx_handle_enlightened_vmptrld()
1994 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva; in nested_vmx_handle_enlightened_vmptrld()
2018 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && in nested_vmx_handle_enlightened_vmptrld()
2019 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { in nested_vmx_handle_enlightened_vmptrld()
2024 vmx->nested.hv_evmcs_vmptr = evmcs_gpa; in nested_vmx_handle_enlightened_vmptrld()
2046 vmx->nested.hv_evmcs->hv_clean_fields &= in nested_vmx_handle_enlightened_vmptrld()
2049 vmx->nested.force_msr_bitmap_recalc = true; in nested_vmx_handle_enlightened_vmptrld()
2059 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in nested_sync_vmcs12_to_shadow()
2064 vmx->nested.need_vmcs12_to_shadow_sync = false; in nested_sync_vmcs12_to_shadow()
2070 container_of(timer, struct vcpu_vmx, nested.preemption_timer); in vmx_preemption_timer_fn()
2072 vmx->nested.preemption_timer_expired = true; in vmx_preemption_timer_fn()
2087 if (!vmx->nested.has_preemption_timer_deadline) { in vmx_calc_preemption_timer_value()
2088 vmx->nested.preemption_timer_deadline = in vmx_calc_preemption_timer_value()
2090 vmx->nested.has_preemption_timer_deadline = true; in vmx_calc_preemption_timer_value()
2092 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; in vmx_calc_preemption_timer_value()
2105 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); in vmx_start_preemption_timer()
2115 hrtimer_start(&vmx->nested.preemption_timer, in vmx_start_preemption_timer()
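
The preemption-timer matches (vmx_preemption_timer_fn / vmx_start_preemption_timer) show the usual embedded-timer pattern: the callback only receives a pointer to the hrtimer member and recovers the owning vcpu_vmx with container_of() before flagging expiry. A self-contained sketch of that idiom with illustrative types (not the KVM ones):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct timer { int armed; };

    struct vcpu {
            int id;
            struct timer preemption_timer;      /* embedded, like nested.preemption_timer */
            int preemption_timer_expired;
    };

    /* Callback receives only the embedded member, then recovers the container. */
    static void timer_fired(struct timer *t)
    {
            struct vcpu *v = container_of(t, struct vcpu, preemption_timer);
            v->preemption_timer_expired = 1;
    }

    int main(void)
    {
            struct vcpu v = { .id = 0 };
            timer_fired(&v.preemption_timer);
            printf("expired=%d\n", v.preemption_timer_expired);
            return 0;
    }
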
2122 if (vmx->nested.nested_run_pending && in nested_vmx_calc_efer()
2141 if (vmx->nested.vmcs02_initialized) in prepare_vmcs02_constant_state()
2143 vmx->nested.vmcs02_initialized = true; in prepare_vmcs02_constant_state()
2162 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); in prepare_vmcs02_constant_state()
2200 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) in prepare_vmcs02_early_rare()
2201 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); in prepare_vmcs02_early_rare()
2213 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in prepare_vmcs02_early()
2224 vmx->nested.pi_pending = false; in prepare_vmcs02_early()
2226 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; in prepare_vmcs02_early()
2240 vmx->nested.l1_tpr_threshold = -1; in prepare_vmcs02_early()
2357 if (vmx->nested.nested_run_pending) { in prepare_vmcs02_early()
2375 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; in prepare_vmcs02_rare()
2438 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && in prepare_vmcs02_rare()
2492 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2509 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { in prepare_vmcs02()
2511 vmx->nested.dirty_vmcs12 = false; in prepare_vmcs02()
2513 load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) || in prepare_vmcs02()
2514 !(vmx->nested.hv_evmcs->hv_clean_fields & in prepare_vmcs02()
2518 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2524 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl); in prepare_vmcs02()
2526 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || in prepare_vmcs02()
2528 vmcs_write64(GUEST_BNDCFGS, vmx->nested.pre_vmenter_bndcfgs); in prepare_vmcs02()
2539 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2589 * loading nested state after migration, it is possible to in prepare_vmcs02()
2605 * on nested VM-Exit, which can occur without actually running L2 and in prepare_vmcs02()
2639 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in prepare_vmcs02()
2640 vmx->nested.hv_evmcs->hv_clean_fields |= in prepare_vmcs02()
2666 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) in nested_vmx_check_eptp()
2670 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) in nested_vmx_check_eptp()
2680 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) in nested_vmx_check_eptp()
2684 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) in nested_vmx_check_eptp()
2697 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) in nested_vmx_check_eptp()
2713 vmx->nested.msrs.pinbased_ctls_low, in nested_check_vm_execution_controls()
2714 vmx->nested.msrs.pinbased_ctls_high)) || in nested_check_vm_execution_controls()
2716 vmx->nested.msrs.procbased_ctls_low, in nested_check_vm_execution_controls()
2717 vmx->nested.msrs.procbased_ctls_high))) in nested_check_vm_execution_controls()
2722 vmx->nested.msrs.secondary_ctls_low, in nested_check_vm_execution_controls()
2723 vmx->nested.msrs.secondary_ctls_high))) in nested_check_vm_execution_controls()
2750 ~vmx->nested.msrs.vmfunc_controls)) in nested_check_vm_execution_controls()
2772 vmx->nested.msrs.exit_ctls_low, in nested_check_vm_exit_controls()
2773 vmx->nested.msrs.exit_ctls_high)) || in nested_check_vm_exit_controls()
2789 vmx->nested.msrs.entry_ctls_low, in nested_check_vm_entry_controls()
2790 vmx->nested.msrs.entry_ctls_high))) in nested_check_vm_entry_controls()
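
The (..._ctls_low, ..._ctls_high) pairs consumed by the VM-execution/exit/entry control checks above encode the architectural allowed-0/allowed-1 settings of each control MSR: the low half lists bits the control must have set, the high half lists bits it may set. A minimal illustrative re-implementation of that rule (nested.c has its own helper for this; the version below is only to show the check):

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * 'low'  = bits that must be 1 in the control,
     * 'high' = bits that are allowed to be 1.
     */
    static bool control_ok(uint32_t control, uint32_t low, uint32_t high)
    {
            return (control & low) == low &&    /* every required bit is set */
                   (control & ~high) == 0;      /* no disallowed bit is set  */
    }
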
2959 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache; in nested_vmx_check_vmcs_link_ptr()
3037 if (to_vmx(vcpu)->nested.nested_run_pending && in nested_vmx_check_guest_state()
3146 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) { in nested_get_evmcs_page()
3158 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_get_evmcs_page()
3174 * the guest CR3 might be restored prior to setting the nested in nested_get_vmcs12_pages()
3183 map = &vmx->nested.apic_access_page_map; in nested_get_vmcs12_pages()
3199 map = &vmx->nested.virtual_apic_map; in nested_get_vmcs12_pages()
3225 map = &vmx->nested.pi_desc_map; in nested_get_vmcs12_pages()
3228 vmx->nested.pi_desc = in nested_get_vmcs12_pages()
3240 vmx->nested.pi_desc = NULL; in nested_get_vmcs12_pages()
3280 if (WARN_ON_ONCE(vmx->nested.pml_full)) in nested_vmx_write_pml_buffer()
3284 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is in nested_vmx_write_pml_buffer()
3292 vmx->nested.pml_full = true; in nested_vmx_write_pml_buffer()
3317 if (!to_vmx(vcpu)->nested.vmxon) { in nested_vmx_check_permission()
3365 vmx->nested.current_vmptr, in nested_vmx_enter_non_root_mode()
3383 if (!vmx->nested.nested_run_pending || in nested_vmx_enter_non_root_mode()
3385 vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); in nested_vmx_enter_non_root_mode()
3387 (!vmx->nested.nested_run_pending || in nested_vmx_enter_non_root_mode()
3389 vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS); in nested_vmx_enter_non_root_mode()
3393 * nested early checks are disabled. In the event of a "late" VM-Fail, in nested_vmx_enter_non_root_mode()
3399 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested in nested_vmx_enter_non_root_mode()
3402 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as in nested_vmx_enter_non_root_mode()
3410 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); in nested_vmx_enter_non_root_mode()
3475 vmx->nested.preemption_timer_expired = false; in nested_vmx_enter_non_root_mode()
3507 if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in nested_vmx_enter_non_root_mode()
3508 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_enter_non_root_mode()
3513 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3514 * for running an L2 nested guest.
3538 if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) && in nested_vmx_run()
3539 vmx->nested.current_vmptr == INVALID_GPA)) in nested_vmx_run()
3553 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { in nested_vmx_run()
3554 copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields); in nested_vmx_run()
3562 * The nested entry process starts with enforcing various prerequisites in nested_vmx_run()
3590 * the nested entry. in nested_vmx_run()
3592 vmx->nested.nested_run_pending = 1; in nested_vmx_run()
3593 vmx->nested.has_preemption_timer_deadline = false; in nested_vmx_run()
3600 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) { in nested_vmx_run()
3601 vmx->nested.pi_pending = true; in nested_vmx_run()
3603 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv); in nested_vmx_run()
3606 /* Hide L1D cache contents from the nested guest. */ in nested_vmx_run()
3632 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3637 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3647 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3657 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3788 if (!vmx->nested.pi_pending) in vmx_complete_nested_posted_interrupt()
3791 if (!vmx->nested.pi_desc) in vmx_complete_nested_posted_interrupt()
3794 vmx->nested.pi_pending = false; in vmx_complete_nested_posted_interrupt()
3796 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) in vmx_complete_nested_posted_interrupt()
3799 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); in vmx_complete_nested_posted_interrupt()
3801 vapic_page = vmx->nested.virtual_apic_map.hva; in vmx_complete_nested_posted_interrupt()
3805 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, in vmx_complete_nested_posted_interrupt()
3847 * hardware and avoid inducing failure on nested VM-Entry if L1 in nested_vmx_inject_exception_vmexit()
3923 to_vmx(vcpu)->nested.preemption_timer_expired; in nested_vmx_preemption_timer_pending()
3929 to_vmx(vcpu)->nested.mtf_pending; in vmx_has_nested_events()
4020 * Only a pending nested run blocks a pending exception. If there is a in vmx_check_nested_events()
4024 bool block_nested_exceptions = vmx->nested.nested_run_pending; in vmx_check_nested_events()
4044 vmx->nested.mtf_pending = false; in vmx_check_nested_events()
4088 if (vmx->nested.mtf_pending) { in vmx_check_nested_events()
4157 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer); in vmx_get_preemption_timer_value()
4257 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; in sync_vmcs02_to_vmcs12_rare()
4266 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) in copy_vmcs02_to_vmcs12_rare()
4273 vmx->loaded_vmcs = &vmx->nested.vmcs02; in copy_vmcs02_to_vmcs12_rare()
4279 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02); in copy_vmcs02_to_vmcs12_rare()
4293 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in sync_vmcs02_to_vmcs12()
4296 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = in sync_vmcs02_to_vmcs12()
4297 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr); in sync_vmcs02_to_vmcs12()
4321 !vmx->nested.nested_run_pending) in sync_vmcs02_to_vmcs12()
4326 * In some cases (usually, nested EPT), L2 is allowed to change its in sync_vmcs02_to_vmcs12()
4360 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
4418 * A part of what we need to when the nested L2 guest exits and we want to
4421 * This function is to be called not only on normal nested exit, but also on
4422 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4589 * nested VMENTER (not worth adding a variable in nested_vmx). in nested_vmx_restore_host_state()
4627 * of VMFail), leaving the nested VM's MSRs in the software model in nested_vmx_restore_host_state()
4630 * MSR that was (prematurely) loaded from the nested VMEntry load in nested_vmx_restore_host_state()
4680 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
4691 vmx->nested.mtf_pending = false; in nested_vmx_vmexit()
4694 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_vmexit()
4720 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer); in nested_vmx_vmexit()
4777 if (vmx->nested.l1_tpr_threshold != -1) in nested_vmx_vmexit()
4778 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); in nested_vmx_vmexit()
4780 if (vmx->nested.change_vmcs01_virtual_apic_mode) { in nested_vmx_vmexit()
4781 vmx->nested.change_vmcs01_virtual_apic_mode = false; in nested_vmx_vmexit()
4785 if (vmx->nested.update_vmcs01_cpu_dirty_logging) { in nested_vmx_vmexit()
4786 vmx->nested.update_vmcs01_cpu_dirty_logging = false; in nested_vmx_vmexit()
4791 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false); in nested_vmx_vmexit()
4792 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); in nested_vmx_vmexit()
4793 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); in nested_vmx_vmexit()
4794 vmx->nested.pi_desc = NULL; in nested_vmx_vmexit()
4796 if (vmx->nested.reload_vmcs01_apic_access_page) { in nested_vmx_vmexit()
4797 vmx->nested.reload_vmcs01_apic_access_page = false; in nested_vmx_vmexit()
4801 if (vmx->nested.update_vmcs01_apicv_status) { in nested_vmx_vmexit()
4802 vmx->nested.update_vmcs01_apicv_status = false; in nested_vmx_vmexit()
4807 (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))) in nested_vmx_vmexit()
4808 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_vmexit()
5026 * when L1 executes VMXOFF or the vCPU is forced out of nested in alloc_shadow_vmcs()
5047 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
5051 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
5052 if (!vmx->nested.cached_vmcs12) in enter_vmx_operation()
5055 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA; in enter_vmx_operation()
5056 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
5057 if (!vmx->nested.cached_shadow_vmcs12) in enter_vmx_operation()
5063 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, in enter_vmx_operation()
5065 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; in enter_vmx_operation()
5067 vmx->nested.vpid02 = allocate_vpid(); in enter_vmx_operation()
5069 vmx->nested.vmcs02_initialized = false; in enter_vmx_operation()
5070 vmx->nested.vmxon = true; in enter_vmx_operation()
5080 kfree(vmx->nested.cached_shadow_vmcs12); in enter_vmx_operation()
5083 kfree(vmx->nested.cached_vmcs12); in enter_vmx_operation()
5086 free_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
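
The enter_vmx_operation() matches above end with its error-unwind path: allocations made on the way in (vmcs02, cached_vmcs12, cached_shadow_vmcs12) are released in reverse order when a later step fails. A generic, self-contained sketch of that goto-unwind allocation pattern (names and resources here are illustrative, not the KVM ones):

    #include <stdlib.h>

    struct state { void *a, *b, *c; };

    static int setup(struct state *s)
    {
            s->a = malloc(64);
            if (!s->a)
                    goto out;
            s->b = malloc(64);
            if (!s->b)
                    goto out_free_a;
            s->c = malloc(64);
            if (!s->c)
                    goto out_free_b;
            return 0;

    out_free_b:
            free(s->b);
    out_free_a:
            free(s->a);
    out:
            return -1;      /* the kernel code returns -ENOMEM here */
    }
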
5127 if (vmx->nested.vmxon) in handle_vmxon()
5144 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case; in handle_vmxon()
5154 vmx->nested.vmxon_ptr = vmptr; in handle_vmxon()
5166 if (vmx->nested.current_vmptr == INVALID_GPA) in nested_release_vmcs12()
5177 vmx->nested.posted_intr_nv = -1; in nested_release_vmcs12()
5181 vmx->nested.current_vmptr >> PAGE_SHIFT, in nested_release_vmcs12()
5182 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); in nested_release_vmcs12()
5186 vmx->nested.current_vmptr = INVALID_GPA; in nested_release_vmcs12()
5221 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmclear()
5232 * vmx->nested.hv_evmcs but this shouldn't be a problem. in handle_vmclear()
5236 if (vmptr == vmx->nested.current_vmptr) in handle_vmclear()
5243 } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) { in handle_vmclear()
5283 if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) { in handle_vmread()
5288 if (vmx->nested.current_vmptr == INVALID_GPA || in handle_vmread()
5321 value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset); in handle_vmread()
5398 if (vmx->nested.current_vmptr == INVALID_GPA || in handle_vmwrite()
5470 vmx->nested.dirty_vmcs12 = true; in handle_vmwrite()
5478 vmx->nested.current_vmptr = vmptr; in set_current_vmptr()
5483 vmx->nested.need_vmcs12_to_shadow_sync = true; in set_current_vmptr()
5485 vmx->nested.dirty_vmcs12 = true; in set_current_vmptr()
5486 vmx->nested.force_msr_bitmap_recalc = true; in set_current_vmptr()
5505 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmptrld()
5509 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in handle_vmptrld()
5512 if (vmx->nested.current_vmptr != vmptr) { in handle_vmptrld()
5513 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache; in handle_vmptrld()
5547 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12, in handle_vmptrld()
5564 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr; in handle_vmptrst()
5572 if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr))) in handle_vmptrst()
5601 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invept()
5603 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { in handle_invept()
5615 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; in handle_invept()
5631 * Nested EPT roots are always held through guest_mmu, in handle_invept()
5682 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invvpid()
5684 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { in handle_invvpid()
5696 types = (vmx->nested.msrs.vpid_caps & in handle_invvpid()
5797 * VMFUNC is only supported for nested guests, but we always enable the in handle_vmfunc()
5798 * secondary control for simplicity; for non-nested mode, fake that we in handle_vmfunc()
5833 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode in handle_vmfunc()
6101 * L0 always deals with the EPT violation. If nested EPT is in nested_vmx_l0_wants_exit()
6102 * used, and the nested mmu code discovers that the address is in nested_vmx_l0_wants_exit()
6276 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_reflect_vmexit()
6279 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM in nested_vmx_reflect_vmexit()
6346 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { in vmx_get_nested_state()
6347 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; in vmx_get_nested_state()
6348 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; in vmx_get_nested_state()
6354 if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID) in vmx_get_nested_state()
6363 if (vmx->nested.smm.vmxon) in vmx_get_nested_state()
6366 if (vmx->nested.smm.guest_mode) in vmx_get_nested_state()
6372 if (vmx->nested.nested_run_pending) in vmx_get_nested_state()
6375 if (vmx->nested.mtf_pending) in vmx_get_nested_state()
6379 vmx->nested.has_preemption_timer_deadline) { in vmx_get_nested_state()
6383 vmx->nested.preemption_timer_deadline; in vmx_get_nested_state()
6409 if (!vmx->nested.need_vmcs12_to_shadow_sync) { in vmx_get_nested_state()
6410 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) in vmx_get_nested_state()
6447 to_vmx(vcpu)->nested.nested_run_pending = 0; in vmx_leave_nested()
6520 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled)) in vmx_set_nested_state()
6528 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; in vmx_set_nested_state()
6557 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING; in vmx_set_nested_state()
6564 vmx->nested.smm.vmxon = true; in vmx_set_nested_state()
6565 vmx->nested.vmxon = false; in vmx_set_nested_state()
6568 vmx->nested.smm.guest_mode = true; in vmx_set_nested_state()
6581 vmx->nested.nested_run_pending = in vmx_set_nested_state()
6584 vmx->nested.mtf_pending = in vmx_set_nested_state()
6609 vmx->nested.has_preemption_timer_deadline = false; in vmx_set_nested_state()
6611 vmx->nested.has_preemption_timer_deadline = true; in vmx_set_nested_state()
6612 vmx->nested.preemption_timer_deadline = in vmx_set_nested_state()
6621 vmx->nested.dirty_vmcs12 = true; in vmx_set_nested_state()
6622 vmx->nested.force_msr_bitmap_recalc = true; in vmx_set_nested_state()
6627 if (vmx->nested.mtf_pending) in vmx_set_nested_state()
6633 vmx->nested.nested_run_pending = 0; in vmx_set_nested_state()
6681 * returned for the various VMX controls MSRs when nested VMX is enabled.
6683 * valid during nested entry from L1 to L2.
6691 struct nested_vmx_msrs *msrs = &vmcs_conf->nested; in nested_vmx_setup_ctls_msrs()
6821 /* nested EPT: emulate EPT also to L1 */ in nested_vmx_setup_ctls_msrs()