Lines matching refs: vmx
(Each entry shows the source line number, the matching line, and the enclosing function; "local" marks a line declaring vmx as a local variable, "argument" a line where vmx is a function parameter.)
188 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_fail() local
194 if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs) in nested_vmx_fail()
217 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) in vmx_disable_shadow_vmcs() argument
219 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); in vmx_disable_shadow_vmcs()
221 vmx->nested.need_vmcs12_to_shadow_sync = false; in vmx_disable_shadow_vmcs()
226 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_release_evmcs() local
228 if (!vmx->nested.hv_evmcs) in nested_release_evmcs()
231 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true); in nested_release_evmcs()
232 vmx->nested.hv_evmcs_vmptr = 0; in nested_release_evmcs()
233 vmx->nested.hv_evmcs = NULL; in nested_release_evmcs()
236 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx, in vmx_sync_vmcs_host_state() argument
241 if (unlikely(!vmx->guest_state_loaded)) in vmx_sync_vmcs_host_state()
245 dest = &vmx->loaded_vmcs->host_state; in vmx_sync_vmcs_host_state()
257 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_switch_vmcs() local
261 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs)) in vmx_switch_vmcs()
265 prev = vmx->loaded_vmcs; in vmx_switch_vmcs()
266 vmx->loaded_vmcs = vmcs; in vmx_switch_vmcs()
268 vmx_sync_vmcs_host_state(vmx, prev); in vmx_switch_vmcs()
280 struct vcpu_vmx *vmx = to_vmx(vcpu); in free_nested() local
282 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01)) in free_nested()
283 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in free_nested()
285 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) in free_nested()
290 vmx->nested.vmxon = false; in free_nested()
291 vmx->nested.smm.vmxon = false; in free_nested()
292 free_vpid(vmx->nested.vpid02); in free_nested()
293 vmx->nested.posted_intr_nv = -1; in free_nested()
294 vmx->nested.current_vmptr = -1ull; in free_nested()
296 vmx_disable_shadow_vmcs(vmx); in free_nested()
297 vmcs_clear(vmx->vmcs01.shadow_vmcs); in free_nested()
298 free_vmcs(vmx->vmcs01.shadow_vmcs); in free_nested()
299 vmx->vmcs01.shadow_vmcs = NULL; in free_nested()
301 kfree(vmx->nested.cached_vmcs12); in free_nested()
302 vmx->nested.cached_vmcs12 = NULL; in free_nested()
303 kfree(vmx->nested.cached_shadow_vmcs12); in free_nested()
304 vmx->nested.cached_shadow_vmcs12 = NULL; in free_nested()
306 if (vmx->nested.apic_access_page) { in free_nested()
307 kvm_release_page_clean(vmx->nested.apic_access_page); in free_nested()
308 vmx->nested.apic_access_page = NULL; in free_nested()
310 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); in free_nested()
311 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); in free_nested()
312 vmx->nested.pi_desc = NULL; in free_nested()
318 free_loaded_vmcs(&vmx->nested.vmcs02); in free_nested()
336 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_ept_inject_page_fault() local
340 if (vmx->nested.pml_full) { in nested_ept_inject_page_fault()
342 vmx->nested.pml_full = false; in nested_ept_inject_page_fault()
682 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_flush_cached_shadow_vmcs12() local
688 kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer, in nested_flush_cached_shadow_vmcs12()
878 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_max_atomic_switch_msrs() local
879 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, in nested_vmx_max_atomic_switch_msrs()
880 vmx->nested.msrs.misc_high); in nested_vmx_max_atomic_switch_msrs()
934 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_get_vmexit_msr_value() local
942 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest, in nested_vmx_get_vmexit_msr_value()
946 u64 val = vmx->msr_autostore.guest.val[i].value; in nested_vmx_get_vmexit_msr_value()
1032 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmx_msr_autostore_list() local
1033 struct vmx_msrs *autostore = &vmx->msr_autostore.guest; in prepare_vmx_msr_autostore_list()
1183 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_transition_tlb_flush() local
1217 vmcs12->virtual_processor_id != vmx->nested.last_vpid) { in nested_vmx_transition_tlb_flush()
1218 vmx->nested.last_vpid = vmcs12->virtual_processor_id; in nested_vmx_transition_tlb_flush()
1231 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) in vmx_restore_vmx_basic() argument
1238 u64 vmx_basic = vmx->nested.msrs.basic; in vmx_restore_vmx_basic()
1257 vmx->nested.msrs.basic = data; in vmx_restore_vmx_basic()
1262 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) in vmx_restore_control_msr() argument
1269 lowp = &vmx->nested.msrs.pinbased_ctls_low; in vmx_restore_control_msr()
1270 highp = &vmx->nested.msrs.pinbased_ctls_high; in vmx_restore_control_msr()
1273 lowp = &vmx->nested.msrs.procbased_ctls_low; in vmx_restore_control_msr()
1274 highp = &vmx->nested.msrs.procbased_ctls_high; in vmx_restore_control_msr()
1277 lowp = &vmx->nested.msrs.exit_ctls_low; in vmx_restore_control_msr()
1278 highp = &vmx->nested.msrs.exit_ctls_high; in vmx_restore_control_msr()
1281 lowp = &vmx->nested.msrs.entry_ctls_low; in vmx_restore_control_msr()
1282 highp = &vmx->nested.msrs.entry_ctls_high; in vmx_restore_control_msr()
1285 lowp = &vmx->nested.msrs.secondary_ctls_low; in vmx_restore_control_msr()
1286 highp = &vmx->nested.msrs.secondary_ctls_high; in vmx_restore_control_msr()
1307 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) in vmx_restore_vmx_misc() argument
1317 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, in vmx_restore_vmx_misc()
1318 vmx->nested.msrs.misc_high); in vmx_restore_vmx_misc()
1323 if ((vmx->nested.msrs.pinbased_ctls_high & in vmx_restore_vmx_misc()
1338 vmx->nested.msrs.misc_low = data; in vmx_restore_vmx_misc()
1339 vmx->nested.msrs.misc_high = data >> 32; in vmx_restore_vmx_misc()
1344 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) in vmx_restore_vmx_ept_vpid_cap() argument
1348 vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps, in vmx_restore_vmx_ept_vpid_cap()
1349 vmx->nested.msrs.vpid_caps); in vmx_restore_vmx_ept_vpid_cap()
1355 vmx->nested.msrs.ept_caps = data; in vmx_restore_vmx_ept_vpid_cap()
1356 vmx->nested.msrs.vpid_caps = data >> 32; in vmx_restore_vmx_ept_vpid_cap()
1360 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) in vmx_restore_fixed0_msr() argument
1366 msr = &vmx->nested.msrs.cr0_fixed0; in vmx_restore_fixed0_msr()
1369 msr = &vmx->nested.msrs.cr4_fixed0; in vmx_restore_fixed0_msr()
1393 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_vmx_msr() local
1399 if (vmx->nested.vmxon) in vmx_set_vmx_msr()
1404 return vmx_restore_vmx_basic(vmx, data); in vmx_set_vmx_msr()
1424 return vmx_restore_control_msr(vmx, msr_index, data); in vmx_set_vmx_msr()
1426 return vmx_restore_vmx_misc(vmx, data); in vmx_set_vmx_msr()
1429 return vmx_restore_fixed0_msr(vmx, msr_index, data); in vmx_set_vmx_msr()
1438 return vmx_restore_vmx_ept_vpid_cap(vmx, data); in vmx_set_vmx_msr()
1440 vmx->nested.msrs.vmcs_enum = data; in vmx_set_vmx_msr()
1443 if (data & ~vmx->nested.msrs.vmfunc_controls) in vmx_set_vmx_msr()
1445 vmx->nested.msrs.vmfunc_controls = data; in vmx_set_vmx_msr()
1541 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) in copy_shadow_to_vmcs12() argument
1543 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; in copy_shadow_to_vmcs12()
1544 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); in copy_shadow_to_vmcs12()
1563 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_shadow_to_vmcs12()
1568 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) in copy_vmcs12_to_shadow() argument
1578 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; in copy_vmcs12_to_shadow()
1579 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu); in copy_vmcs12_to_shadow()
1599 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_vmcs12_to_shadow()
1602 static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx) in copy_enlightened_to_vmcs12() argument
1604 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_enlightened_to_vmcs12()
1605 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; in copy_enlightened_to_vmcs12()
1818 static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx) in copy_vmcs12_to_enlightened() argument
1820 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; in copy_vmcs12_to_enlightened()
1821 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; in copy_vmcs12_to_enlightened()
1988 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_handle_enlightened_vmptrld() local
1992 if (likely(!vmx->nested.enlightened_vmcs_enabled)) in nested_vmx_handle_enlightened_vmptrld()
1998 if (unlikely(!vmx->nested.hv_evmcs || in nested_vmx_handle_enlightened_vmptrld()
1999 evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) { in nested_vmx_handle_enlightened_vmptrld()
2000 if (!vmx->nested.hv_evmcs) in nested_vmx_handle_enlightened_vmptrld()
2001 vmx->nested.current_vmptr = -1ull; in nested_vmx_handle_enlightened_vmptrld()
2006 &vmx->nested.hv_evmcs_map)) in nested_vmx_handle_enlightened_vmptrld()
2009 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva; in nested_vmx_handle_enlightened_vmptrld()
2033 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && in nested_vmx_handle_enlightened_vmptrld()
2034 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { in nested_vmx_handle_enlightened_vmptrld()
2039 vmx->nested.dirty_vmcs12 = true; in nested_vmx_handle_enlightened_vmptrld()
2040 vmx->nested.hv_evmcs_vmptr = evmcs_gpa; in nested_vmx_handle_enlightened_vmptrld()
2062 vmx->nested.hv_evmcs->hv_clean_fields &= in nested_vmx_handle_enlightened_vmptrld()
2070 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_sync_vmcs12_to_shadow() local
2072 if (vmx->nested.hv_evmcs) { in nested_sync_vmcs12_to_shadow()
2073 copy_vmcs12_to_enlightened(vmx); in nested_sync_vmcs12_to_shadow()
2075 vmx->nested.hv_evmcs->hv_clean_fields |= in nested_sync_vmcs12_to_shadow()
2078 copy_vmcs12_to_shadow(vmx); in nested_sync_vmcs12_to_shadow()
2081 vmx->nested.need_vmcs12_to_shadow_sync = false; in nested_sync_vmcs12_to_shadow()
2086 struct vcpu_vmx *vmx = in vmx_preemption_timer_fn() local
2089 vmx->nested.preemption_timer_expired = true; in vmx_preemption_timer_fn()
2090 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); in vmx_preemption_timer_fn()
2091 kvm_vcpu_kick(&vmx->vcpu); in vmx_preemption_timer_fn()
2098 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_calc_preemption_timer_value() local
2104 if (!vmx->nested.has_preemption_timer_deadline) { in vmx_calc_preemption_timer_value()
2105 vmx->nested.preemption_timer_deadline = in vmx_calc_preemption_timer_value()
2107 vmx->nested.has_preemption_timer_deadline = true; in vmx_calc_preemption_timer_value()
2109 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc; in vmx_calc_preemption_timer_value()
2115 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_start_preemption_timer() local
2122 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); in vmx_start_preemption_timer()
2132 hrtimer_start(&vmx->nested.preemption_timer, in vmx_start_preemption_timer()
2137 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) in nested_vmx_calc_efer() argument
2139 if (vmx->nested.nested_run_pending && in nested_vmx_calc_efer()
2143 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME); in nested_vmx_calc_efer()
2145 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME); in nested_vmx_calc_efer()
2148 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx) in prepare_vmcs02_constant_state() argument
2156 if (vmx->nested.vmcs02_initialized) in prepare_vmcs02_constant_state()
2158 vmx->nested.vmcs02_initialized = true; in prepare_vmcs02_constant_state()
2167 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL)); in prepare_vmcs02_constant_state()
2177 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); in prepare_vmcs02_constant_state()
2187 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); in prepare_vmcs02_constant_state()
2199 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val)); in prepare_vmcs02_constant_state()
2200 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); in prepare_vmcs02_constant_state()
2201 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); in prepare_vmcs02_constant_state()
2203 vmx_set_constant_host_state(vmx); in prepare_vmcs02_constant_state()
2206 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx, in prepare_vmcs02_early_rare() argument
2209 prepare_vmcs02_constant_state(vmx); in prepare_vmcs02_early_rare()
2214 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) in prepare_vmcs02_early_rare()
2215 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); in prepare_vmcs02_early_rare()
2217 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in prepare_vmcs02_early_rare()
2221 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) in prepare_vmcs02_early() argument
2224 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12); in prepare_vmcs02_early()
2226 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs) in prepare_vmcs02_early()
2227 prepare_vmcs02_early_rare(vmx, vmcs12); in prepare_vmcs02_early()
2232 exec_control = vmx_pin_based_exec_ctrl(vmx); in prepare_vmcs02_early()
2238 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; in prepare_vmcs02_early()
2239 vmx->nested.pi_pending = false; in prepare_vmcs02_early()
2243 pin_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2248 exec_control = vmx_exec_control(vmx); /* L0's desires */ in prepare_vmcs02_early()
2254 vmx->nested.l1_tpr_threshold = -1; in prepare_vmcs02_early()
2277 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS; in prepare_vmcs02_early()
2279 exec_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2285 exec_control = vmx->secondary_exec_control; in prepare_vmcs02_early()
2321 secondary_exec_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2340 vm_entry_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2352 vm_exit_controls_set(vmx, exec_control); in prepare_vmcs02_early()
2357 if (vmx->nested.nested_run_pending) { in prepare_vmcs02_early()
2366 vmx->loaded_vmcs->nmi_known_unmasked = in prepare_vmcs02_early()
2373 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12) in prepare_vmcs02_rare() argument
2375 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; in prepare_vmcs02_rare()
2416 vmx->segment_cache.bitmask = 0; in prepare_vmcs02_rare()
2438 if (kvm_mpx_supported() && vmx->nested.nested_run_pending && in prepare_vmcs02_rare()
2459 if (vmx_need_pf_intercept(&vmx->vcpu)) { in prepare_vmcs02_rare()
2482 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC); in prepare_vmcs02_rare()
2484 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr); in prepare_vmcs02_rare()
2485 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in prepare_vmcs02_rare()
2486 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in prepare_vmcs02_rare()
2488 set_cr4_guest_host_mask(vmx); in prepare_vmcs02_rare()
2505 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmcs02() local
2506 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs; in prepare_vmcs02()
2509 if (vmx->nested.dirty_vmcs12 || hv_evmcs) { in prepare_vmcs02()
2510 prepare_vmcs02_rare(vmx, vmcs12); in prepare_vmcs02()
2511 vmx->nested.dirty_vmcs12 = false; in prepare_vmcs02()
2518 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2524 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); in prepare_vmcs02()
2526 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending || in prepare_vmcs02()
2528 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); in prepare_vmcs02()
2539 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
2544 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in prepare_vmcs02()
2550 decache_tsc_multiplier(vmx); in prepare_vmcs02()
2571 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12); in prepare_vmcs02()
2637 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_eptp() local
2643 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT))) in nested_vmx_check_eptp()
2647 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT))) in nested_vmx_check_eptp()
2657 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT))) in nested_vmx_check_eptp()
2661 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT))) in nested_vmx_check_eptp()
2674 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT))) in nested_vmx_check_eptp()
2687 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_check_vm_execution_controls() local
2690 vmx->nested.msrs.pinbased_ctls_low, in nested_check_vm_execution_controls()
2691 vmx->nested.msrs.pinbased_ctls_high)) || in nested_check_vm_execution_controls()
2693 vmx->nested.msrs.procbased_ctls_low, in nested_check_vm_execution_controls()
2694 vmx->nested.msrs.procbased_ctls_high))) in nested_check_vm_execution_controls()
2699 vmx->nested.msrs.secondary_ctls_low, in nested_check_vm_execution_controls()
2700 vmx->nested.msrs.secondary_ctls_high))) in nested_check_vm_execution_controls()
2727 ~vmx->nested.msrs.vmfunc_controls)) in nested_check_vm_execution_controls()
2746 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_check_vm_exit_controls() local
2749 vmx->nested.msrs.exit_ctls_low, in nested_check_vm_exit_controls()
2750 vmx->nested.msrs.exit_ctls_high)) || in nested_check_vm_exit_controls()
2763 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_check_vm_entry_controls() local
2766 vmx->nested.msrs.entry_ctls_low, in nested_check_vm_entry_controls()
2767 vmx->nested.msrs.entry_ctls_high))) in nested_check_vm_entry_controls()
3023 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmentry_hw() local
3030 if (vmx->msr_autoload.host.nr) in nested_vmx_check_vmentry_hw()
3032 if (vmx->msr_autoload.guest.nr) in nested_vmx_check_vmentry_hw()
3048 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { in nested_vmx_check_vmentry_hw()
3050 vmx->loaded_vmcs->host_state.cr3 = cr3; in nested_vmx_check_vmentry_hw()
3054 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { in nested_vmx_check_vmentry_hw()
3056 vmx->loaded_vmcs->host_state.cr4 = cr4; in nested_vmx_check_vmentry_hw()
3082 [loaded_vmcs]"r"(vmx->loaded_vmcs), in nested_vmx_check_vmentry_hw()
3089 if (vmx->msr_autoload.host.nr) in nested_vmx_check_vmentry_hw()
3090 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in nested_vmx_check_vmentry_hw()
3091 if (vmx->msr_autoload.guest.nr) in nested_vmx_check_vmentry_hw()
3092 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in nested_vmx_check_vmentry_hw()
3129 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_get_vmcs12_pages() local
3139 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) { in nested_get_vmcs12_pages()
3162 if (vmx->nested.apic_access_page) { /* shouldn't happen */ in nested_get_vmcs12_pages()
3163 kvm_release_page_clean(vmx->nested.apic_access_page); in nested_get_vmcs12_pages()
3164 vmx->nested.apic_access_page = NULL; in nested_get_vmcs12_pages()
3168 vmx->nested.apic_access_page = page; in nested_get_vmcs12_pages()
3169 hpa = page_to_phys(vmx->nested.apic_access_page); in nested_get_vmcs12_pages()
3183 map = &vmx->nested.virtual_apic_map; in nested_get_vmcs12_pages()
3198 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW); in nested_get_vmcs12_pages()
3209 map = &vmx->nested.pi_desc_map; in nested_get_vmcs12_pages()
3212 vmx->nested.pi_desc = in nested_get_vmcs12_pages()
3220 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS); in nested_get_vmcs12_pages()
3222 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS); in nested_get_vmcs12_pages()
3229 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_write_pml_buffer() local
3235 if (WARN_ON_ONCE(vmx->nested.pml_full)) in nested_vmx_write_pml_buffer()
3247 vmx->nested.pml_full = true; in nested_vmx_write_pml_buffer()
3309 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_enter_non_root_mode() local
3318 evaluate_pending_interrupts = exec_controls_get(vmx) & in nested_vmx_enter_non_root_mode()
3324 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); in nested_vmx_enter_non_root_mode()
3327 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); in nested_vmx_enter_non_root_mode()
3348 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); in nested_vmx_enter_non_root_mode()
3350 prepare_vmcs02_early(vmx, vmcs12); in nested_vmx_enter_non_root_mode()
3354 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3359 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3423 vmx->nested.preemption_timer_expired = false; in nested_vmx_enter_non_root_mode()
3448 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_enter_non_root_mode()
3455 if (enable_shadow_vmcs || vmx->nested.hv_evmcs) in nested_vmx_enter_non_root_mode()
3456 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_enter_non_root_mode()
3468 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_run() local
3483 if (CC(!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull)) in nested_vmx_run()
3497 if (vmx->nested.hv_evmcs) { in nested_vmx_run()
3498 copy_enlightened_to_vmcs12(vmx); in nested_vmx_run()
3502 copy_shadow_to_vmcs12(vmx); in nested_vmx_run()
3533 vmx->nested.nested_run_pending = 1; in nested_vmx_run()
3534 vmx->nested.has_preemption_timer_deadline = false; in nested_vmx_run()
3541 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) { in nested_vmx_run()
3542 vmx->nested.pi_pending = true; in nested_vmx_run()
3544 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv); in nested_vmx_run()
3548 vmx->vcpu.arch.l1tf_flush_l1d = true; in nested_vmx_run()
3572 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3578 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
3690 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_complete_nested_posted_interrupt() local
3695 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) in vmx_complete_nested_posted_interrupt()
3698 vmx->nested.pi_pending = false; in vmx_complete_nested_posted_interrupt()
3699 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) in vmx_complete_nested_posted_interrupt()
3702 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); in vmx_complete_nested_posted_interrupt()
3704 vapic_page = vmx->nested.virtual_apic_map.hva; in vmx_complete_nested_posted_interrupt()
3708 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, in vmx_complete_nested_posted_interrupt()
3780 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_check_nested_events() local
3783 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); in vmx_check_nested_events()
3784 bool mtf_pending = vmx->nested.mtf_pending; in vmx_check_nested_events()
3792 vmx->nested.mtf_pending = false; in vmx_check_nested_events()
3942 struct vcpu_vmx *vmx = to_vmx(vcpu); in sync_vmcs02_to_vmcs12_rare() local
3983 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false; in sync_vmcs02_to_vmcs12_rare()
3989 struct vcpu_vmx *vmx = to_vmx(vcpu); in copy_vmcs02_to_vmcs12_rare() local
3992 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare) in copy_vmcs02_to_vmcs12_rare()
3996 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01); in copy_vmcs02_to_vmcs12_rare()
3999 vmx->loaded_vmcs = &vmx->nested.vmcs02; in copy_vmcs02_to_vmcs12_rare()
4000 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01); in copy_vmcs02_to_vmcs12_rare()
4004 vmx->loaded_vmcs = &vmx->vmcs01; in copy_vmcs02_to_vmcs12_rare()
4005 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02); in copy_vmcs02_to_vmcs12_rare()
4017 struct vcpu_vmx *vmx = to_vmx(vcpu); in sync_vmcs02_to_vmcs12() local
4019 if (vmx->nested.hv_evmcs) in sync_vmcs02_to_vmcs12()
4022 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs; in sync_vmcs02_to_vmcs12()
4044 !vmx->nested.nested_run_pending) in sync_vmcs02_to_vmcs12()
4274 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx) in nested_vmx_get_vmcs01_guest_efer() argument
4279 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER) in nested_vmx_get_vmcs01_guest_efer()
4285 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) { in nested_vmx_get_vmcs01_guest_efer()
4286 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER) in nested_vmx_get_vmcs01_guest_efer()
4287 return vmx->msr_autoload.guest.val[i].value; in nested_vmx_get_vmcs01_guest_efer()
4290 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER); in nested_vmx_get_vmcs01_guest_efer()
4300 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_restore_host_state() local
4324 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx)); in nested_vmx_restore_host_state()
4413 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_vmexit() local
4417 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_vmexit()
4439 if (likely(!vmx->fail)) { in nested_vmx_vmexit()
4468 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_vmexit()
4471 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in nested_vmx_vmexit()
4472 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in nested_vmx_vmexit()
4474 if (vmx->nested.l1_tpr_threshold != -1) in nested_vmx_vmexit()
4475 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold); in nested_vmx_vmexit()
4478 decache_tsc_multiplier(vmx); in nested_vmx_vmexit()
4480 if (vmx->nested.change_vmcs01_virtual_apic_mode) { in nested_vmx_vmexit()
4481 vmx->nested.change_vmcs01_virtual_apic_mode = false; in nested_vmx_vmexit()
4486 if (vmx->nested.apic_access_page) { in nested_vmx_vmexit()
4487 kvm_release_page_clean(vmx->nested.apic_access_page); in nested_vmx_vmexit()
4488 vmx->nested.apic_access_page = NULL; in nested_vmx_vmexit()
4490 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true); in nested_vmx_vmexit()
4491 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true); in nested_vmx_vmexit()
4492 vmx->nested.pi_desc = NULL; in nested_vmx_vmexit()
4494 if (vmx->nested.reload_vmcs01_apic_access_page) { in nested_vmx_vmexit()
4495 vmx->nested.reload_vmcs01_apic_access_page = false; in nested_vmx_vmexit()
4500 (enable_shadow_vmcs || vmx->nested.hv_evmcs)) in nested_vmx_vmexit()
4501 vmx->nested.need_vmcs12_to_shadow_sync = true; in nested_vmx_vmexit()
4506 if (likely(!vmx->fail)) { in nested_vmx_vmexit()
4545 vmx->fail = 0; in nested_vmx_vmexit()
4680 struct vcpu_vmx *vmx; in nested_vmx_pmu_entry_exit_ctls_update() local
4685 vmx = to_vmx(vcpu); in nested_vmx_pmu_entry_exit_ctls_update()
4687 vmx->nested.msrs.entry_ctls_high |= in nested_vmx_pmu_entry_exit_ctls_update()
4689 vmx->nested.msrs.exit_ctls_high |= in nested_vmx_pmu_entry_exit_ctls_update()
4692 vmx->nested.msrs.entry_ctls_high &= in nested_vmx_pmu_entry_exit_ctls_update()
4694 vmx->nested.msrs.exit_ctls_high &= in nested_vmx_pmu_entry_exit_ctls_update()
4729 struct vcpu_vmx *vmx = to_vmx(vcpu); in alloc_shadow_vmcs() local
4730 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; in alloc_shadow_vmcs()
4738 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs); in alloc_shadow_vmcs()
4750 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_vmx_operation() local
4753 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
4757 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
4758 if (!vmx->nested.cached_vmcs12) in enter_vmx_operation()
4761 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT); in enter_vmx_operation()
4762 if (!vmx->nested.cached_shadow_vmcs12) in enter_vmx_operation()
4768 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, in enter_vmx_operation()
4770 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; in enter_vmx_operation()
4772 vmx->nested.vpid02 = allocate_vpid(); in enter_vmx_operation()
4774 vmx->nested.vmcs02_initialized = false; in enter_vmx_operation()
4775 vmx->nested.vmxon = true; in enter_vmx_operation()
4778 vmx->pt_desc.guest.ctl = 0; in enter_vmx_operation()
4785 kfree(vmx->nested.cached_shadow_vmcs12); in enter_vmx_operation()
4788 kfree(vmx->nested.cached_vmcs12); in enter_vmx_operation()
4791 free_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
4810 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmon() local
4834 if (vmx->nested.vmxon) in handle_vmon()
4837 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) in handle_vmon()
4861 vmx->nested.vmxon_ptr = vmptr; in handle_vmon()
4871 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_release_vmcs12() local
4873 if (vmx->nested.current_vmptr == -1ull) in nested_release_vmcs12()
4881 copy_shadow_to_vmcs12(vmx); in nested_release_vmcs12()
4882 vmx_disable_shadow_vmcs(vmx); in nested_release_vmcs12()
4884 vmx->nested.posted_intr_nv = -1; in nested_release_vmcs12()
4888 vmx->nested.current_vmptr >> PAGE_SHIFT, in nested_release_vmcs12()
4889 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); in nested_release_vmcs12()
4893 vmx->nested.current_vmptr = -1ull; in nested_release_vmcs12()
4913 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmclear() local
4928 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmclear()
4941 if (likely(!vmx->nested.enlightened_vmcs_enabled || in handle_vmclear()
4943 if (vmptr == vmx->nested.current_vmptr) in handle_vmclear()
4974 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmread() local
4989 if (vmx->nested.current_vmptr == -1ull || in handle_vmread()
5058 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmwrite() local
5081 if (vmx->nested.current_vmptr == -1ull || in handle_vmwrite()
5145 vmcs_load(vmx->vmcs01.shadow_vmcs); in handle_vmwrite()
5149 vmcs_clear(vmx->vmcs01.shadow_vmcs); in handle_vmwrite()
5150 vmcs_load(vmx->loaded_vmcs->vmcs); in handle_vmwrite()
5153 vmx->nested.dirty_vmcs12 = true; in handle_vmwrite()
5159 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) in set_current_vmptr() argument
5161 vmx->nested.current_vmptr = vmptr; in set_current_vmptr()
5163 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS); in set_current_vmptr()
5165 __pa(vmx->vmcs01.shadow_vmcs)); in set_current_vmptr()
5166 vmx->nested.need_vmcs12_to_shadow_sync = true; in set_current_vmptr()
5168 vmx->nested.dirty_vmcs12 = true; in set_current_vmptr()
5174 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmptrld() local
5187 if (vmptr == vmx->nested.vmxon_ptr) in handle_vmptrld()
5191 if (vmx->nested.hv_evmcs) in handle_vmptrld()
5194 if (vmx->nested.current_vmptr != vmptr) { in handle_vmptrld()
5225 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); in handle_vmptrld()
5228 set_current_vmptr(vmx, vmptr); in handle_vmptrld()
5273 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invept() local
5284 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invept()
5286 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { in handle_invept()
5297 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; in handle_invept()
5352 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invvpid() local
5364 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invvpid()
5366 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { in handle_invvpid()
5377 types = (vmx->nested.msrs.vpid_caps & in handle_invvpid()
5486 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmfunc() local
5515 nested_vmx_vmexit(vcpu, vmx->exit_reason, in handle_vmfunc()
5917 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_reflect_vmexit() local
5918 u32 exit_reason = vmx->exit_reason; in nested_vmx_reflect_vmexit()
5922 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_reflect_vmexit()
5928 if (unlikely(vmx->fail)) { in nested_vmx_reflect_vmexit()
5971 struct vcpu_vmx *vmx; in vmx_get_nested_state() local
5977 .hdr.vmx.flags = 0, in vmx_get_nested_state()
5978 .hdr.vmx.vmxon_pa = -1ull, in vmx_get_nested_state()
5979 .hdr.vmx.vmcs12_pa = -1ull, in vmx_get_nested_state()
5980 .hdr.vmx.preemption_timer_deadline = 0, in vmx_get_nested_state()
5983 &user_kvm_nested_state->data.vmx[0]; in vmx_get_nested_state()
5988 vmx = to_vmx(vcpu); in vmx_get_nested_state()
5992 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { in vmx_get_nested_state()
5993 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr; in vmx_get_nested_state()
5994 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr; in vmx_get_nested_state()
5999 if (vmx->nested.hv_evmcs) in vmx_get_nested_state()
6008 if (vmx->nested.smm.vmxon) in vmx_get_nested_state()
6009 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; in vmx_get_nested_state()
6011 if (vmx->nested.smm.guest_mode) in vmx_get_nested_state()
6012 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; in vmx_get_nested_state()
6017 if (vmx->nested.nested_run_pending) in vmx_get_nested_state()
6020 if (vmx->nested.mtf_pending) in vmx_get_nested_state()
6024 vmx->nested.has_preemption_timer_deadline) { in vmx_get_nested_state()
6025 kvm_state.hdr.vmx.flags |= in vmx_get_nested_state()
6027 kvm_state.hdr.vmx.preemption_timer_deadline = in vmx_get_nested_state()
6028 vmx->nested.preemption_timer_deadline; in vmx_get_nested_state()
6052 } else if (!vmx->nested.need_vmcs12_to_shadow_sync) { in vmx_get_nested_state()
6053 if (vmx->nested.hv_evmcs) in vmx_get_nested_state()
6054 copy_enlightened_to_vmcs12(vmx); in vmx_get_nested_state()
6056 copy_shadow_to_vmcs12(vmx); in vmx_get_nested_state()
6095 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nested_state() local
6099 &user_kvm_nested_state->data.vmx[0]; in vmx_set_nested_state()
6105 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) { in vmx_set_nested_state()
6106 if (kvm_state->hdr.vmx.smm.flags) in vmx_set_nested_state()
6109 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) in vmx_set_nested_state()
6127 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa)) in vmx_set_nested_state()
6131 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && in vmx_set_nested_state()
6135 if (kvm_state->hdr.vmx.smm.flags & in vmx_set_nested_state()
6139 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) in vmx_set_nested_state()
6150 : kvm_state->hdr.vmx.smm.flags) in vmx_set_nested_state()
6153 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && in vmx_set_nested_state()
6154 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) in vmx_set_nested_state()
6158 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled)) in vmx_set_nested_state()
6163 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) in vmx_set_nested_state()
6166 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa; in vmx_set_nested_state()
6176 (kvm_state->hdr.vmx.vmcs12_pa != -1ull)) in vmx_set_nested_state()
6182 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) { in vmx_set_nested_state()
6183 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa || in vmx_set_nested_state()
6184 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa)) in vmx_set_nested_state()
6187 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa); in vmx_set_nested_state()
6200 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { in vmx_set_nested_state()
6201 vmx->nested.smm.vmxon = true; in vmx_set_nested_state()
6202 vmx->nested.vmxon = false; in vmx_set_nested_state()
6204 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) in vmx_set_nested_state()
6205 vmx->nested.smm.guest_mode = true; in vmx_set_nested_state()
6218 vmx->nested.nested_run_pending = in vmx_set_nested_state()
6221 vmx->nested.mtf_pending = in vmx_set_nested_state()
6246 vmx->nested.has_preemption_timer_deadline = false; in vmx_set_nested_state()
6247 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) { in vmx_set_nested_state()
6248 vmx->nested.has_preemption_timer_deadline = true; in vmx_set_nested_state()
6249 vmx->nested.preemption_timer_deadline = in vmx_set_nested_state()
6250 kvm_state->hdr.vmx.preemption_timer_deadline; in vmx_set_nested_state()
6258 vmx->nested.dirty_vmcs12 = true; in vmx_set_nested_state()
6266 vmx->nested.nested_run_pending = 0; in vmx_set_nested_state()
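
For orientation only (not part of the listing): most of the matches above bind vmx via to_vmx(vcpu), KVM's container_of-style accessor that recovers the VMX-specific vcpu structure from the generic struct kvm_vcpu. The standalone sketch below illustrates that pattern with simplified stand-in types; the struct layouts and field names here are illustrative assumptions, not the kernel's definitions.

    /*
     * Standalone illustration (simplified, hypothetical types) of the
     * container_of pattern behind to_vmx(): a vcpu_vmx embeds a kvm_vcpu,
     * and to_vmx() maps the inner pointer back to the containing struct.
     * Sketch only, not the kernel's code.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kvm_vcpu { int vcpu_id; };                              /* stand-in */
    struct vcpu_vmx { struct kvm_vcpu vcpu; int nested_vmxon; };   /* stand-in */

    static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
    {
            return container_of(vcpu, struct vcpu_vmx, vcpu);
    }

    int main(void)
    {
            struct vcpu_vmx vmx = { .vcpu = { .vcpu_id = 0 }, .nested_vmxon = 1 };
            struct kvm_vcpu *vcpu = &vmx.vcpu;

            /* Recover the outer vcpu_vmx from the embedded kvm_vcpu. */
            printf("nested_vmxon = %d\n", to_vmx(vcpu)->nested_vmxon);
            return 0;
    }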