Lines matching refs: vmx

1280 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
2070 static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr) in __find_msr_index() argument
2074 for (i = 0; i < vmx->nmsrs; ++i) in __find_msr_index()
2075 if (vmx_msr_index[vmx->guest_msrs[i].index] == msr) in __find_msr_index()
2108 static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr) in find_msr_entry() argument
2112 i = __find_msr_index(vmx, msr); in find_msr_entry()
2114 return &vmx->guest_msrs[i]; in find_msr_entry()
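The two lookups above scan the per-vCPU guest_msrs[] array linearly; guest_msrs[i].index is itself an index into a shared MSR-number table. A minimal user-space sketch of the same pattern, with hypothetical stand-in types (not the kernel's shared_msr_entry/vcpu_vmx):

struct shared_msr_entry_sk { unsigned index; unsigned long long data, mask; };

struct vcpu_vmx_sk {
    struct shared_msr_entry_sk guest_msrs[16];
    int nmsrs;
};

/* Table mapping guest_msrs[i].index to an MSR number (assumed layout). */
static const unsigned vmx_msr_index_sk[] = { 0xC0000081 /* STAR */, 0xC0000082 /* LSTAR */ };

/* Return the slot holding 'msr', or -1 if it is not in the save list. */
static int find_msr_index_sk(struct vcpu_vmx_sk *v, unsigned msr)
{
    for (int i = 0; i < v->nmsrs; ++i)
        if (vmx_msr_index_sk[v->guest_msrs[i].index] == msr)
            return i;
    return -1;
}

static struct shared_msr_entry_sk *find_msr_entry_sk(struct vcpu_vmx_sk *v, unsigned msr)
{
    int i = find_msr_index_sk(v, msr);
    return i >= 0 ? &v->guest_msrs[i] : NULL;
}
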
2444 static inline void vm_entry_controls_reset_shadow(struct vcpu_vmx *vmx) in vm_entry_controls_reset_shadow() argument
2446 vmx->vm_entry_controls_shadow = vmcs_read32(VM_ENTRY_CONTROLS); in vm_entry_controls_reset_shadow()
2449 static inline void vm_entry_controls_init(struct vcpu_vmx *vmx, u32 val) in vm_entry_controls_init() argument
2452 vmx->vm_entry_controls_shadow = val; in vm_entry_controls_init()
2455 static inline void vm_entry_controls_set(struct vcpu_vmx *vmx, u32 val) in vm_entry_controls_set() argument
2457 if (vmx->vm_entry_controls_shadow != val) in vm_entry_controls_set()
2458 vm_entry_controls_init(vmx, val); in vm_entry_controls_set()
2461 static inline u32 vm_entry_controls_get(struct vcpu_vmx *vmx) in vm_entry_controls_get() argument
2463 return vmx->vm_entry_controls_shadow; in vm_entry_controls_get()
2467 static inline void vm_entry_controls_setbit(struct vcpu_vmx *vmx, u32 val) in vm_entry_controls_setbit() argument
2469 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) | val); in vm_entry_controls_setbit()
2472 static inline void vm_entry_controls_clearbit(struct vcpu_vmx *vmx, u32 val) in vm_entry_controls_clearbit() argument
2474 vm_entry_controls_set(vmx, vm_entry_controls_get(vmx) & ~val); in vm_entry_controls_clearbit()
2477 static inline void vm_exit_controls_reset_shadow(struct vcpu_vmx *vmx) in vm_exit_controls_reset_shadow() argument
2479 vmx->vm_exit_controls_shadow = vmcs_read32(VM_EXIT_CONTROLS); in vm_exit_controls_reset_shadow()
2482 static inline void vm_exit_controls_init(struct vcpu_vmx *vmx, u32 val) in vm_exit_controls_init() argument
2485 vmx->vm_exit_controls_shadow = val; in vm_exit_controls_init()
2488 static inline void vm_exit_controls_set(struct vcpu_vmx *vmx, u32 val) in vm_exit_controls_set() argument
2490 if (vmx->vm_exit_controls_shadow != val) in vm_exit_controls_set()
2491 vm_exit_controls_init(vmx, val); in vm_exit_controls_set()
2494 static inline u32 vm_exit_controls_get(struct vcpu_vmx *vmx) in vm_exit_controls_get() argument
2496 return vmx->vm_exit_controls_shadow; in vm_exit_controls_get()
2500 static inline void vm_exit_controls_setbit(struct vcpu_vmx *vmx, u32 val) in vm_exit_controls_setbit() argument
2502 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) | val); in vm_exit_controls_setbit()
2505 static inline void vm_exit_controls_clearbit(struct vcpu_vmx *vmx, u32 val) in vm_exit_controls_clearbit() argument
2507 vm_exit_controls_set(vmx, vm_exit_controls_get(vmx) & ~val); in vm_exit_controls_clearbit()
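The VM-entry/VM-exit control accessors above keep a shadow copy of the last value written so that a VMWRITE is only issued when the value actually changes. A simplified sketch of that write-through shadow pattern, with fake VMCS accessors standing in for vmcs_read32()/vmcs_write32():

#include <stdint.h>

static uint32_t fake_vmcs_entry_controls;                 /* stands in for the VMCS field */
static uint32_t vmcs_read_entry(void)        { return fake_vmcs_entry_controls; }
static void     vmcs_write_entry(uint32_t v) { fake_vmcs_entry_controls = v; }

struct entry_controls_cache { uint32_t shadow; };

static void entry_controls_reset_shadow(struct entry_controls_cache *c)
{
    c->shadow = vmcs_read_entry();             /* resync after switching VMCS */
}

static void entry_controls_init(struct entry_controls_cache *c, uint32_t val)
{
    vmcs_write_entry(val);
    c->shadow = val;                            /* remember what the VMCS holds */
}

static void entry_controls_set(struct entry_controls_cache *c, uint32_t val)
{
    if (c->shadow != val)                       /* skip the VMWRITE if unchanged */
        entry_controls_init(c, val);
}

static uint32_t entry_controls_get(struct entry_controls_cache *c) { return c->shadow; }

static void entry_controls_setbit(struct entry_controls_cache *c, uint32_t bit)
{
    entry_controls_set(c, entry_controls_get(c) | bit);
}

static void entry_controls_clearbit(struct entry_controls_cache *c, uint32_t bit)
{
    entry_controls_set(c, entry_controls_get(c) & ~bit);
}
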
2510 static void vmx_segment_cache_clear(struct vcpu_vmx *vmx) in vmx_segment_cache_clear() argument
2512 vmx->segment_cache.bitmask = 0; in vmx_segment_cache_clear()
2515 static bool vmx_segment_cache_test_set(struct vcpu_vmx *vmx, unsigned seg, in vmx_segment_cache_test_set() argument
2521 if (!(vmx->vcpu.arch.regs_avail & (1 << VCPU_EXREG_SEGMENTS))) { in vmx_segment_cache_test_set()
2522 vmx->vcpu.arch.regs_avail |= (1 << VCPU_EXREG_SEGMENTS); in vmx_segment_cache_test_set()
2523 vmx->segment_cache.bitmask = 0; in vmx_segment_cache_test_set()
2525 ret = vmx->segment_cache.bitmask & mask; in vmx_segment_cache_test_set()
2526 vmx->segment_cache.bitmask |= mask; in vmx_segment_cache_test_set()
2530 static u16 vmx_read_guest_seg_selector(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_selector() argument
2532 u16 *p = &vmx->segment_cache.seg[seg].selector; in vmx_read_guest_seg_selector()
2534 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_SEL)) in vmx_read_guest_seg_selector()
2539 static ulong vmx_read_guest_seg_base(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_base() argument
2541 ulong *p = &vmx->segment_cache.seg[seg].base; in vmx_read_guest_seg_base()
2543 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_BASE)) in vmx_read_guest_seg_base()
2548 static u32 vmx_read_guest_seg_limit(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_limit() argument
2550 u32 *p = &vmx->segment_cache.seg[seg].limit; in vmx_read_guest_seg_limit()
2552 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_LIMIT)) in vmx_read_guest_seg_limit()
2557 static u32 vmx_read_guest_seg_ar(struct vcpu_vmx *vmx, unsigned seg) in vmx_read_guest_seg_ar() argument
2559 u32 *p = &vmx->segment_cache.seg[seg].ar; in vmx_read_guest_seg_ar()
2561 if (!vmx_segment_cache_test_set(vmx, seg, SEG_FIELD_AR)) in vmx_read_guest_seg_ar()
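vmx_segment_cache_test_set() uses a per-(segment, field) bitmask so each guest segment field is read from the VMCS at most once between cache flushes (vmx_segment_cache_clear() zeroes the bitmask). A compact sketch of that lazy, bitmask-gated cache; the field constants and the VMCS read are placeholders:

#include <stdbool.h>
#include <stdint.h>

enum { SEG_FIELD_SEL_SK, SEG_FIELD_BASE_SK, SEG_FIELD_LIMIT_SK, SEG_FIELD_AR_SK, SEG_FIELD_NR_SK };

struct seg_cache_sk {
    uint32_t bitmask;        /* which (seg, field) pairs currently hold valid data */
    struct { uint16_t selector; unsigned long base; uint32_t limit; uint32_t ar; } seg[8];
};

static void seg_cache_clear_sk(struct seg_cache_sk *c)
{
    c->bitmask = 0;          /* invalidate everything, e.g. after a VMCS switch */
}

/* Returns true when the cached value may be used; otherwise the caller must
 * refill it (the kernel does a vmcs_read*() in that case). */
static bool seg_cache_test_set_sk(struct seg_cache_sk *c, unsigned seg, unsigned field)
{
    uint32_t mask = 1u << (seg * SEG_FIELD_NR_SK + field);
    bool hit = c->bitmask & mask;

    c->bitmask |= mask;      /* either way, the slot is valid afterwards */
    return hit;
}

static uint16_t read_guest_seg_selector_sk(struct seg_cache_sk *c, unsigned seg)
{
    uint16_t *p = &c->seg[seg].selector;

    if (!seg_cache_test_set_sk(c, seg, SEG_FIELD_SEL_SK))
        *p = 0 /* vmcs_read16(GUEST_xx_SELECTOR) in the real code */;
    return *p;
}
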
2646 static void clear_atomic_switch_msr_special(struct vcpu_vmx *vmx, in clear_atomic_switch_msr_special() argument
2649 vm_entry_controls_clearbit(vmx, entry); in clear_atomic_switch_msr_special()
2650 vm_exit_controls_clearbit(vmx, exit); in clear_atomic_switch_msr_special()
2664 static void clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) in clear_atomic_switch_msr() argument
2667 struct msr_autoload *m = &vmx->msr_autoload; in clear_atomic_switch_msr()
2672 clear_atomic_switch_msr_special(vmx, in clear_atomic_switch_msr()
2680 clear_atomic_switch_msr_special(vmx, in clear_atomic_switch_msr()
2704 static void add_atomic_switch_msr_special(struct vcpu_vmx *vmx, in add_atomic_switch_msr_special() argument
2711 vm_entry_controls_setbit(vmx, entry); in add_atomic_switch_msr_special()
2712 vm_exit_controls_setbit(vmx, exit); in add_atomic_switch_msr_special()
2715 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, in add_atomic_switch_msr() argument
2719 struct msr_autoload *m = &vmx->msr_autoload; in add_atomic_switch_msr()
2724 add_atomic_switch_msr_special(vmx, in add_atomic_switch_msr()
2735 add_atomic_switch_msr_special(vmx, in add_atomic_switch_msr()
2780 static bool update_transition_efer(struct vcpu_vmx *vmx, int efer_offset) in update_transition_efer() argument
2782 u64 guest_efer = vmx->vcpu.arch.efer; in update_transition_efer()
2808 clear_atomic_switch_msr(vmx, MSR_EFER); in update_transition_efer()
2816 (enable_ept && ((vmx->vcpu.arch.efer ^ host_efer) & EFER_NX))) { in update_transition_efer()
2820 add_atomic_switch_msr(vmx, MSR_EFER, in update_transition_efer()
2827 vmx->guest_msrs[efer_offset].data = guest_efer; in update_transition_efer()
2828 vmx->guest_msrs[efer_offset].mask = ~ignore_bits; in update_transition_efer()
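clear_atomic_switch_msr()/add_atomic_switch_msr() maintain the paired guest/host MSR-autoload lists that the CPU loads atomically at VM entry and VM exit; update_transition_efer() uses them when EFER cannot be handled through the cheaper shared-MSR path. A sketch of the add/remove-by-swap pattern on a small fixed array (sizes and names assumed, and the VMCS count updates are only noted in comments):

#include <stdint.h>

#define NR_AUTOLOAD_MSRS_SK 8

struct msr_autoload_entry_sk { uint32_t index; uint64_t value; };

struct msr_autoload_sk {
    unsigned nr;
    struct msr_autoload_entry_sk guest[NR_AUTOLOAD_MSRS_SK];
    struct msr_autoload_entry_sk host[NR_AUTOLOAD_MSRS_SK];
};

/* Add (or update) an MSR to be loaded with guest_val on entry and host_val on
 * exit.  Returns -1 when the fixed-size list is full. */
static int add_atomic_switch_msr_sk(struct msr_autoload_sk *m, uint32_t msr,
                                    uint64_t guest_val, uint64_t host_val)
{
    unsigned i;

    for (i = 0; i < m->nr; i++)
        if (m->guest[i].index == msr)
            break;

    if (i == m->nr) {
        if (m->nr == NR_AUTOLOAD_MSRS_SK)
            return -1;                  /* the real code warns and bails out */
        m->nr++;                        /* kernel also updates the VMCS MSR counts here */
    }
    m->guest[i].index = msr; m->guest[i].value = guest_val;
    m->host[i].index  = msr; m->host[i].value  = host_val;
    return 0;
}

static void clear_atomic_switch_msr_sk(struct msr_autoload_sk *m, uint32_t msr)
{
    for (unsigned i = 0; i < m->nr; i++) {
        if (m->guest[i].index != msr)
            continue;
        m->nr--;                        /* swap the last entry into the hole */
        m->guest[i] = m->guest[m->nr];
        m->host[i]  = m->host[m->nr];
        return;
    }
}
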
2865 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_prepare_switch_to_guest() local
2874 vmx->req_immediate_exit = false; in vmx_prepare_switch_to_guest()
2876 if (vmx->loaded_cpu_state) in vmx_prepare_switch_to_guest()
2879 vmx->loaded_cpu_state = vmx->loaded_vmcs; in vmx_prepare_switch_to_guest()
2880 host_state = &vmx->loaded_cpu_state->host_state; in vmx_prepare_switch_to_guest()
2898 vmx->msr_host_kernel_gs_base = current->thread.gsbase; in vmx_prepare_switch_to_guest()
2903 vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE); in vmx_prepare_switch_to_guest()
2906 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_prepare_switch_to_guest()
2937 for (i = 0; i < vmx->save_nmsrs; ++i) in vmx_prepare_switch_to_guest()
2938 kvm_set_shared_msr(vmx->guest_msrs[i].index, in vmx_prepare_switch_to_guest()
2939 vmx->guest_msrs[i].data, in vmx_prepare_switch_to_guest()
2940 vmx->guest_msrs[i].mask); in vmx_prepare_switch_to_guest()
2943 static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) in vmx_prepare_switch_to_host() argument
2947 if (!vmx->loaded_cpu_state) in vmx_prepare_switch_to_host()
2950 WARN_ON_ONCE(vmx->loaded_cpu_state != vmx->loaded_vmcs); in vmx_prepare_switch_to_host()
2951 host_state = &vmx->loaded_cpu_state->host_state; in vmx_prepare_switch_to_host()
2953 ++vmx->vcpu.stat.host_state_reload; in vmx_prepare_switch_to_host()
2954 vmx->loaded_cpu_state = NULL; in vmx_prepare_switch_to_host()
2957 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_prepare_switch_to_host()
2977 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); in vmx_prepare_switch_to_host()
2983 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx) in vmx_read_guest_kernel_gs_base() argument
2986 if (vmx->loaded_cpu_state) in vmx_read_guest_kernel_gs_base()
2987 rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base); in vmx_read_guest_kernel_gs_base()
2989 return vmx->msr_guest_kernel_gs_base; in vmx_read_guest_kernel_gs_base()
2992 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data) in vmx_write_guest_kernel_gs_base() argument
2995 if (vmx->loaded_cpu_state) in vmx_write_guest_kernel_gs_base()
2998 vmx->msr_guest_kernel_gs_base = data; in vmx_write_guest_kernel_gs_base()
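vmx_read_guest_kernel_gs_base()/vmx_write_guest_kernel_gs_base() only touch MSR_KERNEL_GS_BASE directly while the guest's MSR state is live on the CPU (loaded_cpu_state is set); otherwise they work on the cached copy that the next vmx_prepare_switch_to_guest() will load. A sketch of that loaded-vs-cached split, with the MSR access stubbed out (the kernel also disables preemption around the hardware access):

#include <stdbool.h>
#include <stdint.h>

static uint64_t fake_kernel_gs_base_msr;          /* stands in for MSR_KERNEL_GS_BASE */
static uint64_t rdmsr_sk(void)       { return fake_kernel_gs_base_msr; }
static void     wrmsr_sk(uint64_t v) { fake_kernel_gs_base_msr = v; }

struct gs_base_state_sk {
    bool loaded;                      /* guest MSR state currently live on this CPU? */
    uint64_t guest_kernel_gs_base;    /* cached value when it is not */
};

static uint64_t read_guest_kernel_gs_base_sk(struct gs_base_state_sk *s)
{
    if (s->loaded)                    /* the MSR itself is authoritative right now */
        s->guest_kernel_gs_base = rdmsr_sk();
    return s->guest_kernel_gs_base;
}

static void write_guest_kernel_gs_base_sk(struct gs_base_state_sk *s, uint64_t data)
{
    if (s->loaded)
        wrmsr_sk(data);
    s->guest_kernel_gs_base = data;   /* keep the cache in sync either way */
}
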
3048 static void decache_tsc_multiplier(struct vcpu_vmx *vmx) in decache_tsc_multiplier() argument
3050 vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio; in decache_tsc_multiplier()
3051 vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio); in decache_tsc_multiplier()
3060 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_load() local
3061 bool already_loaded = vmx->loaded_vmcs->cpu == cpu; in vmx_vcpu_load()
3064 loaded_vmcs_clear(vmx->loaded_vmcs); in vmx_vcpu_load()
3075 list_add(&vmx->loaded_vmcs->loaded_vmcss_on_cpu_link, in vmx_vcpu_load()
3081 if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) { in vmx_vcpu_load()
3082 per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs; in vmx_vcpu_load()
3083 vmcs_load(vmx->loaded_vmcs->vmcs); in vmx_vcpu_load()
3112 vmx->loaded_vmcs->cpu = cpu; in vmx_vcpu_load()
3117 vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) in vmx_vcpu_load()
3118 decache_tsc_multiplier(vmx); in vmx_vcpu_load()
3121 vmx->host_pkru = read_pkru(); in vmx_vcpu_load()
3122 vmx->host_debugctlmsr = get_debugctlmsr(); in vmx_vcpu_load()
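decache_tsc_multiplier() and the check in vmx_vcpu_load() follow the same write-only-on-change idea as the control shadows: current_tsc_ratio mirrors the last value written to the TSC_MULTIPLIER VMCS field, so the field is rewritten only when the desired scaling ratio differs. A sketch:

#include <stdint.h>

static uint64_t fake_tsc_multiplier_field;       /* stands in for TSC_MULTIPLIER */

struct tsc_scale_cache_sk {
    uint64_t current_tsc_ratio;                  /* last value written to the VMCS */
};

/* Called on every vcpu_load(); 'desired_ratio' plays the role of
 * vcpu->arch.tsc_scaling_ratio. */
static void maybe_update_tsc_multiplier_sk(struct tsc_scale_cache_sk *c, uint64_t desired_ratio)
{
    if (c->current_tsc_ratio != desired_ratio) {
        c->current_tsc_ratio = desired_ratio;
        fake_tsc_multiplier_field = desired_ratio;   /* vmcs_write64(TSC_MULTIPLIER, ...) */
    }
}
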
3323 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_queue_exception() local
3334 if (vmx->rmode.vm86_active) { in vmx_queue_exception()
3343 WARN_ON_ONCE(vmx->emulation_required); in vmx_queue_exception()
3347 vmx->vcpu.arch.event_exit_inst_len); in vmx_queue_exception()
3370 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to) in move_msr_up() argument
3374 tmp = vmx->guest_msrs[to]; in move_msr_up()
3375 vmx->guest_msrs[to] = vmx->guest_msrs[from]; in move_msr_up()
3376 vmx->guest_msrs[from] = tmp; in move_msr_up()
3384 static void setup_msrs(struct vcpu_vmx *vmx) in setup_msrs() argument
3390 if (is_long_mode(&vmx->vcpu)) { in setup_msrs()
3391 index = __find_msr_index(vmx, MSR_SYSCALL_MASK); in setup_msrs()
3393 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
3394 index = __find_msr_index(vmx, MSR_LSTAR); in setup_msrs()
3396 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
3397 index = __find_msr_index(vmx, MSR_CSTAR); in setup_msrs()
3399 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
3400 index = __find_msr_index(vmx, MSR_TSC_AUX); in setup_msrs()
3401 if (index >= 0 && guest_cpuid_has(&vmx->vcpu, X86_FEATURE_RDTSCP)) in setup_msrs()
3402 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
3407 index = __find_msr_index(vmx, MSR_STAR); in setup_msrs()
3408 if ((index >= 0) && (vmx->vcpu.arch.efer & EFER_SCE)) in setup_msrs()
3409 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
3412 index = __find_msr_index(vmx, MSR_EFER); in setup_msrs()
3413 if (index >= 0 && update_transition_efer(vmx, index)) in setup_msrs()
3414 move_msr_up(vmx, index, save_nmsrs++); in setup_msrs()
3416 vmx->save_nmsrs = save_nmsrs; in setup_msrs()
3419 vmx_update_msr_bitmap(&vmx->vcpu); in setup_msrs()
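setup_msrs() packs the MSRs that actually need context switching into the front of guest_msrs[] by swapping entries with move_msr_up(), then records the count in save_nmsrs; vmx_prepare_switch_to_guest() later iterates only over that prefix. A sketch of the compaction step, with the per-MSR conditions (long mode, EFER.SCE, RDTSCP, ...) collapsed into one callback:

struct msr_slot_sk { unsigned index; unsigned long long data, mask; };

struct msr_list_sk {
    struct msr_slot_sk guest_msrs[8];
    int nmsrs;          /* total slots populated at vCPU setup time */
    int save_nmsrs;     /* leading slots that must be switched on entry/exit */
};

static void move_msr_up_sk(struct msr_list_sk *m, int from, int to)
{
    struct msr_slot_sk tmp = m->guest_msrs[to];

    m->guest_msrs[to] = m->guest_msrs[from];
    m->guest_msrs[from] = tmp;
}

/* want_msr() stands for the per-MSR eligibility checks in setup_msrs(). */
static void setup_msrs_sk(struct msr_list_sk *m, int (*want_msr)(unsigned index))
{
    int save = 0;

    for (int i = 0; i < m->nmsrs; i++)
        if (want_msr(m->guest_msrs[i].index))
            move_msr_up_sk(m, i, save++);
    m->save_nmsrs = save;
}
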
3733 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data) in vmx_restore_vmx_basic() argument
3740 u64 vmx_basic = vmx->nested.msrs.basic; in vmx_restore_vmx_basic()
3759 vmx->nested.msrs.basic = data; in vmx_restore_vmx_basic()
3764 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) in vmx_restore_control_msr() argument
3771 lowp = &vmx->nested.msrs.pinbased_ctls_low; in vmx_restore_control_msr()
3772 highp = &vmx->nested.msrs.pinbased_ctls_high; in vmx_restore_control_msr()
3775 lowp = &vmx->nested.msrs.procbased_ctls_low; in vmx_restore_control_msr()
3776 highp = &vmx->nested.msrs.procbased_ctls_high; in vmx_restore_control_msr()
3779 lowp = &vmx->nested.msrs.exit_ctls_low; in vmx_restore_control_msr()
3780 highp = &vmx->nested.msrs.exit_ctls_high; in vmx_restore_control_msr()
3783 lowp = &vmx->nested.msrs.entry_ctls_low; in vmx_restore_control_msr()
3784 highp = &vmx->nested.msrs.entry_ctls_high; in vmx_restore_control_msr()
3787 lowp = &vmx->nested.msrs.secondary_ctls_low; in vmx_restore_control_msr()
3788 highp = &vmx->nested.msrs.secondary_ctls_high; in vmx_restore_control_msr()
3809 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data) in vmx_restore_vmx_misc() argument
3819 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low, in vmx_restore_vmx_misc()
3820 vmx->nested.msrs.misc_high); in vmx_restore_vmx_misc()
3825 if ((vmx->nested.msrs.pinbased_ctls_high & in vmx_restore_vmx_misc()
3840 vmx->nested.msrs.misc_low = data; in vmx_restore_vmx_misc()
3841 vmx->nested.msrs.misc_high = data >> 32; in vmx_restore_vmx_misc()
3848 if (enable_shadow_vmcs && !nested_cpu_has_vmwrite_any_field(&vmx->vcpu)) in vmx_restore_vmx_misc()
3854 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data) in vmx_restore_vmx_ept_vpid_cap() argument
3858 vmx_ept_vpid_cap = vmx_control_msr(vmx->nested.msrs.ept_caps, in vmx_restore_vmx_ept_vpid_cap()
3859 vmx->nested.msrs.vpid_caps); in vmx_restore_vmx_ept_vpid_cap()
3865 vmx->nested.msrs.ept_caps = data; in vmx_restore_vmx_ept_vpid_cap()
3866 vmx->nested.msrs.vpid_caps = data >> 32; in vmx_restore_vmx_ept_vpid_cap()
3870 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data) in vmx_restore_fixed0_msr() argument
3876 msr = &vmx->nested.msrs.cr0_fixed0; in vmx_restore_fixed0_msr()
3879 msr = &vmx->nested.msrs.cr4_fixed0; in vmx_restore_fixed0_msr()
3903 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_vmx_msr() local
3909 if (vmx->nested.vmxon) in vmx_set_vmx_msr()
3914 return vmx_restore_vmx_basic(vmx, data); in vmx_set_vmx_msr()
3934 return vmx_restore_control_msr(vmx, msr_index, data); in vmx_set_vmx_msr()
3936 return vmx_restore_vmx_misc(vmx, data); in vmx_set_vmx_msr()
3939 return vmx_restore_fixed0_msr(vmx, msr_index, data); in vmx_set_vmx_msr()
3948 return vmx_restore_vmx_ept_vpid_cap(vmx, data); in vmx_set_vmx_msr()
3950 vmx->nested.msrs.vmcs_enum = data; in vmx_set_vmx_msr()
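vmx_restore_control_msr() only accepts a userspace-restored allowed-0/allowed-1 control MSR if it is no more permissive than what KVM itself advertises: the low half ("must be 1" bits) may not be relaxed and the high half ("may be 1" bits) may not gain bits. A sketch of that subset check with generic names:

#include <stdbool.h>
#include <stdint.h>

static bool is_bitwise_subset_sk(uint64_t superset, uint64_t subset, uint64_t mask)
{
    superset &= mask;
    subset   &= mask;
    return (superset | subset) == superset;
}

/* lowp/highp hold the advertised capability (low 32 bits: bits that must be 1,
 * high 32 bits: bits that may be 1); 'data' is the value userspace wants to
 * restore.  Returns 0 on success, -1 if the value is too permissive. */
static int restore_control_msr_sk(uint32_t *lowp, uint32_t *highp, uint64_t data)
{
    uint64_t supported = ((uint64_t)*highp << 32) | *lowp;

    /* Must-be-1 bits (low half) may not be dropped... */
    if (!is_bitwise_subset_sk(data, supported, 0xffffffffULL))
        return -1;
    /* ...and may-be-1 bits (high half) may not be added. */
    if (!is_bitwise_subset_sk(supported, data, 0xffffffffULL << 32))
        return -1;

    *lowp  = (uint32_t)data;
    *highp = (uint32_t)(data >> 32);
    return 0;
}
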
4067 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_msr() local
4079 msr_info->data = vmx_read_guest_kernel_gs_base(vmx); in vmx_get_msr()
4115 !(vmx->msr_ia32_feature_control & in vmx_get_msr()
4121 msr_info->data = vmx->msr_ia32_feature_control; in vmx_get_msr()
4126 return vmx_get_vmx_msr(&vmx->nested.msrs, msr_info->index, in vmx_get_msr()
4139 msr = find_msr_entry(vmx, msr_info->index); in vmx_get_msr()
4159 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_msr() local
4171 vmx_segment_cache_clear(vmx); in vmx_set_msr()
4175 vmx_segment_cache_clear(vmx); in vmx_set_msr()
4179 vmx_write_guest_kernel_gs_base(vmx, data); in vmx_set_msr()
4210 vmx->spec_ctrl = data; in vmx_set_msr()
4227 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, in vmx_set_msr()
4255 vmx_disable_intercept_for_msr(vmx->vmcs01.msr_bitmap, MSR_IA32_PRED_CMD, in vmx_set_msr()
4261 vmx->arch_capabilities = data; in vmx_set_msr()
4289 vmx->msr_ia32_feature_control = data; in vmx_set_msr()
4310 add_atomic_switch_msr(vmx, MSR_IA32_XSS, in vmx_set_msr()
4313 clear_atomic_switch_msr(vmx, MSR_IA32_XSS); in vmx_set_msr()
4324 msr = find_msr_entry(vmx, msr_index); in vmx_set_msr()
4328 if (msr - vmx->guest_msrs < vmx->save_nmsrs) { in vmx_set_msr()
4945 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_pmode() local
4951 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_pmode()
4952 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_pmode()
4953 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_pmode()
4954 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_pmode()
4955 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_pmode()
4956 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_pmode()
4958 vmx->rmode.vm86_active = 0; in enter_pmode()
4960 vmx_segment_cache_clear(vmx); in enter_pmode()
4962 vmx_set_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_pmode()
4966 flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS; in enter_pmode()
4974 fix_pmode_seg(vcpu, VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_pmode()
4975 fix_pmode_seg(vcpu, VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_pmode()
4976 fix_pmode_seg(vcpu, VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_pmode()
4977 fix_pmode_seg(vcpu, VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_pmode()
4978 fix_pmode_seg(vcpu, VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_pmode()
4979 fix_pmode_seg(vcpu, VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_pmode()
5018 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_rmode() local
5021 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); in enter_rmode()
5022 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); in enter_rmode()
5023 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); in enter_rmode()
5024 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_FS], VCPU_SREG_FS); in enter_rmode()
5025 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_GS], VCPU_SREG_GS); in enter_rmode()
5026 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_SS], VCPU_SREG_SS); in enter_rmode()
5027 vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_CS], VCPU_SREG_CS); in enter_rmode()
5029 vmx->rmode.vm86_active = 1; in enter_rmode()
5039 vmx_segment_cache_clear(vmx); in enter_rmode()
5046 vmx->rmode.save_rflags = flags; in enter_rmode()
5054 fix_rmode_seg(VCPU_SREG_SS, &vmx->rmode.segs[VCPU_SREG_SS]); in enter_rmode()
5055 fix_rmode_seg(VCPU_SREG_CS, &vmx->rmode.segs[VCPU_SREG_CS]); in enter_rmode()
5056 fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.segs[VCPU_SREG_ES]); in enter_rmode()
5057 fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.segs[VCPU_SREG_DS]); in enter_rmode()
5058 fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.segs[VCPU_SREG_GS]); in enter_rmode()
5059 fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.segs[VCPU_SREG_FS]); in enter_rmode()
5066 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_efer() local
5067 struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER); in vmx_set_efer()
5081 setup_msrs(vmx); in vmx_set_efer()
5264 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_cr0() local
5273 if (vmx->rmode.vm86_active && (cr0 & X86_CR0_PE)) in vmx_set_cr0()
5276 if (!vmx->rmode.vm86_active && !(cr0 & X86_CR0_PE)) in vmx_set_cr0()
5297 vmx->emulation_required = emulation_required(vcpu); in vmx_set_cr0()
5429 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_segment() local
5432 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_get_segment()
5433 *var = vmx->rmode.segs[seg]; in vmx_get_segment()
5435 || var->selector == vmx_read_guest_seg_selector(vmx, seg)) in vmx_get_segment()
5437 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
5438 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
5441 var->base = vmx_read_guest_seg_base(vmx, seg); in vmx_get_segment()
5442 var->limit = vmx_read_guest_seg_limit(vmx, seg); in vmx_get_segment()
5443 var->selector = vmx_read_guest_seg_selector(vmx, seg); in vmx_get_segment()
5444 ar = vmx_read_guest_seg_ar(vmx, seg); in vmx_get_segment()
5476 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_cpl() local
5478 if (unlikely(vmx->rmode.vm86_active)) in vmx_get_cpl()
5481 int ar = vmx_read_guest_seg_ar(vmx, VCPU_SREG_SS); in vmx_get_cpl()
5509 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_segment() local
5512 vmx_segment_cache_clear(vmx); in vmx_set_segment()
5514 if (vmx->rmode.vm86_active && seg != VCPU_SREG_LDTR) { in vmx_set_segment()
5515 vmx->rmode.segs[seg] = *var; in vmx_set_segment()
5519 fix_rmode_seg(seg, &vmx->rmode.segs[seg]); in vmx_set_segment()
5544 vmx->emulation_required = emulation_required(vcpu); in vmx_set_segment()
6096 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_msr_bitmap() local
6097 unsigned long *msr_bitmap = vmx->vmcs01.msr_bitmap; in vmx_update_msr_bitmap()
6099 u8 changed = mode ^ vmx->msr_bitmap_mode; in vmx_update_msr_bitmap()
6107 vmx->msr_bitmap_mode = mode; in vmx_update_msr_bitmap()
6139 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_complete_nested_posted_interrupt() local
6144 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending) in vmx_complete_nested_posted_interrupt()
6147 vmx->nested.pi_pending = false; in vmx_complete_nested_posted_interrupt()
6148 if (!pi_test_and_clear_on(vmx->nested.pi_desc)) in vmx_complete_nested_posted_interrupt()
6151 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256); in vmx_complete_nested_posted_interrupt()
6153 vapic_page = kmap(vmx->nested.virtual_apic_page); in vmx_complete_nested_posted_interrupt()
6154 __kvm_apic_update_irr(vmx->nested.pi_desc->pir, in vmx_complete_nested_posted_interrupt()
6156 kunmap(vmx->nested.virtual_apic_page); in vmx_complete_nested_posted_interrupt()
6176 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_guest_apic_has_interrupt() local
6183 WARN_ON_ONCE(!vmx->nested.virtual_apic_page)) in vmx_guest_apic_has_interrupt()
6188 vapic_page = kmap(vmx->nested.virtual_apic_page); in vmx_guest_apic_has_interrupt()
6190 kunmap(vmx->nested.virtual_apic_page); in vmx_guest_apic_has_interrupt()
6237 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_nested_posted_interrupt() local
6240 vector == vmx->nested.posted_intr_nv) { in vmx_deliver_nested_posted_interrupt()
6245 vmx->nested.pi_pending = true; in vmx_deliver_nested_posted_interrupt()
6263 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_deliver_posted_interrupt() local
6270 if (pi_test_and_set_pir(vector, &vmx->pi_desc)) in vmx_deliver_posted_interrupt()
6274 if (pi_test_and_set_on(&vmx->pi_desc)) in vmx_deliver_posted_interrupt()
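vmx_deliver_posted_interrupt() sets the vector's bit in the Posted-Interrupt Requests (PIR) bitmap and only sends the notification IPI if the Outstanding Notification (ON) bit was not already set; vmx_sync_pir_to_irr()/vmx_complete_nested_posted_interrupt() later fold the PIR into the IRR. A sketch of the producer side, using C11 atomics in place of the kernel's test_and_set_bit() helpers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct pi_desc_sk {
    _Atomic uint64_t pir[4];      /* 256-bit posted-interrupt request bitmap */
    _Atomic uint32_t control;     /* bit 0: ON (outstanding notification) */
};

static bool test_and_set_bit_sk(_Atomic uint64_t *word, unsigned bit)
{
    uint64_t mask = 1ULL << bit;
    return atomic_fetch_or(word, mask) & mask;
}

/* Returns true when a notification IPI still has to be sent to the target CPU. */
static bool post_interrupt_sk(struct pi_desc_sk *pi, unsigned vector)
{
    if (test_and_set_bit_sk(&pi->pir[vector / 64], vector % 64))
        return false;                       /* already pending, nothing more to do */

    /* Only the poster that flips ON from 0 to 1 sends the IPI. */
    return !(atomic_fetch_or(&pi->control, 1u) & 1u);
}
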
6287 static void vmx_set_constant_host_state(struct vcpu_vmx *vmx) in vmx_set_constant_host_state() argument
6304 vmx->loaded_vmcs->host_state.cr3 = cr3; in vmx_set_constant_host_state()
6309 vmx->loaded_vmcs->host_state.cr4 = cr4; in vmx_set_constant_host_state()
6329 vmx->host_idt_base = dt.address; in vmx_set_constant_host_state()
6344 static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx) in set_cr4_guest_host_mask() argument
6346 vmx->vcpu.arch.cr4_guest_owned_bits = KVM_CR4_GUEST_OWNED_BITS; in set_cr4_guest_host_mask()
6348 vmx->vcpu.arch.cr4_guest_owned_bits |= X86_CR4_PGE; in set_cr4_guest_host_mask()
6349 if (is_guest_mode(&vmx->vcpu)) in set_cr4_guest_host_mask()
6350 vmx->vcpu.arch.cr4_guest_owned_bits &= in set_cr4_guest_host_mask()
6351 ~get_vmcs12(&vmx->vcpu)->cr4_guest_host_mask; in set_cr4_guest_host_mask()
6352 vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits); in set_cr4_guest_host_mask()
6355 static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx) in vmx_pin_based_exec_ctrl() argument
6359 if (!kvm_vcpu_apicv_active(&vmx->vcpu)) in vmx_pin_based_exec_ctrl()
6372 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_refresh_apicv_exec_ctrl() local
6374 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); in vmx_refresh_apicv_exec_ctrl()
6390 static u32 vmx_exec_control(struct vcpu_vmx *vmx) in vmx_exec_control() argument
6394 if (vmx->vcpu.arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT) in vmx_exec_control()
6397 if (!cpu_need_tpr_shadow(&vmx->vcpu)) { in vmx_exec_control()
6408 if (kvm_mwait_in_guest(vmx->vcpu.kvm)) in vmx_exec_control()
6411 if (kvm_hlt_in_guest(vmx->vcpu.kvm)) in vmx_exec_control()
6428 static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx) in vmx_compute_secondary_exec_control() argument
6430 struct kvm_vcpu *vcpu = &vmx->vcpu; in vmx_compute_secondary_exec_control()
6436 if (vmx->vpid == 0) in vmx_compute_secondary_exec_control()
6444 if (kvm_pause_in_guest(vmx->vcpu.kvm)) in vmx_compute_secondary_exec_control()
6476 vmx->nested.msrs.secondary_ctls_high |= in vmx_compute_secondary_exec_control()
6479 vmx->nested.msrs.secondary_ctls_high &= in vmx_compute_secondary_exec_control()
6491 vmx->nested.msrs.secondary_ctls_high |= in vmx_compute_secondary_exec_control()
6494 vmx->nested.msrs.secondary_ctls_high &= in vmx_compute_secondary_exec_control()
6512 vmx->nested.msrs.secondary_ctls_high |= in vmx_compute_secondary_exec_control()
6515 vmx->nested.msrs.secondary_ctls_high &= in vmx_compute_secondary_exec_control()
6527 vmx->nested.msrs.secondary_ctls_high |= in vmx_compute_secondary_exec_control()
6530 vmx->nested.msrs.secondary_ctls_high &= in vmx_compute_secondary_exec_control()
6542 vmx->nested.msrs.secondary_ctls_high |= in vmx_compute_secondary_exec_control()
6545 vmx->nested.msrs.secondary_ctls_high &= in vmx_compute_secondary_exec_control()
6550 vmx->secondary_exec_control = exec_control; in vmx_compute_secondary_exec_control()
6567 static void vmx_vcpu_setup(struct vcpu_vmx *vmx) in vmx_vcpu_setup() argument
6582 vmcs_write64(MSR_BITMAP, __pa(vmx->vmcs01.msr_bitmap)); in vmx_vcpu_setup()
6587 vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx)); in vmx_vcpu_setup()
6588 vmx->hv_deadline_tsc = -1; in vmx_vcpu_setup()
6590 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx)); in vmx_vcpu_setup()
6593 vmx_compute_secondary_exec_control(vmx); in vmx_vcpu_setup()
6595 vmx->secondary_exec_control); in vmx_vcpu_setup()
6598 if (kvm_vcpu_apicv_active(&vmx->vcpu)) { in vmx_vcpu_setup()
6607 vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc))); in vmx_vcpu_setup()
6610 if (!kvm_pause_in_guest(vmx->vcpu.kvm)) { in vmx_vcpu_setup()
6612 vmx->ple_window = ple_window; in vmx_vcpu_setup()
6613 vmx->ple_window_dirty = true; in vmx_vcpu_setup()
6622 vmx_set_constant_host_state(vmx); in vmx_vcpu_setup()
6631 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); in vmx_vcpu_setup()
6633 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); in vmx_vcpu_setup()
6636 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in vmx_vcpu_setup()
6641 int j = vmx->nmsrs; in vmx_vcpu_setup()
6647 vmx->guest_msrs[j].index = i; in vmx_vcpu_setup()
6648 vmx->guest_msrs[j].data = 0; in vmx_vcpu_setup()
6649 vmx->guest_msrs[j].mask = -1ull; in vmx_vcpu_setup()
6650 ++vmx->nmsrs; in vmx_vcpu_setup()
6653 vmx->arch_capabilities = kvm_get_arch_capabilities(); in vmx_vcpu_setup()
6655 vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl); in vmx_vcpu_setup()
6658 vm_entry_controls_init(vmx, vmcs_config.vmentry_ctrl); in vmx_vcpu_setup()
6660 vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS; in vmx_vcpu_setup()
6663 set_cr4_guest_host_mask(vmx); in vmx_vcpu_setup()
6669 ASSERT(vmx->pml_pg); in vmx_vcpu_setup()
6670 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); in vmx_vcpu_setup()
6680 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_reset() local
6684 vmx->rmode.vm86_active = 0; in vmx_vcpu_reset()
6685 vmx->spec_ctrl = 0; in vmx_vcpu_reset()
6688 vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); in vmx_vcpu_reset()
6700 vmx_segment_cache_clear(vmx); in vmx_vcpu_reset()
6744 setup_msrs(vmx); in vmx_vcpu_reset()
6758 if (vmx->vpid != 0) in vmx_vcpu_reset()
6759 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in vmx_vcpu_reset()
6762 vmx->vcpu.arch.cr0 = cr0; in vmx_vcpu_reset()
6769 vpid_sync_context(vmx->vpid); in vmx_vcpu_reset()
6819 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_irq() local
6826 if (vmx->rmode.vm86_active) { in vmx_inject_irq()
6838 vmx->vcpu.arch.event_exit_inst_len); in vmx_inject_irq()
6848 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_inject_nmi() local
6859 vmx->loaded_vmcs->soft_vnmi_blocked = 1; in vmx_inject_nmi()
6860 vmx->loaded_vmcs->vnmi_blocked_time = 0; in vmx_inject_nmi()
6864 vmx->loaded_vmcs->nmi_known_unmasked = false; in vmx_inject_nmi()
6866 if (vmx->rmode.vm86_active) { in vmx_inject_nmi()
6880 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_get_nmi_mask() local
6884 return vmx->loaded_vmcs->soft_vnmi_blocked; in vmx_get_nmi_mask()
6885 if (vmx->loaded_vmcs->nmi_known_unmasked) in vmx_get_nmi_mask()
6888 vmx->loaded_vmcs->nmi_known_unmasked = !masked; in vmx_get_nmi_mask()
6894 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nmi_mask() local
6897 if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) { in vmx_set_nmi_mask()
6898 vmx->loaded_vmcs->soft_vnmi_blocked = masked; in vmx_set_nmi_mask()
6899 vmx->loaded_vmcs->vnmi_blocked_time = 0; in vmx_set_nmi_mask()
6902 vmx->loaded_vmcs->nmi_known_unmasked = !masked; in vmx_set_nmi_mask()
7041 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_exception() local
7048 vect_info = vmx->idt_vectoring_info; in handle_exception()
7049 intr_info = vmx->exit_intr_info; in handle_exception()
7064 if (!vmx->rmode.vm86_active && is_gp_fault(intr_info)) { in handle_exception()
7100 if (vmx->rmode.vm86_active && rmode_exception(vcpu, ex_no)) in handle_exception()
7128 vmx->vcpu.arch.event_exit_inst_len = in handle_exception()
7584 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_task_switch() local
7591 idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK); in handle_task_switch()
7592 idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK); in handle_task_switch()
7593 type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK); in handle_task_switch()
7609 if (vmx->idt_vectoring_info & in handle_task_switch()
7736 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invalid_guest_state() local
7748 WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending); in handle_invalid_guest_state()
7753 while (vmx->emulation_required && count-- != 0) { in handle_invalid_guest_state()
7755 return handle_interrupt_window(&vmx->vcpu); in handle_invalid_guest_state()
7771 if (vmx->emulation_required && !vmx->rmode.vm86_active && in handle_invalid_guest_state()
7799 struct vcpu_vmx *vmx = to_vmx(vcpu); in grow_ple_window() local
7800 int old = vmx->ple_window; in grow_ple_window()
7802 vmx->ple_window = __grow_ple_window(old, ple_window, in grow_ple_window()
7806 if (vmx->ple_window != old) in grow_ple_window()
7807 vmx->ple_window_dirty = true; in grow_ple_window()
7809 trace_kvm_ple_window_grow(vcpu->vcpu_id, vmx->ple_window, old); in grow_ple_window()
7814 struct vcpu_vmx *vmx = to_vmx(vcpu); in shrink_ple_window() local
7815 int old = vmx->ple_window; in shrink_ple_window()
7817 vmx->ple_window = __shrink_ple_window(old, ple_window, in shrink_ple_window()
7821 if (vmx->ple_window != old) in shrink_ple_window()
7822 vmx->ple_window_dirty = true; in shrink_ple_window()
7824 trace_kvm_ple_window_shrink(vcpu->vcpu_id, vmx->ple_window, old); in shrink_ple_window()
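grow_ple_window()/shrink_ple_window() adjust the pause-loop-exiting window and set ple_window_dirty only when the value actually changed, so vmx_vcpu_run() rewrites PLE_WINDOW at most once per change. A sketch of the dirty-flag pattern (the grow/shrink arithmetic itself is driven by module parameters and is omitted here):

#include <stdbool.h>

struct ple_state_sk {
    int ple_window;
    bool ple_window_dirty;         /* tells the VM-entry path to rewrite PLE_WINDOW */
};

static void set_ple_window_sk(struct ple_state_sk *s, int new_window)
{
    int old = s->ple_window;

    s->ple_window = new_window;
    if (s->ple_window != old)
        s->ple_window_dirty = true;
}

/* Consumed on the VM-entry path: returns the value to write once, then clears
 * the flag; returns -1 when nothing needs to be written. */
static int flush_ple_window_sk(struct ple_state_sk *s)
{
    if (!s->ple_window_dirty)
        return -1;
    s->ple_window_dirty = false;
    return s->ple_window;          /* real code: vmcs_write32(PLE_WINDOW, ...) */
}
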
8119 struct vcpu_vmx *vmx = in vmx_preemption_timer_fn() local
8122 vmx->nested.preemption_timer_expired = true; in vmx_preemption_timer_fn()
8123 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); in vmx_preemption_timer_fn()
8124 kvm_vcpu_kick(&vmx->vcpu); in vmx_preemption_timer_fn()
8250 struct vcpu_vmx *vmx = to_vmx(vcpu); in alloc_shadow_vmcs() local
8251 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs; in alloc_shadow_vmcs()
8259 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs); in alloc_shadow_vmcs()
8271 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_vmx_operation() local
8274 r = alloc_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
8278 vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); in enter_vmx_operation()
8279 if (!vmx->nested.cached_vmcs12) in enter_vmx_operation()
8282 vmx->nested.cached_shadow_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL); in enter_vmx_operation()
8283 if (!vmx->nested.cached_shadow_vmcs12) in enter_vmx_operation()
8289 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC, in enter_vmx_operation()
8291 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn; in enter_vmx_operation()
8293 vmx->nested.vpid02 = allocate_vpid(); in enter_vmx_operation()
8295 vmx->nested.vmxon = true; in enter_vmx_operation()
8299 kfree(vmx->nested.cached_shadow_vmcs12); in enter_vmx_operation()
8302 kfree(vmx->nested.cached_vmcs12); in enter_vmx_operation()
8305 free_loaded_vmcs(&vmx->nested.vmcs02); in enter_vmx_operation()
8324 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmon() local
8348 if (vmx->nested.vmxon) { in handle_vmon()
8353 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES) in handle_vmon()
8389 vmx->nested.vmxon_ptr = vmptr; in handle_vmon()
8420 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx) in vmx_disable_shadow_vmcs() argument
8426 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) in nested_release_vmcs12() argument
8428 if (vmx->nested.current_vmptr == -1ull) in nested_release_vmcs12()
8434 copy_shadow_to_vmcs12(vmx); in nested_release_vmcs12()
8435 vmx->nested.sync_shadow_vmcs = false; in nested_release_vmcs12()
8436 vmx_disable_shadow_vmcs(vmx); in nested_release_vmcs12()
8438 vmx->nested.posted_intr_nv = -1; in nested_release_vmcs12()
8441 kvm_vcpu_write_guest_page(&vmx->vcpu, in nested_release_vmcs12()
8442 vmx->nested.current_vmptr >> PAGE_SHIFT, in nested_release_vmcs12()
8443 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE); in nested_release_vmcs12()
8445 vmx->nested.current_vmptr = -1ull; in nested_release_vmcs12()
8452 static void free_nested(struct vcpu_vmx *vmx) in free_nested() argument
8454 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon) in free_nested()
8457 vmx->nested.vmxon = false; in free_nested()
8458 vmx->nested.smm.vmxon = false; in free_nested()
8459 free_vpid(vmx->nested.vpid02); in free_nested()
8460 vmx->nested.posted_intr_nv = -1; in free_nested()
8461 vmx->nested.current_vmptr = -1ull; in free_nested()
8463 vmx_disable_shadow_vmcs(vmx); in free_nested()
8464 vmcs_clear(vmx->vmcs01.shadow_vmcs); in free_nested()
8465 free_vmcs(vmx->vmcs01.shadow_vmcs); in free_nested()
8466 vmx->vmcs01.shadow_vmcs = NULL; in free_nested()
8468 kfree(vmx->nested.cached_vmcs12); in free_nested()
8469 kfree(vmx->nested.cached_shadow_vmcs12); in free_nested()
8471 if (vmx->nested.apic_access_page) { in free_nested()
8472 kvm_release_page_dirty(vmx->nested.apic_access_page); in free_nested()
8473 vmx->nested.apic_access_page = NULL; in free_nested()
8475 if (vmx->nested.virtual_apic_page) { in free_nested()
8476 kvm_release_page_dirty(vmx->nested.virtual_apic_page); in free_nested()
8477 vmx->nested.virtual_apic_page = NULL; in free_nested()
8479 if (vmx->nested.pi_desc_page) { in free_nested()
8480 kunmap(vmx->nested.pi_desc_page); in free_nested()
8481 kvm_release_page_dirty(vmx->nested.pi_desc_page); in free_nested()
8482 vmx->nested.pi_desc_page = NULL; in free_nested()
8483 vmx->nested.pi_desc = NULL; in free_nested()
8486 free_loaded_vmcs(&vmx->nested.vmcs02); in free_nested()
8502 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmclear() local
8517 if (vmptr == vmx->nested.vmxon_ptr) { in handle_vmclear()
8522 if (vmptr == vmx->nested.current_vmptr) in handle_vmclear()
8523 nested_release_vmcs12(vmx); in handle_vmclear()
8619 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx) in copy_shadow_to_vmcs12() argument
8632 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; in copy_shadow_to_vmcs12()
8642 vmcs12_write_any(get_vmcs12(&vmx->vcpu), field, field_value); in copy_shadow_to_vmcs12()
8647 if (!nested_cpu_has_vmwrite_any_field(&vmx->vcpu)) in copy_shadow_to_vmcs12()
8652 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_shadow_to_vmcs12()
8657 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) in copy_vmcs12_to_shadow() argument
8670 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs; in copy_vmcs12_to_shadow()
8677 vmcs12_read_any(get_vmcs12(&vmx->vcpu), field, &field_value); in copy_vmcs12_to_shadow()
8683 vmcs_load(vmx->loaded_vmcs->vmcs); in copy_vmcs12_to_shadow()
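copy_shadow_to_vmcs12() and copy_vmcs12_to_shadow() load the shadow VMCS, walk a table of shadow-accessible field encodings copying each field between hardware and the in-memory cached vmcs12, and then reload the normal VMCS. A sketch of the field-table walk; the table and accessors are placeholders for shadow_read_write_fields[] and the vmcs_read/write helpers:

#include <stddef.h>
#include <stdint.h>

struct vmcs12_sk { uint64_t field_value[8]; };     /* flattened stand-in for vmcs12 */

/* Table of field "encodings"; here an encoding is just an index into vmcs12. */
static const unsigned shadow_fields_sk[] = { 0, 2, 5 };

/* Placeholder hardware accessors; the kernel issues vmcs_read/vmcs_write on the
 * shadow VMCS after vmcs_load(shadow_vmcs). */
static uint64_t shadow_hw_sk[8];
static uint64_t shadow_read_sk(unsigned enc)              { return shadow_hw_sk[enc]; }
static void     shadow_write_sk(unsigned enc, uint64_t v) { shadow_hw_sk[enc] = v; }

static void copy_shadow_to_vmcs12_sk(struct vmcs12_sk *vmcs12)
{
    /* (real code: vmcs_load(shadow_vmcs) before, vmcs_load(loaded_vmcs) after) */
    for (size_t i = 0; i < sizeof(shadow_fields_sk) / sizeof(shadow_fields_sk[0]); i++) {
        unsigned enc = shadow_fields_sk[i];
        vmcs12->field_value[enc] = shadow_read_sk(enc);
    }
}

static void copy_vmcs12_to_shadow_sk(const struct vmcs12_sk *vmcs12)
{
    for (size_t i = 0; i < sizeof(shadow_fields_sk) / sizeof(shadow_fields_sk[0]); i++) {
        unsigned enc = shadow_fields_sk[i];
        shadow_write_sk(enc, vmcs12->field_value[enc]);
    }
}
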
8692 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_check_vmcs12() local
8693 if (vmx->nested.current_vmptr == -1ull) { in nested_vmx_check_vmcs12()
8762 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmwrite() local
8844 vmx->nested.dirty_vmcs12 = true; in handle_vmwrite()
8853 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr) in set_current_vmptr() argument
8855 vmx->nested.current_vmptr = vmptr; in set_current_vmptr()
8860 __pa(vmx->vmcs01.shadow_vmcs)); in set_current_vmptr()
8861 vmx->nested.sync_shadow_vmcs = true; in set_current_vmptr()
8863 vmx->nested.dirty_vmcs12 = true; in set_current_vmptr()
8869 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmptrld() local
8883 if (vmptr == vmx->nested.vmxon_ptr) { in handle_vmptrld()
8888 if (vmx->nested.current_vmptr != vmptr) { in handle_vmptrld()
8907 nested_release_vmcs12(vmx); in handle_vmptrld()
8912 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE); in handle_vmptrld()
8916 set_current_vmptr(vmx, vmptr); in handle_vmptrld()
8950 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invept() local
8959 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invept()
8961 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) { in handle_invept()
8972 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; in handle_invept()
9012 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_invvpid() local
9022 if (!(vmx->nested.msrs.secondary_ctls_high & in handle_invvpid()
9024 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) { in handle_invvpid()
9035 types = (vmx->nested.msrs.vpid_caps & in handle_invvpid()
9069 vmx->nested.vpid02) { in handle_invvpid()
9071 vmx->nested.vpid02, operand.gla); in handle_invvpid()
9073 __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); in handle_invvpid()
9082 __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); in handle_invvpid()
9085 __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); in handle_invvpid()
9230 struct vcpu_vmx *vmx = to_vmx(vcpu); in valid_ept_address() local
9236 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)) in valid_ept_address()
9240 if (!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)) in valid_ept_address()
9257 if (!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)) in valid_ept_address()
9311 struct vcpu_vmx *vmx = to_vmx(vcpu); in handle_vmfunc() local
9340 nested_vmx_vmexit(vcpu, vmx->exit_reason, in handle_vmfunc()
9617 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_exit_reflected() local
9620 if (vmx->nested.nested_run_pending) in nested_vmx_exit_reflected()
9623 if (unlikely(vmx->fail)) { in nested_vmx_exit_reflected()
9644 vmx->idt_vectoring_info, in nested_vmx_exit_reflected()
9654 return !vmx->vcpu.arch.apf.host_apf_reason && enable_ept; in nested_vmx_exit_reflected()
9822 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx) in vmx_destroy_pml_buffer() argument
9824 if (vmx->pml_pg) { in vmx_destroy_pml_buffer()
9825 __free_page(vmx->pml_pg); in vmx_destroy_pml_buffer()
9826 vmx->pml_pg = NULL; in vmx_destroy_pml_buffer()
9832 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_flush_pml_buffer() local
9848 pml_buf = page_address(vmx->pml_pg); in vmx_flush_pml_buffer()
10043 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_exit() local
10044 u32 exit_reason = vmx->exit_reason; in vmx_handle_exit()
10045 u32 vectoring_info = vmx->idt_vectoring_info; in vmx_handle_exit()
10060 if (vmx->emulation_required) in vmx_handle_exit()
10074 if (unlikely(vmx->fail)) { in vmx_handle_exit()
10108 vmx->loaded_vmcs->soft_vnmi_blocked)) { in vmx_handle_exit()
10110 vmx->loaded_vmcs->soft_vnmi_blocked = 0; in vmx_handle_exit()
10111 } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL && in vmx_handle_exit()
10122 vmx->loaded_vmcs->soft_vnmi_blocked = 0; in vmx_handle_exit()
10325 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_sync_pir_to_irr() local
10330 if (pi_test_on(&vmx->pi_desc)) { in vmx_sync_pir_to_irr()
10331 pi_clear_on(&vmx->pi_desc); in vmx_sync_pir_to_irr()
10338 kvm_apic_update_irr(vcpu, vmx->pi_desc.pir, &max_irr); in vmx_sync_pir_to_irr()
10382 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_apicv_post_state_restore() local
10384 pi_clear_on(&vmx->pi_desc); in vmx_apicv_post_state_restore()
10385 memset(vmx->pi_desc.pir, 0, sizeof(vmx->pi_desc.pir)); in vmx_apicv_post_state_restore()
10388 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx) in vmx_complete_atomic_exit() argument
10391 u16 basic_exit_reason = (u16)vmx->exit_reason; in vmx_complete_atomic_exit()
10397 if (!(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) in vmx_complete_atomic_exit()
10399 vmx->exit_intr_info = exit_intr_info; in vmx_complete_atomic_exit()
10403 vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason(); in vmx_complete_atomic_exit()
10412 kvm_before_interrupt(&vmx->vcpu); in vmx_complete_atomic_exit()
10414 kvm_after_interrupt(&vmx->vcpu); in vmx_complete_atomic_exit()
10427 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_handle_external_intr() local
10433 desc = (gate_desc *)vmx->host_idt_base + vector; in vmx_handle_external_intr()
10488 static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) in vmx_recover_nmi_blocking() argument
10495 idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK; in vmx_recover_nmi_blocking()
10498 if (vmx->loaded_vmcs->nmi_known_unmasked) in vmx_recover_nmi_blocking()
10522 vmx->loaded_vmcs->nmi_known_unmasked = in vmx_recover_nmi_blocking()
10525 } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked)) in vmx_recover_nmi_blocking()
10526 vmx->loaded_vmcs->vnmi_blocked_time += in vmx_recover_nmi_blocking()
10528 vmx->loaded_vmcs->entry_time)); in vmx_recover_nmi_blocking()
10585 static void vmx_complete_interrupts(struct vcpu_vmx *vmx) in vmx_complete_interrupts() argument
10587 __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, in vmx_complete_interrupts()
10602 static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx) in atomic_switch_perf_msrs() argument
10614 clear_atomic_switch_msr(vmx, msrs[i].msr); in atomic_switch_perf_msrs()
10616 add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest, in atomic_switch_perf_msrs()
10620 static void vmx_arm_hv_timer(struct vcpu_vmx *vmx, u32 val) in vmx_arm_hv_timer() argument
10623 if (!vmx->loaded_vmcs->hv_timer_armed) in vmx_arm_hv_timer()
10626 vmx->loaded_vmcs->hv_timer_armed = true; in vmx_arm_hv_timer()
10631 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_update_hv_timer() local
10635 if (vmx->req_immediate_exit) { in vmx_update_hv_timer()
10636 vmx_arm_hv_timer(vmx, 0); in vmx_update_hv_timer()
10640 if (vmx->hv_deadline_tsc != -1) { in vmx_update_hv_timer()
10642 if (vmx->hv_deadline_tsc > tscl) in vmx_update_hv_timer()
10644 delta_tsc = (u32)((vmx->hv_deadline_tsc - tscl) >> in vmx_update_hv_timer()
10649 vmx_arm_hv_timer(vmx, delta_tsc); in vmx_update_hv_timer()
10653 if (vmx->loaded_vmcs->hv_timer_armed) in vmx_update_hv_timer()
10656 vmx->loaded_vmcs->hv_timer_armed = false; in vmx_update_hv_timer()
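vmx_update_hv_timer() turns the absolute hv_deadline_tsc into a TSC delta, scales it down to preemption-timer ticks, and arms the timer with that value (or with 0 when an immediate exit was requested; -1 means no deadline). A sketch of the delta computation, with the TSC-to-timer shift as an assumed constant (the kernel reads it from MSR_IA32_VMX_MISC):

#include <stdbool.h>
#include <stdint.h>

#define PREEMPTION_TIMER_SHIFT_SK 5     /* assumed cpu_preemption_timer_multi */

/* Returns true and fills *timer_val when the preemption timer should be armed. */
static bool hv_timer_value_sk(uint64_t deadline_tsc, uint64_t now_tsc,
                              bool immediate_exit, uint32_t *timer_val)
{
    if (immediate_exit) {               /* exit as soon as the guest runs */
        *timer_val = 0;
        return true;
    }
    if (deadline_tsc == (uint64_t)-1)
        return false;                   /* no deadline programmed */

    uint64_t delta = deadline_tsc > now_tsc ? deadline_tsc - now_tsc : 0;
    *timer_val = (uint32_t)(delta >> PREEMPTION_TIMER_SHIFT_SK);
    return true;
}
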
10661 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_vcpu_run() local
10666 vmx->loaded_vmcs->soft_vnmi_blocked)) in vmx_vcpu_run()
10667 vmx->loaded_vmcs->entry_time = ktime_get(); in vmx_vcpu_run()
10671 if (vmx->emulation_required) in vmx_vcpu_run()
10674 if (vmx->ple_window_dirty) { in vmx_vcpu_run()
10675 vmx->ple_window_dirty = false; in vmx_vcpu_run()
10676 vmcs_write32(PLE_WINDOW, vmx->ple_window); in vmx_vcpu_run()
10679 if (vmx->nested.sync_shadow_vmcs) { in vmx_vcpu_run()
10680 copy_vmcs12_to_shadow(vmx); in vmx_vcpu_run()
10681 vmx->nested.sync_shadow_vmcs = false; in vmx_vcpu_run()
10690 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) { in vmx_vcpu_run()
10692 vmx->loaded_vmcs->host_state.cr3 = cr3; in vmx_vcpu_run()
10696 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) { in vmx_vcpu_run()
10698 vmx->loaded_vmcs->host_state.cr4 = cr4; in vmx_vcpu_run()
10711 vcpu->arch.pkru != vmx->host_pkru) in vmx_vcpu_run()
10714 atomic_switch_perf_msrs(vmx); in vmx_vcpu_run()
10724 x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0); in vmx_vcpu_run()
10726 vmx->__launched = vmx->loaded_vmcs->launched; in vmx_vcpu_run()
10825 : : "c"(vmx), "d"((unsigned long)HOST_RSP), "S"(evmcs_rsp), in vmx_vcpu_run()
10873 vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL); in vmx_vcpu_run()
10875 x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0); in vmx_vcpu_run()
10886 if (vmx->host_debugctlmsr) in vmx_vcpu_run()
10887 update_debugctlmsr(vmx->host_debugctlmsr); in vmx_vcpu_run()
10917 if (vcpu->arch.pkru != vmx->host_pkru) in vmx_vcpu_run()
10918 __write_pkru(vmx->host_pkru); in vmx_vcpu_run()
10921 vmx->nested.nested_run_pending = 0; in vmx_vcpu_run()
10922 vmx->idt_vectoring_info = 0; in vmx_vcpu_run()
10924 vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON); in vmx_vcpu_run()
10925 if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) in vmx_vcpu_run()
10928 vmx->loaded_vmcs->launched = 1; in vmx_vcpu_run()
10929 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); in vmx_vcpu_run()
10931 vmx_complete_atomic_exit(vmx); in vmx_vcpu_run()
10932 vmx_recover_nmi_blocking(vmx); in vmx_vcpu_run()
10933 vmx_complete_interrupts(vmx); in vmx_vcpu_run()
10950 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_switch_vmcs() local
10953 if (vmx->loaded_vmcs == vmcs) in vmx_switch_vmcs()
10958 vmx->loaded_vmcs = vmcs; in vmx_switch_vmcs()
10969 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_free_vcpu_nested() local
10972 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in vmx_free_vcpu_nested()
10973 free_nested(vmx); in vmx_free_vcpu_nested()
10979 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_free_vcpu() local
10982 vmx_destroy_pml_buffer(vmx); in vmx_free_vcpu()
10983 free_vpid(vmx->vpid); in vmx_free_vcpu()
10986 free_loaded_vmcs(vmx->loaded_vmcs); in vmx_free_vcpu()
10987 kfree(vmx->guest_msrs); in vmx_free_vcpu()
10989 kmem_cache_free(kvm_vcpu_cache, vmx); in vmx_free_vcpu()
10995 struct vcpu_vmx *vmx = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); in vmx_create_vcpu() local
10999 if (!vmx) in vmx_create_vcpu()
11002 vmx->vpid = allocate_vpid(); in vmx_create_vcpu()
11004 err = kvm_vcpu_init(&vmx->vcpu, kvm, id); in vmx_create_vcpu()
11017 vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO); in vmx_create_vcpu()
11018 if (!vmx->pml_pg) in vmx_create_vcpu()
11022 vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); in vmx_create_vcpu()
11023 BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) in vmx_create_vcpu()
11026 if (!vmx->guest_msrs) in vmx_create_vcpu()
11029 err = alloc_loaded_vmcs(&vmx->vmcs01); in vmx_create_vcpu()
11033 msr_bitmap = vmx->vmcs01.msr_bitmap; in vmx_create_vcpu()
11040 vmx->msr_bitmap_mode = 0; in vmx_create_vcpu()
11042 vmx->loaded_vmcs = &vmx->vmcs01; in vmx_create_vcpu()
11044 vmx_vcpu_load(&vmx->vcpu, cpu); in vmx_create_vcpu()
11045 vmx->vcpu.cpu = cpu; in vmx_create_vcpu()
11046 vmx_vcpu_setup(vmx); in vmx_create_vcpu()
11047 vmx_vcpu_put(&vmx->vcpu); in vmx_create_vcpu()
11049 if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) { in vmx_create_vcpu()
11062 nested_vmx_setup_ctls_msrs(&vmx->nested.msrs, in vmx_create_vcpu()
11063 kvm_vcpu_apicv_active(&vmx->vcpu)); in vmx_create_vcpu()
11065 vmx->nested.posted_intr_nv = -1; in vmx_create_vcpu()
11066 vmx->nested.current_vmptr = -1ull; in vmx_create_vcpu()
11068 vmx->msr_ia32_feature_control_valid_bits = FEATURE_CONTROL_LOCKED; in vmx_create_vcpu()
11074 vmx->pi_desc.nv = POSTED_INTR_VECTOR; in vmx_create_vcpu()
11075 vmx->pi_desc.sn = 1; in vmx_create_vcpu()
11077 return &vmx->vcpu; in vmx_create_vcpu()
11080 free_loaded_vmcs(vmx->loaded_vmcs); in vmx_create_vcpu()
11082 kfree(vmx->guest_msrs); in vmx_create_vcpu()
11084 vmx_destroy_pml_buffer(vmx); in vmx_create_vcpu()
11086 kvm_vcpu_uninit(&vmx->vcpu); in vmx_create_vcpu()
11088 free_vpid(vmx->vpid); in vmx_create_vcpu()
11089 kmem_cache_free(kvm_vcpu_cache, vmx); in vmx_create_vcpu()
11221 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_cr_fixed1_bits_update() local
11224 vmx->nested.msrs.cr0_fixed1 = 0xffffffff; in nested_vmx_cr_fixed1_bits_update()
11225 vmx->nested.msrs.cr4_fixed1 = X86_CR4_PCE; in nested_vmx_cr_fixed1_bits_update()
11229 vmx->nested.msrs.cr4_fixed1 |= (_cr4_mask); \ in nested_vmx_cr_fixed1_bits_update()
11260 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_entry_exit_ctls_update() local
11266 vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; in nested_vmx_entry_exit_ctls_update()
11267 vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; in nested_vmx_entry_exit_ctls_update()
11269 vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS; in nested_vmx_entry_exit_ctls_update()
11270 vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS; in nested_vmx_entry_exit_ctls_update()
11277 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_cpuid_update() local
11280 vmx_compute_secondary_exec_control(vmx); in vmx_cpuid_update()
11281 vmcs_set_secondary_exec_control(vmx->secondary_exec_control); in vmx_cpuid_update()
11307 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_ept_inject_page_fault() local
11311 if (vmx->nested.pml_full) { in nested_ept_inject_page_fault()
11313 vmx->nested.pml_full = false; in nested_ept_inject_page_fault()
11398 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_get_vmcs12_pages() local
11409 if (vmx->nested.apic_access_page) { /* shouldn't happen */ in nested_get_vmcs12_pages()
11410 kvm_release_page_dirty(vmx->nested.apic_access_page); in nested_get_vmcs12_pages()
11411 vmx->nested.apic_access_page = NULL; in nested_get_vmcs12_pages()
11421 vmx->nested.apic_access_page = page; in nested_get_vmcs12_pages()
11422 hpa = page_to_phys(vmx->nested.apic_access_page); in nested_get_vmcs12_pages()
11431 if (vmx->nested.virtual_apic_page) { /* shouldn't happen */ in nested_get_vmcs12_pages()
11432 kvm_release_page_dirty(vmx->nested.virtual_apic_page); in nested_get_vmcs12_pages()
11433 vmx->nested.virtual_apic_page = NULL; in nested_get_vmcs12_pages()
11451 vmx->nested.virtual_apic_page = page; in nested_get_vmcs12_pages()
11452 hpa = page_to_phys(vmx->nested.virtual_apic_page); in nested_get_vmcs12_pages()
11458 if (vmx->nested.pi_desc_page) { /* shouldn't happen */ in nested_get_vmcs12_pages()
11459 kunmap(vmx->nested.pi_desc_page); in nested_get_vmcs12_pages()
11460 kvm_release_page_dirty(vmx->nested.pi_desc_page); in nested_get_vmcs12_pages()
11461 vmx->nested.pi_desc_page = NULL; in nested_get_vmcs12_pages()
11466 vmx->nested.pi_desc_page = page; in nested_get_vmcs12_pages()
11467 vmx->nested.pi_desc = kmap(vmx->nested.pi_desc_page); in nested_get_vmcs12_pages()
11468 vmx->nested.pi_desc = in nested_get_vmcs12_pages()
11469 (struct pi_desc *)((void *)vmx->nested.pi_desc + in nested_get_vmcs12_pages()
11473 page_to_phys(vmx->nested.pi_desc_page) + in nested_get_vmcs12_pages()
11488 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_start_preemption_timer() local
11495 vmx_preemption_timer_fn(&vmx->nested.preemption_timer); in vmx_start_preemption_timer()
11505 hrtimer_start(&vmx->nested.preemption_timer, in vmx_start_preemption_timer()
11662 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_flush_cached_shadow_vmcs12() local
11668 kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer, in nested_flush_cached_shadow_vmcs12()
11965 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmcs02_full() local
12049 vmx_set_constant_host_state(vmx); in prepare_vmcs02_full()
12055 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in prepare_vmcs02_full()
12056 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val)); in prepare_vmcs02_full()
12057 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in prepare_vmcs02_full()
12058 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val)); in prepare_vmcs02_full()
12060 set_cr4_guest_host_mask(vmx); in prepare_vmcs02_full()
12063 if (vmx->nested.nested_run_pending && in prepare_vmcs02_full()
12067 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs); in prepare_vmcs02_full()
12071 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) in prepare_vmcs02_full()
12072 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02); in prepare_vmcs02_full()
12074 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid); in prepare_vmcs02_full()
12088 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap)); in prepare_vmcs02_full()
12105 struct vcpu_vmx *vmx = to_vmx(vcpu); in prepare_vmcs02() local
12108 if (vmx->nested.dirty_vmcs12) { in prepare_vmcs02()
12110 vmx->nested.dirty_vmcs12 = false; in prepare_vmcs02()
12124 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
12130 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); in prepare_vmcs02()
12132 if (vmx->nested.nested_run_pending) { in prepare_vmcs02()
12141 vmx->loaded_vmcs->nmi_known_unmasked = in prepare_vmcs02()
12153 vmx->loaded_vmcs->hv_timer_armed = false; in prepare_vmcs02()
12157 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv; in prepare_vmcs02()
12158 vmx->nested.pi_pending = false; in prepare_vmcs02()
12165 vmx->nested.preemption_timer_expired = false; in prepare_vmcs02()
12170 exec_control = vmx->secondary_exec_control; in prepare_vmcs02()
12215 vmx->host_rsp = 0; in prepare_vmcs02()
12217 exec_control = vmx_exec_control(vmx); /* L0's desires */ in prepare_vmcs02()
12264 vm_entry_controls_init(vmx, in prepare_vmcs02()
12269 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
12274 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat); in prepare_vmcs02()
12280 decache_tsc_multiplier(vmx); in prepare_vmcs02()
12291 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02) { in prepare_vmcs02()
12292 if (vmcs12->virtual_processor_id != vmx->nested.last_vpid) { in prepare_vmcs02()
12293 vmx->nested.last_vpid = vmcs12->virtual_processor_id; in prepare_vmcs02()
12294 __vmx_flush_tlb(vcpu, vmx->nested.vpid02, true); in prepare_vmcs02()
12308 ASSERT(vmx->pml_pg); in prepare_vmcs02()
12309 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); in prepare_vmcs02()
12337 if (vmx->nested.nested_run_pending && in prepare_vmcs02()
12352 if (vmx->emulation_required) { in prepare_vmcs02()
12385 struct vcpu_vmx *vmx = to_vmx(vcpu); in check_vmentry_prereqs() local
12419 vmx->nested.msrs.procbased_ctls_low, in check_vmentry_prereqs()
12420 vmx->nested.msrs.procbased_ctls_high) || in check_vmentry_prereqs()
12423 vmx->nested.msrs.secondary_ctls_low, in check_vmentry_prereqs()
12424 vmx->nested.msrs.secondary_ctls_high)) || in check_vmentry_prereqs()
12426 vmx->nested.msrs.pinbased_ctls_low, in check_vmentry_prereqs()
12427 vmx->nested.msrs.pinbased_ctls_high) || in check_vmentry_prereqs()
12429 vmx->nested.msrs.exit_ctls_low, in check_vmentry_prereqs()
12430 vmx->nested.msrs.exit_ctls_high) || in check_vmentry_prereqs()
12432 vmx->nested.msrs.entry_ctls_low, in check_vmentry_prereqs()
12433 vmx->nested.msrs.entry_ctls_high)) in check_vmentry_prereqs()
12441 ~vmx->nested.msrs.vmfunc_controls) in check_vmentry_prereqs()
12609 struct vcpu_vmx *vmx = to_vmx(vcpu); in enter_vmx_non_root_mode() local
12624 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); in enter_vmx_non_root_mode()
12627 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); in enter_vmx_non_root_mode()
12629 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02); in enter_vmx_non_root_mode()
12630 vmx_segment_cache_clear(vmx); in enter_vmx_non_root_mode()
12688 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in enter_vmx_non_root_mode()
12699 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_run() local
12724 copy_shadow_to_vmcs12(vmx); in nested_vmx_run()
12776 vmx->nested.nested_run_pending = 1; in nested_vmx_run()
12780 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
12785 vmx->vcpu.arch.l1tf_flush_l1d = true; in nested_vmx_run()
12805 vmx->nested.nested_run_pending = 0; in nested_vmx_run()
12895 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_check_nested_events() local
12898 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu); in vmx_check_nested_events()
12909 vmx->nested.preemption_timer_expired) { in vmx_check_nested_events()
13287 struct vcpu_vmx *vmx = to_vmx(vcpu); in nested_vmx_vmexit() local
13291 WARN_ON_ONCE(vmx->nested.nested_run_pending); in nested_vmx_vmexit()
13298 WARN_ON_ONCE(vmx->fail && (vmcs_read32(VM_INSTRUCTION_ERROR) != in nested_vmx_vmexit()
13306 if (likely(!vmx->fail)) { in nested_vmx_vmexit()
13329 vmx_switch_vmcs(vcpu, &vmx->vmcs01); in nested_vmx_vmexit()
13330 vm_entry_controls_reset_shadow(vmx); in nested_vmx_vmexit()
13331 vm_exit_controls_reset_shadow(vmx); in nested_vmx_vmexit()
13332 vmx_segment_cache_clear(vmx); in nested_vmx_vmexit()
13335 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr); in nested_vmx_vmexit()
13336 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr); in nested_vmx_vmexit()
13340 decache_tsc_multiplier(vmx); in nested_vmx_vmexit()
13342 if (vmx->nested.change_vmcs01_virtual_apic_mode) { in nested_vmx_vmexit()
13343 vmx->nested.change_vmcs01_virtual_apic_mode = false; in nested_vmx_vmexit()
13352 vmx->host_rsp = 0; in nested_vmx_vmexit()
13355 if (vmx->nested.apic_access_page) { in nested_vmx_vmexit()
13356 kvm_release_page_dirty(vmx->nested.apic_access_page); in nested_vmx_vmexit()
13357 vmx->nested.apic_access_page = NULL; in nested_vmx_vmexit()
13359 if (vmx->nested.virtual_apic_page) { in nested_vmx_vmexit()
13360 kvm_release_page_dirty(vmx->nested.virtual_apic_page); in nested_vmx_vmexit()
13361 vmx->nested.virtual_apic_page = NULL; in nested_vmx_vmexit()
13363 if (vmx->nested.pi_desc_page) { in nested_vmx_vmexit()
13364 kunmap(vmx->nested.pi_desc_page); in nested_vmx_vmexit()
13365 kvm_release_page_dirty(vmx->nested.pi_desc_page); in nested_vmx_vmexit()
13366 vmx->nested.pi_desc_page = NULL; in nested_vmx_vmexit()
13367 vmx->nested.pi_desc = NULL; in nested_vmx_vmexit()
13377 vmx->nested.sync_shadow_vmcs = true; in nested_vmx_vmexit()
13382 if (likely(!vmx->fail)) { in nested_vmx_vmexit()
13430 vmx->fail = 0; in nested_vmx_vmexit()
13507 struct vcpu_vmx *vmx; in vmx_set_hv_timer() local
13513 vmx = to_vmx(vcpu); in vmx_set_hv_timer()
13541 vmx->hv_deadline_tsc = tscl + delta_tsc; in vmx_set_hv_timer()
13578 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_write_pml_buffer() local
13584 WARN_ON_ONCE(vmx->nested.pml_full); in vmx_write_pml_buffer()
13596 vmx->nested.pml_full = true; in vmx_write_pml_buffer()
13863 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_pre_enter_smm() local
13865 vmx->nested.smm.guest_mode = is_guest_mode(vcpu); in vmx_pre_enter_smm()
13866 if (vmx->nested.smm.guest_mode) in vmx_pre_enter_smm()
13869 vmx->nested.smm.vmxon = vmx->nested.vmxon; in vmx_pre_enter_smm()
13870 vmx->nested.vmxon = false; in vmx_pre_enter_smm()
13877 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_pre_leave_smm() local
13880 if (vmx->nested.smm.vmxon) { in vmx_pre_leave_smm()
13881 vmx->nested.vmxon = true; in vmx_pre_leave_smm()
13882 vmx->nested.smm.vmxon = false; in vmx_pre_leave_smm()
13885 if (vmx->nested.smm.guest_mode) { in vmx_pre_leave_smm()
13892 vmx->nested.smm.guest_mode = false; in vmx_pre_leave_smm()
13906 struct vcpu_vmx *vmx; in vmx_get_nested_state() local
13912 .vmx.vmxon_pa = -1ull, in vmx_get_nested_state()
13913 .vmx.vmcs_pa = -1ull, in vmx_get_nested_state()
13919 vmx = to_vmx(vcpu); in vmx_get_nested_state()
13922 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) { in vmx_get_nested_state()
13923 kvm_state.vmx.vmxon_pa = vmx->nested.vmxon_ptr; in vmx_get_nested_state()
13924 kvm_state.vmx.vmcs_pa = vmx->nested.current_vmptr; in vmx_get_nested_state()
13926 if (vmx->nested.current_vmptr != -1ull) { in vmx_get_nested_state()
13935 if (vmx->nested.smm.vmxon) in vmx_get_nested_state()
13936 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON; in vmx_get_nested_state()
13938 if (vmx->nested.smm.guest_mode) in vmx_get_nested_state()
13939 kvm_state.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE; in vmx_get_nested_state()
13944 if (vmx->nested.nested_run_pending) in vmx_get_nested_state()
13955 if (vmx->nested.current_vmptr == -1ull) in vmx_get_nested_state()
13967 else if (enable_shadow_vmcs && !vmx->nested.sync_shadow_vmcs) in vmx_get_nested_state()
13968 copy_shadow_to_vmcs12(vmx); in vmx_get_nested_state()
13988 struct vcpu_vmx *vmx = to_vmx(vcpu); in vmx_set_nested_state() local
13997 return kvm_state->vmx.vmxon_pa == -1ull ? 0 : -EINVAL; in vmx_set_nested_state()
13999 if (kvm_state->vmx.vmxon_pa == -1ull) { in vmx_set_nested_state()
14000 if (kvm_state->vmx.smm.flags) in vmx_set_nested_state()
14003 if (kvm_state->vmx.vmcs_pa != -1ull) in vmx_set_nested_state()
14010 if (!page_address_valid(vcpu, kvm_state->vmx.vmxon_pa)) in vmx_set_nested_state()
14016 if (kvm_state->vmx.vmcs_pa == kvm_state->vmx.vmxon_pa || in vmx_set_nested_state()
14017 !page_address_valid(vcpu, kvm_state->vmx.vmcs_pa)) in vmx_set_nested_state()
14020 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && in vmx_set_nested_state()
14024 if (kvm_state->vmx.smm.flags & in vmx_set_nested_state()
14033 if (is_smm(vcpu) ? kvm_state->flags : kvm_state->vmx.smm.flags) in vmx_set_nested_state()
14036 if ((kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) && in vmx_set_nested_state()
14037 !(kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)) in vmx_set_nested_state()
14041 if (kvm_state->vmx.vmxon_pa == -1ull) in vmx_set_nested_state()
14044 vmx->nested.vmxon_ptr = kvm_state->vmx.vmxon_pa; in vmx_set_nested_state()
14049 set_current_vmptr(vmx, kvm_state->vmx.vmcs_pa); in vmx_set_nested_state()
14051 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) { in vmx_set_nested_state()
14052 vmx->nested.smm.vmxon = true; in vmx_set_nested_state()
14053 vmx->nested.vmxon = false; in vmx_set_nested_state()
14055 if (kvm_state->vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) in vmx_set_nested_state()
14056 vmx->nested.smm.guest_mode = true; in vmx_set_nested_state()
14069 vmx->nested.nested_run_pending = in vmx_set_nested_state()
14092 vmx->nested.dirty_vmcs12 = true; in vmx_set_nested_state()