Lines Matching refs:hv_vcpu

201 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_notify_acked_sint() local
208 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) { in kvm_hv_notify_acked_sint()
209 stimer = &hv_vcpu->stimer[idx]; in kvm_hv_notify_acked_sint()
226 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in synic_exit() local
228 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC; in synic_exit()
229 hv_vcpu->exit.u.synic.msr = msr; in synic_exit()
230 hv_vcpu->exit.u.synic.control = synic->control; in synic_exit()
231 hv_vcpu->exit.u.synic.evt_page = synic->evt_page; in synic_exit()
232 hv_vcpu->exit.u.synic.msg_page = synic->msg_page; in synic_exit()
308 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_is_syndbg_enabled() local
310 return hv_vcpu->cpuid_cache.syndbg_cap_eax & in kvm_hv_is_syndbg_enabled()
327 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in syndbg_exit() local
329 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG; in syndbg_exit()
330 hv_vcpu->exit.u.syndbg.msr = msr; in syndbg_exit()
331 hv_vcpu->exit.u.syndbg.control = syndbg->control.control; in syndbg_exit()
332 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page; in syndbg_exit()
333 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page; in syndbg_exit()
334 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page; in syndbg_exit()
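
The synic_exit() and syndbg_exit() lines above follow one pattern: record which MSR access triggered the exit, tag the exit record with its type, then fill the matching union member with the current SynIC or SynDbg state so userspace can act on it. Below is a minimal stand-alone C sketch of that tagged-union pattern; the struct layout, field widths and the MSR index used in main() are simplified stand-ins, not the kernel's kvm_vcpu_hv or KVM exit ABI.

/*
 * Stand-alone sketch of the exit-record pattern used by synic_exit() and
 * syndbg_exit() above: set the exit type, then fill the matching union
 * member.  All types and values here are simplified stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

enum hv_exit_type { EXIT_HYPERV_SYNIC, EXIT_HYPERV_SYNDBG };

struct hv_exit_sketch {
	enum hv_exit_type type;
	union {
		struct {
			uint32_t msr;
			uint64_t control, evt_page, msg_page;
		} synic;
		struct {
			uint32_t msr;
			uint64_t control, send_page, recv_page, pending_page;
		} syndbg;
	} u;
};

/* Mirrors the synic_exit() fragments: stash the triggering MSR plus the
 * current SynIC page state for userspace to inspect. */
static void synic_exit_sketch(struct hv_exit_sketch *rec, uint32_t msr,
			      uint64_t control, uint64_t evt_page, uint64_t msg_page)
{
	rec->type = EXIT_HYPERV_SYNIC;
	rec->u.synic.msr = msr;
	rec->u.synic.control = control;
	rec->u.synic.evt_page = evt_page;
	rec->u.synic.msg_page = msg_page;
}

int main(void)
{
	struct hv_exit_sketch rec;

	synic_exit_sketch(&rec, 0x40000080 /* illustrative SynIC MSR index */, 1, 0, 0);
	printf("exit type %d for msr %#x\n", rec.type, rec.u.synic.msr);
	return 0;
}
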
668 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in stimer_set_config() local
674 if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode && in stimer_set_config()
675 !(hv_vcpu->cpuid_cache.features_edx & in stimer_set_config()
841 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_process_stimers() local
846 if (!hv_vcpu) in kvm_hv_process_stimers()
849 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_process_stimers()
850 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) { in kvm_hv_process_stimers()
851 stimer = &hv_vcpu->stimer[i]; in kvm_hv_process_stimers()
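
Both kvm_hv_notify_acked_sint() and kvm_hv_process_stimers() above walk the fixed-size hv_vcpu->stimer array, and the latter only touches timers whose bit is set in stimer_pending_bitmap, consuming each bit as it goes. The sketch below shows that walk-and-consume loop in stand-alone C; the non-atomic bit helper and the four-timer struct are simplified stand-ins for the kernel's test_and_clear_bit() and kvm_vcpu_hv.

#include <stdbool.h>
#include <stdio.h>

#define STIMER_COUNT 4

struct stimer_sketch {
	int index;                             /* the real struct also carries config/count state */
};

struct hv_vcpu_sketch {
	struct stimer_sketch stimer[STIMER_COUNT];
	unsigned long stimer_pending_bitmap;   /* one bit per synthetic timer */
};

/* Simplified, non-atomic stand-in for the kernel's test_and_clear_bit(). */
static bool test_and_clear_bit_sketch(int nr, unsigned long *addr)
{
	bool was_set = (*addr >> nr) & 1UL;

	*addr &= ~(1UL << nr);
	return was_set;
}

static void process_stimers_sketch(struct hv_vcpu_sketch *hv_vcpu)
{
	for (int i = 0; i < STIMER_COUNT; i++)
		if (test_and_clear_bit_sketch(i, &hv_vcpu->stimer_pending_bitmap)) {
			struct stimer_sketch *stimer = &hv_vcpu->stimer[i];

			/* The kernel expires or cleans up the timer here. */
			printf("servicing stimer %d\n", stimer->index);
		}
}

int main(void)
{
	struct hv_vcpu_sketch hv_vcpu = { .stimer_pending_bitmap = 0x5 };

	for (int i = 0; i < STIMER_COUNT; i++)
		hv_vcpu.stimer[i].index = i;
	process_stimers_sketch(&hv_vcpu);
	return 0;
}
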
874 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_vcpu_uninit() local
877 if (!hv_vcpu) in kvm_hv_vcpu_uninit()
880 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_uninit()
881 stimer_cleanup(&hv_vcpu->stimer[i]); in kvm_hv_vcpu_uninit()
883 kfree(hv_vcpu); in kvm_hv_vcpu_uninit()
889 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_assist_page_enabled() local
891 if (!hv_vcpu) in kvm_hv_assist_page_enabled()
894 if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) in kvm_hv_assist_page_enabled()
936 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_vcpu_init() local
939 if (hv_vcpu) in kvm_hv_vcpu_init()
942 hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT); in kvm_hv_vcpu_init()
943 if (!hv_vcpu) in kvm_hv_vcpu_init()
946 vcpu->arch.hyperv = hv_vcpu; in kvm_hv_vcpu_init()
947 hv_vcpu->vcpu = vcpu; in kvm_hv_vcpu_init()
949 synic_init(&hv_vcpu->synic); in kvm_hv_vcpu_init()
951 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); in kvm_hv_vcpu_init()
952 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_init()
953 stimer_init(&hv_vcpu->stimer[i], i); in kvm_hv_vcpu_init()
955 hv_vcpu->vp_index = vcpu->vcpu_idx; in kvm_hv_vcpu_init()
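
The kvm_hv_vcpu_init() lines above show the lazy-allocation pattern: the per-vCPU Hyper-V context is created only on first use, zero-allocated (kzalloc with GFP_KERNEL_ACCOUNT in the kernel), linked back to its vCPU, its SynIC and timers initialised, and its VP index defaulted to the vCPU index. A stand-alone sketch of that flow, with calloc standing in for kzalloc and heavily simplified struct layouts:

#include <stdlib.h>

#define STIMER_COUNT 4

struct vcpu_sketch;

struct hv_vcpu_sketch {
	struct vcpu_sketch *vcpu;              /* back-pointer, as in hv_vcpu->vcpu = vcpu */
	unsigned long stimer_pending_bitmap;   /* zeroed implicitly by the zeroing allocation */
	struct { int index; } stimer[STIMER_COUNT];
	unsigned int vp_index;
};

struct vcpu_sketch {
	unsigned int vcpu_idx;
	struct hv_vcpu_sketch *hyperv;         /* stands in for vcpu->arch.hyperv */
};

static int hv_vcpu_init_sketch(struct vcpu_sketch *vcpu)
{
	struct hv_vcpu_sketch *hv_vcpu = vcpu->hyperv;

	if (hv_vcpu)                           /* already initialised: nothing to do */
		return 0;

	hv_vcpu = calloc(1, sizeof(*hv_vcpu)); /* kzalloc(..., GFP_KERNEL_ACCOUNT) in the kernel */
	if (!hv_vcpu)
		return -1;                     /* -ENOMEM in the kernel */

	vcpu->hyperv = hv_vcpu;
	hv_vcpu->vcpu = vcpu;

	for (int i = 0; i < STIMER_COUNT; i++)
		hv_vcpu->stimer[i].index = i;  /* stimer_init() does more in the kernel */

	/* The VP index defaults to the vCPU index until the guest overrides it. */
	hv_vcpu->vp_index = vcpu->vcpu_idx;
	return 0;
}

int main(void)
{
	struct vcpu_sketch vcpu = { .vcpu_idx = 3 };

	return hv_vcpu_init_sketch(&vcpu);
}
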
1217 static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr) in hv_check_msr_access() argument
1219 if (!hv_vcpu->enforce_cpuid) in hv_check_msr_access()
1225 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1228 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1231 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1234 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1237 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1240 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1248 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1258 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1264 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1269 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1274 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1278 return hv_vcpu->cpuid_cache.features_edx & in hv_check_msr_access()
1282 return hv_vcpu->cpuid_cache.features_edx & in hv_check_msr_access()
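
hv_check_msr_access() above is the enforcement point for the enforce_cpuid knob: when it is off, every Hyper-V MSR is allowed; when it is on, each MSR is allowed only if the matching feature bit survives in the cached CPUID leaves (features_eax for most MSRs, features_edx for a few). A compact stand-alone sketch of that switch follows; the MSR indices and feature bits are illustrative stand-ins, not the real HV_* constants.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MSR_VP_RUNTIME_SKETCH  0x40000010u
#define MSR_TIME_REF_SKETCH    0x40000020u

#define FEAT_VP_RUNTIME_SKETCH (1u << 0)
#define FEAT_TIME_REF_SKETCH   (1u << 1)

struct hv_vcpu_sketch {
	bool enforce_cpuid;
	struct {
		uint32_t features_eax;
	} cpuid_cache;
};

static bool check_msr_access_sketch(struct hv_vcpu_sketch *hv_vcpu, uint32_t msr)
{
	if (!hv_vcpu->enforce_cpuid)
		return true;                   /* no enforcement: everything is allowed */

	switch (msr) {
	case MSR_VP_RUNTIME_SKETCH:
		return hv_vcpu->cpuid_cache.features_eax & FEAT_VP_RUNTIME_SKETCH;
	case MSR_TIME_REF_SKETCH:
		return hv_vcpu->cpuid_cache.features_eax & FEAT_TIME_REF_SKETCH;
	default:
		return true;
	}
}

int main(void)
{
	struct hv_vcpu_sketch hv_vcpu = {
		.enforce_cpuid = true,
		.cpuid_cache.features_eax = FEAT_TIME_REF_SKETCH,
	};

	printf("VP_RUNTIME allowed: %d\n",
	       check_msr_access_sketch(&hv_vcpu, MSR_VP_RUNTIME_SKETCH));   /* 0 */
	printf("TIME_REF   allowed: %d\n",
	       check_msr_access_sketch(&hv_vcpu, MSR_TIME_REF_SKETCH));     /* 1 */
	return 0;
}
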
1426 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_set_msr() local
1428 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr))) in kvm_hv_set_msr()
1439 if (new_vp_index == hv_vcpu->vp_index) in kvm_hv_set_msr()
1448 if (hv_vcpu->vp_index == vcpu->vcpu_idx) in kvm_hv_set_msr()
1453 hv_vcpu->vp_index = new_vp_index; in kvm_hv_set_msr()
1461 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1478 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1495 hv_vcpu->runtime_offset = data - current_task_runtime_100ns(); in kvm_hv_set_msr()
1594 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_get_msr() local
1596 if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr))) in kvm_hv_get_msr()
1601 data = hv_vcpu->vp_index; in kvm_hv_get_msr()
1610 data = hv_vcpu->hv_vapic; in kvm_hv_get_msr()
1613 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset; in kvm_hv_get_msr()
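
The VP runtime lines above (runtime_offset in kvm_hv_set_msr() and kvm_hv_get_msr()) store a host write as an offset: the kernel records data minus the current task runtime, and every read returns the current runtime plus that offset, so the written value reads back exactly and then keeps advancing with the vCPU task. A tiny sketch of the arithmetic, with a fake counter standing in for current_task_runtime_100ns():

#include <stdint.h>
#include <stdio.h>

static uint64_t fake_runtime_100ns = 5000;    /* stand-in for the task runtime source */
static uint64_t runtime_offset;

static void set_vp_runtime_sketch(uint64_t data)
{
	runtime_offset = data - fake_runtime_100ns;
}

static uint64_t get_vp_runtime_sketch(void)
{
	return fake_runtime_100ns + runtime_offset;
}

int main(void)
{
	set_vp_runtime_sketch(123456);        /* VMM restores a saved value   */
	fake_runtime_100ns += 1000;           /* vCPU task runs a bit more    */
	printf("%llu\n", (unsigned long long)get_vp_runtime_sketch());  /* 124456 */
	return 0;
}
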
1986 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_set_cpuid() local
1991 if (!hv_vcpu) { in kvm_hv_set_cpuid()
2000 memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache)); in kvm_hv_set_cpuid()
2007 hv_vcpu->cpuid_cache.features_eax = entry->eax; in kvm_hv_set_cpuid()
2008 hv_vcpu->cpuid_cache.features_ebx = entry->ebx; in kvm_hv_set_cpuid()
2009 hv_vcpu->cpuid_cache.features_edx = entry->edx; in kvm_hv_set_cpuid()
2014 hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax; in kvm_hv_set_cpuid()
2015 hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx; in kvm_hv_set_cpuid()
2020 hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax; in kvm_hv_set_cpuid()
2024 hv_vcpu->cpuid_cache.nested_eax = entry->eax; in kvm_hv_set_cpuid()
2025 hv_vcpu->cpuid_cache.nested_ebx = entry->ebx; in kvm_hv_set_cpuid()
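
kvm_hv_set_cpuid() above first wipes hv_vcpu->cpuid_cache and then refills it from whichever Hyper-V leaves the VMM supplied (features, enlightenments, syndbg, nested), so the access checks quoted earlier always read a consistent snapshot. A stand-alone sketch of that wipe-and-refill step; the leaf numbers, the lookup shape and the two cached leaves shown are illustrative stand-ins.

#include <stdint.h>
#include <string.h>

#define HV_FEATURES_LEAF_SKETCH        0x40000003u
#define HV_ENLIGHTENMENTS_LEAF_SKETCH  0x40000004u

struct cpuid_entry_sketch {
	uint32_t function, eax, ebx, ecx, edx;
};

struct cpuid_cache_sketch {
	uint32_t features_eax, features_ebx, features_edx;
	uint32_t enlightenments_eax, enlightenments_ebx;
};

static void set_cpuid_sketch(struct cpuid_cache_sketch *cache,
			     const struct cpuid_entry_sketch *entries, int n)
{
	/* Start from a clean slate so bits from a previous CPUID set never leak. */
	memset(cache, 0, sizeof(*cache));

	for (int i = 0; i < n; i++) {
		const struct cpuid_entry_sketch *e = &entries[i];

		if (e->function == HV_FEATURES_LEAF_SKETCH) {
			cache->features_eax = e->eax;
			cache->features_ebx = e->ebx;
			cache->features_edx = e->edx;
		} else if (e->function == HV_ENLIGHTENMENTS_LEAF_SKETCH) {
			cache->enlightenments_eax = e->eax;
			cache->enlightenments_ebx = e->ebx;
		}
	}
}

int main(void)
{
	struct cpuid_cache_sketch cache;
	struct cpuid_entry_sketch entries[] = {
		{ .function = HV_FEATURES_LEAF_SKETCH,       .eax = 0x3f, .ebx = 0x7, .edx = 0x1 },
		{ .function = HV_ENLIGHTENMENTS_LEAF_SKETCH, .eax = 0x2e, .ebx = 64 },
	};

	set_cpuid_sketch(&cache, entries, 2);
	return cache.features_eax == 0x3f ? 0 : 1;
}
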
2031 struct kvm_vcpu_hv *hv_vcpu; in kvm_hv_set_enforce_cpuid() local
2044 hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_set_enforce_cpuid()
2045 hv_vcpu->enforce_cpuid = enforce; in kvm_hv_set_enforce_cpuid()
2141 static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code) in hv_check_hypercall_access() argument
2143 if (!hv_vcpu->enforce_cpuid) in hv_check_hypercall_access()
2148 return hv_vcpu->cpuid_cache.enlightenments_ebx && in hv_check_hypercall_access()
2149 hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX; in hv_check_hypercall_access()
2151 return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES; in hv_check_hypercall_access()
2153 return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS; in hv_check_hypercall_access()
2161 return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) || in hv_check_hypercall_access()
2162 hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING; in hv_check_hypercall_access()
2165 if (!(hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2171 return hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2174 if (!(hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2179 return hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2190 struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu); in kvm_hv_hypercall() local
2229 if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) { in kvm_hv_hypercall()
2240 if (unlikely(hv_vcpu->enforce_cpuid && in kvm_hv_hypercall()
2241 !(hv_vcpu->cpuid_cache.features_edx & in kvm_hv_hypercall()
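
hv_check_hypercall_access() above is the hypercall-side twin of the MSR check: with enforce_cpuid set, most codes are gated on a features_ebx bit (HV_POST_MESSAGES, HV_SIGNAL_EVENTS, HV_DEBUGGING) or an enlightenments_eax recommendation, and one code is only allowed when enlightenments_ebx holds a real value, i.e. is neither 0 nor U32_MAX; kvm_hv_hypercall() then calls this check before dispatching. A stand-alone sketch of that gate follows; the hypercall codes and bit positions are illustrative stand-ins.

#include <stdbool.h>
#include <stdint.h>

#define U32_MAX_SKETCH 0xffffffffu

#define HC_SPIN_WAIT_SKETCH   0x0008u
#define HC_POST_MSG_SKETCH    0x005cu
#define HC_SIGNAL_EVT_SKETCH  0x005du

#define HV_POST_MESSAGES_SKETCH (1u << 4)
#define HV_SIGNAL_EVENTS_SKETCH (1u << 5)

struct hv_vcpu_sketch {
	bool enforce_cpuid;
	struct {
		uint32_t features_ebx;
		uint32_t enlightenments_ebx;
	} cpuid_cache;
};

static bool check_hypercall_access_sketch(struct hv_vcpu_sketch *hv_vcpu, uint16_t code)
{
	if (!hv_vcpu->enforce_cpuid)
		return true;

	switch (code) {
	case HC_SPIN_WAIT_SKETCH:
		/* Only allowed if the leaf carries a real value, not 0 or all-ones. */
		return hv_vcpu->cpuid_cache.enlightenments_ebx &&
		       hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX_SKETCH;
	case HC_POST_MSG_SKETCH:
		return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES_SKETCH;
	case HC_SIGNAL_EVT_SKETCH:
		return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS_SKETCH;
	default:
		return true;
	}
}

int main(void)
{
	struct hv_vcpu_sketch hv_vcpu = {
		.enforce_cpuid = true,
		.cpuid_cache.features_ebx = HV_POST_MESSAGES_SKETCH,
	};

	/* Post-message is allowed, signal-event is denied with this CPUID cache. */
	return check_hypercall_access_sketch(&hv_vcpu, HC_POST_MSG_SKETCH) &&
	       !check_hypercall_access_sketch(&hv_vcpu, HC_SIGNAL_EVT_SKETCH) ? 0 : 1;
}
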