Lines matching the full-word search terms "os", "-", "initiated"

1 // SPDX-License-Identifier: GPL-2.0-only
3 * KVM Microsoft Hyper-V emulation
17 * Ben-Ami Yassour <benami@il.ibm.com>
51 return atomic64_read(&synic->sint[sint]); in synic_read_sint()
57 return -1; in synic_get_sint_vector()
66 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { in synic_has_vector_connected()
79 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { in synic_has_vector_auto_eoi()
92 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in synic_update_vector()
99 __set_bit(vector, synic->vec_bitmap); in synic_update_vector()
101 __clear_bit(vector, synic->vec_bitmap); in synic_update_vector()
103 auto_eoi_old = bitmap_weight(synic->auto_eoi_bitmap, 256); in synic_update_vector()
106 __set_bit(vector, synic->auto_eoi_bitmap); in synic_update_vector()
108 __clear_bit(vector, synic->auto_eoi_bitmap); in synic_update_vector()
110 auto_eoi_new = bitmap_weight(synic->auto_eoi_bitmap, 256); in synic_update_vector()
115 mutex_lock(&vcpu->kvm->arch.apicv_update_lock); in synic_update_vector()
118 hv->synic_auto_eoi_used++; in synic_update_vector()
120 hv->synic_auto_eoi_used--; in synic_update_vector()
122 __kvm_request_apicv_update(vcpu->kvm, in synic_update_vector()
123 !hv->synic_auto_eoi_used, in synic_update_vector()
126 mutex_unlock(&vcpu->kvm->arch.apicv_update_lock); in synic_update_vector()
139 * Valid vectors are 16-255, however, nested Hyper-V attempts to write in synic_set_sint()
141 * allow zero-initing the register from host as well. in synic_set_sint()
148 * bitmap of vectors with auto-eoi behavior. The bitmaps are in synic_set_sint()
153 atomic64_set(&synic->sint[sint], data); in synic_set_sint()
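The fragments above show only the tail of synic_set_sint(); the validity rule described in the comment (usable vectors are 16-255, but an all-zero write is tolerated so the host and nested Hyper-V can zero-init the register) boils down to a check along these lines. A hedged sketch, not the file's exact code; the helper name is illustrative and HV_SYNIC_FIRST_VALID_VECTOR is 16 per the TLFS:

static bool sint_write_is_valid(u64 data, bool host)
{
	u8 vector = data & HV_SYNIC_SINT_VECTOR_MASK;	/* low 8 bits hold the vector */

	if (vector >= HV_SYNIC_FIRST_VALID_VECTOR)
		return true;
	/* vectors 0-15 are reserved, but allow zero-initing writes */
	return host || !data;
}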
190 return (synic->active) ? synic : NULL; in synic_get()
195 struct kvm *kvm = vcpu->kvm; in kvm_hv_notify_acked_sint()
201 trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint); in kvm_hv_notify_acked_sint()
203 /* Try to deliver pending Hyper-V SynIC timers messages */ in kvm_hv_notify_acked_sint()
204 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) { in kvm_hv_notify_acked_sint()
205 stimer = &hv_vcpu->stimer[idx]; in kvm_hv_notify_acked_sint()
206 if (stimer->msg_pending && stimer->config.enable && in kvm_hv_notify_acked_sint()
207 !stimer->config.direct_mode && in kvm_hv_notify_acked_sint()
208 stimer->config.sintx == sint) in kvm_hv_notify_acked_sint()
212 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_hv_notify_acked_sint()
213 gsi = atomic_read(&synic->sint_to_gsi[sint]); in kvm_hv_notify_acked_sint()
214 if (gsi != -1) in kvm_hv_notify_acked_sint()
216 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_hv_notify_acked_sint()
224 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC; in synic_exit()
225 hv_vcpu->exit.u.synic.msr = msr; in synic_exit()
226 hv_vcpu->exit.u.synic.control = synic->control; in synic_exit()
227 hv_vcpu->exit.u.synic.evt_page = synic->evt_page; in synic_exit()
228 hv_vcpu->exit.u.synic.msg_page = synic->msg_page; in synic_exit()
239 if (!synic->active && !host) in synic_set_msr()
242 trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host); in synic_set_msr()
247 synic->control = data; in synic_set_msr()
256 synic->version = data; in synic_set_msr()
260 !synic->dont_zero_synic_pages) in synic_set_msr()
261 if (kvm_clear_guest(vcpu->kvm, in synic_set_msr()
266 synic->evt_page = data; in synic_set_msr()
272 !synic->dont_zero_synic_pages) in synic_set_msr()
273 if (kvm_clear_guest(vcpu->kvm, in synic_set_msr()
278 synic->msg_page = data; in synic_set_msr()
285 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) in synic_set_msr()
290 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host); in synic_set_msr()
303 return hv_vcpu->cpuid_cache.syndbg_cap_eax & in kvm_hv_is_syndbg_enabled()
309 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_syndbg_complete_userspace()
311 if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL) in kvm_hv_syndbg_complete_userspace()
312 hv->hv_syndbg.control.status = in kvm_hv_syndbg_complete_userspace()
313 vcpu->run->hyperv.u.syndbg.status; in kvm_hv_syndbg_complete_userspace()
322 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG; in syndbg_exit()
323 hv_vcpu->exit.u.syndbg.msr = msr; in syndbg_exit()
324 hv_vcpu->exit.u.syndbg.control = syndbg->control.control; in syndbg_exit()
325 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page; in syndbg_exit()
326 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page; in syndbg_exit()
327 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page; in syndbg_exit()
328 vcpu->arch.complete_userspace_io = in syndbg_exit()
341 trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id, in syndbg_set_msr()
342 to_hv_vcpu(vcpu)->vp_index, msr, data); in syndbg_set_msr()
345 syndbg->control.control = data; in syndbg_set_msr()
350 syndbg->control.status = data; in syndbg_set_msr()
353 syndbg->control.send_page = data; in syndbg_set_msr()
356 syndbg->control.recv_page = data; in syndbg_set_msr()
359 syndbg->control.pending_page = data; in syndbg_set_msr()
364 syndbg->options = data; in syndbg_set_msr()
382 *pdata = syndbg->control.control; in syndbg_get_msr()
385 *pdata = syndbg->control.status; in syndbg_get_msr()
388 *pdata = syndbg->control.send_page; in syndbg_get_msr()
391 *pdata = syndbg->control.recv_page; in syndbg_get_msr()
394 *pdata = syndbg->control.pending_page; in syndbg_get_msr()
397 *pdata = syndbg->options; in syndbg_get_msr()
403 trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata); in syndbg_get_msr()
413 if (!synic->active && !host) in synic_get_msr()
419 *pdata = synic->control; in synic_get_msr()
422 *pdata = synic->version; in synic_get_msr()
425 *pdata = synic->evt_page; in synic_get_msr()
428 *pdata = synic->msg_page; in synic_get_msr()
434 *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]); in synic_get_msr()
449 if (sint >= ARRAY_SIZE(synic->sint)) in synic_set_irq()
450 return -EINVAL; in synic_set_irq()
454 return -ENOENT; in synic_set_irq()
463 ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL); in synic_set_irq()
464 trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret); in synic_set_irq()
474 return -EINVAL; in kvm_hv_synic_set_irq()
484 trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector); in kvm_hv_synic_send_eoi()
486 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) in kvm_hv_synic_send_eoi()
497 return -EINVAL; in kvm_hv_set_sint_gsi()
499 if (sint >= ARRAY_SIZE(synic->sint_to_gsi)) in kvm_hv_set_sint_gsi()
500 return -EINVAL; in kvm_hv_set_sint_gsi()
502 atomic_set(&synic->sint_to_gsi[sint], gsi); in kvm_hv_set_sint_gsi()
512 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, in kvm_hv_irq_routing_update()
513 lockdep_is_held(&kvm->irq_lock)); in kvm_hv_irq_routing_update()
515 for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) { in kvm_hv_irq_routing_update()
516 hlist_for_each_entry(e, &irq_rt->map[gsi], link) { in kvm_hv_irq_routing_update()
517 if (e->type == KVM_IRQ_ROUTING_HV_SINT) in kvm_hv_irq_routing_update()
518 kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu, in kvm_hv_irq_routing_update()
519 e->hv_sint.sint, gsi); in kvm_hv_irq_routing_update()
529 synic->version = HV_SYNIC_VERSION_1; in synic_init()
530 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { in synic_init()
531 atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED); in synic_init()
532 atomic_set(&synic->sint_to_gsi[i], -1); in synic_init()
546 if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET) in get_time_ref_counter()
551 return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64) in get_time_ref_counter()
552 + hv->tsc_ref.tsc_offset; in get_time_ref_counter()
560 set_bit(stimer->index, in stimer_mark_pending()
561 to_hv_vcpu(vcpu)->stimer_pending_bitmap); in stimer_mark_pending()
571 trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_cleanup()
572 stimer->index); in stimer_cleanup()
574 hrtimer_cancel(&stimer->timer); in stimer_cleanup()
575 clear_bit(stimer->index, in stimer_cleanup()
576 to_hv_vcpu(vcpu)->stimer_pending_bitmap); in stimer_cleanup()
577 stimer->msg_pending = false; in stimer_cleanup()
578 stimer->exp_time = 0; in stimer_cleanup()
586 trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_timer_callback()
587 stimer->index); in stimer_timer_callback()
595 * a) stimer->count is not equal to 0
596 * b) stimer->config has HV_STIMER_ENABLE flag
603 time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm); in stimer_start()
606 if (stimer->config.periodic) { in stimer_start()
607 if (stimer->exp_time) { in stimer_start()
608 if (time_now >= stimer->exp_time) { in stimer_start()
611 div64_u64_rem(time_now - stimer->exp_time, in stimer_start()
612 stimer->count, &remainder); in stimer_start()
613 stimer->exp_time = in stimer_start()
614 time_now + (stimer->count - remainder); in stimer_start()
617 stimer->exp_time = time_now + stimer->count; in stimer_start()
620 hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_start()
621 stimer->index, in stimer_start()
622 time_now, stimer->exp_time); in stimer_start()
624 hrtimer_start(&stimer->timer, in stimer_start()
626 100 * (stimer->exp_time - time_now)), in stimer_start()
630 stimer->exp_time = stimer->count; in stimer_start()
631 if (time_now >= stimer->count) { in stimer_start()
633 * Expire timer according to Hypervisor Top-Level Functional in stimer_start()
642 trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_start()
643 stimer->index, in stimer_start()
644 time_now, stimer->count); in stimer_start()
646 hrtimer_start(&stimer->timer, in stimer_start()
647 ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)), in stimer_start()
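Condensed from the periodic branch above into a standalone helper (a hedged sketch; the function name is illustrative): all values are in the 100ns units of get_time_ref_counter(), and a timer that already missed one or more periods is re-armed at the next multiple of count past time_now. The hrtimer itself is then armed 100 * (exp_time - time_now) nanoseconds out, as the fragments show.

static u64 stimer_next_periodic_exp(u64 time_now, u64 exp_time, u64 count)
{
	u64 remainder;

	if (!exp_time)			/* first arming after a config/count write */
		return time_now + count;
	if (time_now < exp_time)	/* not expired yet, keep the deadline */
		return exp_time;

	/* missed one or more periods: skip ahead to the next period boundary */
	div64_u64_rem(time_now - exp_time, count, &remainder);
	return time_now + (count - remainder);
}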
656 old_config = {.as_uint64 = stimer->config.as_uint64}; in stimer_set_config()
661 if (!synic->active && !host) in stimer_set_config()
664 if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode && in stimer_set_config()
665 !(hv_vcpu->cpuid_cache.features_edx & in stimer_set_config()
669 trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_set_config()
670 stimer->index, config, host); in stimer_set_config()
676 stimer->config.as_uint64 = new_config.as_uint64; in stimer_set_config()
678 if (stimer->config.enable) in stimer_set_config()
690 if (!synic->active && !host) in stimer_set_count()
693 trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_set_count()
694 stimer->index, count, host); in stimer_set_count()
697 stimer->count = count; in stimer_set_count()
698 if (stimer->count == 0) in stimer_set_count()
699 stimer->config.enable = 0; in stimer_set_count()
700 else if (stimer->config.auto_enable) in stimer_set_count()
701 stimer->config.enable = 1; in stimer_set_count()
703 if (stimer->config.enable) in stimer_set_count()
711 *pconfig = stimer->config.as_uint64; in stimer_get_config()
717 *pcount = stimer->count; in stimer_get_count()
730 if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE)) in synic_deliver_msg()
731 return -ENOENT; in synic_deliver_msg()
733 msg_page_gfn = synic->msg_page >> PAGE_SHIFT; in synic_deliver_msg()
736 * Strictly following the spec-mandated ordering would assume setting in synic_deliver_msg()
761 return -EAGAIN; in synic_deliver_msg()
765 sizeof(src_msg->header) + in synic_deliver_msg()
766 src_msg->header.payload_size); in synic_deliver_msg()
774 return -EFAULT; in synic_deliver_msg()
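For context on synic_deliver_msg() above: every SINT owns one struct hv_message slot in the SynIC message page, and a new message may only be written into a slot whose message_type is HVMSG_NONE; otherwise the pending flag is set and -EAGAIN tells the caller to retry once the guest signals end-of-message. A hedged sketch of the slot addressing and the busy check (the helper name and the plain kvm_read_guest() access are illustrative simplifications):

static int synic_msg_slot_busy(struct kvm *kvm, u64 msg_page_gfn, u32 sint)
{
	gpa_t slot = (msg_page_gfn << PAGE_SHIFT) +
		     offsetof(struct hv_message_page, sint_message[sint]);
	u32 type;

	if (kvm_read_guest(kvm, slot + offsetof(struct hv_message, header.message_type),
			   &type, sizeof(type)))
		return -EFAULT;

	/* busy slot: the caller sets the pending flag and retries after guest EOM */
	return type == HVMSG_NONE ? 0 : -EAGAIN;
}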
781 struct hv_message *msg = &stimer->msg; in stimer_send_msg()
783 (struct hv_timer_message_payload *)&msg->u.payload; in stimer_send_msg()
789 bool no_retry = stimer->config.periodic; in stimer_send_msg()
791 payload->expiration_time = stimer->exp_time; in stimer_send_msg()
792 payload->delivery_time = get_time_ref_counter(vcpu->kvm); in stimer_send_msg()
794 stimer->config.sintx, msg, in stimer_send_msg()
803 .vector = stimer->config.apic_vector in stimer_notify_direct()
813 int r, direct = stimer->config.direct_mode; in stimer_expiration()
815 stimer->msg_pending = true; in stimer_expiration()
820 trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id, in stimer_expiration()
821 stimer->index, direct, r); in stimer_expiration()
823 stimer->msg_pending = false; in stimer_expiration()
824 if (!(stimer->config.periodic)) in stimer_expiration()
825 stimer->config.enable = 0; in stimer_expiration()
839 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_process_stimers()
840 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) { in kvm_hv_process_stimers()
841 stimer = &hv_vcpu->stimer[i]; in kvm_hv_process_stimers()
842 if (stimer->config.enable) { in kvm_hv_process_stimers()
843 exp_time = stimer->exp_time; in kvm_hv_process_stimers()
847 get_time_ref_counter(vcpu->kvm); in kvm_hv_process_stimers()
852 if ((stimer->config.enable) && in kvm_hv_process_stimers()
853 stimer->count) { in kvm_hv_process_stimers()
854 if (!stimer->msg_pending) in kvm_hv_process_stimers()
870 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_uninit()
871 stimer_cleanup(&hv_vcpu->stimer[i]); in kvm_hv_vcpu_uninit()
874 vcpu->arch.hyperv = NULL; in kvm_hv_vcpu_uninit()
884 if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) in kvm_hv_assist_page_enabled()
886 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; in kvm_hv_assist_page_enabled()
895 return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, in kvm_hv_get_assist_page()
902 struct hv_message *msg = &stimer->msg; in stimer_prepare_msg()
904 (struct hv_timer_message_payload *)&msg->u.payload; in stimer_prepare_msg()
906 memset(&msg->header, 0, sizeof(msg->header)); in stimer_prepare_msg()
907 msg->header.message_type = HVMSG_TIMER_EXPIRED; in stimer_prepare_msg()
908 msg->header.payload_size = sizeof(*payload); in stimer_prepare_msg()
910 payload->timer_index = stimer->index; in stimer_prepare_msg()
911 payload->expiration_time = 0; in stimer_prepare_msg()
912 payload->delivery_time = 0; in stimer_prepare_msg()
918 stimer->index = timer_index; in stimer_init()
919 hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); in stimer_init()
920 stimer->timer.function = stimer_timer_callback; in stimer_init()
931 return -ENOMEM; in kvm_hv_vcpu_init()
933 vcpu->arch.hyperv = hv_vcpu; in kvm_hv_vcpu_init()
934 hv_vcpu->vcpu = vcpu; in kvm_hv_vcpu_init()
936 synic_init(&hv_vcpu->synic); in kvm_hv_vcpu_init()
938 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); in kvm_hv_vcpu_init()
939 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_init()
940 stimer_init(&hv_vcpu->stimer[i], i); in kvm_hv_vcpu_init()
942 hv_vcpu->vp_index = vcpu->vcpu_idx; in kvm_hv_vcpu_init()
960 synic->active = true; in kvm_hv_activate_synic()
961 synic->dont_zero_synic_pages = dont_zero_synic_pages; in kvm_hv_activate_synic()
962 synic->control = HV_SYNIC_CONTROL_ENABLE; in kvm_hv_activate_synic()
993 size_t size = ARRAY_SIZE(hv->hv_crash_param); in kvm_hv_msr_get_crash_data()
996 return -EINVAL; in kvm_hv_msr_get_crash_data()
998 *pdata = hv->hv_crash_param[array_index_nospec(index, size)]; in kvm_hv_msr_get_crash_data()
1006 *pdata = hv->hv_crash_ctl; in kvm_hv_msr_get_crash_ctl()
1014 hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY; in kvm_hv_msr_set_crash_ctl()
1022 size_t size = ARRAY_SIZE(hv->hv_crash_param); in kvm_hv_msr_set_crash_data()
1025 return -EINVAL; in kvm_hv_msr_set_crash_data()
1027 hv->hv_crash_param[array_index_nospec(index, size)] = data; in kvm_hv_msr_set_crash_data()
1032 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
1036 * nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
1039 * Hyper-V formula:
1042 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
1044 * ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1045 * scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
1049 * nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
1050 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
1052 * nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1053 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1056 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
1058 * - tsc_timestamp * scale / 2^64
1061 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
1062 * offset = system_time / 100 - tsc_timestamp * scale / 2^64
1071 if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT)) in compute_tsc_page_parameters()
1077 * tsc_to_system_mul / 100 >= 2^(32-tsc_shift) in compute_tsc_page_parameters()
1078 * tsc_to_system_mul >= 100 * 2^(32-tsc_shift) in compute_tsc_page_parameters()
1080 max_mul = 100ull << (32 - hv_clock->tsc_shift); in compute_tsc_page_parameters()
1081 if (hv_clock->tsc_to_system_mul >= max_mul) in compute_tsc_page_parameters()
1088 tsc_ref->tsc_scale = in compute_tsc_page_parameters()
1089 mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift), in compute_tsc_page_parameters()
1090 hv_clock->tsc_to_system_mul, in compute_tsc_page_parameters()
1093 tsc_ref->tsc_offset = hv_clock->system_time; in compute_tsc_page_parameters()
1094 do_div(tsc_ref->tsc_offset, 100); in compute_tsc_page_parameters()
1095 tsc_ref->tsc_offset -= in compute_tsc_page_parameters()
1096 mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64); in compute_tsc_page_parameters()
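Putting the derivation from the block comment above next to the code: tsc_scale is tsc_to_system_mul * 2^(32 + tsc_shift) / 100 stored as a 0.64 fixed-point ratio, and tsc_offset is system_time / 100 minus tsc_timestamp scaled by that ratio. A hedged standalone restatement (mul_u64_u32_div(), mul_u64_u64_shr() and div_u64() are the kernel helpers the real code uses; the wrapper name is illustrative):

static bool hv_tsc_page_params(const struct pvclock_vcpu_time_info *hv_clock,
			       struct ms_hyperv_tsc_page *tsc_ref)
{
	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
		return false;

	/*
	 * scale = tsc_to_system_mul * 2^(32 + tsc_shift) / 100; refuse inputs
	 * where tsc_to_system_mul >= 100 * 2^(32 - tsc_shift), which would
	 * overflow 64 bits.
	 */
	if (hv_clock->tsc_to_system_mul >= (100ull << (32 - hv_clock->tsc_shift)))
		return false;
	tsc_ref->tsc_scale = mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
					     hv_clock->tsc_to_system_mul, 100);

	/* offset = system_time / 100 - tsc_timestamp * scale / 2^64 */
	tsc_ref->tsc_offset = div_u64(hv_clock->system_time, 100) -
			      mul_u64_u64_shr(hv_clock->tsc_timestamp,
					      tsc_ref->tsc_scale, 64);
	return true;
}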
1103 * access emulation and Hyper-V is known to expect the values in TSC page to
1111 return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) && in tsc_page_update_unsafe()
1112 hv->hv_tsc_emulation_control; in tsc_page_update_unsafe()
1122 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence)); in kvm_hv_setup_tsc_page()
1125 if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN || in kvm_hv_setup_tsc_page()
1126 hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET) in kvm_hv_setup_tsc_page()
1129 mutex_lock(&hv->hv_lock); in kvm_hv_setup_tsc_page()
1130 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) in kvm_hv_setup_tsc_page()
1133 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; in kvm_hv_setup_tsc_page()
1143 if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) in kvm_hv_setup_tsc_page()
1146 hv->hv_tsc_page_status = HV_TSC_PAGE_SET; in kvm_hv_setup_tsc_page()
1154 hv->tsc_ref.tsc_sequence = 0; in kvm_hv_setup_tsc_page()
1156 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) in kvm_hv_setup_tsc_page()
1159 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref)) in kvm_hv_setup_tsc_page()
1164 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) in kvm_hv_setup_tsc_page()
1174 /* Write the struct entirely before the non-zero sequence. */ in kvm_hv_setup_tsc_page()
1177 hv->tsc_ref.tsc_sequence = tsc_seq; in kvm_hv_setup_tsc_page()
1179 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) in kvm_hv_setup_tsc_page()
1182 hv->hv_tsc_page_status = HV_TSC_PAGE_SET; in kvm_hv_setup_tsc_page()
1186 hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN; in kvm_hv_setup_tsc_page()
1188 mutex_unlock(&hv->hv_lock); in kvm_hv_setup_tsc_page()
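The guest writes in kvm_hv_setup_tsc_page() above follow a seqcount-like protocol, because the guest reads the reference TSC page locklessly: invalidate, rewrite, barrier, then publish a new non-zero sequence. A hedged outline of just that ordering (the sequence-number read, locking and error paths of the real function are elided; the wrapper name is illustrative):

static int hv_publish_tsc_page(struct kvm *kvm, gfn_t gfn, struct kvm_hv *hv,
			       struct pvclock_vcpu_time_info *hv_clock, u32 tsc_seq)
{
	hv->tsc_ref.tsc_sequence = 0;		/* 1. mark the page contents invalid */
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref,
			    sizeof(hv->tsc_ref.tsc_sequence)))
		return -EFAULT;

	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
		return -EINVAL;			/* 2. refresh scale/offset */
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
		return -EFAULT;

	smp_wmb();				/* 3. whole struct lands before the sequence */

	hv->tsc_ref.tsc_sequence = tsc_seq;	/* 4. publish with the new non-zero sequence */
	return kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref,
			       sizeof(hv->tsc_ref.tsc_sequence));
}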
1197 if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN || in kvm_hv_invalidate_tsc_page()
1198 hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET || in kvm_hv_invalidate_tsc_page()
1202 mutex_lock(&hv->hv_lock); in kvm_hv_invalidate_tsc_page()
1204 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) in kvm_hv_invalidate_tsc_page()
1208 if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET) in kvm_hv_invalidate_tsc_page()
1209 hv->hv_tsc_page_status = HV_TSC_PAGE_UPDATING; in kvm_hv_invalidate_tsc_page()
1211 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; in kvm_hv_invalidate_tsc_page()
1213 hv->tsc_ref.tsc_sequence = 0; in kvm_hv_invalidate_tsc_page()
1219 idx = srcu_read_lock(&kvm->srcu); in kvm_hv_invalidate_tsc_page()
1221 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) in kvm_hv_invalidate_tsc_page()
1222 hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN; in kvm_hv_invalidate_tsc_page()
1223 srcu_read_unlock(&kvm->srcu, idx); in kvm_hv_invalidate_tsc_page()
1226 mutex_unlock(&hv->hv_lock); in kvm_hv_invalidate_tsc_page()
1232 if (!hv_vcpu->enforce_cpuid) in hv_check_msr_access()
1238 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1241 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1244 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1247 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1250 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1253 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1261 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1271 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1277 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1282 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1287 return hv_vcpu->cpuid_cache.features_eax & in hv_check_msr_access()
1291 return hv_vcpu->cpuid_cache.features_edx & in hv_check_msr_access()
1295 return hv_vcpu->cpuid_cache.features_edx & in hv_check_msr_access()
1307 struct kvm *kvm = vcpu->kvm; in kvm_hv_set_msr_pw()
1315 hv->hv_guest_os_id = data; in kvm_hv_set_msr_pw()
1316 /* setting guest os id to zero disables hypercall page */ in kvm_hv_set_msr_pw()
1317 if (!hv->hv_guest_os_id) in kvm_hv_set_msr_pw()
1318 hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; in kvm_hv_set_msr_pw()
1325 /* if guest os id is not set hypercall should remain disabled */ in kvm_hv_set_msr_pw()
1326 if (!hv->hv_guest_os_id) in kvm_hv_set_msr_pw()
1329 hv->hv_hypercall = data; in kvm_hv_set_msr_pw()
1334 * If Xen and Hyper-V hypercalls are both enabled, disambiguate in kvm_hv_set_msr_pw()
1336 * which is RsvdZ in the 32-bit Hyper-V hypercall ABI and just in kvm_hv_set_msr_pw()
1337 * going to be clobbered on 64-bit. in kvm_hv_set_msr_pw()
1358 hv->hv_hypercall = data; in kvm_hv_set_msr_pw()
1362 hv->hv_tsc_page = data; in kvm_hv_set_msr_pw()
1363 if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) { in kvm_hv_set_msr_pw()
1365 hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED; in kvm_hv_set_msr_pw()
1367 hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED; in kvm_hv_set_msr_pw()
1370 hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET; in kvm_hv_set_msr_pw()
1375 msr - HV_X64_MSR_CRASH_P0, in kvm_hv_set_msr_pw()
1383 hv->hv_crash_param[0], in kvm_hv_set_msr_pw()
1384 hv->hv_crash_param[1], in kvm_hv_set_msr_pw()
1385 hv->hv_crash_param[2], in kvm_hv_set_msr_pw()
1386 hv->hv_crash_param[3], in kvm_hv_set_msr_pw()
1387 hv->hv_crash_param[4]); in kvm_hv_set_msr_pw()
1395 vcpu_debug(vcpu, "hyper-v reset requested\n"); in kvm_hv_set_msr_pw()
1400 hv->hv_reenlightenment_control = data; in kvm_hv_set_msr_pw()
1403 hv->hv_tsc_emulation_control = data; in kvm_hv_set_msr_pw()
1409 hv->hv_tsc_emulation_status = data; in kvm_hv_set_msr_pw()
1412 /* read-only, but still ignore it if host-initiated */ in kvm_hv_set_msr_pw()
1420 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n", in kvm_hv_set_msr_pw()
1446 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_set_msr()
1452 if (new_vp_index == hv_vcpu->vp_index) in kvm_hv_set_msr()
1461 if (hv_vcpu->vp_index == vcpu->vcpu_idx) in kvm_hv_set_msr()
1462 atomic_inc(&hv->num_mismatched_vp_indexes); in kvm_hv_set_msr()
1463 else if (new_vp_index == vcpu->vcpu_idx) in kvm_hv_set_msr()
1464 atomic_dec(&hv->num_mismatched_vp_indexes); in kvm_hv_set_msr()
1466 hv_vcpu->vp_index = new_vp_index; in kvm_hv_set_msr()
1474 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1491 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1508 hv_vcpu->runtime_offset = data - current_task_runtime_100ns(); in kvm_hv_set_msr()
1521 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; in kvm_hv_set_msr()
1530 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; in kvm_hv_set_msr()
1537 /* read-only, but still ignore it if host-initiated */ in kvm_hv_set_msr()
1542 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n", in kvm_hv_set_msr()
1554 struct kvm *kvm = vcpu->kvm; in kvm_hv_get_msr_pw()
1562 data = hv->hv_guest_os_id; in kvm_hv_get_msr_pw()
1565 data = hv->hv_hypercall; in kvm_hv_get_msr_pw()
1571 data = hv->hv_tsc_page; in kvm_hv_get_msr_pw()
1575 msr - HV_X64_MSR_CRASH_P0, in kvm_hv_get_msr_pw()
1583 data = hv->hv_reenlightenment_control; in kvm_hv_get_msr_pw()
1586 data = hv->hv_tsc_emulation_control; in kvm_hv_get_msr_pw()
1589 data = hv->hv_tsc_emulation_status; in kvm_hv_get_msr_pw()
1595 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); in kvm_hv_get_msr_pw()
1614 data = hv_vcpu->vp_index; in kvm_hv_get_msr()
1623 data = hv_vcpu->hv_vapic; in kvm_hv_get_msr()
1626 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset; in kvm_hv_get_msr()
1639 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; in kvm_hv_get_msr()
1648 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; in kvm_hv_get_msr()
1654 data = (u64)vcpu->arch.virtual_tsc_khz * 1000; in kvm_hv_get_msr()
1660 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); in kvm_hv_get_msr()
1669 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_set_msr_common()
1671 if (!host && !vcpu->arch.hyperv_enabled) in kvm_hv_set_msr_common()
1682 mutex_lock(&hv->hv_lock); in kvm_hv_set_msr_common()
1684 mutex_unlock(&hv->hv_lock); in kvm_hv_set_msr_common()
1692 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_get_msr_common()
1694 if (!host && !vcpu->arch.hyperv_enabled) in kvm_hv_get_msr_common()
1705 mutex_lock(&hv->hv_lock); in kvm_hv_get_msr_common()
1707 mutex_unlock(&hv->hv_lock); in kvm_hv_get_msr_common()
1727 if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) { in sparse_set_to_vcpu_mask()
1756 struct kvm *kvm = vcpu->kvm; in kvm_hv_flush_tlb()
1769 if (hc->fast) { in kvm_hv_flush_tlb()
1770 flush.address_space = hc->ingpa; in kvm_hv_flush_tlb()
1771 flush.flags = hc->outgpa; in kvm_hv_flush_tlb()
1772 flush.processor_mask = sse128_lo(hc->xmm[0]); in kvm_hv_flush_tlb()
1774 if (unlikely(kvm_read_guest(kvm, hc->ingpa, in kvm_hv_flush_tlb()
1795 if (hc->fast) { in kvm_hv_flush_tlb()
1796 flush_ex.address_space = hc->ingpa; in kvm_hv_flush_tlb()
1797 flush_ex.flags = hc->outgpa; in kvm_hv_flush_tlb()
1799 &hc->xmm[0], sizeof(hc->xmm[0])); in kvm_hv_flush_tlb()
1801 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex, in kvm_hv_flush_tlb()
1821 if (hc->fast) { in kvm_hv_flush_tlb()
1822 if (sparse_banks_len > HV_HYPERCALL_MAX_XMM_REGISTERS - 1) in kvm_hv_flush_tlb()
1825 sparse_banks[i] = sse128_lo(hc->xmm[i / 2 + 1]); in kvm_hv_flush_tlb()
1826 sparse_banks[i + 1] = sse128_hi(hc->xmm[i / 2 + 1]); in kvm_hv_flush_tlb()
1829 gpa = hc->ingpa + offsetof(struct hv_tlb_flush_ex, in kvm_hv_flush_tlb()
1839 cpumask_clear(&hv_vcpu->tlb_flush); in kvm_hv_flush_tlb()
1846 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't in kvm_hv_flush_tlb()
1850 NULL, vcpu_mask, &hv_vcpu->tlb_flush); in kvm_hv_flush_tlb()
1855 ((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET); in kvm_hv_flush_tlb()
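The flush path above turns a HV_GENERIC_SET_SPARSE_4K processor set into a vCPU mask: valid_bank_mask marks which 64-VP banks are present, and sparse_banks[] carries one u64 of per-VP bits for each present bank, packed in order. A hedged membership test (illustrative name, not the file's helper):

static bool vp_in_sparse_set(u32 vp_index, u64 valid_bank_mask,
			     const u64 sparse_banks[])
{
	u64 bank = vp_index / 64;

	if (!(valid_bank_mask & BIT_ULL(bank)))
		return false;

	/* sparse_banks[] is packed: its index is the count of present banks below ours */
	return sparse_banks[hweight64(valid_bank_mask & (BIT_ULL(bank) - 1))] &
	       BIT_ULL(vp_index % 64);
}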
1879 struct kvm *kvm = vcpu->kvm; in kvm_hv_send_ipi()
1892 if (!hc->fast) { in kvm_hv_send_ipi()
1893 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi, in kvm_hv_send_ipi()
1900 if (unlikely(hc->ingpa >> 32 != 0)) in kvm_hv_send_ipi()
1902 sparse_banks[0] = hc->outgpa; in kvm_hv_send_ipi()
1903 vector = (u32)hc->ingpa; in kvm_hv_send_ipi()
1910 if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex, in kvm_hv_send_ipi()
1930 hc->ingpa + offsetof(struct hv_send_ipi_ex, in kvm_hv_send_ipi()
1956 if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) { in kvm_hv_set_cpuid()
1957 vcpu->arch.hyperv_enabled = true; in kvm_hv_set_cpuid()
1959 vcpu->arch.hyperv_enabled = false; in kvm_hv_set_cpuid()
1970 hv_vcpu->cpuid_cache.features_eax = entry->eax; in kvm_hv_set_cpuid()
1971 hv_vcpu->cpuid_cache.features_ebx = entry->ebx; in kvm_hv_set_cpuid()
1972 hv_vcpu->cpuid_cache.features_edx = entry->edx; in kvm_hv_set_cpuid()
1974 hv_vcpu->cpuid_cache.features_eax = 0; in kvm_hv_set_cpuid()
1975 hv_vcpu->cpuid_cache.features_ebx = 0; in kvm_hv_set_cpuid()
1976 hv_vcpu->cpuid_cache.features_edx = 0; in kvm_hv_set_cpuid()
1981 hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax; in kvm_hv_set_cpuid()
1982 hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx; in kvm_hv_set_cpuid()
1984 hv_vcpu->cpuid_cache.enlightenments_eax = 0; in kvm_hv_set_cpuid()
1985 hv_vcpu->cpuid_cache.enlightenments_ebx = 0; in kvm_hv_set_cpuid()
1990 hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax; in kvm_hv_set_cpuid()
1992 hv_vcpu->cpuid_cache.syndbg_cap_eax = 0; in kvm_hv_set_cpuid()
2011 hv_vcpu->enforce_cpuid = enforce; in kvm_hv_set_enforce_cpuid()
2018 return vcpu->arch.hyperv_enabled && to_kvm_hv(vcpu->kvm)->hv_guest_os_id; in kvm_hv_hypercall_enabled()
2038 ++vcpu->stat.hypercalls; in kvm_hv_hypercall_complete()
2044 return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result); in kvm_hv_hypercall_complete_userspace()
2049 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hvcall_signal_event()
2052 if (unlikely(!hc->fast)) { in kvm_hvcall_signal_event()
2054 gpa_t gpa = hc->ingpa; in kvm_hvcall_signal_event()
2056 if ((gpa & (__alignof__(hc->ingpa) - 1)) || in kvm_hvcall_signal_event()
2057 offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE) in kvm_hvcall_signal_event()
2061 &hc->ingpa, sizeof(hc->ingpa)); in kvm_hvcall_signal_event()
2067 * Per spec, bits 32-47 contain the extra "flag number". However, we in kvm_hvcall_signal_event()
2071 if (hc->ingpa & 0xffff00000000ULL) in kvm_hvcall_signal_event()
2073 /* remaining bits are reserved-zero */ in kvm_hvcall_signal_event()
2074 if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK) in kvm_hvcall_signal_event()
2077 /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */ in kvm_hvcall_signal_event()
2079 eventfd = idr_find(&hv->conn_to_evt, hc->ingpa); in kvm_hvcall_signal_event()
2090 switch (hc->code) { in is_xmm_fast_hypercall()
2107 _kvm_read_sse_reg(reg, &hc->xmm[reg]); in kvm_hv_hypercall_read_xmm()
2113 if (!hv_vcpu->enforce_cpuid) in hv_check_hypercall_access()
2118 return hv_vcpu->cpuid_cache.enlightenments_ebx && in hv_check_hypercall_access()
2119 hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX; in hv_check_hypercall_access()
2121 return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES; in hv_check_hypercall_access()
2123 return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS; in hv_check_hypercall_access()
2131 return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) || in hv_check_hypercall_access()
2132 hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING; in hv_check_hypercall_access()
2135 if (!(hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2141 return hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2144 if (!(hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2149 return hv_vcpu->cpuid_cache.enlightenments_eax & in hv_check_hypercall_access()
2166 * per HYPER-V spec in kvm_hv_hypercall()
2204 if (unlikely(hv_vcpu->enforce_cpuid && in kvm_hv_hypercall()
2205 !(hv_vcpu->cpuid_cache.features_edx & in kvm_hv_hypercall()
2233 if (unlikely(hc.rep || !to_hv_synic(vcpu)->active)) { in kvm_hv_hypercall()
2237 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in kvm_hv_hypercall()
2238 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; in kvm_hv_hypercall()
2239 vcpu->run->hyperv.u.hcall.input = hc.param; in kvm_hv_hypercall()
2240 vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa; in kvm_hv_hypercall()
2241 vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa; in kvm_hv_hypercall()
2242 vcpu->arch.complete_userspace_io = in kvm_hv_hypercall()
2302 if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) { in kvm_hv_hypercall()
2306 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in kvm_hv_hypercall()
2307 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; in kvm_hv_hypercall()
2308 vcpu->run->hyperv.u.hcall.input = hc.param; in kvm_hv_hypercall()
2309 vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa; in kvm_hv_hypercall()
2310 vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa; in kvm_hv_hypercall()
2311 vcpu->arch.complete_userspace_io = in kvm_hv_hypercall()
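The "per HYPER-V spec" decoding referenced above splits the 64-bit hypercall input value (from RCX, or EDX:EAX on 32-bit) into: call code in bits 0-15, the fast-call flag in bit 16, the rep count in bits 32-43 and the rep start index in bits 48-59. A hedged sketch using the TLFS constants (the struct and function names here are illustrative, not the file's kvm_hv_hcall):

struct hv_hcall_input {
	u16 code, rep_cnt, rep_idx;
	bool fast, rep;
};

static struct hv_hcall_input hv_decode_hcall(u64 param)
{
	struct hv_hcall_input in = {
		.code    = param & 0xffff,
		.fast    = !!(param & HV_HYPERCALL_FAST_BIT),
		.rep_cnt = (param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff,
		.rep_idx = (param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff,
	};

	in.rep = in.rep_cnt || in.rep_idx;
	return in;
}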
2328 mutex_init(&hv->hv_lock); in kvm_hv_init_vm()
2329 idr_init(&hv->conn_to_evt); in kvm_hv_init_vm()
2338 idr_for_each_entry(&hv->conn_to_evt, eventfd, i) in kvm_hv_destroy_vm()
2340 idr_destroy(&hv->conn_to_evt); in kvm_hv_destroy_vm()
2353 mutex_lock(&hv->hv_lock); in kvm_hv_eventfd_assign()
2354 ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1, in kvm_hv_eventfd_assign()
2356 mutex_unlock(&hv->hv_lock); in kvm_hv_eventfd_assign()
2361 if (ret == -ENOSPC) in kvm_hv_eventfd_assign()
2362 ret = -EEXIST; in kvm_hv_eventfd_assign()
2372 mutex_lock(&hv->hv_lock); in kvm_hv_eventfd_deassign()
2373 eventfd = idr_remove(&hv->conn_to_evt, conn_id); in kvm_hv_eventfd_deassign()
2374 mutex_unlock(&hv->hv_lock); in kvm_hv_eventfd_deassign()
2377 return -ENOENT; in kvm_hv_eventfd_deassign()
2379 synchronize_srcu(&kvm->srcu); in kvm_hv_eventfd_deassign()
2386 if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) || in kvm_vm_ioctl_hv_eventfd()
2387 (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK)) in kvm_vm_ioctl_hv_eventfd()
2388 return -EINVAL; in kvm_vm_ioctl_hv_eventfd()
2390 if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN) in kvm_vm_ioctl_hv_eventfd()
2391 return kvm_hv_eventfd_deassign(kvm, args->conn_id); in kvm_vm_ioctl_hv_eventfd()
2392 return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd); in kvm_vm_ioctl_hv_eventfd()
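The conn_to_evt IDR above backs the KVM_HYPERV_EVENTFD VM ioctl; once a connection ID is registered, a guest HvSignalEvent on it just signals the eventfd without a userspace exit. A hedged userspace usage sketch (error handling trimmed; connection IDs must fit the 24-bit KVM_HYPERV_CONN_ID_MASK):

#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* returns the eventfd wired to Hyper-V connection ID 'conn_id', or -1 */
static int hv_hook_connection(int vm_fd, __u32 conn_id)
{
	struct kvm_hyperv_eventfd he = {
		.conn_id = conn_id,
		.fd = eventfd(0, EFD_CLOEXEC),
		.flags = 0,	/* pass KVM_HYPERV_EVENTFD_DEASSIGN to unhook later */
	};

	if (he.fd < 0 || ioctl(vm_fd, KVM_HYPERV_EVENTFD, &he) < 0)
		return -1;
	return he.fd;
}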
2413 if (kvm_x86_ops.nested_ops->get_evmcs_version) in kvm_get_hv_cpuid()
2414 evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu); in kvm_get_hv_cpuid()
2418 --nent; in kvm_get_hv_cpuid()
2420 if (cpuid->nent < nent) in kvm_get_hv_cpuid()
2421 return -E2BIG; in kvm_get_hv_cpuid()
2423 if (cpuid->nent > nent) in kvm_get_hv_cpuid()
2424 cpuid->nent = nent; in kvm_get_hv_cpuid()
2430 switch (ent->function) { in kvm_get_hv_cpuid()
2434 ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES; in kvm_get_hv_cpuid()
2435 ent->ebx = signature[0]; in kvm_get_hv_cpuid()
2436 ent->ecx = signature[1]; in kvm_get_hv_cpuid()
2437 ent->edx = signature[2]; in kvm_get_hv_cpuid()
2441 ent->eax = HYPERV_CPUID_SIGNATURE_EAX; in kvm_get_hv_cpuid()
2446 * We implement some Hyper-V 2016 functions so let's use in kvm_get_hv_cpuid()
2449 ent->eax = 0x00003839; in kvm_get_hv_cpuid()
2450 ent->ebx = 0x000A0000; in kvm_get_hv_cpuid()
2454 ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE; in kvm_get_hv_cpuid()
2455 ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE; in kvm_get_hv_cpuid()
2456 ent->eax |= HV_MSR_SYNIC_AVAILABLE; in kvm_get_hv_cpuid()
2457 ent->eax |= HV_MSR_SYNTIMER_AVAILABLE; in kvm_get_hv_cpuid()
2458 ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE; in kvm_get_hv_cpuid()
2459 ent->eax |= HV_MSR_HYPERCALL_AVAILABLE; in kvm_get_hv_cpuid()
2460 ent->eax |= HV_MSR_VP_INDEX_AVAILABLE; in kvm_get_hv_cpuid()
2461 ent->eax |= HV_MSR_RESET_AVAILABLE; in kvm_get_hv_cpuid()
2462 ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; in kvm_get_hv_cpuid()
2463 ent->eax |= HV_ACCESS_FREQUENCY_MSRS; in kvm_get_hv_cpuid()
2464 ent->eax |= HV_ACCESS_REENLIGHTENMENT; in kvm_get_hv_cpuid()
2466 ent->ebx |= HV_POST_MESSAGES; in kvm_get_hv_cpuid()
2467 ent->ebx |= HV_SIGNAL_EVENTS; in kvm_get_hv_cpuid()
2469 ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE; in kvm_get_hv_cpuid()
2470 ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE; in kvm_get_hv_cpuid()
2471 ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE; in kvm_get_hv_cpuid()
2473 ent->ebx |= HV_DEBUGGING; in kvm_get_hv_cpuid()
2474 ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE; in kvm_get_hv_cpuid()
2475 ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE; in kvm_get_hv_cpuid()
2478 * Direct Synthetic timers only make sense with in-kernel in kvm_get_hv_cpuid()
2482 ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE; in kvm_get_hv_cpuid()
2487 ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; in kvm_get_hv_cpuid()
2488 ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; in kvm_get_hv_cpuid()
2489 ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; in kvm_get_hv_cpuid()
2490 ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; in kvm_get_hv_cpuid()
2491 ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; in kvm_get_hv_cpuid()
2493 ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; in kvm_get_hv_cpuid()
2495 ent->eax |= HV_X64_NO_NONARCH_CORESHARING; in kvm_get_hv_cpuid()
2497 ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED; in kvm_get_hv_cpuid()
2502 ent->ebx = 0x00000FFF; in kvm_get_hv_cpuid()
2508 ent->eax = KVM_MAX_VCPUS; in kvm_get_hv_cpuid()
2513 ent->ebx = 64; in kvm_get_hv_cpuid()
2518 ent->eax = evmcs_ver; in kvm_get_hv_cpuid()
2525 ent->eax = 0; in kvm_get_hv_cpuid()
2526 ent->ebx = signature[0]; in kvm_get_hv_cpuid()
2527 ent->ecx = signature[1]; in kvm_get_hv_cpuid()
2528 ent->edx = signature[2]; in kvm_get_hv_cpuid()
2533 ent->eax = signature[0]; in kvm_get_hv_cpuid()
2537 ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; in kvm_get_hv_cpuid()
2547 return -EFAULT; in kvm_get_hv_cpuid()
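kvm_get_hv_cpuid() above serves the KVM_GET_SUPPORTED_HV_CPUID ioctl: userspace passes a kvm_cpuid2 buffer and gets -E2BIG back if nent is smaller than the number of Hyper-V leaves. A hedged usage sketch (vCPU-fd form of the ioctl; the entry count of 16 is just a safe guess):

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static struct kvm_cpuid2 *get_hv_cpuid(int vcpu_fd)
{
	int nent = 16;	/* comfortably above the handful of Hyper-V leaves */
	struct kvm_cpuid2 *cpuid;

	cpuid = calloc(1, sizeof(*cpuid) +
			  nent * sizeof(struct kvm_cpuid_entry2));
	if (!cpuid)
		return NULL;

	cpuid->nent = nent;
	if (ioctl(vcpu_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid) < 0) {
		free(cpuid);	/* E2BIG here would mean nent was too small */
		return NULL;
	}
	return cpuid;
}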