Lines Matching +full:ecx +full:- +full:1000
1 // SPDX-License-Identifier: GPL-2.0-only
3 * KVM Microsoft Hyper-V emulation
17 * Ben-Ami Yassour <benami@il.ibm.com>
46 return atomic64_read(&synic->sint[sint]); in synic_read_sint()
52 return -1; in synic_get_sint_vector()
61 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { in synic_has_vector_connected()
74 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { in synic_has_vector_auto_eoi()
90 __set_bit(vector, synic->vec_bitmap); in synic_update_vector()
92 __clear_bit(vector, synic->vec_bitmap); in synic_update_vector()
95 __set_bit(vector, synic->auto_eoi_bitmap); in synic_update_vector()
97 __clear_bit(vector, synic->auto_eoi_bitmap); in synic_update_vector()
110 * Valid vectors are 16-255, however, nested Hyper-V attempts to write in synic_set_sint()
112 * allow zero-initing the register from host as well. in synic_set_sint()
119 * bitmap of vectors with auto-eoi behavior. The bitmaps are in synic_set_sint()
124 atomic64_set(&synic->sint[sint], data); in synic_set_sint()
144 if (vcpu && vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx) in get_vcpu_by_vpidx()
147 if (vcpu_to_hv_vcpu(vcpu)->vp_index == vpidx) in get_vcpu_by_vpidx()
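The two fragments above show the lookup pattern used to translate a Hyper-V VP index into a vCPU: first try the slot where the VP index usually equals the vCPU index, then fall back to scanning every vCPU. A minimal userspace sketch of that pattern, with hypothetical types and values:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct vcpu { uint32_t vp_index; };

static struct vcpu *get_vcpu_by_vpidx(struct vcpu *vcpus, size_t n, uint32_t vpidx)
{
	size_t i;

	/* Fast path: the VP index matches the array position. */
	if (vpidx < n && vcpus[vpidx].vp_index == vpidx)
		return &vcpus[vpidx];

	/* Slow path: an MSR write moved some VP index, scan all vCPUs. */
	for (i = 0; i < n; i++)
		if (vcpus[i].vp_index == vpidx)
			return &vcpus[i];

	return NULL;
}

int main(void)
{
	struct vcpu v[3] = { {0}, {5}, {2} };

	printf("%td\n", get_vcpu_by_vpidx(v, 3, 5) - v);	/* prints 1 */
	return 0;
}

The slow path matters because a guest can move its VP index via HV_X64_MSR_VP_INDEX writes, which the num_mismatched_vp_indexes bookkeeping further down in this listing tracks.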
161 return (synic->active) ? synic : NULL; in synic_get()
166 struct kvm *kvm = vcpu->kvm; in kvm_hv_notify_acked_sint()
172 trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint); in kvm_hv_notify_acked_sint()
174 /* Try to deliver pending Hyper-V SynIC timers messages */ in kvm_hv_notify_acked_sint()
175 for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) { in kvm_hv_notify_acked_sint()
176 stimer = &hv_vcpu->stimer[idx]; in kvm_hv_notify_acked_sint()
177 if (stimer->msg_pending && stimer->config.enable && in kvm_hv_notify_acked_sint()
178 !stimer->config.direct_mode && in kvm_hv_notify_acked_sint()
179 stimer->config.sintx == sint) in kvm_hv_notify_acked_sint()
183 idx = srcu_read_lock(&kvm->irq_srcu); in kvm_hv_notify_acked_sint()
184 gsi = atomic_read(&synic->sint_to_gsi[sint]); in kvm_hv_notify_acked_sint()
185 if (gsi != -1) in kvm_hv_notify_acked_sint()
187 srcu_read_unlock(&kvm->irq_srcu, idx); in kvm_hv_notify_acked_sint()
193 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; in synic_exit()
195 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC; in synic_exit()
196 hv_vcpu->exit.u.synic.msr = msr; in synic_exit()
197 hv_vcpu->exit.u.synic.control = synic->control; in synic_exit()
198 hv_vcpu->exit.u.synic.evt_page = synic->evt_page; in synic_exit()
199 hv_vcpu->exit.u.synic.msg_page = synic->msg_page; in synic_exit()
210 if (!synic->active && !host) in synic_set_msr()
213 trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host); in synic_set_msr()
218 synic->control = data; in synic_set_msr()
227 synic->version = data; in synic_set_msr()
231 !synic->dont_zero_synic_pages) in synic_set_msr()
232 if (kvm_clear_guest(vcpu->kvm, in synic_set_msr()
237 synic->evt_page = data; in synic_set_msr()
243 !synic->dont_zero_synic_pages) in synic_set_msr()
244 if (kvm_clear_guest(vcpu->kvm, in synic_set_msr()
249 synic->msg_page = data; in synic_set_msr()
256 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) in synic_set_msr()
261 ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host); in synic_set_msr()
280 return entry->eax & HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; in kvm_hv_is_syndbg_enabled()
285 struct kvm *kvm = vcpu->kvm; in kvm_hv_syndbg_complete_userspace()
286 struct kvm_hv *hv = &kvm->arch.hyperv; in kvm_hv_syndbg_complete_userspace()
288 if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL) in kvm_hv_syndbg_complete_userspace()
289 hv->hv_syndbg.control.status = in kvm_hv_syndbg_complete_userspace()
290 vcpu->run->hyperv.u.syndbg.status; in kvm_hv_syndbg_complete_userspace()
297 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; in syndbg_exit()
299 hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG; in syndbg_exit()
300 hv_vcpu->exit.u.syndbg.msr = msr; in syndbg_exit()
301 hv_vcpu->exit.u.syndbg.control = syndbg->control.control; in syndbg_exit()
302 hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page; in syndbg_exit()
303 hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page; in syndbg_exit()
304 hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page; in syndbg_exit()
305 vcpu->arch.complete_userspace_io = in syndbg_exit()
318 trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id, in syndbg_set_msr()
319 vcpu_to_hv_vcpu(vcpu)->vp_index, msr, data); in syndbg_set_msr()
322 syndbg->control.control = data; in syndbg_set_msr()
327 syndbg->control.status = data; in syndbg_set_msr()
330 syndbg->control.send_page = data; in syndbg_set_msr()
333 syndbg->control.recv_page = data; in syndbg_set_msr()
336 syndbg->control.pending_page = data; in syndbg_set_msr()
341 syndbg->options = data; in syndbg_set_msr()
359 *pdata = syndbg->control.control; in syndbg_get_msr()
362 *pdata = syndbg->control.status; in syndbg_get_msr()
365 *pdata = syndbg->control.send_page; in syndbg_get_msr()
368 *pdata = syndbg->control.recv_page; in syndbg_get_msr()
371 *pdata = syndbg->control.pending_page; in syndbg_get_msr()
374 *pdata = syndbg->options; in syndbg_get_msr()
380 trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, in syndbg_get_msr()
381 vcpu_to_hv_vcpu(vcpu)->vp_index, msr, in syndbg_get_msr()
392 if (!synic->active && !host) in synic_get_msr()
398 *pdata = synic->control; in synic_get_msr()
401 *pdata = synic->version; in synic_get_msr()
404 *pdata = synic->evt_page; in synic_get_msr()
407 *pdata = synic->msg_page; in synic_get_msr()
413 *pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]); in synic_get_msr()
428 if (sint >= ARRAY_SIZE(synic->sint)) in synic_set_irq()
429 return -EINVAL; in synic_set_irq()
433 return -ENOENT; in synic_set_irq()
442 ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL); in synic_set_irq()
443 trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret); in synic_set_irq()
453 return -EINVAL; in kvm_hv_synic_set_irq()
463 trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector); in kvm_hv_synic_send_eoi()
465 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) in kvm_hv_synic_send_eoi()
476 return -EINVAL; in kvm_hv_set_sint_gsi()
478 if (sint >= ARRAY_SIZE(synic->sint_to_gsi)) in kvm_hv_set_sint_gsi()
479 return -EINVAL; in kvm_hv_set_sint_gsi()
481 atomic_set(&synic->sint_to_gsi[sint], gsi); in kvm_hv_set_sint_gsi()
491 irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu, in kvm_hv_irq_routing_update()
492 lockdep_is_held(&kvm->irq_lock)); in kvm_hv_irq_routing_update()
494 for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) { in kvm_hv_irq_routing_update()
495 hlist_for_each_entry(e, &irq_rt->map[gsi], link) { in kvm_hv_irq_routing_update()
496 if (e->type == KVM_IRQ_ROUTING_HV_SINT) in kvm_hv_irq_routing_update()
497 kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu, in kvm_hv_irq_routing_update()
498 e->hv_sint.sint, gsi); in kvm_hv_irq_routing_update()
508 synic->version = HV_SYNIC_VERSION_1; in synic_init()
509 for (i = 0; i < ARRAY_SIZE(synic->sint); i++) { in synic_init()
510 atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED); in synic_init()
511 atomic_set(&synic->sint_to_gsi[i], -1); in synic_init()
517 struct kvm_hv *hv = &kvm->arch.hyperv; in get_time_ref_counter()
525 if (!hv->tsc_ref.tsc_sequence) in get_time_ref_counter()
530 return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64) in get_time_ref_counter()
531 + hv->tsc_ref.tsc_offset; in get_time_ref_counter()
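get_time_ref_counter() above evaluates the Hyper-V reference time as tsc * scale / 2^64 + offset, in 100ns units, falling back to kvmclock when the TSC page sequence is not valid. A standalone sketch of that multiply-shift, using a 128-bit intermediate in place of the kernel's mul_u64_u64_shr() and hypothetical values for a 1 GHz TSC:

#include <stdint.h>
#include <stdio.h>

static uint64_t ref_counter(uint64_t tsc, uint64_t scale, int64_t offset)
{
	/* nsec/100 = tsc * scale / 2^64 + offset */
	return (uint64_t)(((unsigned __int128)tsc * scale) >> 64) + offset;
}

int main(void)
{
	/* Hypothetical 1 GHz TSC: scale ~ 2^64/100, offset = 0. */
	uint64_t scale = UINT64_MAX / 100 + 1;

	printf("%llu\n", (unsigned long long)ref_counter(1000000000ULL, scale, 0));
	/* prints 10000000: one second is 10^7 100ns units */
	return 0;
}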
539 set_bit(stimer->index, in stimer_mark_pending()
540 vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap); in stimer_mark_pending()
550 trace_kvm_hv_stimer_cleanup(stimer_to_vcpu(stimer)->vcpu_id, in stimer_cleanup()
551 stimer->index); in stimer_cleanup()
553 hrtimer_cancel(&stimer->timer); in stimer_cleanup()
554 clear_bit(stimer->index, in stimer_cleanup()
555 vcpu_to_hv_vcpu(vcpu)->stimer_pending_bitmap); in stimer_cleanup()
556 stimer->msg_pending = false; in stimer_cleanup()
557 stimer->exp_time = 0; in stimer_cleanup()
565 trace_kvm_hv_stimer_callback(stimer_to_vcpu(stimer)->vcpu_id, in stimer_timer_callback()
566 stimer->index); in stimer_timer_callback()
574 * a) stimer->count is not equal to 0
575 * b) stimer->config has HV_STIMER_ENABLE flag
582 time_now = get_time_ref_counter(stimer_to_vcpu(stimer)->kvm); in stimer_start()
585 if (stimer->config.periodic) { in stimer_start()
586 if (stimer->exp_time) { in stimer_start()
587 if (time_now >= stimer->exp_time) { in stimer_start()
590 div64_u64_rem(time_now - stimer->exp_time, in stimer_start()
591 stimer->count, &remainder); in stimer_start()
592 stimer->exp_time = in stimer_start()
593 time_now + (stimer->count - remainder); in stimer_start()
596 stimer->exp_time = time_now + stimer->count; in stimer_start()
599 stimer_to_vcpu(stimer)->vcpu_id, in stimer_start()
600 stimer->index, in stimer_start()
601 time_now, stimer->exp_time); in stimer_start()
603 hrtimer_start(&stimer->timer, in stimer_start()
605 100 * (stimer->exp_time - time_now)), in stimer_start()
609 stimer->exp_time = stimer->count; in stimer_start()
610 if (time_now >= stimer->count) { in stimer_start()
612 * Expire timer according to Hypervisor Top-Level Functional in stimer_start()
621 trace_kvm_hv_stimer_start_one_shot(stimer_to_vcpu(stimer)->vcpu_id, in stimer_start()
622 stimer->index, in stimer_start()
623 time_now, stimer->count); in stimer_start()
625 hrtimer_start(&stimer->timer, in stimer_start()
626 ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)), in stimer_start()
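The periodic branch of stimer_start() above re-arms an already-expired timer on the next multiple of the period: it takes (time_now - exp_time) modulo count and adds the remaining part of the period to the current time, while the one-shot branch simply arms at count. The hrtimer itself is programmed in nanoseconds, hence the 100x factor applied to the 100ns-unit delta. A userspace sketch of the periodic re-arm arithmetic with hypothetical values:

#include <stdint.h>
#include <stdio.h>

static uint64_t next_periodic_expiry(uint64_t exp_time, uint64_t count,
				     uint64_t time_now)
{
	/* Still in the future: keep the programmed expiration. */
	if (time_now < exp_time)
		return exp_time;

	/* Skip the missed periods and land on the next multiple of count. */
	return time_now + (count - (time_now - exp_time) % count);
}

int main(void)
{
	/* period 1000 (100us), due at 5000, now 7300 -> next expiry 8000 */
	printf("%llu\n",
	       (unsigned long long)next_periodic_expiry(5000, 1000, 7300));
	return 0;
}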
635 old_config = {.as_uint64 = stimer->config.as_uint64}; in stimer_set_config()
639 if (!synic->active && !host) in stimer_set_config()
642 trace_kvm_hv_stimer_set_config(stimer_to_vcpu(stimer)->vcpu_id, in stimer_set_config()
643 stimer->index, config, host); in stimer_set_config()
649 stimer->config.as_uint64 = new_config.as_uint64; in stimer_set_config()
651 if (stimer->config.enable) in stimer_set_config()
663 if (!synic->active && !host) in stimer_set_count()
666 trace_kvm_hv_stimer_set_count(stimer_to_vcpu(stimer)->vcpu_id, in stimer_set_count()
667 stimer->index, count, host); in stimer_set_count()
670 stimer->count = count; in stimer_set_count()
671 if (stimer->count == 0) in stimer_set_count()
672 stimer->config.enable = 0; in stimer_set_count()
673 else if (stimer->config.auto_enable) in stimer_set_count()
674 stimer->config.enable = 1; in stimer_set_count()
676 if (stimer->config.enable) in stimer_set_count()
684 *pconfig = stimer->config.as_uint64; in stimer_get_config()
690 *pcount = stimer->count; in stimer_get_count()
703 if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE)) in synic_deliver_msg()
704 return -ENOENT; in synic_deliver_msg()
706 msg_page_gfn = synic->msg_page >> PAGE_SHIFT; in synic_deliver_msg()
709 * Strictly following the spec-mandated ordering would assume setting in synic_deliver_msg()
734 return -EAGAIN; in synic_deliver_msg()
738 sizeof(src_msg->header) + in synic_deliver_msg()
739 src_msg->header.payload_size); in synic_deliver_msg()
747 return -EFAULT; in synic_deliver_msg()
754 struct hv_message *msg = &stimer->msg; in stimer_send_msg()
756 (struct hv_timer_message_payload *)&msg->u.payload; in stimer_send_msg()
762 bool no_retry = stimer->config.periodic; in stimer_send_msg()
764 payload->expiration_time = stimer->exp_time; in stimer_send_msg()
765 payload->delivery_time = get_time_ref_counter(vcpu->kvm); in stimer_send_msg()
767 stimer->config.sintx, msg, in stimer_send_msg()
776 .vector = stimer->config.apic_vector in stimer_notify_direct()
786 int r, direct = stimer->config.direct_mode; in stimer_expiration()
788 stimer->msg_pending = true; in stimer_expiration()
793 trace_kvm_hv_stimer_expiration(stimer_to_vcpu(stimer)->vcpu_id, in stimer_expiration()
794 stimer->index, direct, r); in stimer_expiration()
796 stimer->msg_pending = false; in stimer_expiration()
797 if (!(stimer->config.periodic)) in stimer_expiration()
798 stimer->config.enable = 0; in stimer_expiration()
809 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_process_stimers()
810 if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) { in kvm_hv_process_stimers()
811 stimer = &hv_vcpu->stimer[i]; in kvm_hv_process_stimers()
812 if (stimer->config.enable) { in kvm_hv_process_stimers()
813 exp_time = stimer->exp_time; in kvm_hv_process_stimers()
817 get_time_ref_counter(vcpu->kvm); in kvm_hv_process_stimers()
822 if ((stimer->config.enable) && in kvm_hv_process_stimers()
823 stimer->count) { in kvm_hv_process_stimers()
824 if (!stimer->msg_pending) in kvm_hv_process_stimers()
837 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_uninit()
838 stimer_cleanup(&hv_vcpu->stimer[i]); in kvm_hv_vcpu_uninit()
843 if (!(vcpu->arch.hyperv.hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) in kvm_hv_assist_page_enabled()
845 return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED; in kvm_hv_assist_page_enabled()
854 return !kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data, in kvm_hv_get_assist_page()
861 struct hv_message *msg = &stimer->msg; in stimer_prepare_msg()
863 (struct hv_timer_message_payload *)&msg->u.payload; in stimer_prepare_msg()
865 memset(&msg->header, 0, sizeof(msg->header)); in stimer_prepare_msg()
866 msg->header.message_type = HVMSG_TIMER_EXPIRED; in stimer_prepare_msg()
867 msg->header.payload_size = sizeof(*payload); in stimer_prepare_msg()
869 payload->timer_index = stimer->index; in stimer_prepare_msg()
870 payload->expiration_time = 0; in stimer_prepare_msg()
871 payload->delivery_time = 0; in stimer_prepare_msg()
877 stimer->index = timer_index; in stimer_init()
878 hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); in stimer_init()
879 stimer->timer.function = stimer_timer_callback; in stimer_init()
888 synic_init(&hv_vcpu->synic); in kvm_hv_vcpu_init()
890 bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT); in kvm_hv_vcpu_init()
891 for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++) in kvm_hv_vcpu_init()
892 stimer_init(&hv_vcpu->stimer[i], i); in kvm_hv_vcpu_init()
899 hv_vcpu->vp_index = kvm_vcpu_get_idx(vcpu); in kvm_hv_vcpu_postcreate()
907 * Hyper-V SynIC auto EOI SINT's are in kvm_hv_activate_synic()
911 kvm_request_apicv_update(vcpu->kvm, false, APICV_INHIBIT_REASON_HYPERV); in kvm_hv_activate_synic()
912 synic->active = true; in kvm_hv_activate_synic()
913 synic->dont_zero_synic_pages = dont_zero_synic_pages; in kvm_hv_activate_synic()
914 synic->control = HV_SYNIC_CONTROL_ENABLE; in kvm_hv_activate_synic()
945 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_msr_get_crash_data()
946 size_t size = ARRAY_SIZE(hv->hv_crash_param); in kvm_hv_msr_get_crash_data()
949 return -EINVAL; in kvm_hv_msr_get_crash_data()
951 *pdata = hv->hv_crash_param[array_index_nospec(index, size)]; in kvm_hv_msr_get_crash_data()
957 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_msr_get_crash_ctl()
959 *pdata = hv->hv_crash_ctl; in kvm_hv_msr_get_crash_ctl()
965 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_msr_set_crash_ctl()
968 hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY; in kvm_hv_msr_set_crash_ctl()
973 hv->hv_crash_param[0], in kvm_hv_msr_set_crash_ctl()
974 hv->hv_crash_param[1], in kvm_hv_msr_set_crash_ctl()
975 hv->hv_crash_param[2], in kvm_hv_msr_set_crash_ctl()
976 hv->hv_crash_param[3], in kvm_hv_msr_set_crash_ctl()
977 hv->hv_crash_param[4]); in kvm_hv_msr_set_crash_ctl()
989 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_msr_set_crash_data()
990 size_t size = ARRAY_SIZE(hv->hv_crash_param); in kvm_hv_msr_set_crash_data()
993 return -EINVAL; in kvm_hv_msr_set_crash_data()
995 hv->hv_crash_param[array_index_nospec(index, size)] = data; in kvm_hv_msr_set_crash_data()
1000 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
1004 * nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
1007 * Hyper-V formula:
1010 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
1012 * ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1013 * scale / 2^64 = tsc_to_system_mul * 2^(tsc_shift-32) / 100
1017 * nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
1018 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
1020 * nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1021 * - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
1024 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
1026 * - tsc_timestamp * scale / 2^64
1029 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
1030 * offset = system_time / 100 - tsc_timestamp * scale / 2^64
1039 if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT)) in compute_tsc_page_parameters()
1045 * tsc_to_system_mul / 100 >= 2^(32-tsc_shift) in compute_tsc_page_parameters()
1046 * tsc_to_system_mul >= 100 * 2^(32-tsc_shift) in compute_tsc_page_parameters()
1048 max_mul = 100ull << (32 - hv_clock->tsc_shift); in compute_tsc_page_parameters()
1049 if (hv_clock->tsc_to_system_mul >= max_mul) in compute_tsc_page_parameters()
1056 tsc_ref->tsc_scale = in compute_tsc_page_parameters()
1057 mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift), in compute_tsc_page_parameters()
1058 hv_clock->tsc_to_system_mul, in compute_tsc_page_parameters()
1061 tsc_ref->tsc_offset = hv_clock->system_time; in compute_tsc_page_parameters()
1062 do_div(tsc_ref->tsc_offset, 100); in compute_tsc_page_parameters()
1063 tsc_ref->tsc_offset -= in compute_tsc_page_parameters()
1064 mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64); in compute_tsc_page_parameters()
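compute_tsc_page_parameters() above is the code form of the derivation in the preceding comment block: scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100 and offset = system_time/100 - tsc_timestamp * scale / 2^64. A standalone sketch with hypothetical kvmclock values for a 1 GHz TSC (tsc_to_system_mul = 0x80000000 with tsc_shift = 1 encodes 1 ns per tick):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tsc_to_system_mul = 0x80000000u;	/* hypothetical: 1 ns/tick */
	int tsc_shift = 1;
	uint64_t tsc_timestamp = 1000000;		/* TSC value at snapshot */
	uint64_t system_time = 5000000;			/* guest ns at snapshot */

	/* scale = tsc_to_system_mul * 2^(32+tsc_shift) / 100 */
	uint64_t scale = (uint64_t)((((unsigned __int128)1 << (32 + tsc_shift)) *
				     tsc_to_system_mul) / 100);

	/* offset = system_time/100 - tsc_timestamp * scale / 2^64 */
	int64_t offset = (int64_t)(system_time / 100) -
		(int64_t)(((unsigned __int128)tsc_timestamp * scale) >> 64);

	/* offset ~ 40000: 4 ms expressed in 100ns units */
	printf("scale=%#llx offset=%lld\n",
	       (unsigned long long)scale, (long long)offset);
	return 0;
}

This is the same arithmetic the fragment performs with mul_u64_u32_div(), do_div() and mul_u64_u64_shr(), which avoid a 128-bit type in kernel code.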
1071 struct kvm_hv *hv = &kvm->arch.hyperv; in kvm_hv_setup_tsc_page()
1075 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence)); in kvm_hv_setup_tsc_page()
1078 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) in kvm_hv_setup_tsc_page()
1081 mutex_lock(&kvm->arch.hyperv.hv_lock); in kvm_hv_setup_tsc_page()
1082 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) in kvm_hv_setup_tsc_page()
1085 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; in kvm_hv_setup_tsc_page()
1098 hv->tsc_ref.tsc_sequence = 0; in kvm_hv_setup_tsc_page()
1100 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) in kvm_hv_setup_tsc_page()
1103 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref)) in kvm_hv_setup_tsc_page()
1108 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) in kvm_hv_setup_tsc_page()
1118 /* Write the struct entirely before the non-zero sequence. */ in kvm_hv_setup_tsc_page()
1121 hv->tsc_ref.tsc_sequence = tsc_seq; in kvm_hv_setup_tsc_page()
1123 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)); in kvm_hv_setup_tsc_page()
1125 mutex_unlock(&kvm->arch.hyperv.hv_lock); in kvm_hv_setup_tsc_page()
1131 struct kvm *kvm = vcpu->kvm; in kvm_hv_set_msr_pw()
1132 struct kvm_hv *hv = &kvm->arch.hyperv; in kvm_hv_set_msr_pw()
1136 hv->hv_guest_os_id = data; in kvm_hv_set_msr_pw()
1138 if (!hv->hv_guest_os_id) in kvm_hv_set_msr_pw()
1139 hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; in kvm_hv_set_msr_pw()
1147 if (!hv->hv_guest_os_id) in kvm_hv_set_msr_pw()
1150 hv->hv_hypercall = data; in kvm_hv_set_msr_pw()
1161 hv->hv_hypercall = data; in kvm_hv_set_msr_pw()
1166 hv->hv_tsc_page = data; in kvm_hv_set_msr_pw()
1167 if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) in kvm_hv_set_msr_pw()
1172 msr - HV_X64_MSR_CRASH_P0, in kvm_hv_set_msr_pw()
1178 vcpu_debug(vcpu, "hyper-v reset requested\n"); in kvm_hv_set_msr_pw()
1183 hv->hv_reenlightenment_control = data; in kvm_hv_set_msr_pw()
1186 hv->hv_tsc_emulation_control = data; in kvm_hv_set_msr_pw()
1189 hv->hv_tsc_emulation_status = data; in kvm_hv_set_msr_pw()
1192 /* read-only, but still ignore it if host-initiated */ in kvm_hv_set_msr_pw()
1200 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n", in kvm_hv_set_msr_pw()
1219 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; in kvm_hv_set_msr()
1223 struct kvm_hv *hv = &vcpu->kvm->arch.hyperv; in kvm_hv_set_msr()
1230 if (new_vp_index == hv_vcpu->vp_index) in kvm_hv_set_msr()
1239 if (hv_vcpu->vp_index == vcpu_idx) in kvm_hv_set_msr()
1240 atomic_inc(&hv->num_mismatched_vp_indexes); in kvm_hv_set_msr()
1242 atomic_dec(&hv->num_mismatched_vp_indexes); in kvm_hv_set_msr()
1244 hv_vcpu->vp_index = new_vp_index; in kvm_hv_set_msr()
1252 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1269 hv_vcpu->hv_vapic = data; in kvm_hv_set_msr()
1286 hv_vcpu->runtime_offset = data - current_task_runtime_100ns(); in kvm_hv_set_msr()
1299 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; in kvm_hv_set_msr()
1308 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; in kvm_hv_set_msr()
1315 /* read-only, but still ignore it if host-initiated */ in kvm_hv_set_msr()
1320 vcpu_unimpl(vcpu, "Hyper-V unhandled wrmsr: 0x%x data 0x%llx\n", in kvm_hv_set_msr()
1332 struct kvm *kvm = vcpu->kvm; in kvm_hv_get_msr_pw()
1333 struct kvm_hv *hv = &kvm->arch.hyperv; in kvm_hv_get_msr_pw()
1337 data = hv->hv_guest_os_id; in kvm_hv_get_msr_pw()
1340 data = hv->hv_hypercall; in kvm_hv_get_msr_pw()
1346 data = hv->hv_tsc_page; in kvm_hv_get_msr_pw()
1350 msr - HV_X64_MSR_CRASH_P0, in kvm_hv_get_msr_pw()
1358 data = hv->hv_reenlightenment_control; in kvm_hv_get_msr_pw()
1361 data = hv->hv_tsc_emulation_control; in kvm_hv_get_msr_pw()
1364 data = hv->hv_tsc_emulation_status; in kvm_hv_get_msr_pw()
1370 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); in kvm_hv_get_msr_pw()
1382 struct kvm_vcpu_hv *hv_vcpu = &vcpu->arch.hyperv; in kvm_hv_get_msr()
1386 data = hv_vcpu->vp_index; in kvm_hv_get_msr()
1395 data = hv_vcpu->hv_vapic; in kvm_hv_get_msr()
1398 data = current_task_runtime_100ns() + hv_vcpu->runtime_offset; in kvm_hv_get_msr()
1411 int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2; in kvm_hv_get_msr()
1420 int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2; in kvm_hv_get_msr()
1426 data = (u64)vcpu->arch.virtual_tsc_khz * 1000; in kvm_hv_get_msr()
1432 vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr); in kvm_hv_get_msr()
1444 mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock); in kvm_hv_set_msr_common()
1446 mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock); in kvm_hv_set_msr_common()
1457 mutex_lock(&vcpu->kvm->arch.hyperv.hv_lock); in kvm_hv_get_msr_common()
1459 mutex_unlock(&vcpu->kvm->arch.hyperv.hv_lock); in kvm_hv_get_msr_common()
1469 struct kvm_hv *hv = &kvm->arch.hyperv; in sparse_set_to_vcpu_mask()
1479 if (likely(!atomic_read(&hv->num_mismatched_vp_indexes))) { in sparse_set_to_vcpu_mask()
1486 if (test_bit(vcpu_to_hv_vcpu(vcpu)->vp_index, in sparse_set_to_vcpu_mask()
1496 struct kvm *kvm = current_vcpu->kvm; in kvm_hv_flush_tlb()
1497 struct kvm_vcpu_hv *hv_vcpu = &current_vcpu->arch.hyperv; in kvm_hv_flush_tlb()

1557 cpumask_clear(&hv_vcpu->tlb_flush); in kvm_hv_flush_tlb()
1564 * vcpu->arch.cr3 may not be up-to-date for running vCPUs so we can't in kvm_hv_flush_tlb()
1568 NULL, vcpu_mask, &hv_vcpu->tlb_flush); in kvm_hv_flush_tlb()
1598 struct kvm *kvm = current_vcpu->kvm; in kvm_hv_send_ipi()
1671 return READ_ONCE(kvm->arch.hyperv.hv_guest_os_id) != 0; in kvm_hv_hypercall_enabled()
1690 ++vcpu->stat.hypercalls; in kvm_hv_hypercall_complete()
1696 return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result); in kvm_hv_hypercall_complete_userspace()
1707 if ((gpa & (__alignof__(param) - 1)) || in kvm_hvcall_signal_event()
1717 * Per spec, bits 32-47 contain the extra "flag number". However, we in kvm_hvcall_signal_event()
1723 /* remaining bits are reserved-zero */ in kvm_hvcall_signal_event()
1727 /* the eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't */ in kvm_hvcall_signal_event()
1729 eventfd = idr_find(&vcpu->kvm->arch.hyperv.conn_to_evt, param); in kvm_hvcall_signal_event()
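kvm_hvcall_signal_event() above validates the fast hypercall parameter before the eventfd lookup: bits 32-47 carry a "flag number" that this implementation does not use, the bits above them are reserved-zero, and what remains is the connection id used as the idr key. A userspace sketch of that decoding; the exact mask widths and error values are my assumption, not taken from the kernel headers:

#include <stdint.h>
#include <stdio.h>

#define CONN_ID_MASK	0x0000000000ffffffULL	/* assumed connection-id width */
#define FLAG_NR_MASK	0x0000ffff00000000ULL	/* bits 32-47: flag number */

/* Returns 0 and fills *conn_id on success, a negative value otherwise. */
static int decode_signal_event(uint64_t param, uint32_t *conn_id)
{
	if (param & FLAG_NR_MASK)
		return -1;			/* flag numbers not supported */
	if (param & ~(CONN_ID_MASK | FLAG_NR_MASK))
		return -2;			/* reserved bits must be zero */
	*conn_id = (uint32_t)(param & CONN_ID_MASK);
	return 0;
}

int main(void)
{
	uint32_t conn_id;

	printf("%d ", decode_signal_event(0x1234, &conn_id));	/* 0 */
	printf("%u\n", conn_id);				/* 4660 */
	return 0;
}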
1746 * per HYPER-V spec in kvm_hv_hypercall()
1796 if (unlikely(rep || !vcpu_to_synic(vcpu)->active)) { in kvm_hv_hypercall()
1800 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in kvm_hv_hypercall()
1801 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; in kvm_hv_hypercall()
1802 vcpu->run->hyperv.u.hcall.input = param; in kvm_hv_hypercall()
1803 vcpu->run->hyperv.u.hcall.params[0] = ingpa; in kvm_hv_hypercall()
1804 vcpu->run->hyperv.u.hcall.params[1] = outgpa; in kvm_hv_hypercall()
1805 vcpu->arch.complete_userspace_io = in kvm_hv_hypercall()
1865 if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) { in kvm_hv_hypercall()
1869 vcpu->run->exit_reason = KVM_EXIT_HYPERV; in kvm_hv_hypercall()
1870 vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL; in kvm_hv_hypercall()
1871 vcpu->run->hyperv.u.hcall.input = param; in kvm_hv_hypercall()
1872 vcpu->run->hyperv.u.hcall.params[0] = ingpa; in kvm_hv_hypercall()
1873 vcpu->run->hyperv.u.hcall.params[1] = outgpa; in kvm_hv_hypercall()
1874 vcpu->arch.complete_userspace_io = in kvm_hv_hypercall()
1888 mutex_init(&kvm->arch.hyperv.hv_lock); in kvm_hv_init_vm()
1889 idr_init(&kvm->arch.hyperv.conn_to_evt); in kvm_hv_init_vm()
1897 idr_for_each_entry(&kvm->arch.hyperv.conn_to_evt, eventfd, i) in kvm_hv_destroy_vm()
1899 idr_destroy(&kvm->arch.hyperv.conn_to_evt); in kvm_hv_destroy_vm()
1904 struct kvm_hv *hv = &kvm->arch.hyperv; in kvm_hv_eventfd_assign()
1912 mutex_lock(&hv->hv_lock); in kvm_hv_eventfd_assign()
1913 ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1, in kvm_hv_eventfd_assign()
1915 mutex_unlock(&hv->hv_lock); in kvm_hv_eventfd_assign()
1920 if (ret == -ENOSPC) in kvm_hv_eventfd_assign()
1921 ret = -EEXIST; in kvm_hv_eventfd_assign()
1928 struct kvm_hv *hv = &kvm->arch.hyperv; in kvm_hv_eventfd_deassign()
1931 mutex_lock(&hv->hv_lock); in kvm_hv_eventfd_deassign()
1932 eventfd = idr_remove(&hv->conn_to_evt, conn_id); in kvm_hv_eventfd_deassign()
1933 mutex_unlock(&hv->hv_lock); in kvm_hv_eventfd_deassign()
1936 return -ENOENT; in kvm_hv_eventfd_deassign()
1938 synchronize_srcu(&kvm->srcu); in kvm_hv_eventfd_deassign()
1945 if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) || in kvm_vm_ioctl_hv_eventfd()
1946 (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK)) in kvm_vm_ioctl_hv_eventfd()
1947 return -EINVAL; in kvm_vm_ioctl_hv_eventfd()
1949 if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN) in kvm_vm_ioctl_hv_eventfd()
1950 return kvm_hv_eventfd_deassign(kvm, args->conn_id); in kvm_vm_ioctl_hv_eventfd()
1951 return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd); in kvm_vm_ioctl_hv_eventfd()
1972 if (kvm_x86_ops.nested_ops->get_evmcs_version) in kvm_vcpu_ioctl_get_hv_cpuid()
1973 evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu); in kvm_vcpu_ioctl_get_hv_cpuid()
1977 --nent; in kvm_vcpu_ioctl_get_hv_cpuid()
1979 if (cpuid->nent < nent) in kvm_vcpu_ioctl_get_hv_cpuid()
1980 return -E2BIG; in kvm_vcpu_ioctl_get_hv_cpuid()
1982 if (cpuid->nent > nent) in kvm_vcpu_ioctl_get_hv_cpuid()
1983 cpuid->nent = nent; in kvm_vcpu_ioctl_get_hv_cpuid()
1989 switch (ent->function) { in kvm_vcpu_ioctl_get_hv_cpuid()
1993 ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES; in kvm_vcpu_ioctl_get_hv_cpuid()
1994 ent->ebx = signature[0]; in kvm_vcpu_ioctl_get_hv_cpuid()
1995 ent->ecx = signature[1]; in kvm_vcpu_ioctl_get_hv_cpuid()
1996 ent->edx = signature[2]; in kvm_vcpu_ioctl_get_hv_cpuid()
2001 ent->eax = signature[0]; in kvm_vcpu_ioctl_get_hv_cpuid()
2006 * We implement some Hyper-V 2016 functions so let's use in kvm_vcpu_ioctl_get_hv_cpuid()
2009 ent->eax = 0x00003839; in kvm_vcpu_ioctl_get_hv_cpuid()
2010 ent->ebx = 0x000A0000; in kvm_vcpu_ioctl_get_hv_cpuid()
2014 ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2015 ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2016 ent->eax |= HV_MSR_SYNIC_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2017 ent->eax |= HV_MSR_SYNTIMER_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2018 ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2019 ent->eax |= HV_MSR_HYPERCALL_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2020 ent->eax |= HV_MSR_VP_INDEX_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2021 ent->eax |= HV_MSR_RESET_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2022 ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2023 ent->eax |= HV_ACCESS_FREQUENCY_MSRS; in kvm_vcpu_ioctl_get_hv_cpuid()
2024 ent->eax |= HV_ACCESS_REENLIGHTENMENT; in kvm_vcpu_ioctl_get_hv_cpuid()
2026 ent->ebx |= HV_POST_MESSAGES; in kvm_vcpu_ioctl_get_hv_cpuid()
2027 ent->ebx |= HV_SIGNAL_EVENTS; in kvm_vcpu_ioctl_get_hv_cpuid()
2029 ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2030 ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2032 ent->ebx |= HV_DEBUGGING; in kvm_vcpu_ioctl_get_hv_cpuid()
2033 ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2034 ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2037 * Direct Synthetic timers only make sense with in-kernel in kvm_vcpu_ioctl_get_hv_cpuid()
2041 ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE; in kvm_vcpu_ioctl_get_hv_cpuid()
2046 ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED; in kvm_vcpu_ioctl_get_hv_cpuid()
2047 ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED; in kvm_vcpu_ioctl_get_hv_cpuid()
2048 ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED; in kvm_vcpu_ioctl_get_hv_cpuid()
2049 ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED; in kvm_vcpu_ioctl_get_hv_cpuid()
2050 ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED; in kvm_vcpu_ioctl_get_hv_cpuid()
2052 ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED; in kvm_vcpu_ioctl_get_hv_cpuid()
2054 ent->eax |= HV_X64_NO_NONARCH_CORESHARING; in kvm_vcpu_ioctl_get_hv_cpuid()
2059 ent->ebx = 0x00000FFF; in kvm_vcpu_ioctl_get_hv_cpuid()
2065 ent->eax = KVM_MAX_VCPUS; in kvm_vcpu_ioctl_get_hv_cpuid()
2070 ent->ebx = 64; in kvm_vcpu_ioctl_get_hv_cpuid()
2075 ent->eax = evmcs_ver; in kvm_vcpu_ioctl_get_hv_cpuid()
2082 ent->eax = 0; in kvm_vcpu_ioctl_get_hv_cpuid()
2083 ent->ebx = signature[0]; in kvm_vcpu_ioctl_get_hv_cpuid()
2084 ent->ecx = signature[1]; in kvm_vcpu_ioctl_get_hv_cpuid()
2085 ent->edx = signature[2]; in kvm_vcpu_ioctl_get_hv_cpuid()
2090 ent->eax = signature[0]; in kvm_vcpu_ioctl_get_hv_cpuid()
2094 ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING; in kvm_vcpu_ioctl_get_hv_cpuid()
2104 return -EFAULT; in kvm_vcpu_ioctl_get_hv_cpuid()