// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM Microsoft Hyper-V emulation
 *
 * derived from arch/x86/kvm/x86.c
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright (C) 2008 Qumranet, Inc.
 * Copyright IBM Corporation, 2008
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *   Avi Kivity   <avi@qumranet.com>
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Amit Shah    <amit.shah@qumranet.com>
 *   Ben-Ami Yassour <benami@il.ibm.com>
 *   Andrey Smetanin <asmetanin@virtuozzo.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "x86.h"
#include "lapic.h"
#include "ioapic.h"
#include "cpuid.h"
#include "hyperv.h"
#include "mmu.h"
#include "xen.h"

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/sched/cputime.h>
#include <linux/spinlock.h>
#include <linux/eventfd.h>

#include <asm/apicdef.h>
#include <asm/mshyperv.h>
#include <trace/events/kvm.h>

#include "trace.h"
#include "irq.h"
#include "fpu.h"

#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, HV_VCPUS_PER_SPARSE_BANK)

/*
 * As per the Hyper-V TLFS, extended hypercalls start at 0x8001
 * (HvExtCallQueryCapabilities). The response to this hypercall is a 64-bit
 * value where each bit indicates which extended hypercall is available
 * besides HvExtCallQueryCapabilities.
 *
 * 0x8001 - First extended hypercall, HvExtCallQueryCapabilities, no bit
 * assigned.
 *
 * 0x8002 - Bit 0
 * 0x8003 - Bit 1
 * ..
 * 0x8041 - Bit 63
 *
 * Therefore, HV_EXT_CALL_MAX = 0x8001 + 64
 */
#define HV_EXT_CALL_MAX (HV_EXT_CALL_QUERY_CAPABILITIES + 64)
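
/*
 * Illustrative sketch (not in the original source): given the mapping
 * above, the capability bit for a given extended hypercall code would be
 * computed as:
 *
 *	bit = code - HV_EXT_CALL_QUERY_CAPABILITIES - 1;
 *
 * e.g. 0x8002 maps to bit 0 and 0x8041 to bit 63 of the value returned by
 * HvExtCallQueryCapabilities.
 */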

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick);

static inline u64 synic_read_sint(struct kvm_vcpu_hv_synic *synic, int sint)
{
	return atomic64_read(&synic->sint[sint]);
}

static inline int synic_get_sint_vector(u64 sint_value)
{
	if (sint_value & HV_SYNIC_SINT_MASKED)
		return -1;
	return sint_value & HV_SYNIC_SINT_VECTOR_MASK;
}

static bool synic_has_vector_connected(struct kvm_vcpu_hv_synic *synic,
				      int vector)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			return true;
	}
	return false;
}

static bool synic_has_vector_auto_eoi(struct kvm_vcpu_hv_synic *synic,
				     int vector)
{
	int i;
	u64 sint_value;

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		sint_value = synic_read_sint(synic, i);
		if (synic_get_sint_vector(sint_value) == vector &&
		    sint_value & HV_SYNIC_SINT_AUTO_EOI)
			return true;
	}
	return false;
}

static void synic_update_vector(struct kvm_vcpu_hv_synic *synic,
				int vector)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	bool auto_eoi_old, auto_eoi_new;

	if (vector < HV_SYNIC_FIRST_VALID_VECTOR)
		return;

	if (synic_has_vector_connected(synic, vector))
		__set_bit(vector, synic->vec_bitmap);
	else
		__clear_bit(vector, synic->vec_bitmap);

	auto_eoi_old = !bitmap_empty(synic->auto_eoi_bitmap, 256);

	if (synic_has_vector_auto_eoi(synic, vector))
		__set_bit(vector, synic->auto_eoi_bitmap);
	else
		__clear_bit(vector, synic->auto_eoi_bitmap);

	auto_eoi_new = !bitmap_empty(synic->auto_eoi_bitmap, 256);

	if (auto_eoi_old == auto_eoi_new)
		return;

	if (!enable_apicv)
		return;

	down_write(&vcpu->kvm->arch.apicv_update_lock);

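	/*
	 * synic_auto_eoi_used counts vCPUs with at least one auto-EOI SINT
	 * programmed; it is protected by apicv_update_lock taken above.
	 */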
	if (auto_eoi_new)
		hv->synic_auto_eoi_used++;
	else
		hv->synic_auto_eoi_used--;

	/*
	 * Inhibit APICv if any vCPU is using SynIC's AutoEOI, which relies on
	 * the hypervisor to manually inject IRQs.
	 */
	__kvm_set_or_clear_apicv_inhibit(vcpu->kvm,
					 APICV_INHIBIT_REASON_HYPERV,
					 !!hv->synic_auto_eoi_used);

	up_write(&vcpu->kvm->arch.apicv_update_lock);
}

static int synic_set_sint(struct kvm_vcpu_hv_synic *synic, int sint,
			  u64 data, bool host)
{
	int vector, old_vector;
	bool masked;

	vector = data & HV_SYNIC_SINT_VECTOR_MASK;
	masked = data & HV_SYNIC_SINT_MASKED;

	/*
	 * Valid vectors are 16-255; however, nested Hyper-V attempts to write
	 * the default '0x10000' value on boot, and this should not #GP. We
	 * also need to allow zero-initializing the register from the host.
	 */
	if (vector < HV_SYNIC_FIRST_VALID_VECTOR && !host && !masked)
		return 1;
	/*
	 * The guest may configure multiple SINTs to use the same vector, so
	 * we maintain a bitmap of vectors handled by the SynIC, and a
	 * bitmap of vectors with auto-EOI behavior.  The bitmaps are
	 * updated here, and atomically queried on fast paths.
	 */
	old_vector = synic_read_sint(synic, sint) & HV_SYNIC_SINT_VECTOR_MASK;

	atomic64_set(&synic->sint[sint], data);

	synic_update_vector(synic, old_vector);

	synic_update_vector(synic, vector);

	/* Load SynIC vectors into EOI exit bitmap */
	kvm_make_request(KVM_REQ_SCAN_IOAPIC, hv_synic_to_vcpu(synic));
	return 0;
}

static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu = NULL;
	unsigned long i;

	if (vpidx >= KVM_MAX_VCPUS)
		return NULL;

	vcpu = kvm_get_vcpu(kvm, vpidx);
	if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
		return vcpu;
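
	/* Slow path: the VP index was remapped, scan all vCPUs for a match. */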
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (kvm_hv_get_vpindex(vcpu) == vpidx)
			return vcpu;
	return NULL;
}

static struct kvm_vcpu_hv_synic *synic_get(struct kvm *kvm, u32 vpidx)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_hv_synic *synic;

	vcpu = get_vcpu_by_vpidx(kvm, vpidx);
	if (!vcpu || !to_hv_vcpu(vcpu))
		return NULL;
	synic = to_hv_synic(vcpu);
	return (synic->active) ? synic : NULL;
}

static void kvm_hv_notify_acked_sint(struct kvm_vcpu *vcpu, u32 sint)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	int gsi, idx;

	trace_kvm_hv_notify_acked_sint(vcpu->vcpu_id, sint);

	/* Try to deliver pending Hyper-V SynIC timer messages */
	for (idx = 0; idx < ARRAY_SIZE(hv_vcpu->stimer); idx++) {
		stimer = &hv_vcpu->stimer[idx];
		if (stimer->msg_pending && stimer->config.enable &&
		    !stimer->config.direct_mode &&
		    stimer->config.sintx == sint)
			stimer_mark_pending(stimer, false);
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = atomic_read(&synic->sint_to_gsi[sint]);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void synic_exit(struct kvm_vcpu_hv_synic *synic, u32 msr)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNIC;
	hv_vcpu->exit.u.synic.msr = msr;
	hv_vcpu->exit.u.synic.control = synic->control;
	hv_vcpu->exit.u.synic.evt_page = synic->evt_page;
	hv_vcpu->exit.u.synic.msg_page = synic->msg_page;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
			 u32 msr, u64 data, bool host)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	int ret;

	if (!synic->active && (!host || data))
		return 1;

	trace_kvm_hv_synic_set_msr(vcpu->vcpu_id, msr, data, host);

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		synic->control = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SVERSION:
		if (!host) {
			ret = 1;
			break;
		}
		synic->version = data;
		break;
	case HV_X64_MSR_SIEFP:
		if ((data & HV_SYNIC_SIEFP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->evt_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_SIMP:
		if ((data & HV_SYNIC_SIMP_ENABLE) && !host &&
		    !synic->dont_zero_synic_pages)
			if (kvm_clear_guest(vcpu->kvm,
					    data & PAGE_MASK, PAGE_SIZE)) {
				ret = 1;
				break;
			}
		synic->msg_page = data;
		if (!host)
			synic_exit(synic, msr);
		break;
	case HV_X64_MSR_EOM: {
		int i;

		if (!synic->active)
			break;

		for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
			kvm_hv_notify_acked_sint(vcpu, i);
		break;
	}
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static bool kvm_hv_is_syndbg_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	return hv_vcpu->cpuid_cache.syndbg_cap_eax &
		HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
}

static int kvm_hv_syndbg_complete_userspace(struct kvm_vcpu *vcpu)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);

	if (vcpu->run->hyperv.u.syndbg.msr == HV_X64_MSR_SYNDBG_CONTROL)
		hv->hv_syndbg.control.status =
			vcpu->run->hyperv.u.syndbg.status;
	return 1;
}

static void syndbg_exit(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	hv_vcpu->exit.type = KVM_EXIT_HYPERV_SYNDBG;
	hv_vcpu->exit.u.syndbg.msr = msr;
	hv_vcpu->exit.u.syndbg.control = syndbg->control.control;
	hv_vcpu->exit.u.syndbg.send_page = syndbg->control.send_page;
	hv_vcpu->exit.u.syndbg.recv_page = syndbg->control.recv_page;
	hv_vcpu->exit.u.syndbg.pending_page = syndbg->control.pending_page;
	vcpu->arch.complete_userspace_io =
			kvm_hv_syndbg_complete_userspace;

	kvm_make_request(KVM_REQ_HV_EXIT, vcpu);
}

static int syndbg_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	trace_kvm_hv_syndbg_set_msr(vcpu->vcpu_id,
				    to_hv_vcpu(vcpu)->vp_index, msr, data);
	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		syndbg->control.control = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		syndbg->control.status = data;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		syndbg->control.send_page = data;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		syndbg->control.recv_page = data;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		syndbg->control.pending_page = data;
		if (!host)
			syndbg_exit(vcpu, msr);
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		syndbg->options = data;
		break;
	default:
		break;
	}

	return 0;
}

static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

	if (!kvm_hv_is_syndbg_enabled(vcpu) && !host)
		return 1;

	switch (msr) {
	case HV_X64_MSR_SYNDBG_CONTROL:
		*pdata = syndbg->control.control;
		break;
	case HV_X64_MSR_SYNDBG_STATUS:
		*pdata = syndbg->control.status;
		break;
	case HV_X64_MSR_SYNDBG_SEND_BUFFER:
		*pdata = syndbg->control.send_page;
		break;
	case HV_X64_MSR_SYNDBG_RECV_BUFFER:
		*pdata = syndbg->control.recv_page;
		break;
	case HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		*pdata = syndbg->control.pending_page;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
		*pdata = syndbg->options;
		break;
	default:
		break;
	}

	trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);

	return 0;
}

static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
			 bool host)
{
	int ret;

	if (!synic->active && !host)
		return 1;

	ret = 0;
	switch (msr) {
	case HV_X64_MSR_SCONTROL:
		*pdata = synic->control;
		break;
	case HV_X64_MSR_SVERSION:
		*pdata = synic->version;
		break;
	case HV_X64_MSR_SIEFP:
		*pdata = synic->evt_page;
		break;
	case HV_X64_MSR_SIMP:
		*pdata = synic->msg_page;
		break;
	case HV_X64_MSR_EOM:
		*pdata = 0;
		break;
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
		break;
	default:
		ret = 1;
		break;
	}
	return ret;
}

static int synic_set_irq(struct kvm_vcpu_hv_synic *synic, u32 sint)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	struct kvm_lapic_irq irq;
	int ret, vector;

	if (KVM_BUG_ON(!lapic_in_kernel(vcpu), vcpu->kvm))
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint))
		return -EINVAL;

	vector = synic_get_sint_vector(synic_read_sint(synic, sint));
	if (vector < 0)
		return -ENOENT;

	memset(&irq, 0, sizeof(irq));
	irq.shorthand = APIC_DEST_SELF;
	irq.dest_mode = APIC_DEST_PHYSICAL;
	irq.delivery_mode = APIC_DM_FIXED;
	irq.vector = vector;
	irq.level = 1;

	ret = kvm_irq_delivery_to_apic(vcpu->kvm, vcpu->arch.apic, &irq, NULL);
	trace_kvm_hv_synic_set_irq(vcpu->vcpu_id, sint, irq.vector, ret);
	return ret;
}

int kvm_hv_synic_set_irq(struct kvm *kvm, u32 vpidx, u32 sint)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	return synic_set_irq(synic, sint);
}

void kvm_hv_synic_send_eoi(struct kvm_vcpu *vcpu, int vector)
{
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);
	int i;

	trace_kvm_hv_synic_send_eoi(vcpu->vcpu_id, vector);

	for (i = 0; i < ARRAY_SIZE(synic->sint); i++)
		if (synic_get_sint_vector(synic_read_sint(synic, i)) == vector)
			kvm_hv_notify_acked_sint(vcpu, i);
}

static int kvm_hv_set_sint_gsi(struct kvm *kvm, u32 vpidx, u32 sint, int gsi)
{
	struct kvm_vcpu_hv_synic *synic;

	synic = synic_get(kvm, vpidx);
	if (!synic)
		return -EINVAL;

	if (sint >= ARRAY_SIZE(synic->sint_to_gsi))
		return -EINVAL;

	atomic_set(&synic->sint_to_gsi[sint], gsi);
	return 0;
}

void kvm_hv_irq_routing_update(struct kvm *kvm)
{
	struct kvm_irq_routing_table *irq_rt;
	struct kvm_kernel_irq_routing_entry *e;
	u32 gsi;

	irq_rt = srcu_dereference_check(kvm->irq_routing, &kvm->irq_srcu,
					lockdep_is_held(&kvm->irq_lock));

	for (gsi = 0; gsi < irq_rt->nr_rt_entries; gsi++) {
		hlist_for_each_entry(e, &irq_rt->map[gsi], link) {
			if (e->type == KVM_IRQ_ROUTING_HV_SINT)
				kvm_hv_set_sint_gsi(kvm, e->hv_sint.vcpu,
						    e->hv_sint.sint, gsi);
		}
	}
}

static void synic_init(struct kvm_vcpu_hv_synic *synic)
{
	int i;

	memset(synic, 0, sizeof(*synic));
	synic->version = HV_SYNIC_VERSION_1;
	for (i = 0; i < ARRAY_SIZE(synic->sint); i++) {
		atomic64_set(&synic->sint[i], HV_SYNIC_SINT_MASKED);
		atomic_set(&synic->sint_to_gsi[i], -1);
	}
}

static u64 get_time_ref_counter(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct kvm_vcpu *vcpu;
	u64 tsc;

	/*
	 * Fall back to get_kvmclock_ns() when the TSC page hasn't been set
	 * up, or is broken, disabled, or being updated.
	 */
	if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET)
		return div_u64(get_kvmclock_ns(kvm), 100);

	vcpu = kvm_get_vcpu(kvm, 0);
	tsc = kvm_read_l1_tsc(vcpu, rdtsc());
	return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64)
		+ hv->tsc_ref.tsc_offset;
}
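
/*
 * Illustrative note (not in the original source): the return value above
 * follows the Hyper-V reference counter formula, in 100ns units:
 *
 *	time = ((tsc * tsc_scale) >> 64) + tsc_offset
 *
 * For a hypothetical 1 GHz guest TSC, tsc_scale would be ~2^64 / 100, so
 * 10^9 TSC ticks (one second) yield 10^7 units of 100ns, as expected.
 */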

static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
				bool vcpu_kick)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);

	set_bit(stimer->index,
		to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	kvm_make_request(KVM_REQ_HV_STIMER, vcpu);
	if (vcpu_kick)
		kvm_vcpu_kick(vcpu);
}

static void stimer_cleanup(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);

	trace_kvm_hv_stimer_cleanup(hv_stimer_to_vcpu(stimer)->vcpu_id,
				    stimer->index);

	hrtimer_cancel(&stimer->timer);
	clear_bit(stimer->index,
		  to_hv_vcpu(vcpu)->stimer_pending_bitmap);
	stimer->msg_pending = false;
	stimer->exp_time = 0;
}

static enum hrtimer_restart stimer_timer_callback(struct hrtimer *timer)
{
	struct kvm_vcpu_hv_stimer *stimer;

	stimer = container_of(timer, struct kvm_vcpu_hv_stimer, timer);
	trace_kvm_hv_stimer_callback(hv_stimer_to_vcpu(stimer)->vcpu_id,
				     stimer->index);
	stimer_mark_pending(stimer, true);

	return HRTIMER_NORESTART;
}

/*
 * stimer_start() assumptions:
 * a) stimer->count is not equal to 0
 * b) stimer->config has HV_STIMER_ENABLE flag
 */
static int stimer_start(struct kvm_vcpu_hv_stimer *stimer)
{
	u64 time_now;
	ktime_t ktime_now;

	time_now = get_time_ref_counter(hv_stimer_to_vcpu(stimer)->kvm);
	ktime_now = ktime_get();

	if (stimer->config.periodic) {
		if (stimer->exp_time) {
			if (time_now >= stimer->exp_time) {
				u64 remainder;

				div64_u64_rem(time_now - stimer->exp_time,
					      stimer->count, &remainder);
				stimer->exp_time =
					time_now + (stimer->count - remainder);
			}
		} else
			stimer->exp_time = time_now + stimer->count;

		trace_kvm_hv_stimer_start_periodic(
					hv_stimer_to_vcpu(stimer)->vcpu_id,
					stimer->index,
					time_now, stimer->exp_time);

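		/* Timer values are in 100ns units; scale to ns for hrtimer. */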
		hrtimer_start(&stimer->timer,
			      ktime_add_ns(ktime_now,
					   100 * (stimer->exp_time - time_now)),
			      HRTIMER_MODE_ABS);
		return 0;
	}
	stimer->exp_time = stimer->count;
	if (time_now >= stimer->count) {
		/*
		 * Expire the timer per the Hypervisor Top-Level Functional
		 * Specification v4 (15.3.1):
		 * "If a one shot is enabled and the specified count is in
		 * the past, it will expire immediately."
		 */
		stimer_mark_pending(stimer, false);
		return 0;
	}

	trace_kvm_hv_stimer_start_one_shot(hv_stimer_to_vcpu(stimer)->vcpu_id,
					   stimer->index,
					   time_now, stimer->count);

	hrtimer_start(&stimer->timer,
		      ktime_add_ns(ktime_now, 100 * (stimer->count - time_now)),
		      HRTIMER_MODE_ABS);
	return 0;
}

static int stimer_set_config(struct kvm_vcpu_hv_stimer *stimer, u64 config,
			     bool host)
{
	union hv_stimer_config new_config = {.as_uint64 = config},
		old_config = {.as_uint64 = stimer->config.as_uint64};
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);

	if (!synic->active && (!host || config))
		return 1;

	if (unlikely(!host && hv_vcpu->enforce_cpuid && new_config.direct_mode &&
		     !(hv_vcpu->cpuid_cache.features_edx &
		       HV_STIMER_DIRECT_MODE_AVAILABLE)))
		return 1;

	trace_kvm_hv_stimer_set_config(hv_stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, config, host);

	stimer_cleanup(stimer);
	if (old_config.enable &&
	    !new_config.direct_mode && new_config.sintx == 0)
		new_config.enable = 0;
	stimer->config.as_uint64 = new_config.as_uint64;

	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);

	return 0;
}

static int stimer_set_count(struct kvm_vcpu_hv_stimer *stimer, u64 count,
			    bool host)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_vcpu_hv_synic *synic = to_hv_synic(vcpu);

	if (!synic->active && (!host || count))
		return 1;

	trace_kvm_hv_stimer_set_count(hv_stimer_to_vcpu(stimer)->vcpu_id,
				      stimer->index, count, host);

	stimer_cleanup(stimer);
	stimer->count = count;
	if (stimer->count == 0)
		stimer->config.enable = 0;
	else if (stimer->config.auto_enable)
		stimer->config.enable = 1;

	if (stimer->config.enable)
		stimer_mark_pending(stimer, false);

	return 0;
}

static int stimer_get_config(struct kvm_vcpu_hv_stimer *stimer, u64 *pconfig)
{
	*pconfig = stimer->config.as_uint64;
	return 0;
}

static int stimer_get_count(struct kvm_vcpu_hv_stimer *stimer, u64 *pcount)
{
	*pcount = stimer->count;
	return 0;
}

static int synic_deliver_msg(struct kvm_vcpu_hv_synic *synic, u32 sint,
			     struct hv_message *src_msg, bool no_retry)
{
	struct kvm_vcpu *vcpu = hv_synic_to_vcpu(synic);
	int msg_off = offsetof(struct hv_message_page, sint_message[sint]);
	gfn_t msg_page_gfn;
	struct hv_message_header hv_hdr;
	int r;

	if (!(synic->msg_page & HV_SYNIC_SIMP_ENABLE))
		return -ENOENT;

	msg_page_gfn = synic->msg_page >> PAGE_SHIFT;

	/*
	 * Strictly following the spec-mandated ordering would assume setting
	 * .msg_pending before checking .message_type.  However, this function
	 * is only called in vCPU context, so the entire update is atomic from
	 * the guest's point of view and the exact order here doesn't matter.
	 */
	r = kvm_vcpu_read_guest_page(vcpu, msg_page_gfn, &hv_hdr.message_type,
				     msg_off + offsetof(struct hv_message,
							header.message_type),
				     sizeof(hv_hdr.message_type));
	if (r < 0)
		return r;

	if (hv_hdr.message_type != HVMSG_NONE) {
		if (no_retry)
			return 0;

		hv_hdr.message_flags.msg_pending = 1;
		r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn,
					      &hv_hdr.message_flags,
					      msg_off +
					      offsetof(struct hv_message,
						       header.message_flags),
					      sizeof(hv_hdr.message_flags));
		if (r < 0)
			return r;
		return -EAGAIN;
	}

	r = kvm_vcpu_write_guest_page(vcpu, msg_page_gfn, src_msg, msg_off,
				      sizeof(src_msg->header) +
				      src_msg->header.payload_size);
	if (r < 0)
		return r;

	r = synic_set_irq(synic, sint);
	if (r < 0)
		return r;
	if (r == 0)
		return -EFAULT;
	return 0;
}

static int stimer_send_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	/*
	 * To avoid piling up periodic ticks, don't retry message delivery
	 * for them (in line with the "lazy" lost-ticks policy).
	 */
	bool no_retry = stimer->config.periodic;

	payload->expiration_time = stimer->exp_time;
	payload->delivery_time = get_time_ref_counter(vcpu->kvm);
	return synic_deliver_msg(to_hv_synic(vcpu),
				 stimer->config.sintx, msg,
				 no_retry);
}

static int stimer_notify_direct(struct kvm_vcpu_hv_stimer *stimer)
{
	struct kvm_vcpu *vcpu = hv_stimer_to_vcpu(stimer);
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = stimer->config.apic_vector
	};

	if (lapic_in_kernel(vcpu))
		return !kvm_apic_set_irq(vcpu, &irq, NULL);
	return 0;
}

static void stimer_expiration(struct kvm_vcpu_hv_stimer *stimer)
{
	int r, direct = stimer->config.direct_mode;

	stimer->msg_pending = true;
	if (!direct)
		r = stimer_send_msg(stimer);
	else
		r = stimer_notify_direct(stimer);
	trace_kvm_hv_stimer_expiration(hv_stimer_to_vcpu(stimer)->vcpu_id,
				       stimer->index, direct, r);
	if (!r) {
		stimer->msg_pending = false;
		if (!(stimer->config.periodic))
			stimer->config.enable = 0;
	}
}

void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_vcpu_hv_stimer *stimer;
	u64 time_now, exp_time;
	int i;

	if (!hv_vcpu)
		return;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
			stimer = &hv_vcpu->stimer[i];
			if (stimer->config.enable) {
				exp_time = stimer->exp_time;

				if (exp_time) {
					time_now =
						get_time_ref_counter(vcpu->kvm);
					if (time_now >= exp_time)
						stimer_expiration(stimer);
				}

				if ((stimer->config.enable) &&
				    stimer->count) {
					if (!stimer->msg_pending)
						stimer_start(stimer);
				} else
					stimer_cleanup(stimer);
			}
		}
}

void kvm_hv_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i;

	if (!hv_vcpu)
		return;

	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_cleanup(&hv_vcpu->stimer[i]);

	kfree(hv_vcpu);
	vcpu->arch.hyperv = NULL;
}

bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu)
		return false;

	if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
		return false;
	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
}
EXPORT_SYMBOL_GPL(kvm_hv_assist_page_enabled);

int kvm_hv_get_assist_page(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (!hv_vcpu || !kvm_hv_assist_page_enabled(vcpu))
		return -EFAULT;

	return kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.pv_eoi.data,
				     &hv_vcpu->vp_assist_page, sizeof(struct hv_vp_assist_page));
}
EXPORT_SYMBOL_GPL(kvm_hv_get_assist_page);

static void stimer_prepare_msg(struct kvm_vcpu_hv_stimer *stimer)
{
	struct hv_message *msg = &stimer->msg;
	struct hv_timer_message_payload *payload =
			(struct hv_timer_message_payload *)&msg->u.payload;

	memset(&msg->header, 0, sizeof(msg->header));
	msg->header.message_type = HVMSG_TIMER_EXPIRED;
	msg->header.payload_size = sizeof(*payload);

	payload->timer_index = stimer->index;
	payload->expiration_time = 0;
	payload->delivery_time = 0;
}

static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
{
	memset(stimer, 0, sizeof(*stimer));
	stimer->index = timer_index;
	hrtimer_init(&stimer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
	stimer->timer.function = stimer_timer_callback;
	stimer_prepare_msg(stimer);
}

int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	int i;

	if (hv_vcpu)
		return 0;

	hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
	if (!hv_vcpu)
		return -ENOMEM;

	vcpu->arch.hyperv = hv_vcpu;
	hv_vcpu->vcpu = vcpu;

	synic_init(&hv_vcpu->synic);

	bitmap_zero(hv_vcpu->stimer_pending_bitmap, HV_SYNIC_STIMER_COUNT);
	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
		stimer_init(&hv_vcpu->stimer[i], i);

	hv_vcpu->vp_index = vcpu->vcpu_idx;

	for (i = 0; i < HV_NR_TLB_FLUSH_FIFOS; i++) {
		INIT_KFIFO(hv_vcpu->tlb_flush_fifo[i].entries);
		spin_lock_init(&hv_vcpu->tlb_flush_fifo[i].write_lock);
	}

	return 0;
}

int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
{
	struct kvm_vcpu_hv_synic *synic;
	int r;

	r = kvm_hv_vcpu_init(vcpu);
	if (r)
		return r;

	synic = to_hv_synic(vcpu);

	synic->active = true;
	synic->dont_zero_synic_pages = dont_zero_synic_pages;
	synic->control = HV_SYNIC_CONTROL_ENABLE;
	return 0;
}

static bool kvm_hv_msr_partition_wide(u32 msr)
{
	bool r = false;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
	case HV_X64_MSR_REFERENCE_TSC:
	case HV_X64_MSR_TIME_REF_COUNT:
	case HV_X64_MSR_CRASH_CTL:
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_RESET:
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		r = true;
		break;
	}

	return r;
}

static int kvm_hv_msr_get_crash_data(struct kvm *kvm, u32 index, u64 *pdata)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	*pdata = hv->hv_crash_param[array_index_nospec(index, size)];
	return 0;
}

static int kvm_hv_msr_get_crash_ctl(struct kvm *kvm, u64 *pdata)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	*pdata = hv->hv_crash_ctl;
	return 0;
}

static int kvm_hv_msr_set_crash_ctl(struct kvm *kvm, u64 data)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY;

	return 0;
}

static int kvm_hv_msr_set_crash_data(struct kvm *kvm, u32 index, u64 data)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	size_t size = ARRAY_SIZE(hv->hv_crash_param);

	if (WARN_ON_ONCE(index >= size))
		return -EINVAL;

	hv->hv_crash_param[array_index_nospec(index, size)] = data;
	return 0;
}

/*
 * The kvmclock and Hyper-V TSC page use similar formulas, and converting
 * between them is possible:
 *
 * kvmclock formula:
 *    nsec = (ticks - tsc_timestamp) * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *
 * Hyper-V formula:
 *    nsec/100 = ticks * scale / 2^64 + offset
 *
 * When tsc_timestamp = system_time = 0, offset is zero in the Hyper-V formula.
 * By dividing the kvmclock formula by 100 and equating what's left we get:
 *    ticks * scale / 2^64 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale / 2^64 =         tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *            scale        =         tsc_to_system_mul * 2^(32+tsc_shift) / 100
 *
 * Now expand the kvmclock formula and divide by 100:
 *    nsec = ticks * tsc_to_system_mul * 2^(tsc_shift-32)
 *           - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32)
 *           + system_time
 *    nsec/100 = ticks * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               - tsc_timestamp * tsc_to_system_mul * 2^(tsc_shift-32) / 100
 *               + system_time / 100
 *
 * Replace tsc_to_system_mul * 2^(tsc_shift-32) / 100 by scale / 2^64:
 *    nsec/100 = ticks * scale / 2^64
 *               - tsc_timestamp * scale / 2^64
 *               + system_time / 100
 *
 * Equate with the Hyper-V formula so that ticks * scale / 2^64 cancels out:
 *    offset = system_time / 100 - tsc_timestamp * scale / 2^64
 *
 * These two equivalencies are implemented in this function.
 */
static bool compute_tsc_page_parameters(struct pvclock_vcpu_time_info *hv_clock,
					struct ms_hyperv_tsc_page *tsc_ref)
{
	u64 max_mul;

	if (!(hv_clock->flags & PVCLOCK_TSC_STABLE_BIT))
		return false;

	/*
	 * Check whether the scale would overflow; if so, use the time
	 * reference counter instead:
	 *    tsc_to_system_mul * 2^(tsc_shift+32) / 100 >= 2^64
	 *    tsc_to_system_mul / 100 >= 2^(32-tsc_shift)
	 *    tsc_to_system_mul >= 100 * 2^(32-tsc_shift)
	 */
	max_mul = 100ull << (32 - hv_clock->tsc_shift);
	if (hv_clock->tsc_to_system_mul >= max_mul)
		return false;

	/*
	 * Otherwise compute the scale and offset according to the formulas
	 * derived above.
	 */
	tsc_ref->tsc_scale =
		mul_u64_u32_div(1ULL << (32 + hv_clock->tsc_shift),
				hv_clock->tsc_to_system_mul,
				100);

	tsc_ref->tsc_offset = hv_clock->system_time;
	do_div(tsc_ref->tsc_offset, 100);
	tsc_ref->tsc_offset -=
		mul_u64_u64_shr(hv_clock->tsc_timestamp, tsc_ref->tsc_scale, 64);
	return true;
}
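
/*
 * Worked example (illustrative, not from the original source): for a
 * hypothetical 1 GHz TSC, kvmclock could use tsc_shift = 0 and
 * tsc_to_system_mul = 2^32 (1 tick = 1 ns).  Then:
 *
 *	scale    = 2^32 * 2^32 / 100 = 2^64 / 100
 *	nsec/100 = ticks * (2^64 / 100) / 2^64 = ticks / 100
 *
 * i.e. 10^9 ticks (one second) advance the reference counter by 10^7
 * units of 100ns, as expected.
 */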

/*
 * Don't touch the TSC page values if the guest has opted for TSC emulation
 * after migration. KVM doesn't fully support reenlightenment notifications
 * or TSC access emulation, and Hyper-V is known to expect the values in the
 * TSC page to stay constant before TSC access emulation is disabled from the
 * guest side (HV_X64_MSR_TSC_EMULATION_STATUS). KVM userspace is expected to
 * preserve the TSC frequency and guest-visible TSC value across migration
 * (and to prevent migration when TSC scaling is unsupported).
 */
static inline bool tsc_page_update_unsafe(struct kvm_hv *hv)
{
	return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) &&
		hv->hv_tsc_emulation_control;
}

void kvm_hv_setup_tsc_page(struct kvm *kvm,
			   struct pvclock_vcpu_time_info *hv_clock)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	u32 tsc_seq;
	u64 gfn;

	BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence));
	BUILD_BUG_ON(offsetof(struct ms_hyperv_tsc_page, tsc_sequence) != 0);

	mutex_lock(&hv->hv_lock);

	if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN ||
	    hv->hv_tsc_page_status == HV_TSC_PAGE_SET ||
	    hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET)
		goto out_unlock;

	if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE))
		goto out_unlock;

	gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	/*
	 * Because the TSC parameters only vary when there is a
	 * change in the master clock, do not bother with caching.
	 */
	if (unlikely(kvm_read_guest(kvm, gfn_to_gpa(gfn),
				    &tsc_seq, sizeof(tsc_seq))))
		goto out_err;

	if (tsc_seq && tsc_page_update_unsafe(hv)) {
		if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
			goto out_err;

		hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
		goto out_unlock;
	}

	/*
	 * While we're computing and writing the parameters, force the
	 * guest to use the time reference count MSR.
	 */
	hv->tsc_ref.tsc_sequence = 0;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_err;

	if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref))
		goto out_err;

	/* Ensure sequence is zero before writing the rest of the struct.  */
	smp_wmb();
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref)))
		goto out_err;

	/*
	 * Now switch to the TSC page mechanism by writing the sequence.
	 */
	tsc_seq++;
	if (tsc_seq == 0xFFFFFFFF || tsc_seq == 0)
		tsc_seq = 1;

	/* Write the struct entirely before the non-zero sequence.  */
	smp_wmb();

	hv->tsc_ref.tsc_sequence = tsc_seq;
	if (kvm_write_guest(kvm, gfn_to_gpa(gfn),
			    &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence)))
		goto out_err;

	hv->hv_tsc_page_status = HV_TSC_PAGE_SET;
	goto out_unlock;

out_err:
	hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN;
out_unlock:
	mutex_unlock(&hv->hv_lock);
}
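
/*
 * For context, the sequence dance above supports the usual seqcount-style
 * read protocol on the guest side.  A minimal sketch (hypothetical guest
 * code, not part of KVM; barriers elided for brevity):
 *
 *	do {
 *		seq = READ_ONCE(tsc_page->tsc_sequence);
 *		if (!seq)
 *			break;	// invalid, fall back to TIME_REF_COUNT
 *		scale  = tsc_page->tsc_scale;
 *		offset = tsc_page->tsc_offset;
 *	} while (READ_ONCE(tsc_page->tsc_sequence) != seq);
 */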

void kvm_hv_request_tsc_page_update(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	mutex_lock(&hv->hv_lock);

	if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET &&
	    !tsc_page_update_unsafe(hv))
		hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;

	mutex_unlock(&hv->hv_lock);
}

static bool hv_check_msr_access(struct kvm_vcpu_hv *hv_vcpu, u32 msr)
{
	if (!hv_vcpu->enforce_cpuid)
		return true;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
	case HV_X64_MSR_HYPERCALL:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_HYPERCALL_AVAILABLE;
	case HV_X64_MSR_VP_RUNTIME:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_VP_RUNTIME_AVAILABLE;
	case HV_X64_MSR_TIME_REF_COUNT:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_TIME_REF_COUNT_AVAILABLE;
	case HV_X64_MSR_VP_INDEX:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_VP_INDEX_AVAILABLE;
	case HV_X64_MSR_RESET:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_RESET_AVAILABLE;
	case HV_X64_MSR_REFERENCE_TSC:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_REFERENCE_TSC_AVAILABLE;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_SYNIC_AVAILABLE;
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG:
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_SYNTIMER_AVAILABLE;
	case HV_X64_MSR_EOI:
	case HV_X64_MSR_ICR:
	case HV_X64_MSR_TPR:
	case HV_X64_MSR_VP_ASSIST_PAGE:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_MSR_APIC_ACCESS_AVAILABLE;
	case HV_X64_MSR_TSC_FREQUENCY:
	case HV_X64_MSR_APIC_FREQUENCY:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_FREQUENCY_MSRS;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_REENLIGHTENMENT;
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
		return hv_vcpu->cpuid_cache.features_eax &
			HV_ACCESS_TSC_INVARIANT;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
	case HV_X64_MSR_CRASH_CTL:
		return hv_vcpu->cpuid_cache.features_edx &
			HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return hv_vcpu->cpuid_cache.features_edx &
			HV_FEATURE_DEBUG_MSRS_AVAILABLE;
	default:
		break;
	}

	return false;
}

static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
			     bool host)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = to_kvm_hv(kvm);

	if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		hv->hv_guest_os_id = data;
		/* Setting the guest OS ID to zero disables the hypercall page. */
		if (!hv->hv_guest_os_id)
			hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE;
		break;
	case HV_X64_MSR_HYPERCALL: {
		u8 instructions[9];
		int i = 0;
		u64 addr;

		/* If the guest OS ID is not set, the hypercall page stays disabled. */
		if (!hv->hv_guest_os_id)
			break;
		if (!(data & HV_X64_MSR_HYPERCALL_ENABLE)) {
			hv->hv_hypercall = data;
			break;
		}

		/*
		 * If Xen and Hyper-V hypercalls are both enabled, disambiguate
		 * the same way Xen itself does, by setting bit 31 of EAX,
		 * which is RsvdZ in the 32-bit Hyper-V hypercall ABI and is
		 * simply clobbered on 64-bit.
		 */
		if (kvm_xen_hypercall_enabled(kvm)) {
			/* orl $0x80000000, %eax */
			instructions[i++] = 0x0d;
			instructions[i++] = 0x00;
			instructions[i++] = 0x00;
			instructions[i++] = 0x00;
			instructions[i++] = 0x80;
		}

		/* vmcall/vmmcall */
		static_call(kvm_x86_patch_hypercall)(vcpu, instructions + i);
		i += 3;

		/* ret */
		((unsigned char *)instructions)[i++] = 0xc3;
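
		/*
		 * Illustrative note: on VMX with Xen enabled, the page would
		 * start with "0d 00 00 00 80" (orl), then "0f 01 c1" (vmcall)
		 * and "c3" (ret), 9 bytes total; without Xen only the last
		 * two instructions are written.
		 */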

		addr = data & HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_MASK;
		if (kvm_vcpu_write_guest(vcpu, addr, instructions, i))
			return 1;
		hv->hv_hypercall = data;
		break;
	}
	case HV_X64_MSR_REFERENCE_TSC:
		hv->hv_tsc_page = data;
		if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) {
			if (!host)
				hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED;
			else
				hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED;
			kvm_make_request(KVM_REQ_MASTERCLOCK_UPDATE, vcpu);
		} else {
			hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET;
		}
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_set_crash_data(kvm,
						 msr - HV_X64_MSR_CRASH_P0,
						 data);
	case HV_X64_MSR_CRASH_CTL:
		if (host)
			return kvm_hv_msr_set_crash_ctl(kvm, data);

		if (data & HV_CRASH_CTL_CRASH_NOTIFY) {
			vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n",
				   hv->hv_crash_param[0],
				   hv->hv_crash_param[1],
				   hv->hv_crash_param[2],
				   hv->hv_crash_param[3],
				   hv->hv_crash_param[4]);

			/* Send notification about crash to user space */
			kvm_make_request(KVM_REQ_HV_CRASH, vcpu);
		}
		break;
	case HV_X64_MSR_RESET:
		if (data == 1) {
			vcpu_debug(vcpu, "hyper-v reset requested\n");
			kvm_make_request(KVM_REQ_HV_RESET, vcpu);
		}
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		hv->hv_reenlightenment_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		hv->hv_tsc_emulation_control = data;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		if (data && !host)
			return 1;

		hv->hv_tsc_emulation_status = data;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
		/* Only bit 0 is supported */
		if (data & ~HV_EXPOSE_INVARIANT_TSC)
			return 1;

		/* The feature can't be disabled from the guest */
		if (!host && hv->hv_invtsc_control && !data)
			return 1;

		hv->hv_invtsc_control = data;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return syndbg_set_msr(vcpu, msr, data, host);
	default:
		kvm_pr_unimpl_wrmsr(vcpu, msr, data);
		return 1;
	}
	return 0;
}

/* Calculate cpu time spent by current task in 100ns units */
static u64 current_task_runtime_100ns(void)
{
	u64 utime, stime;

	task_cputime_adjusted(current, &utime, &stime);

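	/* utime/stime are in ns; divide by 100 for Hyper-V's 100ns units. */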
	return div_u64(utime + stime, 100);
}

static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX: {
		struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
		u32 new_vp_index = (u32)data;

		if (!host || new_vp_index >= KVM_MAX_VCPUS)
			return 1;

		if (new_vp_index == hv_vcpu->vp_index)
			return 0;

		/*
		 * The VP index is initialized to vcpu_idx by
		 * kvm_hv_vcpu_postcreate, so they initially match.  Now that
		 * the VP index is changing, adjust num_mismatched_vp_indexes
		 * if it now matches or no longer matches vcpu_idx.
		 */
		if (hv_vcpu->vp_index == vcpu->vcpu_idx)
			atomic_inc(&hv->num_mismatched_vp_indexes);
		else if (new_vp_index == vcpu->vcpu_idx)
			atomic_dec(&hv->num_mismatched_vp_indexes);

		hv_vcpu->vp_index = new_vp_index;
		break;
	}
	case HV_X64_MSR_VP_ASSIST_PAGE: {
		u64 gfn;
		unsigned long addr;

		if (!(data & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE)) {
			hv_vcpu->hv_vapic = data;
			if (kvm_lapic_set_pv_eoi(vcpu, 0, 0))
				return 1;
			break;
		}
		gfn = data >> HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_SHIFT;
		addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
		if (kvm_is_error_hva(addr))
			return 1;

		/*
		 * Clear only the apic_assist portion of struct
		 * hv_vp_assist_page; the rest may hold valuable data that
		 * needs to be preserved, e.g. on migration.
		 */
		if (__put_user(0, (u32 __user *)addr))
			return 1;
		hv_vcpu->hv_vapic = data;
		kvm_vcpu_mark_page_dirty(vcpu, gfn);
		if (kvm_lapic_set_pv_eoi(vcpu,
					    gfn_to_gpa(gfn) | KVM_MSR_ENABLED,
					    sizeof(struct hv_vp_assist_page)))
			return 1;
		break;
	}
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_write(vcpu, APIC_EOI, data);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_ICR, data);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_write(vcpu, APIC_TASKPRI, data);
	case HV_X64_MSR_VP_RUNTIME:
		if (!host)
			return 1;
		hv_vcpu->runtime_offset = data - current_task_runtime_100ns();
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_set_msr(to_hv_synic(vcpu), msr, data, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
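		/*
		 * CONFIG and COUNT MSRs are interleaved per timer
		 * (CONFIG0, COUNT0, CONFIG1, COUNT1, ...), hence the
		 * divide-by-two to recover the timer index.
		 */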
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_set_config(to_hv_stimer(vcpu, timer_index),
					 data, host);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_set_count(to_hv_stimer(vcpu, timer_index),
					data, host);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
	case HV_X64_MSR_APIC_FREQUENCY:
		/* read-only, but still ignore it if host-initiated */
		if (!host)
			return 1;
		break;
	default:
		kvm_pr_unimpl_wrmsr(vcpu, msr, data);
		return 1;
	}

	return 0;
}

static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			     bool host)
{
	u64 data = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_hv *hv = to_kvm_hv(kvm);

	if (unlikely(!host && !hv_check_msr_access(to_hv_vcpu(vcpu), msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_GUEST_OS_ID:
		data = hv->hv_guest_os_id;
		break;
	case HV_X64_MSR_HYPERCALL:
		data = hv->hv_hypercall;
		break;
	case HV_X64_MSR_TIME_REF_COUNT:
		data = get_time_ref_counter(kvm);
		break;
	case HV_X64_MSR_REFERENCE_TSC:
		data = hv->hv_tsc_page;
		break;
	case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4:
		return kvm_hv_msr_get_crash_data(kvm,
						 msr - HV_X64_MSR_CRASH_P0,
						 pdata);
	case HV_X64_MSR_CRASH_CTL:
		return kvm_hv_msr_get_crash_ctl(kvm, pdata);
	case HV_X64_MSR_RESET:
		data = 0;
		break;
	case HV_X64_MSR_REENLIGHTENMENT_CONTROL:
		data = hv->hv_reenlightenment_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_CONTROL:
		data = hv->hv_tsc_emulation_control;
		break;
	case HV_X64_MSR_TSC_EMULATION_STATUS:
		data = hv->hv_tsc_emulation_status;
		break;
	case HV_X64_MSR_TSC_INVARIANT_CONTROL:
		data = hv->hv_invtsc_control;
		break;
	case HV_X64_MSR_SYNDBG_OPTIONS:
	case HV_X64_MSR_SYNDBG_CONTROL ... HV_X64_MSR_SYNDBG_PENDING_BUFFER:
		return syndbg_get_msr(vcpu, msr, pdata, host);
	default:
		kvm_pr_unimpl_rdmsr(vcpu, msr);
		return 1;
	}

	*pdata = data;
	return 0;
}

static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
			  bool host)
{
	u64 data = 0;
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

	if (unlikely(!host && !hv_check_msr_access(hv_vcpu, msr)))
		return 1;

	switch (msr) {
	case HV_X64_MSR_VP_INDEX:
		data = hv_vcpu->vp_index;
		break;
	case HV_X64_MSR_EOI:
		return kvm_hv_vapic_msr_read(vcpu, APIC_EOI, pdata);
	case HV_X64_MSR_ICR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_ICR, pdata);
	case HV_X64_MSR_TPR:
		return kvm_hv_vapic_msr_read(vcpu, APIC_TASKPRI, pdata);
	case HV_X64_MSR_VP_ASSIST_PAGE:
		data = hv_vcpu->hv_vapic;
		break;
	case HV_X64_MSR_VP_RUNTIME:
		data = current_task_runtime_100ns() + hv_vcpu->runtime_offset;
		break;
	case HV_X64_MSR_SCONTROL:
	case HV_X64_MSR_SVERSION:
	case HV_X64_MSR_SIEFP:
	case HV_X64_MSR_SIMP:
	case HV_X64_MSR_EOM:
	case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
		return synic_get_msr(to_hv_synic(vcpu), msr, pdata, host);
	case HV_X64_MSR_STIMER0_CONFIG:
	case HV_X64_MSR_STIMER1_CONFIG:
	case HV_X64_MSR_STIMER2_CONFIG:
	case HV_X64_MSR_STIMER3_CONFIG: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_CONFIG)/2;

		return stimer_get_config(to_hv_stimer(vcpu, timer_index),
					 pdata);
	}
	case HV_X64_MSR_STIMER0_COUNT:
	case HV_X64_MSR_STIMER1_COUNT:
	case HV_X64_MSR_STIMER2_COUNT:
	case HV_X64_MSR_STIMER3_COUNT: {
		int timer_index = (msr - HV_X64_MSR_STIMER0_COUNT)/2;

		return stimer_get_count(to_hv_stimer(vcpu, timer_index),
					pdata);
	}
	case HV_X64_MSR_TSC_FREQUENCY:
		data = (u64)vcpu->arch.virtual_tsc_khz * 1000;
		break;
	case HV_X64_MSR_APIC_FREQUENCY:
		data = APIC_BUS_FREQUENCY;
		break;
	default:
		kvm_pr_unimpl_rdmsr(vcpu, msr);
		return 1;
	}
	*pdata = data;
	return 0;
}

int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);

	if (!host && !vcpu->arch.hyperv_enabled)
		return 1;

	if (kvm_hv_vcpu_init(vcpu))
		return 1;

	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&hv->hv_lock);
		r = kvm_hv_set_msr_pw(vcpu, msr, data, host);
		mutex_unlock(&hv->hv_lock);
		return r;
	} else
		return kvm_hv_set_msr(vcpu, msr, data, host);
}

int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);

	if (!host && !vcpu->arch.hyperv_enabled)
		return 1;

	if (kvm_hv_vcpu_init(vcpu))
		return 1;

	if (kvm_hv_msr_partition_wide(msr)) {
		int r;

		mutex_lock(&hv->hv_lock);
		r = kvm_hv_get_msr_pw(vcpu, msr, pdata, host);
		mutex_unlock(&hv->hv_lock);
		return r;
	} else
		return kvm_hv_get_msr(vcpu, msr, pdata, host);
}

static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
				    u64 valid_bank_mask, unsigned long *vcpu_mask)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes);
	u64 vp_bitmap[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
	struct kvm_vcpu *vcpu;
	int bank, sbank = 0;
	unsigned long i;
	u64 *bitmap;

	BUILD_BUG_ON(sizeof(vp_bitmap) >
		     sizeof(*vcpu_mask) * BITS_TO_LONGS(KVM_MAX_VCPUS));

	/*
	 * If vp_index == vcpu_idx for all vCPUs, fill vcpu_mask directly, else
	 * fill a temporary buffer and manually test each vCPU's VP index.
	 */
	if (likely(!has_mismatch))
		bitmap = (u64 *)vcpu_mask;
	else
		bitmap = vp_bitmap;

	/*
	 * Each set of 64 VPs is packed into sparse_banks, with valid_bank_mask
	 * having a '1' for each bank that exists in sparse_banks.  Sets must
	 * be in ascending order, i.e. bank0..bankN.
	 */
	memset(bitmap, 0, sizeof(vp_bitmap));
	for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
			 KVM_HV_MAX_SPARSE_VCPU_SET_BITS)
		bitmap[bank] = sparse_banks[sbank++];

	if (likely(!has_mismatch))
		return;

	bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
			__set_bit(i, vcpu_mask);
	}
}

static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_banks[])
{
	int valid_bit_nr = vp_id / HV_VCPUS_PER_SPARSE_BANK;
	unsigned long sbank;

	if (!test_bit(valid_bit_nr, (unsigned long *)&valid_bank_mask))
		return false;

	/*
	 * The index into the sparse bank is the number of preceding bits in
	 * the valid mask.  Optimize for VMs with <64 vCPUs by skipping the
	 * fancy math if there can't possibly be preceding bits.
	 */
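	/*
	 * E.g. with vp_id = 130 and valid_bank_mask = 0b101 (banks 0 and 2
	 * present): valid_bit_nr = 2, one set bit precedes it, so sbank = 1
	 * and VP 130 maps to bit 2 of sparse_banks[1].
	 */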
	if (valid_bit_nr)
		sbank = hweight64(valid_bank_mask & GENMASK_ULL(valid_bit_nr - 1, 0));
	else
		sbank = 0;

	return test_bit(vp_id % HV_VCPUS_PER_SPARSE_BANK,
			(unsigned long *)&sparse_banks[sbank]);
}

struct kvm_hv_hcall {
	/* Hypercall input data */
	u64 param;
	u64 ingpa;
	u64 outgpa;
	u16 code;
	u16 var_cnt;
	u16 rep_cnt;
	u16 rep_idx;
	bool fast;
	bool rep;
	sse128_t xmm[HV_HYPERCALL_MAX_XMM_REGISTERS];

	/*
	 * Current read offset when KVM reads hypercall input data gradually,
	 * either an offset in bytes from 'ingpa' for regular hypercalls or
	 * the number of already consumed 'XMM halves' for 'fast' hypercalls.
	 */
	union {
		gpa_t data_offset;
		int consumed_xmm_halves;
	};
};

kvm_hv_get_hc_data(struct kvm * kvm,struct kvm_hv_hcall * hc,u16 orig_cnt,u16 cnt_cap,u64 * data)1830 static int kvm_hv_get_hc_data(struct kvm *kvm, struct kvm_hv_hcall *hc,
1831 			      u16 orig_cnt, u16 cnt_cap, u64 *data)
1832 {
1833 	/*
1834 	 * Preserve the original count when ignoring entries via a "cap", KVM
1835 	 * still needs to validate the guest input (though the non-XMM path
1836 	 * punts on the checks).
1837 	 */
1838 	u16 cnt = min(orig_cnt, cnt_cap);
1839 	int i, j;
1840 
1841 	if (hc->fast) {
1842 		/*
1843 		 * Each XMM holds two sparse banks, but do not count halves that
1844 		 * have already been consumed for hypercall parameters.
1845 		 */
1846 		if (orig_cnt > 2 * HV_HYPERCALL_MAX_XMM_REGISTERS - hc->consumed_xmm_halves)
1847 			return HV_STATUS_INVALID_HYPERCALL_INPUT;
1848 
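		/*
		 * E.g. with consumed_xmm_halves == 1, data[0] is read from
		 * the high half of xmm[0], data[1] from the low half of
		 * xmm[1], and so on.
		 */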
		for (i = 0; i < cnt; i++) {
			j = i + hc->consumed_xmm_halves;
			if (j % 2)
				data[i] = sse128_hi(hc->xmm[j / 2]);
			else
				data[i] = sse128_lo(hc->xmm[j / 2]);
		}
		return 0;
	}

	return kvm_read_guest(kvm, hc->ingpa + hc->data_offset, data,
			      cnt * sizeof(*data));
}

static u64 kvm_get_sparse_vp_set(struct kvm *kvm, struct kvm_hv_hcall *hc,
				 u64 *sparse_banks)
{
	if (hc->var_cnt > HV_MAX_SPARSE_VCPU_BANKS)
		return -EINVAL;

	/* Cap var_cnt to ignore banks that cannot contain a legal VP index. */
	return kvm_hv_get_hc_data(kvm, hc, hc->var_cnt, KVM_HV_MAX_SPARSE_VCPU_SET_BITS,
				  sparse_banks);
}

static int kvm_hv_get_tlb_flush_entries(struct kvm *kvm, struct kvm_hv_hcall *hc, u64 entries[])
{
	return kvm_hv_get_hc_data(kvm, hc, hc->rep_cnt, hc->rep_cnt, entries);
}

static void hv_tlb_flush_enqueue(struct kvm_vcpu *vcpu,
				 struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo,
				 u64 *entries, int count)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 flush_all_entry = KVM_HV_TLB_FLUSHALL_ENTRY;

	if (!hv_vcpu)
		return;

	spin_lock(&tlb_flush_fifo->write_lock);

	/*
	 * All entries should fit on the fifo, leaving one slot free for the
	 * 'flush all' entry in case another request comes in.  If there's not
	 * enough space, just put a 'flush all' entry there instead.
	 */
	if (count && entries && count < kfifo_avail(&tlb_flush_fifo->entries)) {
		WARN_ON(kfifo_in(&tlb_flush_fifo->entries, entries, count) != count);
		goto out_unlock;
	}

	/*
	 * Note: a full fifo always contains the 'flush all' entry, so there
	 * is no need to check the return value.
	 */
	kfifo_in(&tlb_flush_fifo->entries, &flush_all_entry, 1);

out_unlock:
	spin_unlock(&tlb_flush_fifo->write_lock);
}

int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 entries[KVM_HV_TLB_FLUSH_FIFO_SIZE];
	int i, j, count;
	gva_t gva;

	if (!tdp_enabled || !hv_vcpu)
		return -EINVAL;

	tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(vcpu, is_guest_mode(vcpu));

	count = kfifo_out(&tlb_flush_fifo->entries, entries, KVM_HV_TLB_FLUSH_FIFO_SIZE);

	for (i = 0; i < count; i++) {
		if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
			goto out_flush_all;

		/*
		 * Lower 12 bits of 'address' encode the number of additional
		 * pages to flush.
		 */
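		/*
		 * E.g. an entry of (0x7f0000000 | 3) flushes four pages:
		 * GVAs 0x7f0000000, 0x7f0001000, 0x7f0002000 and 0x7f0003000.
		 */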
		gva = entries[i] & PAGE_MASK;
		for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++)
			static_call(kvm_x86_flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);

		++vcpu->stat.tlb_flush;
	}
	return 0;

out_flush_all:
	kfifo_reset_out(&tlb_flush_fifo->entries);

	/* Fall back to full flush. */
	return -ENOSPC;
}
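
/*
 * HvCallFlushVirtualAddress{Space,List}{,Ex} handler.  Requested GVAs are
 * queued on each target vCPU's TLB flush fifo along with a
 * KVM_REQ_HV_TLB_FLUSH request; kvm_hv_vcpu_flush_tlb() above drains the
 * fifo and falls back to a full flush when a 'flush all' entry is found.
 */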

static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 *sparse_banks = hv_vcpu->sparse_banks;
	struct kvm *kvm = vcpu->kvm;
	struct hv_tlb_flush_ex flush_ex;
	struct hv_tlb_flush flush;
	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
	struct kvm_vcpu_hv_tlb_flush_fifo *tlb_flush_fifo;
	/*
	 * Normally, there can be no more than 'KVM_HV_TLB_FLUSH_FIFO_SIZE'
	 * entries on the TLB flush fifo. The last entry, however, needs to
	 * always be left free for the 'flush all' entry, which gets placed
	 * when there is not enough space to put all the requested entries.
	 */
	u64 __tlb_flush_entries[KVM_HV_TLB_FLUSH_FIFO_SIZE - 1];
	u64 *tlb_flush_entries;
	u64 valid_bank_mask;
	struct kvm_vcpu *v;
	unsigned long i;
	bool all_cpus;

	/*
	 * The Hyper-V TLFS doesn't allow more than HV_MAX_SPARSE_VCPU_BANKS
	 * sparse banks. Fail the build if KVM's max allowed number of
	 * vCPUs (>4096) exceeds this limit.
	 */
	BUILD_BUG_ON(KVM_HV_MAX_SPARSE_VCPU_SET_BITS > HV_MAX_SPARSE_VCPU_BANKS);

	/*
	 * A 'slow' hypercall's first parameter is the address in the guest's
	 * memory where hypercall parameters are placed. This is either a GPA
	 * or a nested GPA when KVM is handling the call from L2 ('direct' TLB
	 * flush).  Translate the address here so the memory can be uniformly
	 * read with kvm_read_guest().
	 */
	if (!hc->fast && is_guest_mode(vcpu)) {
		hc->ingpa = translate_nested_gpa(vcpu, hc->ingpa, 0, NULL);
		if (unlikely(hc->ingpa == INVALID_GPA))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

	if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST ||
	    hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE) {
		if (hc->fast) {
			flush.address_space = hc->ingpa;
			flush.flags = hc->outgpa;
			flush.processor_mask = sse128_lo(hc->xmm[0]);
			hc->consumed_xmm_halves = 1;
		} else {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa,
						    &flush, sizeof(flush))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			hc->data_offset = sizeof(flush);
		}

		trace_kvm_hv_flush_tlb(flush.processor_mask,
				       flush.address_space, flush.flags,
				       is_guest_mode(vcpu));

		valid_bank_mask = BIT_ULL(0);
		sparse_banks[0] = flush.processor_mask;

		/*
		 * Work around a possible WS2012 bug: it sends hypercalls with
		 * processor_mask = 0x0 and HV_FLUSH_ALL_PROCESSORS clear,
		 * while also expecting us to flush something and crashing if
		 * we don't.  Let's treat processor_mask == 0 the same as
		 * HV_FLUSH_ALL_PROCESSORS.
		 */
		all_cpus = (flush.flags & HV_FLUSH_ALL_PROCESSORS) ||
			flush.processor_mask == 0;
	} else {
		if (hc->fast) {
			flush_ex.address_space = hc->ingpa;
			flush_ex.flags = hc->outgpa;
			memcpy(&flush_ex.hv_vp_set,
			       &hc->xmm[0], sizeof(hc->xmm[0]));
			hc->consumed_xmm_halves = 2;
		} else {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &flush_ex,
						    sizeof(flush_ex))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			hc->data_offset = sizeof(flush_ex);
		}

		trace_kvm_hv_flush_tlb_ex(flush_ex.hv_vp_set.valid_bank_mask,
					  flush_ex.hv_vp_set.format,
					  flush_ex.address_space,
					  flush_ex.flags, is_guest_mode(vcpu));

		valid_bank_mask = flush_ex.hv_vp_set.valid_bank_mask;
		all_cpus = flush_ex.hv_vp_set.format !=
			HV_GENERIC_SET_SPARSE_4K;

		if (hc->var_cnt != hweight64(valid_bank_mask))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		if (!all_cpus) {
			if (!hc->var_cnt)
				goto ret_success;

			if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
		}

		/*
		 * The Hyper-V TLFS doesn't explicitly forbid non-empty sparse
		 * vCPU banks (and, thus, non-zero 'var_cnt') for the 'all
		 * vCPUs' case (HV_GENERIC_SET_ALL).  Always adjust data_offset
		 * and consumed_xmm_halves to make sure TLB flush entries are
		 * read from the correct offset.
		 */
		if (hc->fast)
			hc->consumed_xmm_halves += hc->var_cnt;
		else
			hc->data_offset += hc->var_cnt * sizeof(sparse_banks[0]);
	}

	if (hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE ||
	    hc->code == HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX ||
	    hc->rep_cnt > ARRAY_SIZE(__tlb_flush_entries)) {
		tlb_flush_entries = NULL;
	} else {
		if (kvm_hv_get_tlb_flush_entries(kvm, hc, __tlb_flush_entries))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
		tlb_flush_entries = __tlb_flush_entries;
	}

	/*
	 * vcpu->arch.cr3 may not be up-to-date for running vCPUs, so we can't
	 * analyze it here; flush the TLB regardless of the specified address
	 * space.
	 */
	if (all_cpus && !is_guest_mode(vcpu)) {
		kvm_for_each_vcpu(i, v, kvm) {
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
		}

		kvm_make_all_cpus_request(kvm, KVM_REQ_HV_TLB_FLUSH);
	} else if (!is_guest_mode(vcpu)) {
		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);

		for_each_set_bit(i, vcpu_mask, KVM_MAX_VCPUS) {
			v = kvm_get_vcpu(kvm, i);
			if (!v)
				continue;
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, false);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
		}

		kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
	} else {
		struct kvm_vcpu_hv *hv_v;

		bitmap_zero(vcpu_mask, KVM_MAX_VCPUS);

		kvm_for_each_vcpu(i, v, kvm) {
			hv_v = to_hv_vcpu(v);

			/*
			 * The following check races with nested vCPUs entering/exiting
			 * and/or migrating between L1's vCPUs, however the only case when
			 * KVM *must* flush the TLB is when the target L2 vCPU keeps
			 * running on the same L1 vCPU from the moment of the request until
			 * kvm_hv_flush_tlb() returns. The TLB is fully flushed in all other
			 * cases, e.g. when the target L2 vCPU migrates to a different L1
			 * vCPU or when the corresponding L1 vCPU temporarily switches to a
			 * different L2 vCPU while the request is being processed.
			 */
			if (!hv_v || hv_v->nested.vm_id != hv_vcpu->nested.vm_id)
				continue;

			if (!all_cpus &&
			    !hv_is_vp_in_sparse_set(hv_v->nested.vp_id, valid_bank_mask,
						    sparse_banks))
				continue;

			__set_bit(i, vcpu_mask);
			tlb_flush_fifo = kvm_hv_get_tlb_flush_fifo(v, true);
			hv_tlb_flush_enqueue(v, tlb_flush_fifo,
					     tlb_flush_entries, hc->rep_cnt);
		}

		kvm_make_vcpus_request_mask(kvm, KVM_REQ_HV_TLB_FLUSH, vcpu_mask);
	}

ret_success:
	/* We always do a full TLB flush, so set 'Reps completed' = 'Rep Count'. */
	return (u64)HV_STATUS_SUCCESS |
		((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
}

static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector,
				    u64 *sparse_banks, u64 valid_bank_mask)
{
	struct kvm_lapic_irq irq = {
		.delivery_mode = APIC_DM_FIXED,
		.vector = vector
	};
	struct kvm_vcpu *vcpu;
	unsigned long i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (sparse_banks &&
		    !hv_is_vp_in_sparse_set(kvm_hv_get_vpindex(vcpu),
					    valid_bank_mask, sparse_banks))
			continue;

		/* We fail only when APIC is disabled */
		kvm_apic_set_irq(vcpu, &irq, NULL);
	}
}
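
/*
 * HvCallSendSyntheticClusterIpi{,Ex} handler.  In the 'fast' variant of
 * the non-Ex call the vector arrives in the low 32 bits of the first
 * parameter register and the CPU mask in the second one, which is why
 * 'ingpa' and 'outgpa' are reinterpreted below.
 */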

static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	u64 *sparse_banks = hv_vcpu->sparse_banks;
	struct kvm *kvm = vcpu->kvm;
	struct hv_send_ipi_ex send_ipi_ex;
	struct hv_send_ipi send_ipi;
	u64 valid_bank_mask;
	u32 vector;
	bool all_cpus;

	if (hc->code == HVCALL_SEND_IPI) {
		if (!hc->fast) {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi,
						    sizeof(send_ipi))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = send_ipi.cpu_mask;
			vector = send_ipi.vector;
		} else {
			/* 'reserved' part of hv_send_ipi should be 0 */
			if (unlikely(hc->ingpa >> 32 != 0))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
			sparse_banks[0] = hc->outgpa;
			vector = (u32)hc->ingpa;
		}
		all_cpus = false;
		valid_bank_mask = BIT_ULL(0);

		trace_kvm_hv_send_ipi(vector, sparse_banks[0]);
	} else {
		if (!hc->fast) {
			if (unlikely(kvm_read_guest(kvm, hc->ingpa, &send_ipi_ex,
						    sizeof(send_ipi_ex))))
				return HV_STATUS_INVALID_HYPERCALL_INPUT;
		} else {
			send_ipi_ex.vector = (u32)hc->ingpa;
			send_ipi_ex.vp_set.format = hc->outgpa;
			send_ipi_ex.vp_set.valid_bank_mask = sse128_lo(hc->xmm[0]);
		}

		trace_kvm_hv_send_ipi_ex(send_ipi_ex.vector,
					 send_ipi_ex.vp_set.format,
					 send_ipi_ex.vp_set.valid_bank_mask);

		vector = send_ipi_ex.vector;
		valid_bank_mask = send_ipi_ex.vp_set.valid_bank_mask;
		all_cpus = send_ipi_ex.vp_set.format == HV_GENERIC_SET_ALL;

		if (hc->var_cnt != hweight64(valid_bank_mask))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;

		if (all_cpus)
			goto check_and_send_ipi;

		if (!hc->var_cnt)
			goto ret_success;

		if (!hc->fast)
			hc->data_offset = offsetof(struct hv_send_ipi_ex,
						   vp_set.bank_contents);
		else
			hc->consumed_xmm_halves = 1;

		if (kvm_get_sparse_vp_set(kvm, hc, sparse_banks))
			return HV_STATUS_INVALID_HYPERCALL_INPUT;
	}

check_and_send_ipi:
	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	if (all_cpus)
		kvm_hv_send_ipi_to_many(kvm, vector, NULL, 0);
	else
		kvm_hv_send_ipi_to_many(kvm, vector, sparse_banks, valid_bank_mask);

ret_success:
	return HV_STATUS_SUCCESS;
}

void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_cpuid_entry2 *entry;

	vcpu->arch.hyperv_enabled = hyperv_enabled;

	if (!hv_vcpu) {
		/*
		 * KVM should have already allocated kvm_vcpu_hv if Hyper-V is
		 * enabled in CPUID.
		 */
		WARN_ON_ONCE(vcpu->arch.hyperv_enabled);
		return;
	}

	memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache));

	if (!vcpu->arch.hyperv_enabled)
		return;

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
	if (entry) {
		hv_vcpu->cpuid_cache.features_eax = entry->eax;
		hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
		hv_vcpu->cpuid_cache.features_edx = entry->edx;
	}

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
	if (entry) {
		hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
		hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
	}

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
	if (entry)
		hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;

	entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_NESTED_FEATURES);
	if (entry) {
		hv_vcpu->cpuid_cache.nested_eax = entry->eax;
		hv_vcpu->cpuid_cache.nested_ebx = entry->ebx;
	}
}

int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
{
	struct kvm_vcpu_hv *hv_vcpu;
	int ret = 0;

	if (!to_hv_vcpu(vcpu)) {
		if (enforce) {
			ret = kvm_hv_vcpu_init(vcpu);
			if (ret)
				return ret;
		} else {
			return 0;
		}
	}

	hv_vcpu = to_hv_vcpu(vcpu);
	hv_vcpu->enforce_cpuid = enforce;

	return ret;
}
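
/*
 * Per the TLFS, the hypercall result value packs the status code in bits
 * 15:0 and 'reps completed' in bits 43:32.  It is returned in EDX:EAX
 * under the 32-bit calling convention and in RAX under the 64-bit one.
 */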

static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
{
	bool longmode;

	longmode = is_64_bit_hypercall(vcpu);
	if (longmode)
		kvm_rax_write(vcpu, result);
	else {
		kvm_rdx_write(vcpu, result >> 32);
		kvm_rax_write(vcpu, result & 0xffffffff);
	}
}

static int kvm_hv_hypercall_complete(struct kvm_vcpu *vcpu, u64 result)
{
	u32 tlb_lock_count = 0;
	int ret;

	if (hv_result_success(result) && is_guest_mode(vcpu) &&
	    kvm_hv_is_tlb_flush_hcall(vcpu) &&
	    kvm_read_guest(vcpu->kvm, to_hv_vcpu(vcpu)->nested.pa_page_gpa,
			   &tlb_lock_count, sizeof(tlb_lock_count)))
		result = HV_STATUS_INVALID_HYPERCALL_INPUT;

	trace_kvm_hv_hypercall_done(result);
	kvm_hv_hypercall_set_result(vcpu, result);
	++vcpu->stat.hypercalls;

	ret = kvm_skip_emulated_instruction(vcpu);

	if (tlb_lock_count)
		kvm_x86_ops.nested_ops->hv_inject_synthetic_vmexit_post_tlb_flush(vcpu);

	return ret;
}

static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
{
	return kvm_hv_hypercall_complete(vcpu, vcpu->run->hyperv.u.hcall.result);
}
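
/*
 * HvSignalEvent handler: the input is a connection ID in the low 24 bits
 * (KVM_HYPERV_CONN_ID_MASK), plus an optional 'flag number' in bits 47:32
 * which KVM requires to be zero.  The ID is looked up in the conn_to_evt
 * IDR and the associated eventfd, if any, is signaled.
 */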

static u16 kvm_hvcall_signal_event(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
{
	struct kvm_hv *hv = to_kvm_hv(vcpu->kvm);
	struct eventfd_ctx *eventfd;

	if (unlikely(!hc->fast)) {
		int ret;
		gpa_t gpa = hc->ingpa;

		if ((gpa & (__alignof__(hc->ingpa) - 1)) ||
		    offset_in_page(gpa) + sizeof(hc->ingpa) > PAGE_SIZE)
			return HV_STATUS_INVALID_ALIGNMENT;

		ret = kvm_vcpu_read_guest(vcpu, gpa,
					  &hc->ingpa, sizeof(hc->ingpa));
		if (ret < 0)
			return HV_STATUS_INVALID_ALIGNMENT;
	}

	/*
	 * Per spec, bits 32-47 contain the extra "flag number".  However, we
	 * have no use for it, and in all known use cases it is zero, so just
	 * report lookup failure if it isn't.
	 */
	if (hc->ingpa & 0xffff00000000ULL)
		return HV_STATUS_INVALID_PORT_ID;
	/* The remaining bits are reserved-zero. */
	if (hc->ingpa & ~KVM_HYPERV_CONN_ID_MASK)
		return HV_STATUS_INVALID_HYPERCALL_INPUT;

	/* The eventfd is protected by vcpu->kvm->srcu, but conn_to_evt isn't. */
	rcu_read_lock();
	eventfd = idr_find(&hv->conn_to_evt, hc->ingpa);
	rcu_read_unlock();
	if (!eventfd)
		return HV_STATUS_INVALID_PORT_ID;

	eventfd_signal(eventfd, 1);
	return HV_STATUS_SUCCESS;
}

static bool is_xmm_fast_hypercall(struct kvm_hv_hcall *hc)
{
	switch (hc->code) {
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
	case HVCALL_SEND_IPI_EX:
		return true;
	}

	return false;
}

static void kvm_hv_hypercall_read_xmm(struct kvm_hv_hcall *hc)
{
	int reg;

	kvm_fpu_get();
	for (reg = 0; reg < HV_HYPERCALL_MAX_XMM_REGISTERS; reg++)
		_kvm_read_sse_reg(reg, &hc->xmm[reg]);
	kvm_fpu_put();
}

static bool hv_check_hypercall_access(struct kvm_vcpu_hv *hv_vcpu, u16 code)
{
	if (!hv_vcpu->enforce_cpuid)
		return true;

	switch (code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		return hv_vcpu->cpuid_cache.enlightenments_ebx &&
			hv_vcpu->cpuid_cache.enlightenments_ebx != U32_MAX;
	case HVCALL_POST_MESSAGE:
		return hv_vcpu->cpuid_cache.features_ebx & HV_POST_MESSAGES;
	case HVCALL_SIGNAL_EVENT:
		return hv_vcpu->cpuid_cache.features_ebx & HV_SIGNAL_EVENTS;
	case HVCALL_POST_DEBUG_DATA:
	case HVCALL_RETRIEVE_DEBUG_DATA:
	case HVCALL_RESET_DEBUG_SESSION:
		/*
		 * Return 'true' when SynDBG is disabled so the resulting code
		 * will be HV_STATUS_INVALID_HYPERCALL_CODE.
		 */
		return !kvm_hv_is_syndbg_enabled(hv_vcpu->vcpu) ||
			hv_vcpu->cpuid_cache.features_ebx & HV_DEBUGGING;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
		      HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
			return false;
		fallthrough;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		return hv_vcpu->cpuid_cache.enlightenments_eax &
			HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
	case HVCALL_SEND_IPI_EX:
		if (!(hv_vcpu->cpuid_cache.enlightenments_eax &
		      HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED))
			return false;
		fallthrough;
	case HVCALL_SEND_IPI:
		return hv_vcpu->cpuid_cache.enlightenments_eax &
			HV_X64_CLUSTER_IPI_RECOMMENDED;
	case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX:
		return hv_vcpu->cpuid_cache.features_ebx &
			HV_ENABLE_EXTENDED_HYPERCALLS;
	default:
		break;
	}

	return true;
}
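
/*
 * Hypercall input value layout, per the TLFS: the call code lives in bits
 * 15:0, the 'fast' flag in bit 16, the variable header size in bits 26:17,
 * the rep count in bits 43:32 and the rep start index in bits 59:48; all
 * other bits are reserved and must be zero (HV_HYPERCALL_RSVD_MASK).
 */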

int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
	struct kvm_hv_hcall hc;
	u64 ret = HV_STATUS_SUCCESS;

	/*
	 * Per the Hyper-V spec, a hypercall generates #UD when issued from
	 * non-zero CPL or from real mode.
	 */
	if (static_call(kvm_x86_get_cpl)(vcpu) != 0 || !is_protmode(vcpu)) {
		kvm_queue_exception(vcpu, UD_VECTOR);
		return 1;
	}

#ifdef CONFIG_X86_64
	if (is_64_bit_hypercall(vcpu)) {
		hc.param = kvm_rcx_read(vcpu);
		hc.ingpa = kvm_rdx_read(vcpu);
		hc.outgpa = kvm_r8_read(vcpu);
	} else
#endif
	{
		hc.param = ((u64)kvm_rdx_read(vcpu) << 32) |
			    (kvm_rax_read(vcpu) & 0xffffffff);
		hc.ingpa = ((u64)kvm_rbx_read(vcpu) << 32) |
			    (kvm_rcx_read(vcpu) & 0xffffffff);
		hc.outgpa = ((u64)kvm_rdi_read(vcpu) << 32) |
			     (kvm_rsi_read(vcpu) & 0xffffffff);
	}

	hc.code = hc.param & 0xffff;
	hc.var_cnt = (hc.param & HV_HYPERCALL_VARHEAD_MASK) >> HV_HYPERCALL_VARHEAD_OFFSET;
	hc.fast = !!(hc.param & HV_HYPERCALL_FAST_BIT);
	hc.rep_cnt = (hc.param >> HV_HYPERCALL_REP_COMP_OFFSET) & 0xfff;
	hc.rep_idx = (hc.param >> HV_HYPERCALL_REP_START_OFFSET) & 0xfff;
	hc.rep = !!(hc.rep_cnt || hc.rep_idx);

	trace_kvm_hv_hypercall(hc.code, hc.fast, hc.var_cnt, hc.rep_cnt,
			       hc.rep_idx, hc.ingpa, hc.outgpa);

	if (unlikely(!hv_check_hypercall_access(hv_vcpu, hc.code))) {
		ret = HV_STATUS_ACCESS_DENIED;
		goto hypercall_complete;
	}

	if (unlikely(hc.param & HV_HYPERCALL_RSVD_MASK)) {
		ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
		goto hypercall_complete;
	}

	if (hc.fast && is_xmm_fast_hypercall(&hc)) {
		if (unlikely(hv_vcpu->enforce_cpuid &&
			     !(hv_vcpu->cpuid_cache.features_edx &
			       HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE))) {
			kvm_queue_exception(vcpu, UD_VECTOR);
			return 1;
		}

		kvm_hv_hypercall_read_xmm(&hc);
	}

	switch (hc.code) {
	case HVCALL_NOTIFY_LONG_SPIN_WAIT:
		if (unlikely(hc.rep || hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		kvm_vcpu_on_spin(vcpu, true);
		break;
	case HVCALL_SIGNAL_EVENT:
		if (unlikely(hc.rep || hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hvcall_signal_event(vcpu, &hc);
		if (ret != HV_STATUS_INVALID_PORT_ID)
			break;
		fallthrough;	/* maybe userspace knows this conn_id */
	case HVCALL_POST_MESSAGE:
		/* don't bother userspace if it has no way to handle it */
		if (unlikely(hc.rep || hc.var_cnt || !to_hv_synic(vcpu)->active)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		goto hypercall_userspace_exit;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST:
		if (unlikely(hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		fallthrough;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX:
		if (unlikely(!hc.rep_cnt || hc.rep_idx)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc);
		break;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE:
		if (unlikely(hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		fallthrough;
	case HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_flush_tlb(vcpu, &hc);
		break;
	case HVCALL_SEND_IPI:
		if (unlikely(hc.var_cnt)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		fallthrough;
	case HVCALL_SEND_IPI_EX:
		if (unlikely(hc.rep)) {
			ret = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		}
		ret = kvm_hv_send_ipi(vcpu, &hc);
		break;
	case HVCALL_POST_DEBUG_DATA:
	case HVCALL_RETRIEVE_DEBUG_DATA:
		if (unlikely(hc.fast)) {
			ret = HV_STATUS_INVALID_PARAMETER;
			break;
		}
		fallthrough;
	case HVCALL_RESET_DEBUG_SESSION: {
		struct kvm_hv_syndbg *syndbg = to_hv_syndbg(vcpu);

		if (!kvm_hv_is_syndbg_enabled(vcpu)) {
			ret = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		}

		if (!(syndbg->options & HV_X64_SYNDBG_OPTION_USE_HCALLS)) {
			ret = HV_STATUS_OPERATION_DENIED;
			break;
		}
		goto hypercall_userspace_exit;
	}
	case HV_EXT_CALL_QUERY_CAPABILITIES ... HV_EXT_CALL_MAX:
		if (unlikely(hc.fast)) {
			ret = HV_STATUS_INVALID_PARAMETER;
			break;
		}
		goto hypercall_userspace_exit;
	default:
		ret = HV_STATUS_INVALID_HYPERCALL_CODE;
		break;
	}

hypercall_complete:
	return kvm_hv_hypercall_complete(vcpu, ret);

hypercall_userspace_exit:
	vcpu->run->exit_reason = KVM_EXIT_HYPERV;
	vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
	vcpu->run->hyperv.u.hcall.input = hc.param;
	vcpu->run->hyperv.u.hcall.params[0] = hc.ingpa;
	vcpu->run->hyperv.u.hcall.params[1] = hc.outgpa;
	vcpu->arch.complete_userspace_io = kvm_hv_hypercall_complete_userspace;
	return 0;
}

void kvm_hv_init_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);

	mutex_init(&hv->hv_lock);
	idr_init(&hv->conn_to_evt);
}

void kvm_hv_destroy_vm(struct kvm *kvm)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int i;

	idr_for_each_entry(&hv->conn_to_evt, eventfd, i)
		eventfd_ctx_put(eventfd);
	idr_destroy(&hv->conn_to_evt);
}
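
/*
 * idr_alloc() over the single-entry range [conn_id, conn_id + 1) either
 * claims exactly that connection ID or fails with -ENOSPC when the ID is
 * already taken, which is reported to userspace as -EEXIST.
 */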

static int kvm_hv_eventfd_assign(struct kvm *kvm, u32 conn_id, int fd)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;
	int ret;

	eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&hv->hv_lock);
	ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
			GFP_KERNEL_ACCOUNT);
	mutex_unlock(&hv->hv_lock);

	if (ret >= 0)
		return 0;

	if (ret == -ENOSPC)
		ret = -EEXIST;
	eventfd_ctx_put(eventfd);
	return ret;
}

static int kvm_hv_eventfd_deassign(struct kvm *kvm, u32 conn_id)
{
	struct kvm_hv *hv = to_kvm_hv(kvm);
	struct eventfd_ctx *eventfd;

	mutex_lock(&hv->hv_lock);
	eventfd = idr_remove(&hv->conn_to_evt, conn_id);
	mutex_unlock(&hv->hv_lock);

	if (!eventfd)
		return -ENOENT;

	synchronize_srcu(&kvm->srcu);
	eventfd_ctx_put(eventfd);
	return 0;
}

int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
{
	if ((args->flags & ~KVM_HYPERV_EVENTFD_DEASSIGN) ||
	    (args->conn_id & ~KVM_HYPERV_CONN_ID_MASK))
		return -EINVAL;

	if (args->flags == KVM_HYPERV_EVENTFD_DEASSIGN)
		return kvm_hv_eventfd_deassign(kvm, args->conn_id);
	return kvm_hv_eventfd_assign(kvm, args->conn_id, args->fd);
}
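
/*
 * Fills in the set of Hyper-V CPUID leaves KVM supports, as reported to
 * userspace via the KVM_GET_SUPPORTED_HV_CPUID ioctl.  For the system-wide
 * variant (NULL vcpu), features that depend on an in-kernel LAPIC are
 * reported as available unconditionally.
 */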

int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
		     struct kvm_cpuid_entry2 __user *entries)
{
	uint16_t evmcs_ver = 0;
	struct kvm_cpuid_entry2 cpuid_entries[] = {
		{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_INTERFACE },
		{ .function = HYPERV_CPUID_VERSION },
		{ .function = HYPERV_CPUID_FEATURES },
		{ .function = HYPERV_CPUID_ENLIGHTMENT_INFO },
		{ .function = HYPERV_CPUID_IMPLEMENT_LIMITS },
		{ .function = HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS },
		{ .function = HYPERV_CPUID_SYNDBG_INTERFACE },
		{ .function = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES },
		{ .function = HYPERV_CPUID_NESTED_FEATURES },
	};
	int i, nent = ARRAY_SIZE(cpuid_entries);

	if (kvm_x86_ops.nested_ops->get_evmcs_version)
		evmcs_ver = kvm_x86_ops.nested_ops->get_evmcs_version(vcpu);

	if (cpuid->nent < nent)
		return -E2BIG;

	if (cpuid->nent > nent)
		cpuid->nent = nent;

	for (i = 0; i < nent; i++) {
		struct kvm_cpuid_entry2 *ent = &cpuid_entries[i];
		u32 signature[3];

		switch (ent->function) {
		case HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_INTERFACE:
			ent->eax = HYPERV_CPUID_SIGNATURE_EAX;
			break;

		case HYPERV_CPUID_VERSION:
			/*
			 * We implement some Hyper-V 2016 functions, so let's
			 * use this version.
			 */
			ent->eax = 0x00003839;
			ent->ebx = 0x000A0000;
			break;

		case HYPERV_CPUID_FEATURES:
			ent->eax |= HV_MSR_VP_RUNTIME_AVAILABLE;
			ent->eax |= HV_MSR_TIME_REF_COUNT_AVAILABLE;
			ent->eax |= HV_MSR_SYNIC_AVAILABLE;
			ent->eax |= HV_MSR_SYNTIMER_AVAILABLE;
			ent->eax |= HV_MSR_APIC_ACCESS_AVAILABLE;
			ent->eax |= HV_MSR_HYPERCALL_AVAILABLE;
			ent->eax |= HV_MSR_VP_INDEX_AVAILABLE;
			ent->eax |= HV_MSR_RESET_AVAILABLE;
			ent->eax |= HV_MSR_REFERENCE_TSC_AVAILABLE;
			ent->eax |= HV_ACCESS_FREQUENCY_MSRS;
			ent->eax |= HV_ACCESS_REENLIGHTENMENT;
			ent->eax |= HV_ACCESS_TSC_INVARIANT;

			ent->ebx |= HV_POST_MESSAGES;
			ent->ebx |= HV_SIGNAL_EVENTS;
			ent->ebx |= HV_ENABLE_EXTENDED_HYPERCALLS;

			ent->edx |= HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE;
			ent->edx |= HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
			ent->edx |= HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE;

			ent->ebx |= HV_DEBUGGING;
			ent->edx |= HV_X64_GUEST_DEBUGGING_AVAILABLE;
			ent->edx |= HV_FEATURE_DEBUG_MSRS_AVAILABLE;
			ent->edx |= HV_FEATURE_EXT_GVA_RANGES_FLUSH;

			/*
			 * Direct Synthetic timers only make sense with an
			 * in-kernel LAPIC.
			 */
			if (!vcpu || lapic_in_kernel(vcpu))
				ent->edx |= HV_STIMER_DIRECT_MODE_AVAILABLE;

			break;

		case HYPERV_CPUID_ENLIGHTMENT_INFO:
			ent->eax |= HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED;
			ent->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
			ent->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
			ent->eax |= HV_X64_CLUSTER_IPI_RECOMMENDED;
			ent->eax |= HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED;
			if (evmcs_ver)
				ent->eax |= HV_X64_ENLIGHTENED_VMCS_RECOMMENDED;
			if (!cpu_smt_possible())
				ent->eax |= HV_X64_NO_NONARCH_CORESHARING;

			ent->eax |= HV_DEPRECATING_AEOI_RECOMMENDED;
			/*
			 * Default number of spinlock retry attempts, matches
			 * Hyper-V 2016.
			 */
			ent->ebx = 0x00000FFF;

			break;

		case HYPERV_CPUID_IMPLEMENT_LIMITS:
			/* Maximum number of virtual processors */
			ent->eax = KVM_MAX_VCPUS;
			/*
			 * Maximum number of logical processors, matches
			 * Hyper-V 2016.
			 */
			ent->ebx = 64;

			break;

		case HYPERV_CPUID_NESTED_FEATURES:
			ent->eax = evmcs_ver;
			ent->eax |= HV_X64_NESTED_DIRECT_FLUSH;
			ent->eax |= HV_X64_NESTED_MSR_BITMAP;
			ent->ebx |= HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL;
			break;

		case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS:
			memcpy(signature, "Linux KVM Hv", 12);

			ent->eax = 0;
			ent->ebx = signature[0];
			ent->ecx = signature[1];
			ent->edx = signature[2];
			break;

		case HYPERV_CPUID_SYNDBG_INTERFACE:
			memcpy(signature, "VS#1\0\0\0\0\0\0\0\0", 12);
			ent->eax = signature[0];
			break;

		case HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES:
			ent->eax |= HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING;
			break;

		default:
			break;
		}
	}

	if (copy_to_user(entries, cpuid_entries,
			 nent * sizeof(struct kvm_cpuid_entry2)))
		return -EFAULT;

	return 0;
}