// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 *
 * Authors:
 *	Anup Patel <apatel@ventanamicro.com>
 */

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_aia_imsic.h>

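/*
 * Release the VCPU mutexes for indices vcpu_lock_idx down to 0, i.e.
 * in the reverse order that lock_all_vcpus() acquired them.
 */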
static void unlock_vcpus(struct kvm *kvm, int vcpu_lock_idx)
{
	struct kvm_vcpu *tmp_vcpu;

	for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) {
		tmp_vcpu = kvm_get_vcpu(kvm, vcpu_lock_idx);
		mutex_unlock(&tmp_vcpu->mutex);
	}
}

static void unlock_all_vcpus(struct kvm *kvm)
{
	unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1);
}

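/*
 * Try to take every VCPU mutex without sleeping; on contention, drop
 * the mutexes taken so far and report failure so the caller can back
 * off with -EBUSY.
 */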
static bool lock_all_vcpus(struct kvm *kvm)
{
	struct kvm_vcpu *tmp_vcpu;
	unsigned long c;

	kvm_for_each_vcpu(c, tmp_vcpu, kvm) {
		if (!mutex_trylock(&tmp_vcpu->mutex)) {
			unlock_vcpus(kvm, c - 1);
			return false;
		}
	}

	return true;
}

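/*
 * Create the in-kernel AIA irqchip. This is only allowed once per VM
 * and only before any VCPU has run, which is checked while holding
 * every VCPU mutex.
 */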
static int aia_create(struct kvm_device *dev, u32 type)
{
	int ret;
	unsigned long i;
	struct kvm *kvm = dev->kvm;
	struct kvm_vcpu *vcpu;

	if (irqchip_in_kernel(kvm))
		return -EEXIST;

	ret = -EBUSY;
	if (!lock_all_vcpus(kvm))
		return ret;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu->arch.ran_atleast_once)
			goto out_unlock;
	}
	ret = 0;

	kvm->arch.aia.in_kernel = true;

out_unlock:
	unlock_all_vcpus(kvm);
	return ret;
}

static void aia_destroy(struct kvm_device *dev)
{
	kfree(dev);
}

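/*
 * Read or write a single AIA configuration parameter. All writes are
 * rejected with -EBUSY once the irqchip has been initialized.
 */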
static int aia_config(struct kvm *kvm, unsigned long type,
		      u32 *nr, bool write)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	/* Writes can only be done before irqchip is initialized */
	if (write && kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	switch (type) {
	case KVM_DEV_RISCV_AIA_CONFIG_MODE:
		if (write) {
			switch (*nr) {
			case KVM_DEV_RISCV_AIA_MODE_EMUL:
				break;
			case KVM_DEV_RISCV_AIA_MODE_HWACCEL:
			case KVM_DEV_RISCV_AIA_MODE_AUTO:
				/*
				 * HW acceleration and auto modes are only
				 * supported on hosts with a non-zero number
				 * of guest external interrupts (i.e. a
				 * non-zero number of VS-level IMSIC pages).
				 */
				if (!kvm_riscv_aia_nr_hgei)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
			aia->mode = *nr;
		} else
			*nr = aia->mode;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_IDS:
		if (write) {
			if ((*nr < KVM_DEV_RISCV_AIA_IDS_MIN) ||
			    (*nr >= KVM_DEV_RISCV_AIA_IDS_MAX) ||
			    ((*nr & KVM_DEV_RISCV_AIA_IDS_MIN) !=
			     KVM_DEV_RISCV_AIA_IDS_MIN) ||
			    (kvm_riscv_aia_max_ids <= *nr))
				return -EINVAL;
			aia->nr_ids = *nr;
		} else
			*nr = aia->nr_ids;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
		if (write) {
			if ((*nr >= KVM_DEV_RISCV_AIA_SRCS_MAX) ||
			    (*nr >= kvm_riscv_aia_max_ids))
				return -EINVAL;
			aia->nr_sources = *nr;
		} else
			*nr = aia->nr_sources;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_GROUP_BITS_MAX)
				return -EINVAL;
			aia->nr_group_bits = *nr;
		} else
			*nr = aia->nr_group_bits;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
		if (write) {
			if ((*nr < KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN) ||
			    (*nr >= KVM_DEV_RISCV_AIA_GROUP_SHIFT_MAX))
				return -EINVAL;
			aia->nr_group_shift = *nr;
		} else
			*nr = aia->nr_group_shift;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_HART_BITS_MAX)
				return -EINVAL;
			aia->nr_hart_bits = *nr;
		} else
			*nr = aia->nr_hart_bits;
		break;
	case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
		if (write) {
			if (*nr >= KVM_DEV_RISCV_AIA_GUEST_BITS_MAX)
				return -EINVAL;
			aia->nr_guest_bits = *nr;
		} else
			*nr = aia->nr_guest_bits;
		break;
	default:
		return -ENXIO;
	}

	return 0;
}

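/*
 * Get or set the guest physical address of the APLIC. The address must
 * be aligned to KVM_DEV_RISCV_APLIC_ALIGN and can only be set before
 * the irqchip is initialized.
 */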
static int aia_aplic_addr(struct kvm *kvm, u64 *addr, bool write)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	if (write) {
		/* Writes can only be done before irqchip is initialized */
		if (kvm_riscv_aia_initialized(kvm))
			return -EBUSY;

		if (*addr & (KVM_DEV_RISCV_APLIC_ALIGN - 1))
			return -EINVAL;

		aia->aplic_addr = *addr;
	} else
		*addr = aia->aplic_addr;

	return 0;
}

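/*
 * Get or set the guest physical address of the IMSIC belonging to the
 * VCPU with the given index, holding that VCPU's mutex across the
 * access.
 */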
static int aia_imsic_addr(struct kvm *kvm, u64 *addr,
			  unsigned long vcpu_idx, bool write)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_aia *vcpu_aia;

	vcpu = kvm_get_vcpu(kvm, vcpu_idx);
	if (!vcpu)
		return -EINVAL;
	vcpu_aia = &vcpu->arch.aia_context;

	if (write) {
		/* Writes can only be done before irqchip is initialized */
		if (kvm_riscv_aia_initialized(kvm))
			return -EBUSY;

		if (*addr & (KVM_DEV_RISCV_IMSIC_ALIGN - 1))
			return -EINVAL;
	}

	mutex_lock(&vcpu->mutex);
	if (write)
		vcpu_aia->imsic_addr = *addr;
	else
		*addr = vcpu_aia->imsic_addr;
	mutex_unlock(&vcpu->mutex);

	return 0;
}

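/*
 * Compute the common base PPN of an IMSIC guest physical address by
 * masking out the group, hart, and guest index fields along with the
 * IMSIC MMIO page offset. Every IMSIC of a VM must yield the same
 * base PPN.
 */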
static gpa_t aia_imsic_ppn(struct kvm_aia *aia, gpa_t addr)
{
	u32 h, l;
	gpa_t mask = 0;

	h = aia->nr_hart_bits + aia->nr_guest_bits +
	    IMSIC_MMIO_PAGE_SHIFT - 1;
	mask = GENMASK_ULL(h, 0);

	if (aia->nr_group_bits) {
		h = aia->nr_group_bits + aia->nr_group_shift - 1;
		l = aia->nr_group_shift;
		mask |= GENMASK_ULL(h, l);
	}

	return (addr & ~mask) >> IMSIC_MMIO_PAGE_SHIFT;
}

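/*
 * Extract the hart index encoded in an IMSIC guest physical address:
 * the group field (when present) supplies the upper bits and the
 * per-group hart field the lower bits.
 */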
static u32 aia_imsic_hart_index(struct kvm_aia *aia, gpa_t addr)
{
	u32 hart, group = 0;

	hart = (addr >> (aia->nr_guest_bits + IMSIC_MMIO_PAGE_SHIFT)) &
		GENMASK_ULL(aia->nr_hart_bits - 1, 0);
	if (aia->nr_group_bits)
		group = (addr >> aia->nr_group_shift) &
			GENMASK_ULL(aia->nr_group_bits - 1, 0);

	return (group << aia->nr_hart_bits) | hart;
}

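/*
 * Finalize the in-kernel AIA: validate the configuration, initialize
 * the APLIC and every VCPU's IMSIC, and then mark the irqchip as
 * initialized. On failure, all IMSICs initialized so far and the
 * APLIC are cleaned up.
 */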
static int aia_init(struct kvm *kvm)
{
	int ret, i;
	unsigned long idx;
	struct kvm_vcpu *vcpu;
	struct kvm_vcpu_aia *vaia;
	struct kvm_aia *aia = &kvm->arch.aia;
	gpa_t base_ppn = KVM_RISCV_AIA_UNDEF_ADDR;

	/* Irqchip can be initialized only once */
	if (kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* We might be in the middle of creating a VCPU */
	if (kvm->created_vcpus != atomic_read(&kvm->online_vcpus))
		return -EBUSY;

	/* Number of sources should be less than or equal to number of IDs */
	if (aia->nr_ids < aia->nr_sources)
		return -EINVAL;

	/* APLIC base is required for non-zero number of sources */
	if (aia->nr_sources && aia->aplic_addr == KVM_RISCV_AIA_UNDEF_ADDR)
		return -EINVAL;

	/* Initialize APLIC */
	ret = kvm_riscv_aia_aplic_init(kvm);
	if (ret)
		return ret;

	/* Iterate over each VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		vaia = &vcpu->arch.aia_context;

		/* IMSIC base is required */
		if (vaia->imsic_addr == KVM_RISCV_AIA_UNDEF_ADDR) {
			ret = -EINVAL;
			goto fail_cleanup_imsics;
		}

		/* All IMSICs should have matching base PPN */
		if (base_ppn == KVM_RISCV_AIA_UNDEF_ADDR)
			base_ppn = aia_imsic_ppn(aia, vaia->imsic_addr);
		if (base_ppn != aia_imsic_ppn(aia, vaia->imsic_addr)) {
			ret = -EINVAL;
			goto fail_cleanup_imsics;
		}

		/* Update HART index of the IMSIC based on IMSIC base */
		vaia->hart_index = aia_imsic_hart_index(aia,
							vaia->imsic_addr);

		/* Initialize IMSIC for this VCPU */
		ret = kvm_riscv_vcpu_aia_imsic_init(vcpu);
		if (ret)
			goto fail_cleanup_imsics;
	}

	/* Set the initialized flag */
	kvm->arch.aia.initialized = true;

	return 0;

fail_cleanup_imsics:
	for (i = idx - 1; i >= 0; i--) {
		vcpu = kvm_get_vcpu(kvm, i);
		if (!vcpu)
			continue;
		kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
	}
	kvm_riscv_aia_aplic_cleanup(kvm);
	return ret;
}

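/*
 * Handle KVM_SET_DEVICE_ATTR for the AIA device: copy the attribute
 * payload from user-space and dispatch on the attribute group under
 * the VM lock.
 */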
static int aia_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	u32 nr;
	u64 addr;
	int nr_vcpus, r = -ENXIO;
	unsigned long v, type = (unsigned long)attr->attr;
	void __user *uaddr = (void __user *)(long)attr->addr;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = aia_config(dev->kvm, type, &nr, true);
		mutex_unlock(&dev->kvm->lock);

		break;

	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		mutex_lock(&dev->kvm->lock);
		if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			r = aia_aplic_addr(dev->kvm, &addr, true);
		else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			r = aia_imsic_addr(dev->kvm, &addr,
			    type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), true);
		mutex_unlock(&dev->kvm->lock);

		break;

	case KVM_DEV_RISCV_AIA_GRP_CTRL:
		switch (type) {
		case KVM_DEV_RISCV_AIA_CTRL_INIT:
			mutex_lock(&dev->kvm->lock);
			r = aia_init(dev->kvm);
			mutex_unlock(&dev->kvm->lock);
			break;
		}

		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_aplic_set_attr(dev->kvm, type, nr);
		mutex_unlock(&dev->kvm->lock);

		break;
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		if (copy_from_user(&v, uaddr, sizeof(v)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, true, &v);
		mutex_unlock(&dev->kvm->lock);

		break;
	}

	return r;
}

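/*
 * Handle KVM_GET_DEVICE_ATTR for the AIA device: read the requested
 * value under the VM lock and copy it back to user-space.
 */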
static int aia_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	u32 nr;
	u64 addr;
	int nr_vcpus, r = -ENXIO;
	void __user *uaddr = (void __user *)(long)attr->addr;
	unsigned long v, type = (unsigned long)attr->attr;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = aia_config(dev->kvm, type, &nr, false);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &nr, sizeof(nr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		if (copy_from_user(&addr, uaddr, sizeof(addr)))
			return -EFAULT;

		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		mutex_lock(&dev->kvm->lock);
		if (type == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			r = aia_aplic_addr(dev->kvm, &addr, false);
		else if (type < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			r = aia_imsic_addr(dev->kvm, &addr,
			    type - KVM_DEV_RISCV_AIA_ADDR_IMSIC(0), false);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &addr, sizeof(addr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		if (copy_from_user(&nr, uaddr, sizeof(nr)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_aplic_get_attr(dev->kvm, type, &nr);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &nr, sizeof(nr)))
			return -EFAULT;

		break;
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		if (copy_from_user(&v, uaddr, sizeof(v)))
			return -EFAULT;

		mutex_lock(&dev->kvm->lock);
		r = kvm_riscv_aia_imsic_rw_attr(dev->kvm, type, false, &v);
		mutex_unlock(&dev->kvm->lock);
		if (r)
			return r;

		if (copy_to_user(uaddr, &v, sizeof(v)))
			return -EFAULT;

		break;
	}

	return r;
}

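/*
 * Handle KVM_HAS_DEVICE_ATTR for the AIA device so user-space can
 * probe which attributes are supported.
 */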
static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int nr_vcpus;

	switch (attr->group) {
	case KVM_DEV_RISCV_AIA_GRP_CONFIG:
		switch (attr->attr) {
		case KVM_DEV_RISCV_AIA_CONFIG_MODE:
		case KVM_DEV_RISCV_AIA_CONFIG_IDS:
		case KVM_DEV_RISCV_AIA_CONFIG_SRCS:
		case KVM_DEV_RISCV_AIA_CONFIG_GROUP_BITS:
		case KVM_DEV_RISCV_AIA_CONFIG_GROUP_SHIFT:
		case KVM_DEV_RISCV_AIA_CONFIG_HART_BITS:
		case KVM_DEV_RISCV_AIA_CONFIG_GUEST_BITS:
			return 0;
		}
		break;
	case KVM_DEV_RISCV_AIA_GRP_ADDR:
		nr_vcpus = atomic_read(&dev->kvm->online_vcpus);
		if (attr->attr == KVM_DEV_RISCV_AIA_ADDR_APLIC)
			return 0;
		else if (attr->attr < KVM_DEV_RISCV_AIA_ADDR_IMSIC(nr_vcpus))
			return 0;
		break;
	case KVM_DEV_RISCV_AIA_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_RISCV_AIA_CTRL_INIT:
			return 0;
		}
		break;
	case KVM_DEV_RISCV_AIA_GRP_APLIC:
		return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
	case KVM_DEV_RISCV_AIA_GRP_IMSIC:
		return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
	}

	return -ENXIO;
}

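/* Device ops for the in-kernel AIA irqchip device */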
struct kvm_device_ops kvm_riscv_aia_device_ops = {
	.name = "kvm-riscv-aia",
	.create = aia_create,
	.destroy = aia_destroy,
	.set_attr = aia_set_attr,
	.get_attr = aia_get_attr,
	.has_attr = aia_has_attr,
};

int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return 1;

	/* Update the IMSIC HW state before entering guest mode */
	return kvm_riscv_vcpu_aia_imsic_update(vcpu);
}

void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
	struct kvm_vcpu_aia_csr *reset_csr =
				&vcpu->arch.aia_context.guest_reset_csr;

	if (!kvm_riscv_aia_available())
		return;
	memcpy(csr, reset_csr, sizeof(*csr));

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return;

	/* Reset the IMSIC context */
	kvm_riscv_vcpu_aia_imsic_reset(vcpu);
}

int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_aia *vaia = &vcpu->arch.aia_context;

	if (!kvm_riscv_aia_available())
		return 0;

	/*
	 * We don't do any memory allocations here because these will be
	 * done after the AIA device is initialized by user-space.
	 *
	 * Refer to the aia_init() implementation for more details.
	 */

	/* Initialize default values in AIA vcpu context */
	vaia->imsic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
	vaia->hart_index = vcpu->vcpu_idx;

	return 0;
}

void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(vcpu->kvm))
		return;

	/* Cleanup IMSIC context */
	kvm_riscv_vcpu_aia_imsic_cleanup(vcpu);
}

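/*
 * Inject an MSI with identity iid into the guest interrupt file
 * guest_index of the VCPU whose hart index matches hart_index.
 */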
int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
				   u32 guest_index, u32 iid)
{
	unsigned long idx;
	struct kvm_vcpu *vcpu;

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Inject MSI to matching VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		if (vcpu->arch.aia_context.hart_index == hart_index)
			return kvm_riscv_vcpu_aia_imsic_inject(vcpu,
							       guest_index,
							       0, iid);
	}

	return 0;
}

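/*
 * Inject an MSI described by an (address, data) pair: strip the guest
 * index from the target PPN, find the VCPU whose IMSIC page matches
 * the remaining PPN, and forward the write to that IMSIC.
 */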
int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
	gpa_t tppn, ippn;
	unsigned long idx;
	struct kvm_vcpu *vcpu;
	u32 g, toff, iid = msi->data;
	struct kvm_aia *aia = &kvm->arch.aia;
	gpa_t target = (((gpa_t)msi->address_hi) << 32) | msi->address_lo;

	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Convert target address to target PPN */
	tppn = target >> IMSIC_MMIO_PAGE_SHIFT;

	/* Extract and clear Guest ID from target PPN */
	g = tppn & (BIT(aia->nr_guest_bits) - 1);
	tppn &= ~((gpa_t)(BIT(aia->nr_guest_bits) - 1));

	/* Inject MSI to matching VCPU */
	kvm_for_each_vcpu(idx, vcpu, kvm) {
		ippn = vcpu->arch.aia_context.imsic_addr >>
					IMSIC_MMIO_PAGE_SHIFT;
		if (ippn == tppn) {
			toff = target & (IMSIC_MMIO_PAGE_SZ - 1);
			return kvm_riscv_vcpu_aia_imsic_inject(vcpu, g,
							       toff, iid);
		}
	}

	return 0;
}

int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return -EBUSY;

	/* Inject interrupt level change in APLIC */
	return kvm_riscv_aia_aplic_inject(kvm, irq, level);
}

void kvm_riscv_aia_init_vm(struct kvm *kvm)
{
	struct kvm_aia *aia = &kvm->arch.aia;

	if (!kvm_riscv_aia_available())
		return;

	/*
	 * We don't do any memory allocations here because these will be
	 * done after the AIA device is initialized by user-space.
	 *
	 * Refer to the aia_init() implementation for more details.
	 */

	/* Initialize default values in AIA global context */
	aia->mode = (kvm_riscv_aia_nr_hgei) ?
		KVM_DEV_RISCV_AIA_MODE_AUTO : KVM_DEV_RISCV_AIA_MODE_EMUL;
	aia->nr_ids = kvm_riscv_aia_max_ids - 1;
	aia->nr_sources = 0;
	aia->nr_group_bits = 0;
	aia->nr_group_shift = KVM_DEV_RISCV_AIA_GROUP_SHIFT_MIN;
	aia->nr_hart_bits = 0;
	aia->nr_guest_bits = 0;
	aia->aplic_addr = KVM_RISCV_AIA_UNDEF_ADDR;
}

void kvm_riscv_aia_destroy_vm(struct kvm *kvm)
{
	/* Proceed only if AIA was initialized successfully */
	if (!kvm_riscv_aia_initialized(kvm))
		return;

	/* Cleanup APLIC context */
	kvm_riscv_aia_aplic_cleanup(kvm);
}