1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
4 * Copyright (C) 2022 Ventana Micro Systems Inc.
5 *
6 * Authors:
7 * Anup Patel <apatel@ventanamicro.com>
8 */
9
10 #include <linux/kvm_host.h>
11 #include <linux/math.h>
12 #include <linux/spinlock.h>
13 #include <linux/swab.h>
14 #include <kvm/iodev.h>
15 #include <asm/kvm_aia_aplic.h>
16
/* Per-source emulated APLIC state; all fields are protected by @lock. */
struct aplic_irq {
	raw_spinlock_t lock;
	u32 sourcecfg;	/* emulated sourcecfg (D forces 0, else SM bits only) */
	u32 state;	/* bookkeeping bits below */
#define APLIC_IRQ_STATE_PENDING		BIT(0)	/* interrupt-pending bit */
#define APLIC_IRQ_STATE_ENABLED		BIT(1)	/* interrupt-enabled bit */
#define APLIC_IRQ_STATE_ENPEND		(APLIC_IRQ_STATE_PENDING | \
					 APLIC_IRQ_STATE_ENABLED)
#define APLIC_IRQ_STATE_INPUT		BIT(8)	/* last seen raw input level */
	u32 target;	/* emulated target register (hart/guest index + EIID) */
};
28
/* Per-VM emulated APLIC state. */
struct aplic {
	struct kvm_io_device iodev;	/* MMIO device registered on KVM_MMIO_BUS */

	u32 domaincfg;	/* emulated domaincfg; only the IE bit is writable */
	u32 genmsi;	/* last value written to the genmsi register */

	u32 nr_irqs;	/* number of sources + 1 (source 0 is reserved) */
	u32 nr_words;	/* nr_irqs rounded up to 32-bit bitmap words */
	struct aplic_irq *irqs;	/* array of nr_irqs per-source states */
};
39
/* Return the sourcecfg register of source @irq; invalid sources read 0. */
static u32 aplic_read_sourcecfg(struct aplic *aplic, u32 irq)
{
	struct aplic_irq *irqd;
	unsigned long flags;
	u32 val = 0;

	if (irq && irq < aplic->nr_irqs) {
		irqd = &aplic->irqs[irq];
		raw_spin_lock_irqsave(&irqd->lock, flags);
		val = irqd->sourcecfg;
		raw_spin_unlock_irqrestore(&irqd->lock, flags);
	}

	return val;
}
56
/* Update the sourcecfg register of source @irq; invalid sources are ignored. */
static void aplic_write_sourcecfg(struct aplic *aplic, u32 irq, u32 val)
{
	struct aplic_irq *irqd;
	unsigned long flags;

	if (!irq || irq >= aplic->nr_irqs)
		return;
	irqd = &aplic->irqs[irq];

	/* A delegated source is forced inactive; otherwise keep only SM bits. */
	val = (val & APLIC_SOURCECFG_D) ? 0 : (val & APLIC_SOURCECFG_SM_MASK);

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->sourcecfg = val;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}
75
/* Return the target register of source @irq; invalid sources read 0. */
static u32 aplic_read_target(struct aplic *aplic, u32 irq)
{
	struct aplic_irq *irqd;
	unsigned long flags;
	u32 val = 0;

	if (irq && irq < aplic->nr_irqs) {
		irqd = &aplic->irqs[irq];
		raw_spin_lock_irqsave(&irqd->lock, flags);
		val = irqd->target;
		raw_spin_unlock_irqrestore(&irqd->lock, flags);
	}

	return val;
}
92
/* Update the target register of source @irq, keeping only the EIID,
 * hart-index, and guest-index fields; invalid sources are ignored.
 */
static void aplic_write_target(struct aplic *aplic, u32 irq, u32 val)
{
	const u32 mask = APLIC_TARGET_EIID_MASK |
		(APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) |
		(APLIC_TARGET_GUEST_IDX_MASK << APLIC_TARGET_GUEST_IDX_SHIFT);
	struct aplic_irq *irqd;
	unsigned long flags;

	if (!irq || irq >= aplic->nr_irqs)
		return;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->target = val & mask;
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}
110
aplic_read_pending(struct aplic * aplic,u32 irq)111 static bool aplic_read_pending(struct aplic *aplic, u32 irq)
112 {
113 bool ret;
114 unsigned long flags;
115 struct aplic_irq *irqd;
116
117 if (!irq || aplic->nr_irqs <= irq)
118 return false;
119 irqd = &aplic->irqs[irq];
120
121 raw_spin_lock_irqsave(&irqd->lock, flags);
122 ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false;
123 raw_spin_unlock_irqrestore(&irqd->lock, flags);
124
125 return ret;
126 }
127
/*
 * Emulate a setip/setipnum/clrip/clripnum write for a single source.
 *
 * Per the RISC-V AIA specification, the pending bit is not freely
 * writable for every source mode:
 *  - an inactive source never latches a pending bit;
 *  - for level-triggered sources, clearing pending is ignored, and
 *    setting pending is only honoured while the rectified input is low
 *    (raw input high for LEVEL_LOW, raw input low for LEVEL_HIGH);
 *    otherwise pending simply tracks the input.
 */
static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
{
	unsigned long flags, sm;
	struct aplic_irq *irqd;

	if (!irq || aplic->nr_irqs <= irq)
		return;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);

	sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK;
	if (sm == APLIC_SOURCECFG_SM_INACTIVE)
		goto skip_write_pending;

	if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH ||
	    sm == APLIC_SOURCECFG_SM_LEVEL_LOW) {
		if (!pending)
			goto skip_write_pending;
		if ((irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    sm == APLIC_SOURCECFG_SM_LEVEL_LOW)
			goto skip_write_pending;
		if (!(irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    sm == APLIC_SOURCECFG_SM_LEVEL_HIGH)
			goto skip_write_pending;
	}

	if (pending)
		irqd->state |= APLIC_IRQ_STATE_PENDING;
	else
		irqd->state &= ~APLIC_IRQ_STATE_PENDING;

skip_write_pending:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}
153
aplic_read_enabled(struct aplic * aplic,u32 irq)154 static bool aplic_read_enabled(struct aplic *aplic, u32 irq)
155 {
156 bool ret;
157 unsigned long flags;
158 struct aplic_irq *irqd;
159
160 if (!irq || aplic->nr_irqs <= irq)
161 return false;
162 irqd = &aplic->irqs[irq];
163
164 raw_spin_lock_irqsave(&irqd->lock, flags);
165 ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false;
166 raw_spin_unlock_irqrestore(&irqd->lock, flags);
167
168 return ret;
169 }
170
/* Set or clear the enable bit of source @irq; invalid sources are ignored. */
static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
{
	struct aplic_irq *irqd;
	unsigned long flags;

	if (!irq || irq >= aplic->nr_irqs)
		return;
	irqd = &aplic->irqs[irq];

	raw_spin_lock_irqsave(&irqd->lock, flags);
	irqd->state = enabled ? (irqd->state | APLIC_IRQ_STATE_ENABLED) :
				(irqd->state & ~APLIC_IRQ_STATE_ENABLED);
	raw_spin_unlock_irqrestore(&irqd->lock, flags);
}
187
aplic_read_input(struct aplic * aplic,u32 irq)188 static bool aplic_read_input(struct aplic *aplic, u32 irq)
189 {
190 bool ret;
191 unsigned long flags;
192 struct aplic_irq *irqd;
193
194 if (!irq || aplic->nr_irqs <= irq)
195 return false;
196 irqd = &aplic->irqs[irq];
197
198 raw_spin_lock_irqsave(&irqd->lock, flags);
199 ret = (irqd->state & APLIC_IRQ_STATE_INPUT) ? true : false;
200 raw_spin_unlock_irqrestore(&irqd->lock, flags);
201
202 return ret;
203 }
204
/* Decode a target register value and forward it as an MSI. */
static void aplic_inject_msi(struct kvm *kvm, u32 irq, u32 target)
{
	u32 hart, guest, eiid;

	hart = (target >> APLIC_TARGET_HART_IDX_SHIFT) &
	       APLIC_TARGET_HART_IDX_MASK;
	guest = (target >> APLIC_TARGET_GUEST_IDX_SHIFT) &
		APLIC_TARGET_GUEST_IDX_MASK;
	eiid = target & APLIC_TARGET_EIID_MASK;
	kvm_riscv_aia_inject_msi_by_id(kvm, hart, guest, eiid);
}
216
/*
 * Scan sources [first, last] and deliver an MSI for every source that is
 * both enabled and pending; delivery clears the pending bit. Does nothing
 * while domain interrupt delivery (domaincfg.IE) is disabled.
 */
static void aplic_update_irq_range(struct kvm *kvm, u32 first, u32 last)
{
	struct aplic *aplic = kvm->arch.aia.aplic_state;
	struct aplic_irq *irqd;
	unsigned long flags;
	u32 i, target;
	bool fire;

	if (!(aplic->domaincfg & APLIC_DOMAINCFG_IE))
		return;

	for (i = first; i <= last; i++) {
		if (!i || i >= aplic->nr_irqs)
			continue;
		irqd = &aplic->irqs[i];

		raw_spin_lock_irqsave(&irqd->lock, flags);
		target = irqd->target;
		fire = (irqd->state & APLIC_IRQ_STATE_ENPEND) ==
			APLIC_IRQ_STATE_ENPEND;
		if (fire)
			irqd->state &= ~APLIC_IRQ_STATE_PENDING;
		raw_spin_unlock_irqrestore(&irqd->lock, flags);

		/* Inject with the per-source lock dropped. */
		if (fire)
			aplic_inject_msi(kvm, i, target);
	}
}
249
/*
 * Update emulated APLIC state for wire interrupt @source driven to @level.
 *
 * Latches the pending bit according to the configured source mode (edge
 * detection compares against the previously recorded raw input), records
 * the new raw input level, and — if the source ends up both enabled and
 * pending while domaincfg.IE is set — clears pending and forwards the
 * interrupt as an MSI.
 *
 * Returns 0 on success, -ENODEV if no APLIC exists or @source is invalid.
 */
int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level)
{
	u32 target;
	bool inject = false, ie;
	unsigned long flags;
	struct aplic_irq *irqd;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!aplic || !source || (aplic->nr_irqs <= source))
		return -ENODEV;
	irqd = &aplic->irqs[source];
	ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false;

	raw_spin_lock_irqsave(&irqd->lock, flags);

	/* A delegated source is not handled by this domain. */
	if (irqd->sourcecfg & APLIC_SOURCECFG_D)
		goto skip_unlock;

	switch (irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK) {
	case APLIC_SOURCECFG_SM_EDGE_RISE:
		/* Rising edge: previous input low, new level high. */
		if (level && !(irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_EDGE_FALL:
		/* Falling edge: previous input high, new level low. */
		if (!level && (irqd->state & APLIC_IRQ_STATE_INPUT) &&
		    !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_LEVEL_HIGH:
		if (level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	case APLIC_SOURCECFG_SM_LEVEL_LOW:
		if (!level && !(irqd->state & APLIC_IRQ_STATE_PENDING))
			irqd->state |= APLIC_IRQ_STATE_PENDING;
		break;
	}

	/* Remember the raw input for the next edge comparison. */
	if (level)
		irqd->state |= APLIC_IRQ_STATE_INPUT;
	else
		irqd->state &= ~APLIC_IRQ_STATE_INPUT;

	target = irqd->target;
	if (ie && ((irqd->state & APLIC_IRQ_STATE_ENPEND) ==
		   APLIC_IRQ_STATE_ENPEND)) {
		irqd->state &= ~APLIC_IRQ_STATE_PENDING;
		inject = true;
	}

skip_unlock:
	raw_spin_unlock_irqrestore(&irqd->lock, flags);

	/* Deliver the MSI with the per-source lock dropped. */
	if (inject)
		aplic_inject_msi(kvm, source, target);

	return 0;
}
309
/* Gather the input bits of sources [word*32, word*32+31] into one word. */
static u32 aplic_read_input_word(struct aplic *aplic, u32 word)
{
	u32 bit, val = 0;

	for (bit = 0; bit < 32; bit++) {
		if (aplic_read_input(aplic, word * 32 + bit))
			val |= BIT(bit);
	}

	return val;
}
319
/* Gather the pending bits of sources [word*32, word*32+31] into one word. */
static u32 aplic_read_pending_word(struct aplic *aplic, u32 word)
{
	u32 bit, val = 0;

	for (bit = 0; bit < 32; bit++) {
		if (aplic_read_pending(aplic, word * 32 + bit))
			val |= BIT(bit);
	}

	return val;
}
329
/* Apply a setip/clrip word write: touch only the sources whose bit is set. */
static void aplic_write_pending_word(struct aplic *aplic, u32 word,
				     u32 val, bool pending)
{
	u32 bit;

	for (bit = 0; bit < 32; bit++) {
		if (val & BIT(bit))
			aplic_write_pending(aplic, word * 32 + bit, pending);
	}
}
340
/* Gather the enable bits of sources [word*32, word*32+31] into one word. */
static u32 aplic_read_enabled_word(struct aplic *aplic, u32 word)
{
	u32 bit, val = 0;

	for (bit = 0; bit < 32; bit++) {
		if (aplic_read_enabled(aplic, word * 32 + bit))
			val |= BIT(bit);
	}

	return val;
}
350
/* Apply a setie/clrie word write: touch only the sources whose bit is set. */
static void aplic_write_enabled_word(struct aplic *aplic, u32 word,
				     u32 val, bool enabled)
{
	u32 bit;

	for (bit = 0; bit < 32; bit++) {
		if (val & BIT(bit))
			aplic_write_enabled(aplic, word * 32 + bit, enabled);
	}
}
361
/*
 * Handle a 32-bit read of the emulated APLIC register at offset @off.
 *
 * Returns 0 with *val32 filled on success, -EOPNOTSUPP for a misaligned
 * offset, or -ENODEV for an offset outside the implemented registers.
 */
static int aplic_mmio_read_offset(struct kvm *kvm, gpa_t off, u32 *val32)
{
	u32 i;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	/* Only naturally-aligned 32-bit accesses are emulated. */
	if ((off & 0x3) != 0)
		return -EOPNOTSUPP;

	if (off == APLIC_DOMAINCFG) {
		/* Read-only bits and DM (MSI delivery mode) always read set. */
		*val32 = APLIC_DOMAINCFG_RDONLY |
			 aplic->domaincfg | APLIC_DOMAINCFG_DM;
	} else if ((off >= APLIC_SOURCECFG_BASE) &&
		   (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
		/* sourcecfg[] starts at source 1, hence the +1. */
		i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
		*val32 = aplic_read_sourcecfg(aplic, i);
	} else if ((off >= APLIC_SETIP_BASE) &&
		   (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIP_BASE) >> 2;
		*val32 = aplic_read_pending_word(aplic, i);
	} else if (off == APLIC_SETIPNUM) {
		/* Write-only in this implementation; reads return 0. */
		*val32 = 0;
	} else if ((off >= APLIC_CLRIP_BASE) &&
		   (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
		/* in_clrip[x] reads report the input values. */
		i = (off - APLIC_CLRIP_BASE) >> 2;
		*val32 = aplic_read_input_word(aplic, i);
	} else if (off == APLIC_CLRIPNUM) {
		*val32 = 0;
	} else if ((off >= APLIC_SETIE_BASE) &&
		   (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIE_BASE) >> 2;
		*val32 = aplic_read_enabled_word(aplic, i);
	} else if (off == APLIC_SETIENUM) {
		*val32 = 0;
	} else if ((off >= APLIC_CLRIE_BASE) &&
		   (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
		*val32 = 0;
	} else if (off == APLIC_CLRIENUM) {
		*val32 = 0;
	} else if (off == APLIC_SETIPNUM_LE) {
		*val32 = 0;
	} else if (off == APLIC_SETIPNUM_BE) {
		*val32 = 0;
	} else if (off == APLIC_GENMSI) {
		*val32 = aplic->genmsi;
	} else if ((off >= APLIC_TARGET_BASE) &&
		   (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
		/* target[] starts at source 1, hence the +1. */
		i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
		*val32 = aplic_read_target(aplic, i);
	} else
		return -ENODEV;

	return 0;
}
415
/* KVM MMIO read callback; only 32-bit accesses are supported. */
static int aplic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			   gpa_t addr, int len, void *val)
{
	struct kvm *kvm = vcpu->kvm;

	if (len != 4)
		return -EOPNOTSUPP;

	return aplic_mmio_read_offset(kvm, addr - kvm->arch.aia.aplic_addr,
				      val);
}
426
/*
 * Handle a 32-bit write of @val32 to the emulated APLIC register at @off.
 *
 * After a successful register write, all sources are rescanned so that
 * any newly enabled-and-pending interrupt is delivered as an MSI.
 *
 * Returns 0 on success, -EOPNOTSUPP for a misaligned offset, or -ENODEV
 * for an offset outside the implemented registers.
 */
static int aplic_mmio_write_offset(struct kvm *kvm, gpa_t off, u32 val32)
{
	u32 i;
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	/* Only naturally-aligned 32-bit accesses are emulated. */
	if ((off & 0x3) != 0)
		return -EOPNOTSUPP;

	if (off == APLIC_DOMAINCFG) {
		/* Only IE bit writeable */
		aplic->domaincfg = val32 & APLIC_DOMAINCFG_IE;
	} else if ((off >= APLIC_SOURCECFG_BASE) &&
		   (off < (APLIC_SOURCECFG_BASE + (aplic->nr_irqs - 1) * 4))) {
		/* sourcecfg[] starts at source 1, hence the +1. */
		i = ((off - APLIC_SOURCECFG_BASE) >> 2) + 1;
		aplic_write_sourcecfg(aplic, i, val32);
	} else if ((off >= APLIC_SETIP_BASE) &&
		   (off < (APLIC_SETIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIP_BASE) >> 2;
		aplic_write_pending_word(aplic, i, val32, true);
	} else if (off == APLIC_SETIPNUM) {
		aplic_write_pending(aplic, val32, true);
	} else if ((off >= APLIC_CLRIP_BASE) &&
		   (off < (APLIC_CLRIP_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIP_BASE) >> 2;
		aplic_write_pending_word(aplic, i, val32, false);
	} else if (off == APLIC_CLRIPNUM) {
		aplic_write_pending(aplic, val32, false);
	} else if ((off >= APLIC_SETIE_BASE) &&
		   (off < (APLIC_SETIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_SETIE_BASE) >> 2;
		aplic_write_enabled_word(aplic, i, val32, true);
	} else if (off == APLIC_SETIENUM) {
		aplic_write_enabled(aplic, val32, true);
	} else if ((off >= APLIC_CLRIE_BASE) &&
		   (off < (APLIC_CLRIE_BASE + aplic->nr_words * 4))) {
		i = (off - APLIC_CLRIE_BASE) >> 2;
		aplic_write_enabled_word(aplic, i, val32, false);
	} else if (off == APLIC_CLRIENUM) {
		aplic_write_enabled(aplic, val32, false);
	} else if (off == APLIC_SETIPNUM_LE) {
		aplic_write_pending(aplic, val32, true);
	} else if (off == APLIC_SETIPNUM_BE) {
		/* Big-endian alias: byte-swap the value before use. */
		aplic_write_pending(aplic, __swab32(val32), true);
	} else if (off == APLIC_GENMSI) {
		/* Guest-index bits are masked out of the stored genmsi. */
		aplic->genmsi = val32 & ~(APLIC_TARGET_GUEST_IDX_MASK <<
					  APLIC_TARGET_GUEST_IDX_SHIFT);
		kvm_riscv_aia_inject_msi_by_id(kvm,
				val32 >> APLIC_TARGET_HART_IDX_SHIFT, 0,
				val32 & APLIC_TARGET_EIID_MASK);
	} else if ((off >= APLIC_TARGET_BASE) &&
		   (off < (APLIC_TARGET_BASE + (aplic->nr_irqs - 1) * 4))) {
		/* target[] starts at source 1, hence the +1. */
		i = ((off - APLIC_TARGET_BASE) >> 2) + 1;
		aplic_write_target(aplic, i, val32);
	} else
		return -ENODEV;

	aplic_update_irq_range(kvm, 1, aplic->nr_irqs - 1);

	return 0;
}
487
/* KVM MMIO write callback; only 32-bit accesses are supported. */
static int aplic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev,
			    gpa_t addr, int len, const void *val)
{
	struct kvm *kvm = vcpu->kvm;

	if (len != 4)
		return -EOPNOTSUPP;

	return aplic_mmio_write_offset(kvm, addr - kvm->arch.aia.aplic_addr,
				       *((const u32 *)val));
}
498
/* MMIO callbacks for the emulated APLIC region. */
static struct kvm_io_device_ops aplic_iodoev_ops = {
	.read = aplic_mmio_read,
	.write = aplic_mmio_write,
};
503
/*
 * KVM device attribute setter: @type is the APLIC register offset and
 * @v the value to write. Returns -ENODEV when no APLIC exists.
 */
int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v)
{
	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	return aplic_mmio_write_offset(kvm, type, v);
}
517
/*
 * KVM device attribute getter: @type is the APLIC register offset and
 * *@v receives the value. Returns -ENODEV when no APLIC exists.
 */
int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v)
{
	if (!kvm->arch.aia.aplic_state)
		return -ENODEV;

	return aplic_mmio_read_offset(kvm, type, v);
}
531
kvm_riscv_aia_aplic_has_attr(struct kvm * kvm,unsigned long type)532 int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type)
533 {
534 int rc;
535 u32 val;
536
537 if (!kvm->arch.aia.aplic_state)
538 return -ENODEV;
539
540 rc = aplic_mmio_read_offset(kvm, type, &val);
541 if (rc)
542 return rc;
543
544 return 0;
545 }
546
/*
 * Allocate the in-kernel APLIC for @kvm, register its MMIO region on the
 * KVM_MMIO_BUS, and set up the default IRQ routing.
 *
 * Returns 0 on success (including the no-sources case where nothing is
 * created), or a negative errno; on failure everything allocated so far
 * is unwound in reverse order via the goto cleanup chain.
 */
int kvm_riscv_aia_aplic_init(struct kvm *kvm)
{
	int i, ret = 0;
	struct aplic *aplic;

	/* Do nothing if we have zero sources */
	if (!kvm->arch.aia.nr_sources)
		return 0;

	/* Allocate APLIC global state */
	aplic = kzalloc(sizeof(*aplic), GFP_KERNEL);
	if (!aplic)
		return -ENOMEM;
	kvm->arch.aia.aplic_state = aplic;

	/* Setup APLIC IRQs (source 0 is reserved, hence the +1) */
	aplic->nr_irqs = kvm->arch.aia.nr_sources + 1;
	aplic->nr_words = DIV_ROUND_UP(aplic->nr_irqs, 32);
	aplic->irqs = kcalloc(aplic->nr_irqs,
			      sizeof(*aplic->irqs), GFP_KERNEL);
	if (!aplic->irqs) {
		ret = -ENOMEM;
		goto fail_free_aplic;
	}
	for (i = 0; i < aplic->nr_irqs; i++)
		raw_spin_lock_init(&aplic->irqs[i].lock);

	/* Setup IO device */
	kvm_iodevice_init(&aplic->iodev, &aplic_iodoev_ops);
	mutex_lock(&kvm->slots_lock);
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS,
				      kvm->arch.aia.aplic_addr,
				      KVM_DEV_RISCV_APLIC_SIZE,
				      &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);
	if (ret)
		goto fail_free_aplic_irqs;

	/* Setup default IRQ routing */
	ret = kvm_riscv_setup_default_irq_routing(kvm, aplic->nr_irqs);
	if (ret)
		goto fail_unreg_iodev;

	return 0;

fail_unreg_iodev:
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);
fail_free_aplic_irqs:
	kfree(aplic->irqs);
fail_free_aplic:
	kvm->arch.aia.aplic_state = NULL;
	kfree(aplic);
	return ret;
}
603
/*
 * Unregister and free the in-kernel APLIC of @kvm.
 * Safe to call when no APLIC was ever initialized.
 */
void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm)
{
	struct aplic *aplic = kvm->arch.aia.aplic_state;

	if (!aplic)
		return;

	/* Tear down in reverse order of kvm_riscv_aia_aplic_init(). */
	mutex_lock(&kvm->slots_lock);
	kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &aplic->iodev);
	mutex_unlock(&kvm->slots_lock);

	kfree(aplic->irqs);

	kvm->arch.aia.aplic_state = NULL;
	kfree(aplic);
}
620