// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Anup Patel <anup.patel@wdc.com>
 */

#include <linux/bitops.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/kvm_host.h>
#include <asm/csr.h>
#include <asm/cacheflush.h>
#include <asm/hwcap.h>

const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
	KVM_GENERIC_VCPU_STATS(),
	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
	STATS_DESC_COUNTER(VCPU, csr_exit_user),
	STATS_DESC_COUNTER(VCPU, csr_exit_kernel),
	STATS_DESC_COUNTER(VCPU, signal_exits),
	STATS_DESC_COUNTER(VCPU, exits)
};

const struct kvm_stats_header kvm_vcpu_stats_header = {
	.name_size = KVM_STATS_NAME_SIZE,
	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
	.id_offset = sizeof(struct kvm_stats_header),
	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
		       sizeof(kvm_vcpu_stats_desc),
};

#define KVM_RISCV_BASE_ISA_MASK		GENMASK(25, 0)

#define KVM_ISA_EXT_ARR(ext)		[KVM_RISCV_ISA_EXT_##ext] = RISCV_ISA_EXT_##ext

/* Mapping between KVM ISA Extension ID & Host ISA extension ID */
static const unsigned long kvm_isa_ext_arr[] = {
	[KVM_RISCV_ISA_EXT_A] = RISCV_ISA_EXT_a,
	[KVM_RISCV_ISA_EXT_C] = RISCV_ISA_EXT_c,
	[KVM_RISCV_ISA_EXT_D] = RISCV_ISA_EXT_d,
	[KVM_RISCV_ISA_EXT_F] = RISCV_ISA_EXT_f,
	[KVM_RISCV_ISA_EXT_H] = RISCV_ISA_EXT_h,
	[KVM_RISCV_ISA_EXT_I] = RISCV_ISA_EXT_i,
	[KVM_RISCV_ISA_EXT_M] = RISCV_ISA_EXT_m,

	KVM_ISA_EXT_ARR(SSTC),
	KVM_ISA_EXT_ARR(SVINVAL),
	KVM_ISA_EXT_ARR(SVPBMT),
	KVM_ISA_EXT_ARR(ZIHINTPAUSE),
	KVM_ISA_EXT_ARR(ZICBOM),
};

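/*
 * Map a host ISA extension ID (for single-letter extensions this is the
 * bit position in the base ISA bitmap) to the corresponding KVM ISA
 * extension ID. Returns KVM_RISCV_ISA_EXT_MAX if there is no mapping.
 */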
static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
{
	unsigned long i;

	for (i = 0; i < KVM_RISCV_ISA_EXT_MAX; i++) {
		if (kvm_isa_ext_arr[i] == base_ext)
			return i;
	}

	return KVM_RISCV_ISA_EXT_MAX;
}

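/*
 * Check whether a given ISA extension may be enabled for a guest.
 * The hypervisor (H) extension is never exposed to guests.
 */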
static bool kvm_riscv_vcpu_isa_enable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_H:
		return false;
	default:
		break;
	}

	return true;
}

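/*
 * Check whether a given ISA extension may be disabled for a guest.
 * The base extensions A, C, I, and M, as well as a few multi-letter
 * extensions, are always kept enabled when the host provides them.
 */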
static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
{
	switch (ext) {
	case KVM_RISCV_ISA_EXT_A:
	case KVM_RISCV_ISA_EXT_C:
	case KVM_RISCV_ISA_EXT_I:
	case KVM_RISCV_ISA_EXT_M:
	case KVM_RISCV_ISA_EXT_SSTC:
	case KVM_RISCV_ISA_EXT_SVINVAL:
	case KVM_RISCV_ISA_EXT_ZIHINTPAUSE:
		return false;
	default:
		break;
	}

	return true;
}

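/*
 * Restore the VCPU to its reset state: reload the reset copies of the
 * guest CSRs and guest context, reset FP and timer state, and clear any
 * pending interrupts and queued HFENCE requests.
 */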
static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
	bool loaded;

	/*
	 * Preemption must be disabled here because this races with
	 * kvm_sched_out()/kvm_sched_in() (called from preempt notifiers),
	 * which also call vcpu_put()/vcpu_load().
	 */
	get_cpu();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	vcpu->arch.last_exit_cpu = -1;

	memcpy(csr, reset_csr, sizeof(*csr));

	memcpy(cntx, reset_cntx, sizeof(*cntx));

	kvm_riscv_vcpu_fp_reset(vcpu);

	kvm_riscv_vcpu_timer_reset(vcpu);

	WRITE_ONCE(vcpu->arch.irqs_pending, 0);
	WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	vcpu->arch.hfence_head = 0;
	vcpu->arch.hfence_tail = 0;
	memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue));

	/* Reset the guest CSRs for the hotplug usecase */
	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	put_cpu();
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *cntx;
	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
	unsigned long host_isa, i;

	/* Mark this VCPU never ran */
	vcpu->arch.ran_atleast_once = false;
	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;
	bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX);

	/* Setup ISA features available to VCPU */
	for (i = 0; i < ARRAY_SIZE(kvm_isa_ext_arr); i++) {
		host_isa = kvm_isa_ext_arr[i];
		if (__riscv_isa_extension_available(NULL, host_isa) &&
		    kvm_riscv_vcpu_isa_enable_allowed(i))
			set_bit(host_isa, vcpu->arch.isa);
	}

	/* Setup VCPU hfence queue */
	spin_lock_init(&vcpu->arch.hfence_lock);

	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
	cntx = &vcpu->arch.guest_reset_context;
	cntx->sstatus = SR_SPP | SR_SPIE;
	cntx->hstatus = 0;
	cntx->hstatus |= HSTATUS_VTW;
	cntx->hstatus |= HSTATUS_SPVP;
	cntx->hstatus |= HSTATUS_SPV;

	/* By default, make CY, TM, and IR counters accessible in VU mode */
	reset_csr->scounteren = 0x7;

	/* Setup VCPU timer */
	kvm_riscv_vcpu_timer_init(vcpu);

	/* Reset VCPU */
	kvm_riscv_reset_vcpu(vcpu);

	return 0;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	/*
	 * The VCPU with id 0 is the designated boot CPU.
	 * Keep all VCPUs with a non-zero id in the powered-off state so
	 * that they can be brought up using the SBI HSM extension.
	 */
	if (vcpu->vcpu_idx != 0)
		kvm_riscv_vcpu_power_off(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	/* Cleanup VCPU timer */
	kvm_riscv_vcpu_timer_deinit(vcpu);

	/* Free unused pages pre-allocated for G-stage page table mappings */
	kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_riscv_vcpu_timer_pending(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return (kvm_riscv_vcpu_has_interrupts(vcpu, -1UL) &&
		!vcpu->arch.power_off && !vcpu->arch.pause);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

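/*
 * ONE_REG accessors for the KVM_REG_RISCV_CONFIG register space
 * (guest base ISA bitmap and Zicbom block size).
 */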
static int kvm_riscv_vcpu_get_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		reg_val = vcpu->arch.isa[0] & KVM_RISCV_BASE_ISA_MASK;
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		if (!riscv_isa_extension_available(vcpu->arch.isa, ZICBOM))
			return -EINVAL;
		reg_val = riscv_cbom_block_size;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_config(struct kvm_vcpu *vcpu,
					 const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CONFIG);
	unsigned long i, isa_ext, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	/* This ONE REG interface is only defined for single letter extensions */
	if (fls(reg_val) >= RISCV_ISA_EXT_BASE)
		return -EINVAL;

	switch (reg_num) {
	case KVM_REG_RISCV_CONFIG_REG(isa):
		if (!vcpu->arch.ran_atleast_once) {
			/* Ignore the enable/disable request for certain extensions */
			for (i = 0; i < RISCV_ISA_EXT_BASE; i++) {
				isa_ext = kvm_riscv_vcpu_base2isa_ext(i);
				if (isa_ext >= KVM_RISCV_ISA_EXT_MAX) {
					reg_val &= ~BIT(i);
					continue;
				}
				if (!kvm_riscv_vcpu_isa_enable_allowed(isa_ext))
					if (reg_val & BIT(i))
						reg_val &= ~BIT(i);
				if (!kvm_riscv_vcpu_isa_disable_allowed(isa_ext))
					if (!(reg_val & BIT(i)))
						reg_val |= BIT(i);
			}
			reg_val &= riscv_isa_extension_base(NULL);
			/* Do not modify anything beyond single letter extensions */
			reg_val = (vcpu->arch.isa[0] & ~KVM_RISCV_BASE_ISA_MASK) |
				  (reg_val & KVM_RISCV_BASE_ISA_MASK);
			vcpu->arch.isa[0] = reg_val;
			kvm_riscv_vcpu_fp_reset(vcpu);
		} else {
			return -EOPNOTSUPP;
		}
		break;
	case KVM_REG_RISCV_CONFIG_REG(zicbom_block_size):
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}

	return 0;
}

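/*
 * ONE_REG accessors for the KVM_REG_RISCV_CORE register space
 * (guest PC, general-purpose registers, and privilege mode).
 */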
static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		reg_val = cntx->sepc;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		reg_val = ((unsigned long *)cntx)[reg_num];
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode))
		reg_val = (cntx->sstatus & SR_SPP) ?
				KVM_RISCV_MODE_S : KVM_RISCV_MODE_U;
	else
		return -EINVAL;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
				       const struct kvm_one_reg *reg)
{
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CORE);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
		cntx->sepc = reg_val;
	else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
		 reg_num <= KVM_REG_RISCV_CORE_REG(regs.t6))
		((unsigned long *)cntx)[reg_num] = reg_val;
	else if (reg_num == KVM_REG_RISCV_CORE_REG(mode)) {
		if (reg_val == KVM_RISCV_MODE_S)
			cntx->sstatus |= SR_SPP;
		else
			cntx->sstatus &= ~SR_SPP;
	} else
		return -EINVAL;

	return 0;
}

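/*
 * ONE_REG accessors for the KVM_REG_RISCV_CSR register space
 * (shadow copies of the guest VS-level CSRs).
 */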
static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		kvm_riscv_vcpu_flush_interrupts(vcpu);
		reg_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
	} else
		reg_val = ((unsigned long *)csr)[reg_num];

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
				      const struct kvm_one_reg *reg)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_CSR);
	unsigned long reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;
	if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
		reg_val &= VSIP_VALID_MASK;
		reg_val <<= VSIP_TO_HVIP_SHIFT;
	}

	((unsigned long *)csr)[reg_num] = reg_val;

	if (reg_num == KVM_REG_RISCV_CSR_REG(sip))
		WRITE_ONCE(vcpu->arch.irqs_pending_mask, 0);

	return 0;
}

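/*
 * ONE_REG accessors for the KVM_REG_RISCV_ISA_EXT register space,
 * used to query and enable/disable individual ISA extensions.
 */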
static int kvm_riscv_vcpu_get_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val = 0;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (__riscv_isa_extension_available(vcpu->arch.isa, host_isa_ext))
		reg_val = 1; /* Mark the given extension as available */

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}

static int kvm_riscv_vcpu_set_reg_isa_ext(struct kvm_vcpu *vcpu,
					  const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_ISA_EXT);
	unsigned long reg_val;
	unsigned long host_isa_ext;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (reg_num >= KVM_RISCV_ISA_EXT_MAX ||
	    reg_num >= ARRAY_SIZE(kvm_isa_ext_arr))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	host_isa_ext = kvm_isa_ext_arr[reg_num];
	if (!__riscv_isa_extension_available(NULL, host_isa_ext))
		return -EOPNOTSUPP;

	if (!vcpu->arch.ran_atleast_once) {
		/*
		 * All multi-letter extensions and a few single-letter
		 * extensions can be disabled
		 */
		if (reg_val == 1 &&
		    kvm_riscv_vcpu_isa_enable_allowed(reg_num))
			set_bit(host_isa_ext, vcpu->arch.isa);
		else if (!reg_val &&
			 kvm_riscv_vcpu_isa_disable_allowed(reg_num))
			clear_bit(host_isa_ext, vcpu->arch.isa);
		else
			return -EINVAL;
		kvm_riscv_vcpu_fp_reset(vcpu);
	} else {
		return -EOPNOTSUPP;
	}

	return 0;
}

static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_set_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_set_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
		return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg);

	return -EINVAL;
}

static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
				  const struct kvm_one_reg *reg)
{
	if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CONFIG)
		return kvm_riscv_vcpu_get_reg_config(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CORE)
		return kvm_riscv_vcpu_get_reg_core(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_CSR)
		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_F);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
						 KVM_REG_RISCV_FP_D);
	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_ISA_EXT)
		return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg);

	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;

		if (irq.irq == KVM_INTERRUPT_SET)
			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_VS_EXT);
		else
			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_VS_EXT);
	}

	return -ENOIOCTLCMD;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r = -EINVAL;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;

		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_riscv_vcpu_set_reg(vcpu, &reg);
		else
			r = kvm_riscv_vcpu_get_reg(vcpu, &reg);
		break;
	}
	default:
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	return -EINVAL;
}

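/*
 * Fold asynchronously requested interrupt updates (irqs_pending and
 * irqs_pending_mask) into the shadow HVIP value before it is written
 * to hardware.
 */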
void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	unsigned long mask, val;

	if (READ_ONCE(vcpu->arch.irqs_pending_mask)) {
		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
		val = READ_ONCE(vcpu->arch.irqs_pending) & mask;

		csr->hvip &= ~mask;
		csr->hvip |= val;
	}
}

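/*
 * Sync the software copy of the guest interrupt state with hardware after
 * running the guest, picking up changes the guest made to HVIP.VSSIP and
 * to the timer CSRs.
 */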
void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long hvip;
	struct kvm_vcpu_arch *v = &vcpu->arch;
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	/* Read current HVIP and VSIE CSRs */
	csr->vsie = csr_read(CSR_VSIE);

	/* Sync-up HVIP.VSSIP bit changes done by the Guest */
	hvip = csr_read(CSR_HVIP);
	if ((csr->hvip ^ hvip) & (1UL << IRQ_VS_SOFT)) {
		if (hvip & (1UL << IRQ_VS_SOFT)) {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				set_bit(IRQ_VS_SOFT, &v->irqs_pending);
		} else {
			if (!test_and_set_bit(IRQ_VS_SOFT,
					      &v->irqs_pending_mask))
				clear_bit(IRQ_VS_SOFT, &v->irqs_pending);
		}
	}

	/* Sync-up timer CSRs */
	kvm_riscv_vcpu_timer_sync(vcpu);
}

int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	set_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
{
	if (irq != IRQ_VS_SOFT &&
	    irq != IRQ_VS_TIMER &&
	    irq != IRQ_VS_EXT)
		return -EINVAL;

	clear_bit(irq, &vcpu->arch.irqs_pending);
	smp_mb__before_atomic();
	set_bit(irq, &vcpu->arch.irqs_pending_mask);

	return 0;
}

bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask)
{
	unsigned long ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK)
			    << VSIP_TO_HVIP_SHIFT) & mask;

	return (READ_ONCE(vcpu->arch.irqs_pending) & ie) ? true : false;
}

void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = true;
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
{
	vcpu->arch.power_off = false;
	kvm_vcpu_wake_up(vcpu);
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	if (vcpu->arch.power_off)
		mp_state->mp_state = KVM_MP_STATE_STOPPED;
	else
		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		vcpu->arch.power_off = false;
		break;
	case KVM_MP_STATE_STOPPED:
		kvm_riscv_vcpu_power_off(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	/* TODO: To be implemented later. */
	return -EINVAL;
}

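/*
 * Program the hypervisor environment configuration CSR (henvcfg) according
 * to the ISA extensions available to this VCPU (Svpbmt, Sstc, Zicbom).
 */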
static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
{
	u64 henvcfg = 0;

	if (riscv_isa_extension_available(isa, SVPBMT))
		henvcfg |= ENVCFG_PBMTE;

	if (riscv_isa_extension_available(isa, SSTC))
		henvcfg |= ENVCFG_STCE;

	if (riscv_isa_extension_available(isa, ZICBOM))
		henvcfg |= (ENVCFG_CBIE | ENVCFG_CBCFE);

	csr_write(CSR_HENVCFG, henvcfg);
#ifdef CONFIG_32BIT
	csr_write(CSR_HENVCFGH, henvcfg >> 32);
#endif
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_VSSTATUS, csr->vsstatus);
	csr_write(CSR_VSIE, csr->vsie);
	csr_write(CSR_VSTVEC, csr->vstvec);
	csr_write(CSR_VSSCRATCH, csr->vsscratch);
	csr_write(CSR_VSEPC, csr->vsepc);
	csr_write(CSR_VSCAUSE, csr->vscause);
	csr_write(CSR_VSTVAL, csr->vstval);
	csr_write(CSR_HVIP, csr->hvip);
	csr_write(CSR_VSATP, csr->vsatp);

	kvm_riscv_vcpu_update_config(vcpu->arch.isa);

	kvm_riscv_gstage_update_hgatp(vcpu);

	kvm_riscv_vcpu_timer_restore(vcpu);

	kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context);
	kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context,
					vcpu->arch.isa);

	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	vcpu->cpu = -1;

	kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context,
				     vcpu->arch.isa);
	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);

	kvm_riscv_vcpu_timer_save(vcpu);

	csr->vsstatus = csr_read(CSR_VSSTATUS);
	csr->vsie = csr_read(CSR_VSIE);
	csr->vstvec = csr_read(CSR_VSTVEC);
	csr->vsscratch = csr_read(CSR_VSSCRATCH);
	csr->vsepc = csr_read(CSR_VSEPC);
	csr->vscause = csr_read(CSR_VSCAUSE);
	csr->vstval = csr_read(CSR_VSTVAL);
	csr->hvip = csr_read(CSR_HVIP);
	csr->vsatp = csr_read(CSR_VSATP);
}

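/*
 * Process pending VCPU requests (sleep, reset, HGATP update, and the
 * various fence/TLB-flush requests) before entering the guest.
 */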
static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
			kvm_vcpu_srcu_read_unlock(vcpu);
			rcuwait_wait_event(wait,
				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
				TASK_INTERRUPTIBLE);
			kvm_vcpu_srcu_read_lock(vcpu);

			if (vcpu->arch.power_off || vcpu->arch.pause) {
				/*
				 * Awakened to handle a signal;
				 * request to sleep again later.
				 */
				kvm_make_request(KVM_REQ_SLEEP, vcpu);
			}
		}

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_riscv_reset_vcpu(vcpu);

		if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
			kvm_riscv_gstage_update_hgatp(vcpu);

		if (kvm_check_request(KVM_REQ_FENCE_I, vcpu))
			kvm_riscv_fence_i_process(vcpu);

		/*
		 * The generic KVM_REQ_TLB_FLUSH is the same as
		 * KVM_REQ_HFENCE_GVMA_VMID_ALL
		 */
		if (kvm_check_request(KVM_REQ_HFENCE_GVMA_VMID_ALL, vcpu))
			kvm_riscv_hfence_gvma_vmid_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE_VVMA_ALL, vcpu))
			kvm_riscv_hfence_vvma_all_process(vcpu);

		if (kvm_check_request(KVM_REQ_HFENCE, vcpu))
			kvm_riscv_hfence_process(vcpu);
	}
}

static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;

	csr_write(CSR_HVIP, csr->hvip);
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	guest_state_enter_irqoff();
	__kvm_riscv_switch_to(&vcpu->arch);
	vcpu->arch.last_exit_cpu = vcpu->cpu;
	guest_state_exit_irqoff();
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	int ret;
	struct kvm_cpu_trap trap;
	struct kvm_run *run = vcpu->run;

	/* Mark this VCPU ran at least once */
	vcpu->arch.ran_atleast_once = true;

	kvm_vcpu_srcu_read_lock(vcpu);

	switch (run->exit_reason) {
	case KVM_EXIT_MMIO:
		/* Process MMIO value returned from user-space */
		ret = kvm_riscv_vcpu_mmio_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_SBI:
		/* Process SBI value returned from user-space */
		ret = kvm_riscv_vcpu_sbi_return(vcpu, vcpu->run);
		break;
	case KVM_EXIT_RISCV_CSR:
		/* Process CSR value returned from user-space */
		ret = kvm_riscv_vcpu_csr_return(vcpu, vcpu->run);
		break;
	default:
		ret = 0;
		break;
	}
	if (ret) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return ret;
	}

	if (run->immediate_exit) {
		kvm_vcpu_srcu_read_unlock(vcpu);
		return -EINTR;
	}

	vcpu_load(vcpu);

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	while (ret > 0) {
		/* Check conditions before entering the guest */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (!ret)
			ret = 1;

		kvm_riscv_gstage_vmid_update(vcpu);

		kvm_riscv_check_vcpu_requests(vcpu);

		local_irq_disable();

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		vcpu->mode = IN_GUEST_MODE;

		kvm_vcpu_srcu_read_unlock(vcpu);
		smp_mb__after_srcu_read_unlock();

		/*
		 * VCPU interrupts might have been updated asynchronously,
		 * so propagate them to the copy used by the hardware.
		 */
		kvm_riscv_vcpu_flush_interrupts(vcpu);

		/* Update HVIP CSR for current CPU */
		kvm_riscv_update_hvip(vcpu);

		if (ret <= 0 ||
		    kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) ||
		    kvm_request_pending(vcpu) ||
		    xfer_to_guest_mode_work_pending()) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			local_irq_enable();
			kvm_vcpu_srcu_read_lock(vcpu);
			continue;
		}

		/*
		 * Cleanup stale TLB entries
		 *
		 * Note: This should be done after the G-stage VMID has been
		 * updated using kvm_riscv_gstage_vmid_ver_changed()
		 */
		kvm_riscv_local_tlb_sanitize(vcpu);

		guest_timing_enter_irqoff();

		kvm_riscv_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;

		/*
		 * Save SCAUSE, STVAL, HTVAL, and HTINST because we might
		 * get an interrupt between __kvm_riscv_switch_to() and
		 * local_irq_enable() which can potentially change CSRs.
		 */
		trap.sepc = vcpu->arch.guest_context.sepc;
		trap.scause = csr_read(CSR_SCAUSE);
		trap.stval = csr_read(CSR_STVAL);
		trap.htval = csr_read(CSR_HTVAL);
		trap.htinst = csr_read(CSR_HTINST);

		/* Sync up the interrupt state with HW */
		kvm_riscv_vcpu_sync_interrupts(vcpu);

		preempt_disable();

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * There's no barrier which ensures that pending interrupts are
		 * recognised, so we just hope that the CPU takes any pending
		 * interrupts between the enable and disable.
		 */
		local_irq_enable();
		local_irq_disable();

		guest_timing_exit_irqoff();

		local_irq_enable();

		preempt_enable();

		kvm_vcpu_srcu_read_lock(vcpu);

		ret = kvm_riscv_vcpu_exit(vcpu, run, &trap);
	}

	kvm_sigset_deactivate(vcpu);

	vcpu_put(vcpu);

	kvm_vcpu_srcu_read_unlock(vcpu);

	return ret;
}