/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/inst.h>
#include <asm/mipsregs.h>

#include <kvm/iodev.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S) \
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
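/*
 * For illustration only (not part of the ABI definitions below): a 32-bit
 * Status register id (CP0 register 12, select 0) is built as
 *
 *	MIPS_CP0_32(12, 0)
 *		== KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * 12 + 0)
 *
 * which is exactly how KVM_REG_MIPS_CP0_STATUS is defined. Userspace passes
 * such ids in struct kvm_one_reg to the KVM_GET_ONE_REG/KVM_SET_ONE_REG
 * ioctls.
 */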

#define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_SEGCTL0 MIPS_CP0_64(5, 2)
#define KVM_REG_MIPS_CP0_SEGCTL1 MIPS_CP0_64(5, 3)
#define KVM_REG_MIPS_CP0_SEGCTL2 MIPS_CP0_64(5, 4)
#define KVM_REG_MIPS_CP0_PWBASE MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_BADINSTR MIPS_CP0_32(8, 1)
#define KVM_REG_MIPS_CP0_BADINSTRP MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4 MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5 MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6 MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_DIAG MIPS_CP0_32(22, 0)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3 MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4 MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5 MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7)


#define KVM_MAX_VCPUS 16
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 0

#define KVM_HALT_POLL_NS_DEFAULT 500000

extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;

#define KVM_INVALID_ADDR 0xdeadbeef

/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */

#define KVM_HVA_ERR_BAD (-1UL)
#define KVM_HVA_ERR_RO_BAD (-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

struct kvm_vm_stat {
	struct kvm_vm_stat_generic generic;
};

struct kvm_vcpu_stat {
	struct kvm_vcpu_stat_generic generic;
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#ifdef CONFIG_CPU_LOONGSON64
	u64 vz_cpucfg_exits;
#endif
};

struct kvm_arch_memory_slot {
};

#ifdef CONFIG_CPU_LOONGSON64
struct ipi_state {
	uint32_t status;
	uint32_t en;
	uint32_t set;
	uint32_t clear;
	uint64_t buf[4];
};

struct loongson_kvm_ipi;

struct ipi_io_device {
	int node_id;
	struct loongson_kvm_ipi *ipi;
	struct kvm_io_device device;
};

struct loongson_kvm_ipi {
	spinlock_t lock;
	struct kvm *kvm;
	struct ipi_state ipistate[16];
	struct ipi_io_device dev_ipi[4];
};
#endif

struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;
#ifdef CONFIG_CPU_LOONGSON64
	struct loongson_kvm_ipi ipi;
#endif
};

#define N_MIPS_COPROC_REGS 32
#define N_MIPS_COPROC_SEL 8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};

/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX 0
#define MIPS_CP0_TLB_RANDOM 1
#define MIPS_CP0_TLB_LOW 2
#define MIPS_CP0_TLB_LO0 2
#define MIPS_CP0_TLB_LO1 3
#define MIPS_CP0_TLB_CONTEXT 4
#define MIPS_CP0_TLB_PG_MASK 5
#define MIPS_CP0_TLB_WIRED 6
#define MIPS_CP0_HWRENA 7
#define MIPS_CP0_BAD_VADDR 8
#define MIPS_CP0_COUNT 9
#define MIPS_CP0_TLB_HI 10
#define MIPS_CP0_COMPARE 11
#define MIPS_CP0_STATUS 12
#define MIPS_CP0_CAUSE 13
#define MIPS_CP0_EXC_PC 14
#define MIPS_CP0_PRID 15
#define MIPS_CP0_CONFIG 16
#define MIPS_CP0_LLADDR 17
#define MIPS_CP0_WATCH_LO 18
#define MIPS_CP0_WATCH_HI 19
#define MIPS_CP0_TLB_XCONTEXT 20
#define MIPS_CP0_DIAG 22
#define MIPS_CP0_ECC 26
#define MIPS_CP0_CACHE_ERR 27
#define MIPS_CP0_TAG_LO 28
#define MIPS_CP0_TAG_HI 29
#define MIPS_CP0_ERROR_PC 30
#define MIPS_CP0_DEBUG 23
#define MIPS_CP0_DEPC 24
#define MIPS_CP0_PERFCNT 25
#define MIPS_CP0_ERRCTL 26
#define MIPS_CP0_DATA_LO 28
#define MIPS_CP0_DATA_HI 29
#define MIPS_CP0_DESAVE 31

#define MIPS_CP0_CONFIG_SEL 0
#define MIPS_CP0_CONFIG1_SEL 1
#define MIPS_CP0_CONFIG2_SEL 2
#define MIPS_CP0_CONFIG3_SEL 3
#define MIPS_CP0_CONFIG4_SEL 4
#define MIPS_CP0_CONFIG5_SEL 5

#define MIPS_CP0_GUESTCTL2 10
#define MIPS_CP0_GUESTCTL2_SEL 5
#define MIPS_CP0_GTOFFSET 12
#define MIPS_CP0_GTOFFSET_SEL 7

/* Resume Flags */
#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */

#define RESUME_GUEST 0
#define RESUME_GUEST_DR RESUME_FLAG_DR
#define RESUME_HOST RESUME_FLAG_HOST
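/*
 * A minimal usage sketch (hedged; not lifted verbatim from the exit handlers):
 * an exit handler typically returns RESUME_GUEST to re-enter the guest, or
 * RESUME_HOST after filling in vcpu->run->exit_reason so that the KVM_RUN
 * ioctl returns to userspace, e.g.
 *
 *	if (er == EMULATE_DONE)		// enum emulation_result, defined below
 *		return RESUME_GUEST;
 *	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 *	return RESUME_HOST;
 */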

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};

#if defined(CONFIG_64BIT)
#define VPN2_MASK GENMASK(cpu_vmbits - 1, 13)
#else
#define VPN2_MASK 0xffffe000
#endif
#define KVM_ENTRYHI_ASID cpu_asid_mask(&boot_cpu_data)
#define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va) (((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
#define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \
		((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \
		TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))
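/*
 * A hedged sketch of how these helpers combine during a guest TLB lookup
 * (guest_tlb, num_entries, gva and entryhi are illustrative names, not the
 * actual lookup code): an entry matches when both the masked VPN2 and the
 * ASID (or the global bit) compare equal against the probed EntryHi value.
 *
 *	for (i = 0; i < num_entries; i++) {
 *		struct kvm_mips_tlb *tlb = &guest_tlb[i];
 *
 *		if (TLB_HI_VPN2_HIT(*tlb, gva) && TLB_HI_ASID_HIT(*tlb, entryhi))
 *			return i;
 *	}
 */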

struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};

#define KVM_MIPS_AUX_FPU 0x1
#define KVM_MIPS_AUX_MSA 0x2

struct kvm_vcpu_arch {
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

	/* vcpu's vzguestid is different on each host cpu in an smp system */
	u32 vzguestid[NR_CPUS];

	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;

	/* emulated guest MAAR registers */
	unsigned long maar[6];

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* WAIT executed */
	int wait;

	u8 fpu_enabled;
	u8 msa_enabled;
};

/* Atomically OR @val into the saved guest C0 register *@reg (LL/SC retry loop) */
static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}

/* Atomically clear the bits in @val from the saved guest C0 register *@reg */
static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

/* Atomically replace the bits selected by @change in *@reg with @val */
static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}

/* Guest register types, used in accessor build below */
#define __KVMT32 u32
#define __KVMTl unsigned long

/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{ \
	return cop0->reg[(_reg)][(sel)]; \
} \
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0, \
					   __KVMT##type val) \
{ \
	cop0->reg[(_reg)][(sel)] = val; \
}
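/*
 * For illustration only: __BUILD_KVM_RW_SAVED(status, 32, MIPS_CP0_STATUS, 0)
 * (instantiated further down via __BUILD_KVM_RW_HW) expands to roughly
 *
 *	static inline u32 kvm_read_sw_gc0_status(struct mips_coproc *cop0)
 *	{
 *		return cop0->reg[MIPS_CP0_STATUS][0];
 *	}
 *	static inline void kvm_write_sw_gc0_status(struct mips_coproc *cop0,
 *						   u32 val)
 *	{
 *		cop0->reg[MIPS_CP0_STATUS][0] = val;
 *	}
 */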

/* Generate saved context bitwise modifiers */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
					 __KVMT##type val) \
{ \
	cop0->reg[(_reg)][(sel)] |= val; \
} \
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
					   __KVMT##type val) \
{ \
	cop0->reg[(_reg)][(sel)] &= ~val; \
} \
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
					    __KVMT##type mask, \
					    __KVMT##type val) \
{ \
	unsigned long _mask = mask; \
	cop0->reg[(_reg)][(sel)] &= ~_mask; \
	cop0->reg[(_reg)][(sel)] |= val & _mask; \
}

/* Generate saved context atomic bitwise modifiers */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0, \
					 __KVMT##type val) \
{ \
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
} \
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0, \
					   __KVMT##type val) \
{ \
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val); \
} \
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0, \
					    __KVMT##type mask, \
					    __KVMT##type val) \
{ \
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val); \
}

/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel) \
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{ \
	return read_gc0_##name(); \
} \
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0, \
					   __KVMT##type val) \
{ \
	write_gc0_##name(val); \
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel) \
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0, \
					 __KVMT##type val) \
{ \
	set_gc0_##name(val); \
} \
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0, \
					   __KVMT##type val) \
{ \
	clear_gc0_##name(val); \
} \
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0, \
					    __KVMT##type mask, \
					    __KVMT##type val) \
{ \
	change_gc0_##name(mask, val); \
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel) \
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0) \
{ \
	write_gc0_##name(cop0->reg[(_reg)][(sel)]); \
} \
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0) \
{ \
	cop0->reg[(_reg)][(sel)] = read_gc0_##name(); \
}
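/*
 * For illustration only: __BUILD_KVM_SAVE_VZ(entryhi, MIPS_CP0_TLB_HI, 0)
 * (pulled in through __BUILD_KVM_RW_HW below) yields roughly
 *
 *	static inline void kvm_save_gc0_entryhi(struct mips_coproc *cop0)
 *	{
 *		cop0->reg[MIPS_CP0_TLB_HI][0] = read_gc0_entryhi();
 *	}
 *
 * i.e. the hardware guest EntryHi is copied into the saved context, and
 * kvm_restore_gc0_entryhi() writes the saved value back to hardware.
 */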

/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type) \
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0) \
{ \
	return kvm_read_##name2(cop0); \
} \
static inline void kvm_write_##name1(struct mips_coproc *cop0, \
				     __KVMT##type val) \
{ \
	kvm_write_##name2(cop0, val); \
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type) \
static inline void kvm_set_##name1(struct mips_coproc *cop0, \
				   __KVMT##type val) \
{ \
	kvm_set_##name2(cop0, val); \
} \
static inline void kvm_clear_##name1(struct mips_coproc *cop0, \
				     __KVMT##type val) \
{ \
	kvm_clear_##name2(cop0, val); \
} \
static inline void kvm_change_##name1(struct mips_coproc *cop0, \
				      __KVMT##type mask, \
				      __KVMT##type val) \
{ \
	kvm_change_##name2(cop0, mask, val); \
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap them
 * with the common guest C0 accessors (for use by common emulation code).
 */

#define __BUILD_KVM_RW_SW(name, type, _reg, sel) \
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel) \
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel) \
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel) \
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers).
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel) \
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel) \
	__BUILD_KVM_RW_VZ(name, type, _reg, sel) \
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type) \
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel) \
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel) \
	__BUILD_KVM_SET_VZ(name, type, _reg, sel) \
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW __BUILD_KVM_SET_HW

/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 *      fns_hw/sw    name    type    reg num    select
 */
__BUILD_KVM_RW_HW(index, 32, MIPS_CP0_TLB_INDEX, 0)
__BUILD_KVM_RW_HW(entrylo0, l, MIPS_CP0_TLB_LO0, 0)
__BUILD_KVM_RW_HW(entrylo1, l, MIPS_CP0_TLB_LO1, 0)
__BUILD_KVM_RW_HW(context, l, MIPS_CP0_TLB_CONTEXT, 0)
__BUILD_KVM_RW_HW(contextconfig, 32, MIPS_CP0_TLB_CONTEXT, 1)
__BUILD_KVM_RW_HW(userlocal, l, MIPS_CP0_TLB_CONTEXT, 2)
__BUILD_KVM_RW_HW(xcontextconfig, l, MIPS_CP0_TLB_CONTEXT, 3)
__BUILD_KVM_RW_HW(pagemask, l, MIPS_CP0_TLB_PG_MASK, 0)
__BUILD_KVM_RW_HW(pagegrain, 32, MIPS_CP0_TLB_PG_MASK, 1)
__BUILD_KVM_RW_HW(segctl0, l, MIPS_CP0_TLB_PG_MASK, 2)
__BUILD_KVM_RW_HW(segctl1, l, MIPS_CP0_TLB_PG_MASK, 3)
__BUILD_KVM_RW_HW(segctl2, l, MIPS_CP0_TLB_PG_MASK, 4)
__BUILD_KVM_RW_HW(pwbase, l, MIPS_CP0_TLB_PG_MASK, 5)
__BUILD_KVM_RW_HW(pwfield, l, MIPS_CP0_TLB_PG_MASK, 6)
__BUILD_KVM_RW_HW(pwsize, l, MIPS_CP0_TLB_PG_MASK, 7)
__BUILD_KVM_RW_HW(wired, 32, MIPS_CP0_TLB_WIRED, 0)
__BUILD_KVM_RW_HW(pwctl, 32, MIPS_CP0_TLB_WIRED, 6)
__BUILD_KVM_RW_HW(hwrena, 32, MIPS_CP0_HWRENA, 0)
__BUILD_KVM_RW_HW(badvaddr, l, MIPS_CP0_BAD_VADDR, 0)
__BUILD_KVM_RW_HW(badinstr, 32, MIPS_CP0_BAD_VADDR, 1)
__BUILD_KVM_RW_HW(badinstrp, 32, MIPS_CP0_BAD_VADDR, 2)
__BUILD_KVM_RW_SW(count, 32, MIPS_CP0_COUNT, 0)
__BUILD_KVM_RW_HW(entryhi, l, MIPS_CP0_TLB_HI, 0)
__BUILD_KVM_RW_HW(compare, 32, MIPS_CP0_COMPARE, 0)
__BUILD_KVM_RW_HW(status, 32, MIPS_CP0_STATUS, 0)
__BUILD_KVM_RW_HW(intctl, 32, MIPS_CP0_STATUS, 1)
__BUILD_KVM_RW_HW(cause, 32, MIPS_CP0_CAUSE, 0)
__BUILD_KVM_RW_HW(epc, l, MIPS_CP0_EXC_PC, 0)
__BUILD_KVM_RW_SW(prid, 32, MIPS_CP0_PRID, 0)
__BUILD_KVM_RW_HW(ebase, l, MIPS_CP0_PRID, 1)
__BUILD_KVM_RW_HW(config, 32, MIPS_CP0_CONFIG, 0)
__BUILD_KVM_RW_HW(config1, 32, MIPS_CP0_CONFIG, 1)
__BUILD_KVM_RW_HW(config2, 32, MIPS_CP0_CONFIG, 2)
__BUILD_KVM_RW_HW(config3, 32, MIPS_CP0_CONFIG, 3)
__BUILD_KVM_RW_HW(config4, 32, MIPS_CP0_CONFIG, 4)
__BUILD_KVM_RW_HW(config5, 32, MIPS_CP0_CONFIG, 5)
__BUILD_KVM_RW_HW(config6, 32, MIPS_CP0_CONFIG, 6)
__BUILD_KVM_RW_HW(config7, 32, MIPS_CP0_CONFIG, 7)
__BUILD_KVM_RW_SW(maari, l, MIPS_CP0_LLADDR, 2)
__BUILD_KVM_RW_HW(xcontext, l, MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc, l, MIPS_CP0_ERROR_PC, 0)
__BUILD_KVM_RW_HW(kscratch1, l, MIPS_CP0_DESAVE, 2)
__BUILD_KVM_RW_HW(kscratch2, l, MIPS_CP0_DESAVE, 3)
__BUILD_KVM_RW_HW(kscratch3, l, MIPS_CP0_DESAVE, 4)
__BUILD_KVM_RW_HW(kscratch4, l, MIPS_CP0_DESAVE, 5)
__BUILD_KVM_RW_HW(kscratch5, l, MIPS_CP0_DESAVE, 6)
__BUILD_KVM_RW_HW(kscratch6, l, MIPS_CP0_DESAVE, 7)

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status, 32, MIPS_CP0_STATUS, 0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause, 32, MIPS_CP0_CAUSE, 0)
__BUILD_KVM_SET_HW(ebase, l, MIPS_CP0_PRID, 1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config, 32, MIPS_CP0_CONFIG, 0)
__BUILD_KVM_SET_SAVED(config1, 32, MIPS_CP0_CONFIG, 1)
__BUILD_KVM_SET_SAVED(config2, 32, MIPS_CP0_CONFIG, 2)
__BUILD_KVM_SET_SAVED(config3, 32, MIPS_CP0_CONFIG, 3)
__BUILD_KVM_SET_SAVED(config4, 32, MIPS_CP0_CONFIG, 4)
__BUILD_KVM_SET_SAVED(config5, 32, MIPS_CP0_CONFIG, 5)

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}

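/*
 * A minimal usage sketch (hedged; not lifted from the exit handlers): the
 * "can_have" helpers report whether userspace enabled the capability on a
 * host CPU that supports it, while the "has" helpers additionally require the
 * guest-visible Config bit, so a coprocessor-unusable exit might be handled
 * along the lines of
 *
 *	if (kvm_mips_guest_has_fpu(&vcpu->arch))
 *		kvm_own_fpu(vcpu);
 */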

struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*prepare_flush_shadow)(struct kvm *kvm);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu, bool write_fault);

int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
#ifdef CONFIG_CPU_LOONGSON64
void kvm_loongson_clear_guest_vtlb(void);
void kvm_loongson_clear_guest_ftlb(void);
#endif

/* MMU handling */

bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);

#define KVM_ARCH_WANT_MMU_NOTIFIER

/* Emulation */
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS yet
	 * in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);

void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_vcpu *vcpu);

/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
				    struct kvm_mips_interrupt *irq);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
					 struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
int kvm_arch_flush_remote_tlb(struct kvm *kvm);

#endif /* __MIPS_KVM_HOST_H__ */