/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#ifndef __MIPS_KVM_HOST_H__
#define __MIPS_KVM_HOST_H__

#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/kvm.h>
#include <linux/kvm_types.h>
#include <linux/threads.h>
#include <linux/spinlock.h>

#include <asm/inst.h>
#include <asm/mipsregs.h>

#include <kvm/iodev.h>

/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))

#define MIPS_CP0_64(_R, _S)					\
	(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
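
/*
 * Illustrative example (not an additional definition): the low bits of a
 * register id uniquely encode the (register, select) pair as 8 * reg + sel,
 * so KVM_REG_MIPS_CP0_STATUS below, i.e. MIPS_CP0_32(12, 0), is equivalent
 * to
 *
 *	KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * 12 + 0)
 *
 * and userspace can recover reg = (id & 0xff) / 8, sel = (id & 0xff) % 8.
 */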

#define KVM_REG_MIPS_CP0_INDEX		MIPS_CP0_32(0, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO0	MIPS_CP0_64(2, 0)
#define KVM_REG_MIPS_CP0_ENTRYLO1	MIPS_CP0_64(3, 0)
#define KVM_REG_MIPS_CP0_CONTEXT	MIPS_CP0_64(4, 0)
#define KVM_REG_MIPS_CP0_CONTEXTCONFIG	MIPS_CP0_32(4, 1)
#define KVM_REG_MIPS_CP0_USERLOCAL	MIPS_CP0_64(4, 2)
#define KVM_REG_MIPS_CP0_XCONTEXTCONFIG	MIPS_CP0_64(4, 3)
#define KVM_REG_MIPS_CP0_PAGEMASK	MIPS_CP0_32(5, 0)
#define KVM_REG_MIPS_CP0_PAGEGRAIN	MIPS_CP0_32(5, 1)
#define KVM_REG_MIPS_CP0_SEGCTL0	MIPS_CP0_64(5, 2)
#define KVM_REG_MIPS_CP0_SEGCTL1	MIPS_CP0_64(5, 3)
#define KVM_REG_MIPS_CP0_SEGCTL2	MIPS_CP0_64(5, 4)
#define KVM_REG_MIPS_CP0_PWBASE		MIPS_CP0_64(5, 5)
#define KVM_REG_MIPS_CP0_PWFIELD	MIPS_CP0_64(5, 6)
#define KVM_REG_MIPS_CP0_PWSIZE		MIPS_CP0_64(5, 7)
#define KVM_REG_MIPS_CP0_WIRED		MIPS_CP0_32(6, 0)
#define KVM_REG_MIPS_CP0_PWCTL		MIPS_CP0_32(6, 6)
#define KVM_REG_MIPS_CP0_HWRENA		MIPS_CP0_32(7, 0)
#define KVM_REG_MIPS_CP0_BADVADDR	MIPS_CP0_64(8, 0)
#define KVM_REG_MIPS_CP0_BADINSTR	MIPS_CP0_32(8, 1)
#define KVM_REG_MIPS_CP0_BADINSTRP	MIPS_CP0_32(8, 2)
#define KVM_REG_MIPS_CP0_COUNT		MIPS_CP0_32(9, 0)
#define KVM_REG_MIPS_CP0_ENTRYHI	MIPS_CP0_64(10, 0)
#define KVM_REG_MIPS_CP0_COMPARE	MIPS_CP0_32(11, 0)
#define KVM_REG_MIPS_CP0_STATUS		MIPS_CP0_32(12, 0)
#define KVM_REG_MIPS_CP0_INTCTL		MIPS_CP0_32(12, 1)
#define KVM_REG_MIPS_CP0_CAUSE		MIPS_CP0_32(13, 0)
#define KVM_REG_MIPS_CP0_EPC		MIPS_CP0_64(14, 0)
#define KVM_REG_MIPS_CP0_PRID		MIPS_CP0_32(15, 0)
#define KVM_REG_MIPS_CP0_EBASE		MIPS_CP0_64(15, 1)
#define KVM_REG_MIPS_CP0_CONFIG		MIPS_CP0_32(16, 0)
#define KVM_REG_MIPS_CP0_CONFIG1	MIPS_CP0_32(16, 1)
#define KVM_REG_MIPS_CP0_CONFIG2	MIPS_CP0_32(16, 2)
#define KVM_REG_MIPS_CP0_CONFIG3	MIPS_CP0_32(16, 3)
#define KVM_REG_MIPS_CP0_CONFIG4	MIPS_CP0_32(16, 4)
#define KVM_REG_MIPS_CP0_CONFIG5	MIPS_CP0_32(16, 5)
#define KVM_REG_MIPS_CP0_CONFIG6	MIPS_CP0_32(16, 6)
#define KVM_REG_MIPS_CP0_CONFIG7	MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_MAARI		MIPS_CP0_64(17, 2)
#define KVM_REG_MIPS_CP0_XCONTEXT	MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_DIAG		MIPS_CP0_32(22, 0)
#define KVM_REG_MIPS_CP0_ERROREPC	MIPS_CP0_64(30, 0)
#define KVM_REG_MIPS_CP0_KSCRATCH1	MIPS_CP0_64(31, 2)
#define KVM_REG_MIPS_CP0_KSCRATCH2	MIPS_CP0_64(31, 3)
#define KVM_REG_MIPS_CP0_KSCRATCH3	MIPS_CP0_64(31, 4)
#define KVM_REG_MIPS_CP0_KSCRATCH4	MIPS_CP0_64(31, 5)
#define KVM_REG_MIPS_CP0_KSCRATCH5	MIPS_CP0_64(31, 6)
#define KVM_REG_MIPS_CP0_KSCRATCH6	MIPS_CP0_64(31, 7)


#define KVM_MAX_VCPUS		16
#define KVM_USER_MEM_SLOTS	16
/* memory slots that are not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS	0

#define KVM_HALT_POLL_NS_DEFAULT 500000

#ifdef CONFIG_KVM_MIPS_VZ
extern unsigned long GUESTID_MASK;
extern unsigned long GUESTID_FIRST_VERSION;
extern unsigned long GUESTID_VERSION_MASK;
#endif


/*
 * Special address that contains the comm page, used for reducing the number
 * of traps. This needs to be within 32KB of 0x0 (so the zero register can be
 * used), but preferably not at 0x0 so that most kernel NULL pointer
 * dereferences can be caught.
 */
#define KVM_GUEST_COMMPAGE_ADDR		((PAGE_SIZE > 0x8000) ?	0 : \
					 (0x8000 - PAGE_SIZE))
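
/*
 * Worked example (illustrative, assuming a 4KB PAGE_SIZE): PAGE_SIZE does
 * not exceed 0x8000, so the comm page lands at 0x8000 - 0x1000 = 0x7000,
 * the highest page-aligned address whose whole page is still reachable via
 * a positive 16-bit signed offset from the zero register, while leaving
 * page 0 free to trap NULL dereferences.
 */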

#define KVM_GUEST_KERNEL_MODE(vcpu)	((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
					((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))

#define KVM_GUEST_KUSEG			0x00000000UL
#define KVM_GUEST_KSEG0			0x40000000UL
#define KVM_GUEST_KSEG1			0x40000000UL
#define KVM_GUEST_KSEG23		0x60000000UL
#define KVM_GUEST_KSEGX(a)		((_ACAST32_(a)) & 0xe0000000)
#define KVM_GUEST_CPHYSADDR(a)		((_ACAST32_(a)) & 0x1fffffff)

#define KVM_GUEST_CKSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_CKSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_CKSEG23ADDR(a)	(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)

/*
 * Map an address to a certain kernel segment
 */
#define KVM_GUEST_KSEG0ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
#define KVM_GUEST_KSEG1ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1)
#define KVM_GUEST_KSEG23ADDR(a)		(KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23)
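
/*
 * Illustrative example: with the guest segment bases above, a physical
 * address of 0x1000 maps as
 *
 *	KVM_GUEST_KSEG0ADDR(0x1000) == 0x40000000 | 0x1000 == 0x40001000
 *
 * Note that KVM_GUEST_KSEG0 and KVM_GUEST_KSEG1 share the 0x40000000 base
 * here, unlike the architectural KSEG0/KSEG1 split.
 */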

#define KVM_INVALID_PAGE		0xdeadbeef
#define KVM_INVALID_ADDR		0xdeadbeef

/*
 * EVA has overlapping user & kernel address spaces, so user VAs may be >
 * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
 * PAGE_OFFSET.
 */

#define KVM_HVA_ERR_BAD			(-1UL)
#define KVM_HVA_ERR_RO_BAD		(-2UL)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

struct kvm_vm_stat {
	ulong remote_tlb_flush;
};

struct kvm_vcpu_stat {
	u64 wait_exits;
	u64 cache_exits;
	u64 signal_exits;
	u64 int_exits;
	u64 cop_unusable_exits;
	u64 tlbmod_exits;
	u64 tlbmiss_ld_exits;
	u64 tlbmiss_st_exits;
	u64 addrerr_st_exits;
	u64 addrerr_ld_exits;
	u64 syscall_exits;
	u64 resvd_inst_exits;
	u64 break_inst_exits;
	u64 trap_inst_exits;
	u64 msa_fpe_exits;
	u64 fpe_exits;
	u64 msa_disabled_exits;
	u64 flush_dcache_exits;
#ifdef CONFIG_KVM_MIPS_VZ
	u64 vz_gpsi_exits;
	u64 vz_gsfc_exits;
	u64 vz_hc_exits;
	u64 vz_grr_exits;
	u64 vz_gva_exits;
	u64 vz_ghfc_exits;
	u64 vz_gpa_exits;
	u64 vz_resvd_exits;
#ifdef CONFIG_CPU_LOONGSON64
	u64 vz_cpucfg_exits;
#endif
#endif
	u64 halt_successful_poll;
	u64 halt_attempted_poll;
	u64 halt_poll_success_ns;
	u64 halt_poll_fail_ns;
	u64 halt_poll_invalid;
	u64 halt_wakeup;
};

struct kvm_arch_memory_slot {
};

#ifdef CONFIG_CPU_LOONGSON64
struct ipi_state {
	uint32_t status;
	uint32_t en;
	uint32_t set;
	uint32_t clear;
	uint64_t buf[4];
};

struct loongson_kvm_ipi;

struct ipi_io_device {
	int node_id;
	struct loongson_kvm_ipi *ipi;
	struct kvm_io_device device;
};

struct loongson_kvm_ipi {
	spinlock_t lock;
	struct kvm *kvm;
	struct ipi_state ipistate[16];
	struct ipi_io_device dev_ipi[4];
};
#endif

struct kvm_arch {
	/* Guest physical mm */
	struct mm_struct gpa_mm;
	/* Mask of CPUs needing GPA ASID flush */
	cpumask_t asid_flush_mask;
#ifdef CONFIG_CPU_LOONGSON64
	struct loongson_kvm_ipi ipi;
#endif
};

#define N_MIPS_COPROC_REGS	32
#define N_MIPS_COPROC_SEL	8

struct mips_coproc {
	unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
	unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];
#endif
};

/*
 * Coprocessor 0 register names
 */
#define MIPS_CP0_TLB_INDEX	0
#define MIPS_CP0_TLB_RANDOM	1
#define MIPS_CP0_TLB_LOW	2
#define MIPS_CP0_TLB_LO0	2
#define MIPS_CP0_TLB_LO1	3
#define MIPS_CP0_TLB_CONTEXT	4
#define MIPS_CP0_TLB_PG_MASK	5
#define MIPS_CP0_TLB_WIRED	6
#define MIPS_CP0_HWRENA		7
#define MIPS_CP0_BAD_VADDR	8
#define MIPS_CP0_COUNT		9
#define MIPS_CP0_TLB_HI		10
#define MIPS_CP0_COMPARE	11
#define MIPS_CP0_STATUS		12
#define MIPS_CP0_CAUSE		13
#define MIPS_CP0_EXC_PC		14
#define MIPS_CP0_PRID		15
#define MIPS_CP0_CONFIG		16
#define MIPS_CP0_LLADDR		17
#define MIPS_CP0_WATCH_LO	18
#define MIPS_CP0_WATCH_HI	19
#define MIPS_CP0_TLB_XCONTEXT	20
#define MIPS_CP0_DIAG		22
#define MIPS_CP0_ECC		26
#define MIPS_CP0_CACHE_ERR	27
#define MIPS_CP0_TAG_LO		28
#define MIPS_CP0_TAG_HI		29
#define MIPS_CP0_ERROR_PC	30
#define MIPS_CP0_DEBUG		23
#define MIPS_CP0_DEPC		24
#define MIPS_CP0_PERFCNT	25
#define MIPS_CP0_ERRCTL		26
#define MIPS_CP0_DATA_LO	28
#define MIPS_CP0_DATA_HI	29
#define MIPS_CP0_DESAVE		31

#define MIPS_CP0_CONFIG_SEL	0
#define MIPS_CP0_CONFIG1_SEL	1
#define MIPS_CP0_CONFIG2_SEL	2
#define MIPS_CP0_CONFIG3_SEL	3
#define MIPS_CP0_CONFIG4_SEL	4
#define MIPS_CP0_CONFIG5_SEL	5

#define MIPS_CP0_GUESTCTL2	10
#define MIPS_CP0_GUESTCTL2_SEL	5
#define MIPS_CP0_GTOFFSET	12
#define MIPS_CP0_GTOFFSET_SEL	7

/* Resume Flags */
#define RESUME_FLAG_DR		(1<<0)	/* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST	(1<<1)	/* Resume host? */

#define RESUME_GUEST		0
#define RESUME_GUEST_DR		RESUME_FLAG_DR
#define RESUME_HOST		RESUME_FLAG_HOST

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_WAIT,		/* WAIT instruction */
	EMULATE_PRIV_FAIL,	/* privilege check failed */
	EMULATE_EXCEPT,		/* A guest exception has been generated */
	EMULATE_HYPERCALL,	/* HYPCALL instruction */
};

#define mips3_paddr_to_tlbpfn(x) \
	(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
	((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT)

#define MIPS3_PG_SHIFT		6
#define MIPS3_PG_FRAME		0x3fffffc0
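
/*
 * Worked example (illustrative): for a page-aligned physical address of
 * 0x12000,
 *
 *	mips3_paddr_to_tlbpfn(0x12000) == (0x12000 >> 6) & 0x3fffffc0 == 0x480
 *
 * i.e. the PFN field is pre-shifted for direct insertion into EntryLo, and
 * mips3_tlbpfn_to_paddr(0x480) recovers 0x12000.
 */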

#if defined(CONFIG_64BIT)
#define VPN2_MASK		GENMASK(cpu_vmbits - 1, 13)
#else
#define VPN2_MASK		0xffffe000
#endif
#define KVM_ENTRYHI_ASID	cpu_asid_mask(&boot_cpu_data)
#define TLB_IS_GLOBAL(x)	((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x)		((x).tlb_hi & VPN2_MASK)
#define TLB_ASID(x)		((x).tlb_hi & KVM_ENTRYHI_ASID)
#define TLB_LO_IDX(x, va)	(((va) >> PAGE_SHIFT) & 1)
#define TLB_IS_VALID(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_IS_DIRTY(x, va)	((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_D)
#define TLB_HI_VPN2_HIT(x, y)	((TLB_VPN2(x) & ~(x).tlb_mask) ==	\
				 ((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y)	(TLB_IS_GLOBAL(x) ||			\
				 TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))

struct kvm_mips_tlb {
	long tlb_mask;
	long tlb_hi;
	long tlb_lo[2];
};
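
/*
 * A minimal sketch (hypothetical helper, not part of this header's API) of
 * how the match macros above combine into a software TLB probe: match VPN2
 * while ignoring the bits covered by PageMask, then require an ASID match
 * unless the entry is global. The in-tree lookup is
 * kvm_mips_guest_tlb_lookup(), declared further down.
 */
static inline int example_guest_tlb_probe(const struct kvm_mips_tlb *tlb,
					  int size, unsigned long entryhi)
{
	int i;

	for (i = 0; i < size; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi))
			return i;	/* matching entry index */
	}
	return -1;			/* no matching entry */
}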

#define KVM_MIPS_AUX_FPU	0x1
#define KVM_MIPS_AUX_MSA	0x2

#define KVM_MIPS_GUEST_TLB_SIZE	64
struct kvm_vcpu_arch {
	void *guest_ebase;
	int (*vcpu_run)(struct kvm_vcpu *vcpu);

	/* Host registers preserved across guest mode execution */
	unsigned long host_stack;
	unsigned long host_gp;
	unsigned long host_pgd;
	unsigned long host_entryhi;

	/* Host CP0 registers used when handling exits from guest */
	unsigned long host_cp0_badvaddr;
	unsigned long host_cp0_epc;
	u32 host_cp0_cause;
	u32 host_cp0_guestctl0;
	u32 host_cp0_badinstr;
	u32 host_cp0_badinstrp;

	/* GPRS */
	unsigned long gprs[32];
	unsigned long hi;
	unsigned long lo;
	unsigned long pc;

	/* FPU State */
	struct mips_fpu_struct fpu;
	/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
	unsigned int aux_inuse;

	/* COP0 State */
	struct mips_coproc *cop0;

	/* Host KSEG0 address of the EI/DI offset */
	void *kseg0_commpage;

	/* Resume PC after MMIO completion */
	unsigned long io_pc;
	/* GPR used as IO source/target */
	u32 io_gpr;

	struct hrtimer comparecount_timer;
	/* Count timer control KVM register */
	u32 count_ctl;
	/* Count bias from the raw time */
	u32 count_bias;
	/* Frequency of timer in Hz */
	u32 count_hz;
	/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
	s64 count_dyn_bias;
	/* Resume time */
	ktime_t count_resume;
	/* Period of timer tick in ns */
	u64 count_period;

	/* Bitmask of exceptions that are pending */
	unsigned long pending_exceptions;

	/* Bitmask of pending exceptions to be cleared */
	unsigned long pending_exceptions_clr;

	/* S/W Based TLB for guest */
	struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];

	/* Guest kernel/user [partial] mm */
	struct mm_struct guest_kernel_mm, guest_user_mm;

	/* Guest ASID of last user mode execution */
	unsigned int last_user_gasid;

	/* Cache some mmu pages needed inside spinlock regions */
	struct kvm_mmu_memory_cache mmu_page_cache;

#ifdef CONFIG_KVM_MIPS_VZ
	/* vcpu's vzguestid is different on each host cpu in an smp system */
	u32 vzguestid[NR_CPUS];

	/* wired guest TLB entries */
	struct kvm_mips_tlb *wired_tlb;
	unsigned int wired_tlb_limit;
	unsigned int wired_tlb_used;

	/* emulated guest MAAR registers */
	unsigned long maar[6];
#endif

	/* Last CPU the VCPU state was loaded on */
	int last_sched_cpu;
	/* Last CPU the VCPU actually executed guest code on */
	int last_exec_cpu;

	/* WAIT executed */
	int wait;

	u8 fpu_enabled;
	u8 msa_enabled;
};

static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));
}

static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	"MIPS_ISA_ARCH_LEVEL"		\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	pop				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));
}
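
/*
 * Illustrative C equivalents (assuming no concurrent writers, which is
 * exactly what the LL/SC loops above guard against): the three helpers
 * perform an atomic read-modify-write of *reg:
 *
 *	set:	*reg |= val;
 *	clear:	*reg &= ~val;
 *	change:	*reg = (*reg & ~change) | (val & change);
 *
 * SC writes its success flag back into temp, so each loop retries until
 * the store completed without interference.
 */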

/* Guest register types, used in accessor build below */
#define __KVMT32	u32
#define __KVMTl	unsigned long

/*
 * __BUILD_KVM_$ops_SAVED(): kvm_$op_sw_gc0_$reg()
 * These operate on the saved guest C0 state in RAM.
 */

/* Generate saved context simple accessors */
#define __BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_sw_gc0_##name(struct mips_coproc *cop0) \
{									\
	return cop0->reg[(_reg)][(sel)];				\
}									\
static inline void kvm_write_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] = val;					\
}
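
/*
 * For example (illustrative expansion), __BUILD_KVM_RW_SAVED(status, 32,
 * MIPS_CP0_STATUS, 0) generates:
 *
 *	static inline u32 kvm_read_sw_gc0_status(struct mips_coproc *cop0)
 *	{
 *		return cop0->reg[MIPS_CP0_STATUS][0];
 *	}
 *	static inline void kvm_write_sw_gc0_status(struct mips_coproc *cop0,
 *						   u32 val)
 *	{
 *		cop0->reg[MIPS_CP0_STATUS][0] = val;
 *	}
 */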

/* Generate saved context bitwise modifiers */
#define __BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] |= val;				\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	cop0->reg[(_reg)][(sel)] &= ~val;				\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	unsigned long _mask = mask;					\
	cop0->reg[(_reg)][(sel)] &= ~_mask;				\
	cop0->reg[(_reg)][(sel)] |= val & _mask;			\
}

/* Generate saved context atomic bitwise modifiers */
#define __BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
static inline void kvm_set_sw_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_clear_sw_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[(_reg)][(sel)], val);	\
}									\
static inline void kvm_change_sw_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[(_reg)][(sel)], mask, \
					val);				\
}

/*
 * __BUILD_KVM_$ops_VZ(): kvm_$op_vz_gc0_$reg()
 * These operate on the VZ guest C0 context in hardware.
 */

/* Generate VZ guest context simple accessors */
#define __BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
static inline __KVMT##type kvm_read_vz_gc0_##name(struct mips_coproc *cop0) \
{									\
	return read_gc0_##name();					\
}									\
static inline void kvm_write_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	write_gc0_##name(val);						\
}

/* Generate VZ guest context bitwise modifiers */
#define __BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
static inline void kvm_set_vz_gc0_##name(struct mips_coproc *cop0,	\
					 __KVMT##type val)		\
{									\
	set_gc0_##name(val);						\
}									\
static inline void kvm_clear_vz_gc0_##name(struct mips_coproc *cop0,	\
					   __KVMT##type val)		\
{									\
	clear_gc0_##name(val);						\
}									\
static inline void kvm_change_vz_gc0_##name(struct mips_coproc *cop0,	\
					    __KVMT##type mask,		\
					    __KVMT##type val)		\
{									\
	change_gc0_##name(mask, val);					\
}

/* Generate VZ guest context save/restore to/from saved context */
#define __BUILD_KVM_SAVE_VZ(name, _reg, sel)			\
static inline void kvm_restore_gc0_##name(struct mips_coproc *cop0)	\
{									\
	write_gc0_##name(cop0->reg[(_reg)][(sel)]);			\
}									\
static inline void kvm_save_gc0_##name(struct mips_coproc *cop0)	\
{									\
	cop0->reg[(_reg)][(sel)] = read_gc0_##name();			\
}

/*
 * __BUILD_KVM_$ops_WRAP(): kvm_$op_$name1() -> kvm_$op_$name2()
 * These wrap a set of operations to provide them with a different name.
 */

/* Generate simple accessor wrapper */
#define __BUILD_KVM_RW_WRAP(name1, name2, type)				\
static inline __KVMT##type kvm_read_##name1(struct mips_coproc *cop0)	\
{									\
	return kvm_read_##name2(cop0);					\
}									\
static inline void kvm_write_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_write_##name2(cop0, val);					\
}

/* Generate bitwise modifier wrapper */
#define __BUILD_KVM_SET_WRAP(name1, name2, type)			\
static inline void kvm_set_##name1(struct mips_coproc *cop0,		\
				   __KVMT##type val)			\
{									\
	kvm_set_##name2(cop0, val);					\
}									\
static inline void kvm_clear_##name1(struct mips_coproc *cop0,		\
				     __KVMT##type val)			\
{									\
	kvm_clear_##name2(cop0, val);					\
}									\
static inline void kvm_change_##name1(struct mips_coproc *cop0,		\
				      __KVMT##type mask,		\
				      __KVMT##type val)			\
{									\
	kvm_change_##name2(cop0, mask, val);				\
}

/*
 * __BUILD_KVM_$ops_SW(): kvm_$op_c0_guest_$reg() -> kvm_$op_sw_gc0_$reg()
 * These generate accessors operating on the saved context in RAM, and wrap them
 * with the common guest C0 accessors (for use by common emulation code).
 */

#define __BUILD_KVM_RW_SW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_SET_SW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)

#define __BUILD_KVM_ATOMIC_SW(name, type, _reg, sel)			\
	__BUILD_KVM_ATOMIC_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, sw_gc0_##name, type)
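
/*
 * Illustrative name chain: __BUILD_KVM_RW_SW(status, 32, MIPS_CP0_STATUS, 0)
 * would define the saved-context accessors kvm_read/write_sw_gc0_status()
 * and wrap them so that common code calling kvm_read_c0_guest_status()
 * reaches the copy in RAM.
 */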

#ifndef CONFIG_KVM_MIPS_VZ

/*
 * T&E (trap & emulate software based virtualisation)
 * We generate the common accessors operating exclusively on the saved context
 * in RAM.
 */

#define __BUILD_KVM_RW_HW	__BUILD_KVM_RW_SW
#define __BUILD_KVM_SET_HW	__BUILD_KVM_SET_SW
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_ATOMIC_SW

#else

/*
 * VZ (hardware assisted virtualisation)
 * These macros use the active guest state in VZ mode (hardware registers).
 */

/*
 * __BUILD_KVM_$ops_HW(): kvm_$op_c0_guest_$reg() -> kvm_$op_vz_gc0_$reg()
 * These generate accessors operating on the VZ guest context in hardware, and
 * wrap them with the common guest C0 accessors (for use by common emulation
 * code).
 *
 * Accessors operating on the saved context in RAM are also generated to allow
 * convenient explicit saving and restoring of the state.
 */

#define __BUILD_KVM_RW_HW(name, type, _reg, sel)			\
	__BUILD_KVM_RW_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_RW_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_RW_WRAP(c0_guest_##name, vz_gc0_##name, type)	\
	__BUILD_KVM_SAVE_VZ(name, _reg, sel)

#define __BUILD_KVM_SET_HW(name, type, _reg, sel)			\
	__BUILD_KVM_SET_SAVED(name, type, _reg, sel)			\
	__BUILD_KVM_SET_VZ(name, type, _reg, sel)			\
	__BUILD_KVM_SET_WRAP(c0_guest_##name, vz_gc0_##name, type)

/*
 * We can't do atomic modifications of COP0 state if hardware can modify it.
 * Races must be handled explicitly.
 */
#define __BUILD_KVM_ATOMIC_HW	__BUILD_KVM_SET_HW

#endif

/*
 * Define accessors for CP0 registers that are accessible to the guest. These
 * are primarily used by common emulation code, which may need to access the
 * registers differently depending on the implementation.
 *
 *    fns_hw/sw    name     type    reg num         select
 */
__BUILD_KVM_RW_HW(index,          32, MIPS_CP0_TLB_INDEX,    0)
__BUILD_KVM_RW_HW(entrylo0,       l,  MIPS_CP0_TLB_LO0,      0)
__BUILD_KVM_RW_HW(entrylo1,       l,  MIPS_CP0_TLB_LO1,      0)
__BUILD_KVM_RW_HW(context,        l,  MIPS_CP0_TLB_CONTEXT,  0)
__BUILD_KVM_RW_HW(contextconfig,  32, MIPS_CP0_TLB_CONTEXT,  1)
__BUILD_KVM_RW_HW(userlocal,      l,  MIPS_CP0_TLB_CONTEXT,  2)
__BUILD_KVM_RW_HW(xcontextconfig, l,  MIPS_CP0_TLB_CONTEXT,  3)
__BUILD_KVM_RW_HW(pagemask,       l,  MIPS_CP0_TLB_PG_MASK,  0)
__BUILD_KVM_RW_HW(pagegrain,      32, MIPS_CP0_TLB_PG_MASK,  1)
__BUILD_KVM_RW_HW(segctl0,        l,  MIPS_CP0_TLB_PG_MASK,  2)
__BUILD_KVM_RW_HW(segctl1,        l,  MIPS_CP0_TLB_PG_MASK,  3)
__BUILD_KVM_RW_HW(segctl2,        l,  MIPS_CP0_TLB_PG_MASK,  4)
__BUILD_KVM_RW_HW(pwbase,         l,  MIPS_CP0_TLB_PG_MASK,  5)
__BUILD_KVM_RW_HW(pwfield,        l,  MIPS_CP0_TLB_PG_MASK,  6)
__BUILD_KVM_RW_HW(pwsize,         l,  MIPS_CP0_TLB_PG_MASK,  7)
__BUILD_KVM_RW_HW(wired,          32, MIPS_CP0_TLB_WIRED,    0)
__BUILD_KVM_RW_HW(pwctl,          32, MIPS_CP0_TLB_WIRED,    6)
__BUILD_KVM_RW_HW(hwrena,         32, MIPS_CP0_HWRENA,       0)
__BUILD_KVM_RW_HW(badvaddr,       l,  MIPS_CP0_BAD_VADDR,    0)
__BUILD_KVM_RW_HW(badinstr,       32, MIPS_CP0_BAD_VADDR,    1)
__BUILD_KVM_RW_HW(badinstrp,      32, MIPS_CP0_BAD_VADDR,    2)
__BUILD_KVM_RW_SW(count,          32, MIPS_CP0_COUNT,        0)
__BUILD_KVM_RW_HW(entryhi,        l,  MIPS_CP0_TLB_HI,       0)
__BUILD_KVM_RW_HW(compare,        32, MIPS_CP0_COMPARE,      0)
__BUILD_KVM_RW_HW(status,         32, MIPS_CP0_STATUS,       0)
__BUILD_KVM_RW_HW(intctl,         32, MIPS_CP0_STATUS,       1)
__BUILD_KVM_RW_HW(cause,          32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_RW_HW(epc,            l,  MIPS_CP0_EXC_PC,       0)
__BUILD_KVM_RW_SW(prid,           32, MIPS_CP0_PRID,         0)
__BUILD_KVM_RW_HW(ebase,          l,  MIPS_CP0_PRID,         1)
__BUILD_KVM_RW_HW(config,         32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_RW_HW(config1,        32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_RW_HW(config2,        32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_RW_HW(config3,        32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_RW_HW(config4,        32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_RW_HW(config5,        32, MIPS_CP0_CONFIG,       5)
__BUILD_KVM_RW_HW(config6,        32, MIPS_CP0_CONFIG,       6)
__BUILD_KVM_RW_HW(config7,        32, MIPS_CP0_CONFIG,       7)
__BUILD_KVM_RW_SW(maari,          l,  MIPS_CP0_LLADDR,       2)
__BUILD_KVM_RW_HW(xcontext,       l,  MIPS_CP0_TLB_XCONTEXT, 0)
__BUILD_KVM_RW_HW(errorepc,       l,  MIPS_CP0_ERROR_PC,     0)
__BUILD_KVM_RW_HW(kscratch1,      l,  MIPS_CP0_DESAVE,       2)
__BUILD_KVM_RW_HW(kscratch2,      l,  MIPS_CP0_DESAVE,       3)
__BUILD_KVM_RW_HW(kscratch3,      l,  MIPS_CP0_DESAVE,       4)
__BUILD_KVM_RW_HW(kscratch4,      l,  MIPS_CP0_DESAVE,       5)
__BUILD_KVM_RW_HW(kscratch5,      l,  MIPS_CP0_DESAVE,       6)
__BUILD_KVM_RW_HW(kscratch6,      l,  MIPS_CP0_DESAVE,       7)
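
/*
 * After the table above, common emulation code can use the generated
 * accessors uniformly, e.g. (illustrative):
 *
 *	u32 status = kvm_read_c0_guest_status(vcpu->arch.cop0);
 *	kvm_write_c0_guest_compare(vcpu->arch.cop0, compare);
 *
 * dispatching to the copy in RAM under T&E and to the hardware guest
 * context under VZ.
 */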

/* Bitwise operations (on HW state) */
__BUILD_KVM_SET_HW(status,        32, MIPS_CP0_STATUS,       0)
/* Cause can be modified asynchronously from hardirq hrtimer callback */
__BUILD_KVM_ATOMIC_HW(cause,      32, MIPS_CP0_CAUSE,        0)
__BUILD_KVM_SET_HW(ebase,         l,  MIPS_CP0_PRID,         1)

/* Bitwise operations (on saved state) */
__BUILD_KVM_SET_SAVED(config,     32, MIPS_CP0_CONFIG,       0)
__BUILD_KVM_SET_SAVED(config1,    32, MIPS_CP0_CONFIG,       1)
__BUILD_KVM_SET_SAVED(config2,    32, MIPS_CP0_CONFIG,       2)
__BUILD_KVM_SET_SAVED(config3,    32, MIPS_CP0_CONFIG,       3)
__BUILD_KVM_SET_SAVED(config4,    32, MIPS_CP0_CONFIG,       4)
__BUILD_KVM_SET_SAVED(config5,    32, MIPS_CP0_CONFIG,       5)

/* Helpers */

static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
		vcpu->fpu_enabled;
}

static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_fpu(vcpu) &&
		kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
{
	return (!__builtin_constant_p(cpu_has_msa) || cpu_has_msa) &&
		vcpu->msa_enabled;
}

static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
	return kvm_mips_guest_can_have_msa(vcpu) &&
		kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
}

struct kvm_mips_callbacks {
	int (*handle_cop_unusable)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_mod)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_ld_miss)(struct kvm_vcpu *vcpu);
	int (*handle_tlb_st_miss)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_st)(struct kvm_vcpu *vcpu);
	int (*handle_addr_err_ld)(struct kvm_vcpu *vcpu);
	int (*handle_syscall)(struct kvm_vcpu *vcpu);
	int (*handle_res_inst)(struct kvm_vcpu *vcpu);
	int (*handle_break)(struct kvm_vcpu *vcpu);
	int (*handle_trap)(struct kvm_vcpu *vcpu);
	int (*handle_msa_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_fpe)(struct kvm_vcpu *vcpu);
	int (*handle_msa_disabled)(struct kvm_vcpu *vcpu);
	int (*handle_guest_exit)(struct kvm_vcpu *vcpu);
	int (*hardware_enable)(void);
	void (*hardware_disable)(void);
	int (*check_extension)(struct kvm *kvm, long ext);
	int (*vcpu_init)(struct kvm_vcpu *vcpu);
	void (*vcpu_uninit)(struct kvm_vcpu *vcpu);
	int (*vcpu_setup)(struct kvm_vcpu *vcpu);
	void (*flush_shadow_all)(struct kvm *kvm);
	/*
	 * Must take care of flushing any cached GPA PTEs (e.g. guest entries in
	 * VZ root TLB, or T&E GVA page tables and corresponding root TLB
	 * mappings).
	 */
	void (*flush_shadow_memslot)(struct kvm *kvm,
				     const struct kvm_memory_slot *slot);
	gpa_t (*gva_to_gpa)(gva_t gva);
	void (*queue_timer_int)(struct kvm_vcpu *vcpu);
	void (*dequeue_timer_int)(struct kvm_vcpu *vcpu);
	void (*queue_io_int)(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);
	void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
			       struct kvm_mips_interrupt *irq);
	int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
			   u32 cause);
	int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
			 u32 cause);
	unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
	int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
	int (*get_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 *v);
	int (*set_one_reg)(struct kvm_vcpu *vcpu,
			   const struct kvm_one_reg *reg, s64 v);
	int (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_put)(struct kvm_vcpu *vcpu, int cpu);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	void (*vcpu_reenter)(struct kvm_vcpu *vcpu);
};
extern struct kvm_mips_callbacks *kvm_mips_callbacks;
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);

/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);

/* Building of entry/exception code */
int kvm_mips_entry_setup(void);
void *kvm_mips_build_vcpu_run(void *addr);
void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler);
void *kvm_mips_build_exception(void *addr, void *handler);
void *kvm_mips_build_exit(void *addr);

/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fpu(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_fcsr(struct kvm_vcpu_arch *vcpu);
void __kvm_save_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msa_upper(struct kvm_vcpu_arch *vcpu);
void __kvm_restore_msacsr(struct kvm_vcpu_arch *vcpu);
void kvm_own_fpu(struct kvm_vcpu *vcpu);
void kvm_own_msa(struct kvm_vcpu *vcpu);
void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);

/* TLB handling */
u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);

u32 kvm_get_commpage_asid(struct kvm_vcpu *vcpu);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
				      struct kvm_vcpu *vcpu, bool write_fault);
#endif
extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
					   struct kvm_vcpu *vcpu,
					   bool write_fault);

extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
					      struct kvm_vcpu *vcpu);

extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
						struct kvm_mips_tlb *tlb,
						unsigned long gva,
						bool write_fault);

extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
						     u32 *opc,
						     struct kvm_vcpu *vcpu,
						     bool write_fault);

extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi,
				 bool user, bool kernel);

extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu,
				     unsigned long entryhi);

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva,
			    unsigned long *gpa);
void kvm_vz_local_flush_roottlb_all_guests(void);
void kvm_vz_local_flush_guesttlb_all(void);
void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index,
			  unsigned int count);
#ifdef CONFIG_CPU_LOONGSON64
void kvm_loongson_clear_guest_vtlb(void);
void kvm_loongson_clear_guest_ftlb(void);
#endif
#endif

void kvm_mips_suspend_mm(int cpu);
void kvm_mips_resume_mm(int cpu);

/* MMU handling */

/**
 * enum kvm_mips_flush - Types of MMU flushes.
 * @KMF_USER:	Flush guest user virtual memory mappings.
 *		Guest USeg only.
 * @KMF_KERN:	Flush guest kernel virtual memory mappings.
 *		Guest USeg and KSeg2/3.
 * @KMF_GPA:	Flush guest physical memory mappings.
 *		Also includes KSeg0 if KMF_KERN is set.
 */
enum kvm_mips_flush {
	KMF_USER	= 0x0,
	KMF_KERN	= 0x1,
	KMF_GPA		= 0x2,
};
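
/*
 * Illustrative usage (a hypothetical call site; the flag semantics follow
 * the kerneldoc above): flushing guest kernel mappings including the GPA
 * mappings behind KSeg0 combines the flags:
 *
 *	kvm_mips_flush_gva_pt(pgd, KMF_KERN | KMF_GPA);
 */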
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
pgd_t *kvm_pgd_alloc(void);
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user);
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu);
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu);

enum kvm_mips_fault_result {
	KVM_MIPS_MAPPED = 0,
	KVM_MIPS_GVA,
	KVM_MIPS_GPA,
	KVM_MIPS_TLB,
	KVM_MIPS_TLBINV,
	KVM_MIPS_TLBMOD,
};
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
						   unsigned long gva,
						   bool write);

#define KVM_ARCH_WANT_MMU_NOTIFIER
int kvm_unmap_hva_range(struct kvm *kvm,
			unsigned long start, unsigned long end, unsigned flags);
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);

/* Emulation */
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);
int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out);

/**
 * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	Whether the TLBL exception was likely due to an instruction
 *		fetch fault rather than a data load fault.
 */
static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *vcpu)
{
	unsigned long badvaddr = vcpu->host_cp0_badvaddr;
	unsigned long epc = msk_isa16_mode(vcpu->pc);
	u32 cause = vcpu->host_cp0_cause;

	if (epc == badvaddr)
		return true;

	/*
	 * Branches may be 32-bit or 16-bit instructions.
	 * This isn't exact, but we don't really support MIPS16 or microMIPS yet
	 * in KVM anyway.
	 */
	if ((cause & CAUSEF_BD) && badvaddr - epc <= 4)
		return true;

	return false;
}
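
/*
 * Worked example (illustrative): if the faulting instruction sits in a
 * branch delay slot, EPC points at the branch itself, so a fetch fault on
 * the delay slot shows up as BadVAddr == EPC + 4 (or EPC + 2 for a 16-bit
 * branch); both cases satisfy the badvaddr - epc <= 4 test above.
 */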

extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
						   u32 *opc,
						   struct kvm_vcpu *vcpu);

long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
						      u32 *opc,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
							 u32 *opc,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
							u32 *opc,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
							 u32 *opc,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
							u32 *opc,
							struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
						     u32 *opc,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
						      u32 *opc,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_handle_ri(u32 cause,
						u32 *opc,
						struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
						     u32 *opc,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
						     u32 *opc,
						     struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
						       u32 *opc,
						       struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
							 u32 *opc,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
						      u32 *opc,
						      struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
							 u32 *opc,
							 struct kvm_vcpu *vcpu);

extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu);

u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz);
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);

/* fairly internal functions requiring some care to use */
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu);
ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count);
int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
			     u32 count, int min_drift);

#ifdef CONFIG_KVM_MIPS_VZ
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu);
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu);
#else
static inline void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) {}
static inline void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) {}
#endif

enum emulation_result kvm_mips_check_privilege(u32 cause,
					       u32 *opc,
					       struct kvm_vcpu *vcpu);

enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
					     u32 *opc,
					     u32 cause,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
					   u32 *opc,
					   u32 cause,
					   struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
					     u32 cause,
					     struct kvm_vcpu *vcpu);
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
					    u32 cause,
					    struct kvm_vcpu *vcpu);

/* COP0 */
enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu);

unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);

/* Hypercalls (hypcall.c) */

enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu,
					    union mips_instruction inst);
int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu);

/* Dynamic binary translation */
extern int kvm_mips_trans_cache_index(union mips_instruction inst,
				      u32 *opc, struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
				   struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);
extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
			       struct kvm_vcpu *vcpu);

/* Misc */
extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
extern int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq);

static inline void kvm_arch_hardware_unsetup(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
					 struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}

#endif /* __MIPS_KVM_HOST_H__ */