/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_H__
#define __ASM_KVM_BOOK3S_H__

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_book3s_asm.h>

struct kvmppc_bat {
	u64 raw;
	u32 bepi;
	u32 bepi_mask;
	u32 brpn;
	u8 wimg;
	u8 pp;
	bool vs		: 1;
	bool vp		: 1;
};

struct kvmppc_sid_map {
	u64 guest_vsid;
	u64 guest_esid;
	u64 host_vsid;
	bool valid	: 1;
};

#define SID_MAP_BITS    9
#define SID_MAP_NUM     (1 << SID_MAP_BITS)
#define SID_MAP_MASK    (SID_MAP_NUM - 1)
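/*
 * Illustrative sketch (not part of this header): the host MMU code looks
 * up sid_map[] by folding the guest VSID down to SID_MAP_BITS bits, along
 * the following lines. The helper name and exact hash here are assumptions
 * modelled on the Book3S host MMU implementations:
 *
 *	static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
 *	{
 *		return ((u16)((gvsid >> 28) ^ (gvsid >> 14) ^ gvsid)) &
 *			SID_MAP_MASK;
 *	}
 *
 * A lookup then checks sid_map[hash].valid and .guest_vsid before trusting
 * the cached host_vsid.
 */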

#ifdef CONFIG_PPC_BOOK3S_64
#define SID_CONTEXTS	1
#else
#define SID_CONTEXTS	128
#define VSID_POOL_SIZE	(SID_CONTEXTS * 16)
#endif

struct hpte_cache {
	struct hlist_node list_pte;
	struct hlist_node list_pte_long;
	struct hlist_node list_vpte;
	struct hlist_node list_vpte_long;
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_node list_vpte_64k;
#endif
	struct rcu_head rcu_head;
	u64 host_vpn;
	u64 pfn;
	ulong slot;
	struct kvmppc_pte pte;
	int pagesize;
};

/*
 * Struct for a virtual core.
 * Note: entry_exit_map combines a bitmap of threads that have entered
 * in the bottom 8 bits and a bitmap of threads that have exited in the
 * next 8 bits.  This is so that we can atomically set the entry bit
 * iff the exit map is 0 without taking a lock.
 */
struct kvmppc_vcore {
	int n_runnable;
	int num_threads;
	int entry_exit_map;
	int napping_threads;
	int first_vcpuid;
	u16 pcpu;
	u16 last_cpu;
	u8 vcore_state;
	u8 in_guest;
	struct kvm_vcpu *runnable_threads[MAX_SMT_THREADS];
	struct list_head preempt_list;
	spinlock_t lock;
	struct swait_queue_head wq;
	spinlock_t stoltb_lock;	/* protects stolen_tb and preempt_tb */
	u64 stolen_tb;
	u64 preempt_tb;
	struct kvm_vcpu *runner;
	struct kvm *kvm;
	u64 tb_offset;		/* guest timebase - host timebase */
	u64 tb_offset_applied;	/* timebase offset currently in force */
	ulong lpcr;
	u32 arch_compat;
	ulong pcr;
	ulong dpdes;		/* doorbell state (POWER8) */
	ulong vtb;		/* virtual timebase */
	ulong conferring_threads;
	unsigned int halt_poll_ns;
	atomic_t online_count;
};
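/*
 * Sketch of the lock-free entry protocol described above (illustrative
 * only; the real logic lives in the HV run-core code, and this helper
 * name is hypothetical):
 *
 *	static bool try_enter_vcore(struct kvmppc_vcore *vc, int thread)
 *	{
 *		int old, new;
 *
 *		do {
 *			old = vc->entry_exit_map;
 *			if (old >> 8)		// a thread has already exited
 *				return false;
 *			new = old | (1 << thread);	// set our entry bit
 *		} while (cmpxchg(&vc->entry_exit_map, old, new) != old);
 *		return true;
 *	}
 *
 * The cmpxchg() makes "set the entry bit iff the exit map is 0" a single
 * atomic step, so no spinlock is needed on this fast path.
 */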

struct kvmppc_vcpu_book3s {
	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
	struct {
		u64 esid;
		u64 vsid;
	} slb_shadow[64];
	u8 slb_shadow_max;
	struct kvmppc_bat ibat[8];
	struct kvmppc_bat dbat[8];
	u64 hid[6];
	u64 gqr[8];
	u64 sdr1;
	u64 hior;
	u64 msr_mask;
	u64 vtb;
#ifdef CONFIG_PPC_BOOK3S_32
	u32 vsid_pool[VSID_POOL_SIZE];
	u32 vsid_next;
#else
	u64 proto_vsid_first;
	u64 proto_vsid_max;
	u64 proto_vsid_next;
#endif
	int context_id[SID_CONTEXTS];

	bool hior_explicit;		/* HIOR is set by ioctl, not PVR */

	struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
#ifdef CONFIG_PPC_BOOK3S_64
	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
#endif
	int hpte_cache_count;
	spinlock_t mmu_lock;
};

#define VSID_REAL	0x07ffffffffc00000ULL
#define VSID_BAT	0x07ffffffffb00000ULL
#define VSID_64K	0x0800000000000000ULL
#define VSID_1T		0x1000000000000000ULL
#define VSID_REAL_DR	0x2000000000000000ULL
#define VSID_REAL_IR	0x4000000000000000ULL
#define VSID_PR		0x8000000000000000ULL

extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask);
extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool iswrite);
extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu, unsigned long addr,
			unsigned long status);
extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
			unsigned long slb_v, unsigned long valid);
extern int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned long gpa, gva_t ea, int is_store);

extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
extern int kvmppc_mmu_hpte_sysinit(void);
extern void kvmppc_mmu_hpte_sysexit(void);
extern int kvmppc_mmu_hv_init(void);
extern int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hc);

extern int kvmppc_book3s_radix_page_fault(struct kvm_run *run,
			struct kvm_vcpu *vcpu,
			unsigned long ea, unsigned long dsisr);
extern int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite);
extern int kvmppc_init_vm_radix(struct kvm *kvm);
extern void kvmppc_free_radix(struct kvm *kvm);
extern int kvmppc_radix_init(void);
extern void kvmppc_radix_exit(void);
extern int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
			unsigned long gfn);
extern long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);

/* XXX remove this export when load_last_inst() is generic */
extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr, bool data);
extern void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec);
extern void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec);
extern void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags);
extern void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac);
extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa,
			bool writing, bool *writable);
extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
extern void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot,
			unsigned long gfn, unsigned long psize);
extern void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
extern void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index);
extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
			unsigned long *nb_ret);
extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr,
			unsigned long gpa, bool dirty);
extern long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh, unsigned long ptel,
			pgd_t *pgdir, bool realmode, unsigned long *idx_ret);
extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret);
extern long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
extern void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			struct kvm_memory_slot *memslot,
			unsigned long *map);
extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
			unsigned long mask);
extern void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr);

extern int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu);
extern int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu);
extern void kvmhv_emulate_tm_rollback(struct kvm_vcpu *vcpu);

extern void kvmppc_entry_trampoline(void);
extern void kvmppc_hv_entry_trampoline(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
extern void kvmppc_pr_init_default_hcalls(struct kvm *kvm);
extern int kvmppc_hcall_impl_pr(unsigned long cmd);
extern int kvmppc_hcall_impl_hv_realmode(unsigned long cmd);
extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu);
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu);
void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu);
#else
static inline void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu) {}
static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu) {}
#endif

void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

extern int kvm_irq_bypass;

static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.book3s;
}

/* Also add subarch specific defines */

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
#include <asm/kvm_book3s_32.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_64.h>
#endif

static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
{
	vcpu->arch.regs.gpr[num] = val;
}

static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
{
	return vcpu->arch.regs.gpr[num];
}

static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
{
	vcpu->arch.cr = val;
}

static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr;
}

static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.xer = val;
}

static inline ulong kvmppc_get_xer(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.xer;
}

static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.ctr = val;
}

static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.ctr;
}

static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.link = val;
}

static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.link;
}

static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
{
	vcpu->arch.regs.nip = val;
}

static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.regs.nip;
}

static inline u64 kvmppc_get_msr(struct kvm_vcpu *vcpu);
static inline bool kvmppc_need_byteswap(struct kvm_vcpu *vcpu)
{
	return (kvmppc_get_msr(vcpu) & MSR_LE) != (MSR_KERNEL & MSR_LE);
}

static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault_dar;
}

static inline bool is_kvmppc_resume_guest(int r)
{
	return (r == RESUME_GUEST || r == RESUME_GUEST_NV);
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm);
static inline bool kvmppc_supports_magic_page(struct kvm_vcpu *vcpu)
{
	/* Only PR KVM supports the magic page */
	return !is_kvmppc_hv_enabled(vcpu->kvm);
}

extern int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu);
extern int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu);

/* Magic register values loaded into r3 and r4 before the 'sc' assembly
 * instruction for the OSI hypercalls */
#define OSI_SC_MAGIC_R3			0x113724FA
#define OSI_SC_MAGIC_R4			0x77810F9B
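/*
 * Illustrative guest-side sequence (an assumption, not defined by this
 * header): the guest loads the two magic values and executes 'sc'. With
 * OSI enabled, KVM PR recognizes the pair and exits to userspace
 * (KVM_EXIT_OSI) instead of treating it as a guest system call:
 *
 *	register unsigned long r3 asm("r3") = OSI_SC_MAGIC_R3;
 *	register unsigned long r4 asm("r4") = OSI_SC_MAGIC_R4;
 *
 *	asm volatile("sc" : "+r" (r3), "+r" (r4) : : "memory");
 */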

#define INS_DCBZ			0x7c0007ec
/* TO = 31 for unconditional trap */
#define INS_TW				0x7fe00008

/* LPIDs we support with this build -- runtime limit may be lower */
#define KVMPPC_NR_LPIDS			(LPID_RSVD + 1)

#define SPLIT_HACK_MASK			0xff000000
#define SPLIT_HACK_OFFS			0xfb000000

/*
 * This packs a VCPU ID from the [0..KVM_MAX_VCPU_ID) space down to the
 * [0..KVM_MAX_VCPUS) space, using knowledge of the guest's core stride
 * (but not its actual threading mode, which is not available) to avoid
 * collisions.
 *
 * The implementation leaves VCPU IDs from the range [0..KVM_MAX_VCPUS) (block
 * 0) unchanged: if the guest is filling each VCORE completely then it will be
 * using consecutive IDs and it will fill the space without any packing.
 *
 * For higher VCPU IDs, the packed ID is based on the VCPU ID modulo
 * KVM_MAX_VCPUS (effectively masking off the top bits) and then an offset is
 * added to avoid collisions.
 *
 * VCPU IDs in the range [KVM_MAX_VCPUS..(KVM_MAX_VCPUS*2)) (block 1) are only
 * possible if the guest is leaving at least 1/2 of each VCORE empty, so IDs
 * can be safely packed into the second half of each VCORE by adding an offset
 * of (stride / 2).
 *
 * Similarly, if VCPU IDs in the range [(KVM_MAX_VCPUS*2)..(KVM_MAX_VCPUS*4))
 * (blocks 2 and 3) are seen, the guest must be leaving at least 3/4 of each
 * VCORE empty so packed IDs can be offset by (stride / 4) and (stride * 3 / 4).
 *
 * Finally, VCPU IDs from blocks 4..7 will only be seen if the guest is using a
 * stride of 8 and 1 thread per core so the remaining offsets of 1, 5, 3 and 7
 * must be free to use.
 *
 * (The offsets for each block are stored in block_offsets[], indexed by the
 * block number if the stride is 8. For cases where the guest's stride is less
 * than 8, we can re-use the block_offsets array by multiplying the block
 * number by (MAX_SMT_THREADS / stride) to reach the correct entry.)
 */
static inline u32 kvmppc_pack_vcpu_id(struct kvm *kvm, u32 id)
{
	const int block_offsets[MAX_SMT_THREADS] = {0, 4, 2, 6, 1, 5, 3, 7};
	int stride = kvm->arch.emul_smt_mode;
	int block = (id / KVM_MAX_VCPUS) * (MAX_SMT_THREADS / stride);
	u32 packed_id;

	if (WARN_ONCE(block >= MAX_SMT_THREADS, "VCPU ID too large to pack"))
		return 0;
	packed_id = (id % KVM_MAX_VCPUS) + block_offsets[block];
	if (WARN_ONCE(packed_id >= KVM_MAX_VCPUS, "VCPU ID packing failed"))
		return 0;
	return packed_id;
}
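
/*
 * Worked example (the KVM_MAX_VCPUS value here is assumed for
 * illustration): with KVM_MAX_VCPUS = 2048 and an emulated SMT mode
 * (stride) of 8, guest VCPU ID 2050 lands in block 1, so:
 *
 *	block     = (2050 / 2048) * (8 / 8)          = 1
 *	packed_id = (2050 % 2048) + block_offsets[1] = 2 + 4 = 6
 *
 * i.e. the ID is folded into the otherwise-unused second half of a VCORE.
 */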

#endif /* __ASM_KVM_BOOK3S_H__ */