// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/32/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "book3s.h"

/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while (0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif

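/*
 * Location and mask of the host hash table, read from SDR1 in
 * kvmppc_mmu_init_pr() below.
 */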
static ulong htab;
static u32 htabmask;

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        volatile u32 *pteg;

        /* Remove from host HTAB */
        pteg = (u32 *)pte->slot;
        pteg[0] = 0;

        /*
         * And make sure it's gone from the TLB too: order the invalidation
         * against the HTAB store, flush the translation, then wait for the
         * tlbie to complete.
         */
        asm volatile ("sync");
        asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
        asm volatile ("sync");
        asm volatile ("tlbsync");
}

/*
 * We keep 512 gvsid->hvsid entries, mapping the guest ones to the array
 * via a hash, so we don't waste cycles on looping: the 64-bit guest VSID
 * is folded down to a SID_MAP_BITS-wide index by XORing its slices
 * together.
 */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
                     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
        return NULL;
}

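/*
 * Compute the host PTEG address for a mapping: the classic 32-bit hashed
 * page table function, hash = vsid XOR page index, shifted left by 6
 * because each PTEG is 64 bytes wide. The secondary hash is the one's
 * complement of the primary one.
 */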
static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
                                bool primary)
{
        u32 page, hash;
        ulong pteg = htab;

        page = (eaddr & ~ESID_MASK) >> 12;

        hash = ((vsid ^ page) << 6);
        if (!primary)
                hash = ~hash;

        hash &= htabmask;

        pteg |= hash;

        dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
                    htab, hash, htabmask, pteg);

        return (u32 *)pteg;
}

extern char etext[];

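/*
 * Map one guest page into the host hash table: resolve the host pfn,
 * translate the guest VSID to a shadow VSID, pick a PTEG slot (evicting
 * an entry once a whole PTEG has been scanned without a free slot) and
 * remember the mapping in the shadow PTE cache so it can be invalidated
 * later.
 */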
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
                        bool iswrite)
{
        kvm_pfn_t hpaddr;
        u64 vpn;
        u64 vsid;
        struct kvmppc_sid_map *map;
        volatile u32 *pteg;
        u32 eaddr = orig_pte->eaddr;
        u32 pteg0, pteg1;
        register int rr = 0;
        bool primary = false;
        bool evict = false;
        struct hpte_cache *pte;
        int r = 0;
        bool writable;

        /* Get host physical address for gpa */
        hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
        if (is_error_noslot_pfn(hpaddr)) {
                printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
                       orig_pte->raddr);
                r = -EINVAL;
                goto out;
        }
        hpaddr <<= PAGE_SHIFT;

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                kvmppc_mmu_map_segment(vcpu, eaddr);
                map = find_sid_vsid(vcpu, vsid);
        }
        BUG_ON(!map);

        vsid = map->host_vsid;
        vpn = (vsid << (SID_SHIFT - VPN_SHIFT)) |
                ((eaddr & ~ESID_MASK) >> VPN_SHIFT);
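
        /*
         * Search for a free slot: each PTEG holds eight PTEs of two u32s
         * each, so rr advances in steps of two. Once one PTEG is full,
         * flip to the other one and evict whatever sits in its first slot.
         */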
next_pteg:
        if (rr == 16) {
                primary = !primary;
                evict = true;
                rr = 0;
        }

        pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

        /* not evicting yet */
        if (!evict && (pteg[rr] & PTE_V)) {
                rr += 2;
                goto next_pteg;
        }

        dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

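        /*
         * Build the two 32-bit HPTE words: word 0 holds the valid bit,
         * the VSID, the secondary-hash bit and the abbreviated page
         * index; word 1 holds the real page number plus the protection
         * and status bits (M, R, C).
         */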
        pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
                (primary ? 0 : PTE_SEC);
        pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

        if (orig_pte->may_write && writable) {
                pteg1 |= PP_RWRW;
                mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
        } else {
                pteg1 |= PP_RWRX;
        }

        if (orig_pte->may_execute)
                kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT);

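        /*
         * Install the entry with interrupts off: clear a previously valid
         * slot first, and write word 1 before word 0 so the hardware never
         * walks a half-written PTE (the valid bit only goes live with the
         * final store).
         */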
        local_irq_disable();

        if (pteg[rr]) {
                pteg[rr] = 0;
                asm volatile ("sync");
        }
        pteg[rr + 1] = pteg1;
        pteg[rr] = pteg0;
        asm volatile ("sync");

        local_irq_enable();

        dprintk_mmu("KVM: new PTEG: %p\n", pteg);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

        /* Now tell our Shadow PTE code about the new page */

        pte = kvmppc_mmu_hpte_cache_next(vcpu);
        if (!pte) {
                kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
                r = -EAGAIN;
                goto out;
        }

        dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
                    orig_pte->may_write ? 'w' : '-',
                    orig_pte->may_execute ? 'x' : '-',
                    orig_pte->eaddr, (ulong)pteg, vpn,
                    orig_pte->vpage, hpaddr);

        pte->slot = (ulong)&pteg[rr];
        pte->host_vpn = vpn;
        pte->pte = *orig_pte;
        pte->pfn = hpaddr >> PAGE_SHIFT;

        kvmppc_mmu_hpte_cache_map(vcpu, pte);

        kvm_release_pfn_clean(hpaddr >> PAGE_SHIFT);
out:
        return r;
}

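/*
 * Remove the shadow mappings for one guest virtual page; the flush by
 * vpage takes care of invalidating the host HTAB entries as well.
 */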
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        kvmppc_mmu_pte_vflush(vcpu, pte->vpage, 0xfffffffffULL);
}

static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (kvmppc_get_msr(vcpu) & MSR_PR)
                gvsid |= VSID_PR;

        /*
         * We might get collisions that trap in preceding order, so let's
         * map them differently.
         */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) {
                vcpu_book3s->vsid_next = 0;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }
        map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next];
        vcpu_book3s->vsid_next++;

        map->guest_vsid = gvsid;
        map->valid = true;

        return map;
}

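/*
 * Load one shadow segment register: translate the guest ESID to a guest
 * VSID, map that to a host VSID (creating a new mapping if necessary)
 * and stash the result in the shadow vcpu's SR array.
 */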
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        u32 esid = eaddr >> SID_SHIFT;
        u64 gvsid;
        u32 sr;
        struct kvmppc_sid_map *map;
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        int r = 0;

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->sr[esid] = SR_INVALID;
                r = -ENOENT;
                goto out;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;
        sr = map->host_vsid | SR_KP;
        svcpu->sr[esid] = sr;

        dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

out:
        svcpu_put(svcpu);
        return r;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

        dprintk_sr("MMU: flushing all segments (%zu)\n", ARRAY_SIZE(svcpu->sr));
        for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
                svcpu->sr[i] = SR_INVALID;

        svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
        int i;

        kvmppc_mmu_hpte_destroy(vcpu);
        preempt_disable();
        for (i = 0; i < SID_CONTEXTS; i++)
                __destroy_context(to_book3s(vcpu)->context_id[i]);
        preempt_enable();
}

/*
 * From mm/mmu_context_hash32.c; each context id yields 16 VSIDs, one per
 * segment register, which kvmppc_mmu_init_pr() below stashes in vsid_pool.
 */
#define CTX_TO_VSID(c, id)      ((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)

int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;
        ulong sdr1;
        int i;
        int j;

        for (i = 0; i < SID_CONTEXTS; i++) {
                err = __init_new_context();
                if (err < 0)
                        goto init_fail;
                vcpu3s->context_id[i] = err;

                /* Remember context id for this combination */
                for (j = 0; j < 16; j++)
                        vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j);
        }

        vcpu3s->vsid_next = 0;

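        /*
         * SDR1 holds the physical base of the hash table in its upper 16
         * bits and HTABMASK in its low 9 bits. HTABMASK selects which
         * extra hash bits participate in the PTEG offset; the constant
         * 0xFFC0 covers the ten hash bits that always do (each PTEG is
         * 64 bytes wide, so the low 6 bits stay clear).
         */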
        /* Remember where the HTAB is */
        asm ("mfsdr1 %0" : "=r"(sdr1));
        htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
        htab = (ulong)__va(sdr1 & 0xffff0000);

        kvmppc_mmu_hpte_init(vcpu);

        return 0;

init_fail:
        for (j = 0; j < i; j++) {
                if (!vcpu3s->context_id[j])
                        continue;

                __destroy_context(to_book3s(vcpu)->context_id[j]);
        }

        return -1;
}