/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2016 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>

/*
 * Supported radix tree geometry.
 * Like p9, we support either 5 or 9 bits at the first (lowest) level,
 * for a page size of 64k or 4k.
 */
static int p9_supported_radix_bits[4] = { 5, 9, 9, 13 };
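
/*
 * Translate the guest effective address eaddr by walking the guest's own
 * (process-scoped) radix page tables: look up the tree root in the guest's
 * process table, then descend the tree, reading each level from guest
 * memory.  On success, fill in *gpte with the guest real address, page
 * size and access permissions.
 */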
int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
                           struct kvmppc_pte *gpte, bool data, bool iswrite)
{
        struct kvm *kvm = vcpu->kvm;
        u32 pid;
        int ret, level, ps;
        __be64 prte, rpte;
        unsigned long ptbl;
        unsigned long root, pte, index;
        unsigned long rts, bits, offset;
        unsigned long gpa;
        unsigned long proc_tbl_size;

        /* Work out effective PID */
        switch (eaddr >> 62) {
        case 0:
                pid = vcpu->arch.pid;
                break;
        case 3:
                pid = 0;
                break;
        default:
                return -EINVAL;
        }
        proc_tbl_size = 1 << ((kvm->arch.process_table & PRTS_MASK) + 12);
        if (pid * 16 >= proc_tbl_size)
                return -EINVAL;

        /* Read the guest's process table to find the root of the tree for the effective PID */
        ptbl = (kvm->arch.process_table & PRTB_MASK) + (pid * 16);
        ret = kvm_read_guest(kvm, ptbl, &prte, sizeof(prte));
        if (ret)
                return ret;

        root = be64_to_cpu(prte);
        rts = ((root & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
                ((root & RTS2_MASK) >> RTS2_SHIFT);
        bits = root & RPDS_MASK;
        root = root & RPDB_MASK;

        offset = rts + 31;

        /* current implementations only support 52-bit space */
        if (offset != 52)
                return -EINVAL;

        for (level = 3; level >= 0; --level) {
                if (level && bits != p9_supported_radix_bits[level])
                        return -EINVAL;
                if (level == 0 && !(bits == 5 || bits == 9))
                        return -EINVAL;
                offset -= bits;
                index = (eaddr >> offset) & ((1UL << bits) - 1);
                /* check that low bits of page table base are zero */
                if (root & ((1UL << (bits + 3)) - 1))
                        return -EINVAL;
                ret = kvm_read_guest(kvm, root + index * 8,
                                     &rpte, sizeof(rpte));
                if (ret)
                        return ret;
                pte = __be64_to_cpu(rpte);
                if (!(pte & _PAGE_PRESENT))
                        return -ENOENT;
                if (pte & _PAGE_PTE)
                        break;
                bits = pte & 0x1f;
                root = pte & 0x0fffffffffffff00ul;
        }
        /* need a leaf at lowest level; 512GB pages not supported */
        if (level < 0 || level == 3)
                return -EINVAL;

        /* offset is now log base 2 of the page size */
        gpa = pte & 0x01fffffffffff000ul;
        if (gpa & ((1ul << offset) - 1))
                return -EINVAL;
        gpa += eaddr & ((1ul << offset) - 1);
        for (ps = MMU_PAGE_4K; ps < MMU_PAGE_COUNT; ++ps)
                if (offset == mmu_psize_defs[ps].shift)
                        break;
        gpte->page_size = ps;

        gpte->eaddr = eaddr;
        gpte->raddr = gpa;

        /* Work out permissions */
        gpte->may_read = !!(pte & _PAGE_READ);
        gpte->may_write = !!(pte & _PAGE_WRITE);
        gpte->may_execute = !!(pte & _PAGE_EXEC);
        if (kvmppc_get_msr(vcpu) & MSR_PR) {
                if (pte & _PAGE_PRIVILEGED) {
                        gpte->may_read = 0;
                        gpte->may_write = 0;
                        gpte->may_execute = 0;
                }
        } else {
                if (!(pte & _PAGE_PRIVILEGED)) {
                        /* Check AMR/IAMR to see if strict mode is in force */
                        if (vcpu->arch.amr & (1ul << 62))
                                gpte->may_read = 0;
                        if (vcpu->arch.amr & (1ul << 63))
                                gpte->may_write = 0;
                        if (vcpu->arch.iamr & (1ul << 62))
                                gpte->may_execute = 0;
                }
        }

        return 0;
}
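
/*
 * Invalidate any TLB entries for the guest real address range covered by
 * the partition-scoped PTE at addr (page size 1 << pshift, or PAGE_SIZE if
 * pshift is zero), for this guest's LPID.
 */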
static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
                                    unsigned int pshift)
{
        unsigned long psize = PAGE_SIZE;

        if (pshift)
                psize = 1UL << pshift;

        addr &= ~(psize - 1);
        radix__flush_tlb_lpid_page(kvm->arch.lpid, addr, psize);
}

static void kvmppc_radix_flush_pwc(struct kvm *kvm)
{
        radix__flush_pwc_lpid(kvm->arch.lpid);
}
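
/*
 * Atomically clear the clr bits and set the set bits in a partition-scoped
 * PTE, returning the old PTE value.  The addr and shift arguments describe
 * the mapping being modified but are not used here.
 */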
static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
                                unsigned long clr, unsigned long set,
                                unsigned long addr, unsigned int shift)
{
        return __radix_pte_update(ptep, clr, set);
}

void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
                             pte_t *ptep, pte_t pte)
{
        radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
}

static struct kmem_cache *kvm_pte_cache;
static struct kmem_cache *kvm_pmd_cache;

static pte_t *kvmppc_pte_alloc(void)
{
        return kmem_cache_alloc(kvm_pte_cache, GFP_KERNEL);
}

static void kvmppc_pte_free(pte_t *ptep)
{
        kmem_cache_free(kvm_pte_cache, ptep);
}

/* Like pmd_huge() and pmd_large(), but works regardless of config options */
static inline int pmd_is_leaf(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_PTE);
}

static pmd_t *kvmppc_pmd_alloc(void)
{
        return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
}

static void kvmppc_pmd_free(pmd_t *pmdp)
{
        kmem_cache_free(kvm_pmd_cache, pmdp);
}
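
/*
 * Clear the partition-scoped PTE for gpa and invalidate the TLB; if the
 * page had been dirtied, transfer the dirty bit to the memslot's dirty
 * bitmap.
 */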
static void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte,
                             unsigned long gpa, unsigned int shift)
{
        unsigned long page_size = 1ul << shift;
        unsigned long old;

        old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift);
        kvmppc_radix_tlbie_page(kvm, gpa, shift);
        if (old & _PAGE_DIRTY) {
                unsigned long gfn = gpa >> PAGE_SHIFT;
                struct kvm_memory_slot *memslot;

                memslot = gfn_to_memslot(kvm, gfn);
                if (memslot && memslot->dirty_bitmap)
                        kvmppc_update_dirty_map(memslot, gfn, page_size);
        }
}

/*
 * The kvmppc_unmap_free_p?d functions free existing page tables; they
 * recursively descend, clearing and freeing any child tables.
 * Callers are responsible for flushing the PWC.
 *
 * When page tables are being unmapped/freed as part of the page fault path
 * (full == false), valid ptes are not expected.  There is code to unmap
 * them and emit a warning if encountered, but there may already be data
 * corruption due to the unexpected mappings.
 */
static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full)
{
        if (full) {
                memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
        } else {
                pte_t *p = pte;
                unsigned long it;

                for (it = 0; it < PTRS_PER_PTE; ++it, ++p) {
                        if (pte_val(*p) == 0)
                                continue;
                        WARN_ON_ONCE(1);
                        kvmppc_unmap_pte(kvm, p,
                                         pte_pfn(*p) << PAGE_SHIFT,
                                         PAGE_SHIFT);
                }
        }

        kvmppc_pte_free(pte);
}

static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full)
{
        unsigned long im;
        pmd_t *p = pmd;

        for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
                if (!pmd_present(*p))
                        continue;
                if (pmd_is_leaf(*p)) {
                        if (full) {
                                pmd_clear(p);
                        } else {
                                WARN_ON_ONCE(1);
                                kvmppc_unmap_pte(kvm, (pte_t *)p,
                                        pte_pfn(*(pte_t *)p) << PAGE_SHIFT,
                                        PMD_SHIFT);
                        }
                } else {
                        pte_t *pte;

                        pte = pte_offset_map(p, 0);
                        kvmppc_unmap_free_pte(kvm, pte, full);
                        pmd_clear(p);
                }
        }
        kvmppc_pmd_free(pmd);
}

static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud)
{
        unsigned long iu;
        pud_t *p = pud;

        for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
                if (!pud_present(*p))
                        continue;
                if (pud_huge(*p)) {
                        pud_clear(p);
                } else {
                        pmd_t *pmd;

                        pmd = pmd_offset(p, 0);
                        kvmppc_unmap_free_pmd(kvm, pmd, true);
                        pud_clear(p);
                }
        }
        pud_free(kvm->mm, pud);
}
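
/* Free the guest's entire partition-scoped radix page table. */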
void kvmppc_free_radix(struct kvm *kvm)
{
        unsigned long ig;
        pgd_t *pgd;

        if (!kvm->arch.pgtable)
                return;
        pgd = kvm->arch.pgtable;
        for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) {
                pud_t *pud;

                if (!pgd_present(*pgd))
                        continue;
                pud = pud_offset(pgd, 0);
                kvmppc_unmap_free_pud(kvm, pud);
                pgd_clear(pgd);
        }
        pgd_free(kvm->mm, kvm->arch.pgtable);
        kvm->arch.pgtable = NULL;
}
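
/*
 * The two helpers below free a lower-level page table whose entry is being
 * replaced (e.g. by a large-page PTE): the entry pointing to it is cleared
 * and the PWC is flushed before the table itself is unmapped and freed.
 */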
static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
                                              unsigned long gpa)
{
        pte_t *pte = pte_offset_kernel(pmd, 0);

        /*
         * Clearing the pmd entry then flushing the PWC ensures that the pte
         * page will no longer be cached by the MMU, so it can be freed
         * without flushing the PWC again.
         */
        pmd_clear(pmd);
        kvmppc_radix_flush_pwc(kvm);

        kvmppc_unmap_free_pte(kvm, pte, false);
}

static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
                                              unsigned long gpa)
{
        pmd_t *pmd = pmd_offset(pud, 0);

        /*
         * Clearing the pud entry then flushing the PWC ensures that the pmd
         * page and any children pte pages will no longer be cached by the
         * MMU, so they can be freed without flushing the PWC again.
         */
        pud_clear(pud);
        kvmppc_radix_flush_pwc(kvm);

        kvmppc_unmap_free_pmd(kvm, pmd, false);
}

/*
 * A number of bits may differ between different faults on the same
 * partition-scope entry: the R and C bits, in the course of cleaning and
 * aging, and the write bit, which can change either because the access was
 * upgraded or because a read fault raced with a write fault that set those
 * bits first.
 */
#define PTE_BITS_MUST_MATCH (~(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED))
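
/*
 * Install pte in the partition-scoped tree at gpa for the given level
 * (0 = PTE, 1 = 2MB PMD leaf, 2 = 1GB PUD leaf), allocating any missing
 * intermediate tables.  Allocations are done before taking kvm->mmu_lock;
 * the tree is then re-walked under the lock so that races with other CPUs
 * and with MMU-notifier invalidations (checked via mmu_seq) are handled.
 */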
static int kvmppc_create_pte(struct kvm *kvm, pte_t pte, unsigned long gpa,
                             unsigned int level, unsigned long mmu_seq)
{
        pgd_t *pgd;
        pud_t *pud, *new_pud = NULL;
        pmd_t *pmd, *new_pmd = NULL;
        pte_t *ptep, *new_ptep = NULL;
        int ret;

        /* Traverse the guest's 2nd-level tree, allocate new levels needed */
        pgd = kvm->arch.pgtable + pgd_index(gpa);
        pud = NULL;
        if (pgd_present(*pgd))
                pud = pud_offset(pgd, gpa);
        else
                new_pud = pud_alloc_one(kvm->mm, gpa);

        pmd = NULL;
        if (pud && pud_present(*pud) && !pud_huge(*pud))
                pmd = pmd_offset(pud, gpa);
        else if (level <= 1)
                new_pmd = kvmppc_pmd_alloc();

        if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
                new_ptep = kvmppc_pte_alloc();

        /* Check if we might have been invalidated; let the guest retry if so */
        spin_lock(&kvm->mmu_lock);
        ret = -EAGAIN;
        if (mmu_notifier_retry(kvm, mmu_seq))
                goto out_unlock;

        /* Now traverse again under the lock and change the tree */
        ret = -ENOMEM;
        if (pgd_none(*pgd)) {
                if (!new_pud)
                        goto out_unlock;
                pgd_populate(kvm->mm, pgd, new_pud);
                new_pud = NULL;
        }
        pud = pud_offset(pgd, gpa);
        if (pud_huge(*pud)) {
                unsigned long hgpa = gpa & PUD_MASK;

                /* Check if we raced and someone else has set the same thing */
                if (level == 2) {
                        if (pud_raw(*pud) == pte_raw(pte)) {
                                ret = 0;
                                goto out_unlock;
                        }
                        /* Valid 1GB page here already, add our extra bits */
                        WARN_ON_ONCE((pud_val(*pud) ^ pte_val(pte)) &
                                     PTE_BITS_MUST_MATCH);
                        kvmppc_radix_update_pte(kvm, (pte_t *)pud,
                                        0, pte_val(pte), hgpa, PUD_SHIFT);
                        ret = 0;
                        goto out_unlock;
                }
                /*
                 * If we raced with another CPU which has just put
                 * a 1GB pte in after we saw a pmd page, try again.
                 */
                if (!new_pmd) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
                /* Valid 1GB page here already, remove it */
                kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT);
        }
        if (level == 2) {
                if (!pud_none(*pud)) {
                        /*
                         * There's a page table page here, but we wanted to
                         * install a large page, so remove and free the page
                         * table page.
                         */
                        kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa);
                }
                kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte);
                ret = 0;
                goto out_unlock;
        }
        if (pud_none(*pud)) {
                if (!new_pmd)
                        goto out_unlock;
                pud_populate(kvm->mm, pud, new_pmd);
                new_pmd = NULL;
        }
        pmd = pmd_offset(pud, gpa);
        if (pmd_is_leaf(*pmd)) {
                unsigned long lgpa = gpa & PMD_MASK;

                /* Check if we raced and someone else has set the same thing */
                if (level == 1) {
                        if (pmd_raw(*pmd) == pte_raw(pte)) {
                                ret = 0;
                                goto out_unlock;
                        }
                        /* Valid 2MB page here already, add our extra bits */
                        WARN_ON_ONCE((pmd_val(*pmd) ^ pte_val(pte)) &
                                     PTE_BITS_MUST_MATCH);
                        kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd),
                                        0, pte_val(pte), lgpa, PMD_SHIFT);
                        ret = 0;
                        goto out_unlock;
                }

                /*
                 * If we raced with another CPU which has just put
                 * a 2MB pte in after we saw a pte page, try again.
                 */
                if (!new_ptep) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }
                /* Valid 2MB page here already, remove it */
                kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT);
        }
        if (level == 1) {
                if (!pmd_none(*pmd)) {
                        /*
                         * There's a page table page here, but we wanted to
                         * install a large page, so remove and free the page
                         * table page.
                         */
                        kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa);
                }
                kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte);
                ret = 0;
                goto out_unlock;
        }
        if (pmd_none(*pmd)) {
                if (!new_ptep)
                        goto out_unlock;
                pmd_populate(kvm->mm, pmd, new_ptep);
                new_ptep = NULL;
        }
        ptep = pte_offset_kernel(pmd, gpa);
        if (pte_present(*ptep)) {
                /* Check if someone else set the same thing */
                if (pte_raw(*ptep) == pte_raw(pte)) {
                        ret = 0;
                        goto out_unlock;
                }
                /* Valid page here already, add our extra bits */
                WARN_ON_ONCE((pte_val(*ptep) ^ pte_val(pte)) &
                             PTE_BITS_MUST_MATCH);
                kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0);
                ret = 0;
                goto out_unlock;
        }
        kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte);
        ret = 0;

 out_unlock:
        spin_unlock(&kvm->mmu_lock);
        if (new_pud)
                pud_free(kvm->mm, new_pud);
        if (new_pmd)
                kvmppc_pmd_free(new_pmd);
        if (new_ptep)
                kvmppc_pte_free(new_ptep);
        return ret;
}
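
/*
 * Handle a hypervisor-level page fault for a radix guest: emulate MMIO if
 * there is no memslot for the faulting address, reflect bad accesses back
 * to the guest as DSIs, set R/C bits when that is all the hardware needed,
 * and otherwise find or allocate a host page and install a PTE for it in
 * the partition-scoped tree.
 */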
int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   unsigned long ea, unsigned long dsisr)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long mmu_seq;
        unsigned long gpa, gfn, hva;
        struct kvm_memory_slot *memslot;
        struct page *page = NULL;
        long ret;
        bool writing;
        bool upgrade_write = false;
        bool *upgrade_p = &upgrade_write;
        pte_t pte, *ptep;
        unsigned long pgflags;
        unsigned int shift, level;

        /* Check for unusual errors */
        if (dsisr & DSISR_UNSUPP_MMU) {
                pr_err("KVM: Got unsupported MMU fault\n");
                return -EFAULT;
        }
        if (dsisr & DSISR_BADACCESS) {
                /* Reflect to the guest as DSI */
                pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
                kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
                return RESUME_GUEST;
        }

        /* Translate the logical address and get the page */
        gpa = vcpu->arch.fault_gpa & ~0xfffUL;
        gpa &= ~0xF000000000000000ul;
        gfn = gpa >> PAGE_SHIFT;
        if (!(dsisr & DSISR_PRTABLE_FAULT))
                gpa |= ea & 0xfff;
        memslot = gfn_to_memslot(kvm, gfn);

        /* No memslot means it's an emulated MMIO region */
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
                if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS |
                             DSISR_SET_RC)) {
                        /*
                         * Bad address in guest page table tree, or other
                         * unusual error - reflect it to the guest as DSI.
                         */
                        kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
                        return RESUME_GUEST;
                }
                return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
                                              dsisr & DSISR_ISSTORE);
        }

        writing = (dsisr & DSISR_ISSTORE) != 0;
        if (memslot->flags & KVM_MEM_READONLY) {
                if (writing) {
                        /* give the guest a DSI */
                        dsisr = DSISR_ISSTORE | DSISR_PROTFAULT;
                        kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
                        return RESUME_GUEST;
                }
                upgrade_p = NULL;
        }

        if (dsisr & DSISR_SET_RC) {
                /*
                 * Need to set an R or C bit in the 2nd-level tables;
                 * since we are just helping out the hardware here,
                 * it is sufficient to do what the hardware does.
                 */
                pgflags = _PAGE_ACCESSED;
                if (writing)
                        pgflags |= _PAGE_DIRTY;
                /*
                 * We are walking the secondary page table here. We can do this
                 * without disabling irq.
                 */
                spin_lock(&kvm->mmu_lock);
                ptep = __find_linux_pte(kvm->arch.pgtable,
                                        gpa, NULL, &shift);
                if (ptep && pte_present(*ptep) &&
                    (!writing || pte_write(*ptep))) {
                        kvmppc_radix_update_pte(kvm, ptep, 0, pgflags,
                                                gpa, shift);
                        dsisr &= ~DSISR_SET_RC;
                }
                spin_unlock(&kvm->mmu_lock);
                if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
                               DSISR_PROTFAULT | DSISR_SET_RC)))
                        return RESUME_GUEST;
        }

        /* used to check for invalidations in progress */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /*
         * Do a fast check first, since __gfn_to_pfn_memslot doesn't
         * do it with !atomic && !async, which is how we call it.
         * We always ask for write permission since the common case
         * is that the page is writable.
         */
        hva = gfn_to_hva_memslot(memslot, gfn);
        if (upgrade_p && __get_user_pages_fast(hva, 1, 1, &page) == 1) {
                upgrade_write = true;
        } else {
                unsigned long pfn;

                /* Call KVM generic code to do the slow-path check */
                pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
                                           writing, upgrade_p);
                if (is_error_noslot_pfn(pfn))
                        return -EFAULT;
                page = NULL;
                if (pfn_valid(pfn)) {
                        page = pfn_to_page(pfn);
                        if (PageReserved(page))
                                page = NULL;
                }
        }

        /*
         * Read the PTE from the process' radix tree and use that
         * so we get the shift and attribute bits.
         */
        local_irq_disable();
        ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
        /*
         * If the PTE disappeared temporarily due to a THP
         * collapse, just return and let the guest try again.
         */
        if (!ptep) {
                local_irq_enable();
                if (page)
                        put_page(page);
                return RESUME_GUEST;
        }
        pte = *ptep;
        local_irq_enable();

        /* Get pte level from shift/size */
        if (shift == PUD_SHIFT &&
            (gpa & (PUD_SIZE - PAGE_SIZE)) ==
            (hva & (PUD_SIZE - PAGE_SIZE))) {
                level = 2;
        } else if (shift == PMD_SHIFT &&
                   (gpa & (PMD_SIZE - PAGE_SIZE)) ==
                   (hva & (PMD_SIZE - PAGE_SIZE))) {
                level = 1;
        } else {
                level = 0;
                if (shift > PAGE_SHIFT) {
                        /*
                         * If the pte maps more than one page, bring over
                         * bits from the virtual address to get the real
                         * address of the specific single page we want.
                         */
                        unsigned long rpnmask = (1ul << shift) - PAGE_SIZE;
                        pte = __pte(pte_val(pte) | (hva & rpnmask));
                }
        }

        pte = __pte(pte_val(pte) | _PAGE_EXEC | _PAGE_ACCESSED);
        if (writing || upgrade_write) {
                if (pte_val(pte) & _PAGE_WRITE)
                        pte = __pte(pte_val(pte) | _PAGE_DIRTY);
        } else {
                pte = __pte(pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY));
        }

        /* Allocate space in the tree and write the PTE */
        ret = kvmppc_create_pte(kvm, pte, gpa, level, mmu_seq);

        if (page) {
                if (!ret && (pte_val(pte) & _PAGE_WRITE))
                        set_page_dirty_lock(page);
                put_page(page);
        }

        if (ret == 0 || ret == -EAGAIN)
                ret = RESUME_GUEST;
        return ret;
}

/* Called with kvm->lock held */
int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                    unsigned long gfn)
{
        pte_t *ptep;
        unsigned long gpa = gfn << PAGE_SHIFT;
        unsigned int shift;
        unsigned long old;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep)) {
                old = kvmppc_radix_update_pte(kvm, ptep, ~0UL, 0,
                                              gpa, shift);
                kvmppc_radix_tlbie_page(kvm, gpa, shift);
                if ((old & _PAGE_DIRTY) && memslot->dirty_bitmap) {
                        unsigned long psize = PAGE_SIZE;
                        if (shift)
                                psize = 1ul << shift;
                        kvmppc_update_dirty_map(memslot, gfn, psize);
                }
        }
        return 0;
}

/* Called with kvm->lock held */
int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                  unsigned long gfn)
{
        pte_t *ptep;
        unsigned long gpa = gfn << PAGE_SHIFT;
        unsigned int shift;
        int ref = 0;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
                kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
                                        gpa, shift);
                /* XXX need to flush tlb here? */
                ref = 1;
        }
        return ref;
}

/* Called with kvm->lock held */
int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
                       unsigned long gfn)
{
        pte_t *ptep;
        unsigned long gpa = gfn << PAGE_SHIFT;
        unsigned int shift;
        int ref = 0;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_young(*ptep))
                ref = 1;
        return ref;
}

/* Returns the number of PAGE_SIZE pages that are dirty */
static int kvm_radix_test_clear_dirty(struct kvm *kvm,
                                struct kvm_memory_slot *memslot, int pagenum)
{
        unsigned long gfn = memslot->base_gfn + pagenum;
        unsigned long gpa = gfn << PAGE_SHIFT;
        pte_t *ptep;
        unsigned int shift;
        int ret = 0;

        ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
        if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
                ret = 1;
                if (shift)
                        ret = 1 << (shift - PAGE_SHIFT);
                kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
                                        gpa, shift);
                kvmppc_radix_tlbie_page(kvm, gpa, shift);
        }
        return ret;
}
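
/*
 * Harvest and clear the dirty bits for a whole memslot, setting the
 * corresponding bits in the dirty bitmap (one bit per PAGE_SIZE page).
 */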
long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm,
                        struct kvm_memory_slot *memslot, unsigned long *map)
{
        unsigned long i, j;
        int npages;

        for (i = 0; i < memslot->npages; i = j) {
                npages = kvm_radix_test_clear_dirty(kvm, memslot, i);

                /*
                 * Note that if npages > 0 then i must be a multiple of npages,
                 * since huge pages are only used to back the guest at guest
                 * real addresses that are a multiple of their size.
                 * Since we have at most one PTE covering any given guest
                 * real address, if npages > 1 we can skip to i + npages.
                 */
                j = i + 1;
                if (npages) {
                        set_dirty_bits(map, i, npages);
                        j = i + npages;
                }
        }
        return 0;
}
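
/*
 * Record the radix "AP" encoding (page shift plus AP field) for a supported
 * page size in info->ap_encodings.
 */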
static void add_rmmu_ap_encoding(struct kvm_ppc_rmmu_info *info,
                                 int psize, int *indexp)
{
        if (!mmu_psize_defs[psize].shift)
                return;
        info->ap_encodings[*indexp] = mmu_psize_defs[psize].shift |
                (mmu_psize_defs[psize].ap << 29);
        ++(*indexp);
}
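
/*
 * Fill in the supported radix geometries and AP encodings reported to
 * userspace via the KVM_PPC_GET_RMMU_INFO ioctl.
 */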
int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info)
{
        int i;

        if (!radix_enabled())
                return -EINVAL;
        memset(info, 0, sizeof(*info));

        /* 4k page size */
        info->geometries[0].page_shift = 12;
        info->geometries[0].level_bits[0] = 9;
        for (i = 1; i < 4; ++i)
                info->geometries[0].level_bits[i] = p9_supported_radix_bits[i];
        /* 64k page size */
        info->geometries[1].page_shift = 16;
        for (i = 0; i < 4; ++i)
                info->geometries[1].level_bits[i] = p9_supported_radix_bits[i];

        i = 0;
        add_rmmu_ap_encoding(info, MMU_PAGE_4K, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_64K, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_2M, &i);
        add_rmmu_ap_encoding(info, MMU_PAGE_1G, &i);

        return 0;
}

int kvmppc_init_vm_radix(struct kvm *kvm)
{
        kvm->arch.pgtable = pgd_alloc(kvm->mm);
        if (!kvm->arch.pgtable)
                return -ENOMEM;
        return 0;
}

static void pte_ctor(void *addr)
{
        memset(addr, 0, RADIX_PTE_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
        memset(addr, 0, RADIX_PMD_TABLE_SIZE);
}
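
/*
 * Create the kmem caches used for partition-scoped PTE and PMD table pages.
 * The object size is also used as the alignment so that table pages are
 * naturally aligned.
 */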
int kvmppc_radix_init(void)
{
        unsigned long size = sizeof(void *) << RADIX_PTE_INDEX_SIZE;

        kvm_pte_cache = kmem_cache_create("kvm-pte", size, size, 0, pte_ctor);
        if (!kvm_pte_cache)
                return -ENOMEM;

        size = sizeof(void *) << RADIX_PMD_INDEX_SIZE;

        kvm_pmd_cache = kmem_cache_create("kvm-pmd", size, size, 0, pmd_ctor);
        if (!kvm_pmd_cache) {
                kmem_cache_destroy(kvm_pte_cache);
                return -ENOMEM;
        }

        return 0;
}

void kvmppc_radix_exit(void)
{
        kmem_cache_destroy(kvm_pte_cache);
        kmem_cache_destroy(kvm_pmd_cache);
}