1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * KVM guest address space mapping code
4 *
5 * Copyright IBM Corp. 2007, 2020
6 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * David Hildenbrand <david@redhat.com>
8 * Janosch Frank <frankja@linux.vnet.ibm.com>
9 */
10
11 #include <linux/kernel.h>
12 #include <linux/pagewalk.h>
13 #include <linux/swap.h>
14 #include <linux/smp.h>
15 #include <linux/spinlock.h>
16 #include <linux/slab.h>
17 #include <linux/swapops.h>
18 #include <linux/ksm.h>
19 #include <linux/mman.h>
20 #include <linux/pgtable.h>
21
22 #include <asm/pgalloc.h>
23 #include <asm/gmap.h>
24 #include <asm/tlb.h>
25
26 #define GMAP_SHADOW_FAKE_TABLE 1ULL
27
28 /**
29 * gmap_alloc - allocate and initialize a guest address space
30 * @limit: maximum address of the gmap address space
31 *
32 * Returns a guest address space structure.
33 */
static struct gmap *gmap_alloc(unsigned long limit)
35 {
36 struct gmap *gmap;
37 struct page *page;
38 unsigned long *table;
39 unsigned long etype, atype;
40
41 if (limit < _REGION3_SIZE) {
42 limit = _REGION3_SIZE - 1;
43 atype = _ASCE_TYPE_SEGMENT;
44 etype = _SEGMENT_ENTRY_EMPTY;
45 } else if (limit < _REGION2_SIZE) {
46 limit = _REGION2_SIZE - 1;
47 atype = _ASCE_TYPE_REGION3;
48 etype = _REGION3_ENTRY_EMPTY;
49 } else if (limit < _REGION1_SIZE) {
50 limit = _REGION1_SIZE - 1;
51 atype = _ASCE_TYPE_REGION2;
52 etype = _REGION2_ENTRY_EMPTY;
53 } else {
54 limit = -1UL;
55 atype = _ASCE_TYPE_REGION1;
56 etype = _REGION1_ENTRY_EMPTY;
57 }
58 gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL_ACCOUNT);
59 if (!gmap)
60 goto out;
61 INIT_LIST_HEAD(&gmap->crst_list);
62 INIT_LIST_HEAD(&gmap->children);
63 INIT_LIST_HEAD(&gmap->pt_list);
64 INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL_ACCOUNT);
65 INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC | __GFP_ACCOUNT);
66 INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC | __GFP_ACCOUNT);
67 spin_lock_init(&gmap->guest_table_lock);
68 spin_lock_init(&gmap->shadow_lock);
69 refcount_set(&gmap->ref_count, 1);
70 page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
71 if (!page)
72 goto out_free;
73 page->index = 0;
74 list_add(&page->lru, &gmap->crst_list);
75 table = (unsigned long *) page_to_phys(page);
76 crst_table_init(table, etype);
77 gmap->table = table;
78 gmap->asce = atype | _ASCE_TABLE_LENGTH |
79 _ASCE_USER_BITS | __pa(table);
80 gmap->asce_end = limit;
81 return gmap;
82
83 out_free:
84 kfree(gmap);
85 out:
86 return NULL;
87 }
88
89 /**
90 * gmap_create - create a guest address space
91 * @mm: pointer to the parent mm_struct
92 * @limit: maximum size of the gmap address space
93 *
94 * Returns a guest address space structure.
95 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
97 {
98 struct gmap *gmap;
99 unsigned long gmap_asce;
100
101 gmap = gmap_alloc(limit);
102 if (!gmap)
103 return NULL;
104 gmap->mm = mm;
105 spin_lock(&mm->context.lock);
106 list_add_rcu(&gmap->list, &mm->context.gmap_list);
107 if (list_is_singular(&mm->context.gmap_list))
108 gmap_asce = gmap->asce;
109 else
110 gmap_asce = -1UL;
111 WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
112 spin_unlock(&mm->context.lock);
113 return gmap;
114 }
115 EXPORT_SYMBOL_GPL(gmap_create);
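
/*
 * Illustrative usage sketch (not part of the original code): a
 * hypervisor backend such as KVM typically creates one gmap per guest
 * and tears it down again when the guest goes away. The limit below
 * is only an example value.
 *
 *	struct gmap *guest_gmap;
 *
 *	guest_gmap = gmap_create(current->mm, (1UL << 42) - 1);
 *	if (!guest_gmap)
 *		return -ENOMEM;
 *	...
 *	gmap_remove(guest_gmap);
 */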
116
static void gmap_flush_tlb(struct gmap *gmap)
118 {
119 if (MACHINE_HAS_IDTE)
120 __tlb_flush_idte(gmap->asce);
121 else
122 __tlb_flush_global();
123 }
124
static void gmap_radix_tree_free(struct radix_tree_root *root)
126 {
127 struct radix_tree_iter iter;
128 unsigned long indices[16];
129 unsigned long index;
130 void __rcu **slot;
131 int i, nr;
132
133 /* A radix tree is freed by deleting all of its entries */
134 index = 0;
135 do {
136 nr = 0;
137 radix_tree_for_each_slot(slot, root, &iter, index) {
138 indices[nr] = iter.index;
139 if (++nr == 16)
140 break;
141 }
142 for (i = 0; i < nr; i++) {
143 index = indices[i];
144 radix_tree_delete(root, index);
145 }
146 } while (nr > 0);
147 }
148
static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
150 {
151 struct gmap_rmap *rmap, *rnext, *head;
152 struct radix_tree_iter iter;
153 unsigned long indices[16];
154 unsigned long index;
155 void __rcu **slot;
156 int i, nr;
157
158 /* A radix tree is freed by deleting all of its entries */
159 index = 0;
160 do {
161 nr = 0;
162 radix_tree_for_each_slot(slot, root, &iter, index) {
163 indices[nr] = iter.index;
164 if (++nr == 16)
165 break;
166 }
167 for (i = 0; i < nr; i++) {
168 index = indices[i];
169 head = radix_tree_delete(root, index);
170 gmap_for_each_rmap_safe(rmap, rnext, head)
171 kfree(rmap);
172 }
173 } while (nr > 0);
174 }
175
176 /**
177 * gmap_free - free a guest address space
178 * @gmap: pointer to the guest address space structure
179 *
180 * No locks required. There are no references to this gmap anymore.
181 */
static void gmap_free(struct gmap *gmap)
183 {
184 struct page *page, *next;
185
186 /* Flush tlb of all gmaps (if not already done for shadows) */
187 if (!(gmap_is_shadow(gmap) && gmap->removed))
188 gmap_flush_tlb(gmap);
189 /* Free all segment & region tables. */
190 list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
191 __free_pages(page, CRST_ALLOC_ORDER);
192 gmap_radix_tree_free(&gmap->guest_to_host);
193 gmap_radix_tree_free(&gmap->host_to_guest);
194
195 /* Free additional data for a shadow gmap */
196 if (gmap_is_shadow(gmap)) {
197 /* Free all page tables. */
198 list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
199 page_table_free_pgste(page);
200 gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
201 /* Release reference to the parent */
202 gmap_put(gmap->parent);
203 }
204
205 kfree(gmap);
206 }
207
208 /**
209 * gmap_get - increase reference counter for guest address space
210 * @gmap: pointer to the guest address space structure
211 *
212 * Returns the gmap pointer
213 */
struct gmap *gmap_get(struct gmap *gmap)
215 {
216 refcount_inc(&gmap->ref_count);
217 return gmap;
218 }
219 EXPORT_SYMBOL_GPL(gmap_get);
220
221 /**
222 * gmap_put - decrease reference counter for guest address space
223 * @gmap: pointer to the guest address space structure
224 *
225 * If the reference counter reaches zero the guest address space is freed.
226 */
void gmap_put(struct gmap *gmap)
228 {
229 if (refcount_dec_and_test(&gmap->ref_count))
230 gmap_free(gmap);
231 }
232 EXPORT_SYMBOL_GPL(gmap_put);
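
/*
 * Illustrative sketch (not part of the original code): a user that
 * keeps a gmap pointer beyond the lifetime of its creator takes an
 * extra reference and drops it again when done; the final gmap_put()
 * frees the structure via gmap_free(). "my_state" is a hypothetical
 * caller structure.
 *
 *	my_state->gmap = gmap_get(gmap);
 *	...
 *	gmap_put(my_state->gmap);
 */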
233
234 /**
235 * gmap_remove - remove a guest address space but do not free it yet
236 * @gmap: pointer to the guest address space structure
237 */
void gmap_remove(struct gmap *gmap)
239 {
240 struct gmap *sg, *next;
241 unsigned long gmap_asce;
242
243 /* Remove all shadow gmaps linked to this gmap */
244 if (!list_empty(&gmap->children)) {
245 spin_lock(&gmap->shadow_lock);
246 list_for_each_entry_safe(sg, next, &gmap->children, list) {
247 list_del(&sg->list);
248 gmap_put(sg);
249 }
250 spin_unlock(&gmap->shadow_lock);
251 }
/* Remove gmap from the per-mm list */
253 spin_lock(&gmap->mm->context.lock);
254 list_del_rcu(&gmap->list);
255 if (list_empty(&gmap->mm->context.gmap_list))
256 gmap_asce = 0;
257 else if (list_is_singular(&gmap->mm->context.gmap_list))
258 gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
259 struct gmap, list)->asce;
260 else
261 gmap_asce = -1UL;
262 WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
263 spin_unlock(&gmap->mm->context.lock);
264 synchronize_rcu();
265 /* Put reference */
266 gmap_put(gmap);
267 }
268 EXPORT_SYMBOL_GPL(gmap_remove);
269
270 /**
271 * gmap_enable - switch primary space to the guest address space
272 * @gmap: pointer to the guest address space structure
273 */
void gmap_enable(struct gmap *gmap)
275 {
276 S390_lowcore.gmap = (unsigned long) gmap;
277 }
278 EXPORT_SYMBOL_GPL(gmap_enable);
279
280 /**
281 * gmap_disable - switch back to the standard primary address space
282 * @gmap: pointer to the guest address space structure
283 */
void gmap_disable(struct gmap *gmap)
285 {
286 S390_lowcore.gmap = 0UL;
287 }
288 EXPORT_SYMBOL_GPL(gmap_disable);
289
290 /**
291 * gmap_get_enabled - get a pointer to the currently enabled gmap
292 *
* Returns a pointer to the currently enabled gmap, or NULL if none is enabled.
294 */
struct gmap *gmap_get_enabled(void)
296 {
297 return (struct gmap *) S390_lowcore.gmap;
298 }
299 EXPORT_SYMBOL_GPL(gmap_get_enabled);
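
/*
 * Illustrative sketch (not part of the original code): a virtual CPU
 * entry/exit path would bracket guest execution with gmap_enable()
 * and gmap_disable(), while other code on the same CPU can look up
 * the active gmap via gmap_get_enabled(). "vcpu_gmap" is a
 * hypothetical per-vcpu pointer.
 *
 *	gmap_enable(vcpu_gmap);
 *	... run the guest ...
 *	WARN_ON(gmap_get_enabled() != vcpu_gmap);
 *	gmap_disable(vcpu_gmap);
 */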
300
301 /*
302 * gmap_alloc_table is assumed to be called with mmap_lock held
303 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
305 unsigned long init, unsigned long gaddr)
306 {
307 struct page *page;
308 unsigned long *new;
309
/* since we don't free the gmap table until gmap_free we can unlock */
311 page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
312 if (!page)
313 return -ENOMEM;
314 new = (unsigned long *) page_to_phys(page);
315 crst_table_init(new, init);
316 spin_lock(&gmap->guest_table_lock);
317 if (*table & _REGION_ENTRY_INVALID) {
318 list_add(&page->lru, &gmap->crst_list);
319 *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
320 (*table & _REGION_ENTRY_TYPE_MASK);
321 page->index = gaddr;
322 page = NULL;
323 }
324 spin_unlock(&gmap->guest_table_lock);
325 if (page)
326 __free_pages(page, CRST_ALLOC_ORDER);
327 return 0;
328 }
329
330 /**
331 * __gmap_segment_gaddr - find virtual address from segment pointer
332 * @entry: pointer to a segment table entry in the guest address space
333 *
334 * Returns the virtual address in the guest address space for the segment
335 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
337 {
338 struct page *page;
339 unsigned long offset, mask;
340
341 offset = (unsigned long) entry / sizeof(unsigned long);
342 offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
343 mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
344 page = virt_to_page((void *)((unsigned long) entry & mask));
345 return page->index + offset;
346 }
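
/*
 * Worked example (illustrative, not part of the original code): a
 * segment table holds PTRS_PER_PMD eight-byte entries and the
 * page->index of its first page records the guest address mapped by
 * entry 0. For a pointer to entry 5 of that table, "offset" above
 * evaluates to 5 * PMD_SIZE, so the function returns
 * page->index + 5 * PMD_SIZE, i.e. the guest address covered by that
 * particular segment entry.
 */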
347
348 /**
349 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
350 * @gmap: pointer to the guest address space structure
351 * @vmaddr: address in the host process address space
352 *
353 * Returns 1 if a TLB flush is required
354 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
356 {
357 unsigned long *entry;
358 int flush = 0;
359
360 BUG_ON(gmap_is_shadow(gmap));
361 spin_lock(&gmap->guest_table_lock);
362 entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
363 if (entry) {
364 flush = (*entry != _SEGMENT_ENTRY_EMPTY);
365 *entry = _SEGMENT_ENTRY_EMPTY;
366 }
367 spin_unlock(&gmap->guest_table_lock);
368 return flush;
369 }
370
371 /**
372 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
373 * @gmap: pointer to the guest address space structure
374 * @gaddr: address in the guest address space
375 *
376 * Returns 1 if a TLB flush is required
377 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
379 {
380 unsigned long vmaddr;
381
382 vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
383 gaddr >> PMD_SHIFT);
384 return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
385 }
386
387 /**
388 * gmap_unmap_segment - unmap segment from the guest address space
389 * @gmap: pointer to the guest address space structure
390 * @to: address in the guest address space
391 * @len: length of the memory area to unmap
392 *
393 * Returns 0 if the unmap succeeded, -EINVAL if not.
394 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
396 {
397 unsigned long off;
398 int flush;
399
400 BUG_ON(gmap_is_shadow(gmap));
401 if ((to | len) & (PMD_SIZE - 1))
402 return -EINVAL;
403 if (len == 0 || to + len < to)
404 return -EINVAL;
405
406 flush = 0;
407 mmap_write_lock(gmap->mm);
408 for (off = 0; off < len; off += PMD_SIZE)
409 flush |= __gmap_unmap_by_gaddr(gmap, to + off);
410 mmap_write_unlock(gmap->mm);
411 if (flush)
412 gmap_flush_tlb(gmap);
413 return 0;
414 }
415 EXPORT_SYMBOL_GPL(gmap_unmap_segment);
416
417 /**
418 * gmap_map_segment - map a segment to the guest address space
419 * @gmap: pointer to the guest address space structure
420 * @from: source address in the parent address space
421 * @to: target address in the guest address space
422 * @len: length of the memory area to map
423 *
* Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
425 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
427 unsigned long to, unsigned long len)
428 {
429 unsigned long off;
430 int flush;
431
432 BUG_ON(gmap_is_shadow(gmap));
433 if ((from | to | len) & (PMD_SIZE - 1))
434 return -EINVAL;
435 if (len == 0 || from + len < from || to + len < to ||
436 from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
437 return -EINVAL;
438
439 flush = 0;
440 mmap_write_lock(gmap->mm);
441 for (off = 0; off < len; off += PMD_SIZE) {
442 /* Remove old translation */
443 flush |= __gmap_unmap_by_gaddr(gmap, to + off);
444 /* Store new translation */
445 if (radix_tree_insert(&gmap->guest_to_host,
446 (to + off) >> PMD_SHIFT,
447 (void *) from + off))
448 break;
449 }
450 mmap_write_unlock(gmap->mm);
451 if (flush)
452 gmap_flush_tlb(gmap);
453 if (off >= len)
454 return 0;
455 gmap_unmap_segment(gmap, to, len);
456 return -ENOMEM;
457 }
458 EXPORT_SYMBOL_GPL(gmap_map_segment);
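
/*
 * Illustrative sketch (not part of the original code): addresses and
 * length must be segment (1 MB) aligned. Mapping 16 MB of the parent
 * address space at guest address 0 and removing it again could look
 * like this; "vm_start" stands for a suitably aligned host address
 * and is only a placeholder.
 *
 *	rc = gmap_map_segment(gmap, vm_start, 0x0, 16 * PMD_SIZE);
 *	if (rc)
 *		return rc;
 *	...
 *	gmap_unmap_segment(gmap, 0x0, 16 * PMD_SIZE);
 */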
459
460 /**
461 * __gmap_translate - translate a guest address to a user space address
462 * @gmap: pointer to guest mapping meta data structure
463 * @gaddr: guest address
464 *
465 * Returns user space address which corresponds to the guest address or
466 * -EFAULT if no such mapping exists.
467 * This function does not establish potentially missing page table entries.
468 * The mmap_lock of the mm that belongs to the address space must be held
469 * when this function gets called.
470 *
471 * Note: Can also be called for shadow gmaps.
472 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
474 {
475 unsigned long vmaddr;
476
477 vmaddr = (unsigned long)
478 radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
479 /* Note: guest_to_host is empty for a shadow gmap */
480 return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
481 }
482 EXPORT_SYMBOL_GPL(__gmap_translate);
483
484 /**
485 * gmap_translate - translate a guest address to a user space address
486 * @gmap: pointer to guest mapping meta data structure
487 * @gaddr: guest address
488 *
489 * Returns user space address which corresponds to the guest address or
490 * -EFAULT if no such mapping exists.
491 * This function does not establish potentially missing page table entries.
492 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
494 {
495 unsigned long rc;
496
497 mmap_read_lock(gmap->mm);
498 rc = __gmap_translate(gmap, gaddr);
499 mmap_read_unlock(gmap->mm);
500 return rc;
501 }
502 EXPORT_SYMBOL_GPL(gmap_translate);
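
/*
 * Illustrative sketch (not part of the original code): translation
 * failures are reported as a negative error value encoded in the
 * returned unsigned long, so callers test with IS_ERR_VALUE():
 *
 *	unsigned long vmaddr = gmap_translate(gmap, gaddr);
 *
 *	if (IS_ERR_VALUE(vmaddr))
 *		return (long)vmaddr;
 *	... use vmaddr as a normal user space address of gmap->mm ...
 */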
503
504 /**
505 * gmap_unlink - disconnect a page table from the gmap shadow tables
506 * @mm: pointer to the parent mm_struct
507 * @table: pointer to the host page table
508 * @vmaddr: vm address associated with the host page table
509 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
511 unsigned long vmaddr)
512 {
513 struct gmap *gmap;
514 int flush;
515
516 rcu_read_lock();
517 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
518 flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
519 if (flush)
520 gmap_flush_tlb(gmap);
521 }
522 rcu_read_unlock();
523 }
524
525 static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
526 unsigned long gaddr);
527
528 /**
529 * __gmap_link - set up shadow page tables to connect a host to a guest address
530 * @gmap: pointer to guest mapping meta data structure
531 * @gaddr: guest address
532 * @vmaddr: vm address
533 *
534 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
535 * if the vm address is already mapped to a different guest segment.
536 * The mmap_lock of the mm that belongs to the address space must be held
537 * when this function gets called.
538 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
540 {
541 struct mm_struct *mm;
542 unsigned long *table;
543 spinlock_t *ptl;
544 pgd_t *pgd;
545 p4d_t *p4d;
546 pud_t *pud;
547 pmd_t *pmd;
548 u64 unprot;
549 int rc;
550
551 BUG_ON(gmap_is_shadow(gmap));
552 /* Create higher level tables in the gmap page table */
553 table = gmap->table;
554 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
555 table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
556 if ((*table & _REGION_ENTRY_INVALID) &&
557 gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
558 gaddr & _REGION1_MASK))
559 return -ENOMEM;
560 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
561 }
562 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
563 table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
564 if ((*table & _REGION_ENTRY_INVALID) &&
565 gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
566 gaddr & _REGION2_MASK))
567 return -ENOMEM;
568 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
569 }
570 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
571 table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
572 if ((*table & _REGION_ENTRY_INVALID) &&
573 gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
574 gaddr & _REGION3_MASK))
575 return -ENOMEM;
576 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
577 }
578 table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
579 /* Walk the parent mm page table */
580 mm = gmap->mm;
581 pgd = pgd_offset(mm, vmaddr);
582 VM_BUG_ON(pgd_none(*pgd));
583 p4d = p4d_offset(pgd, vmaddr);
584 VM_BUG_ON(p4d_none(*p4d));
585 pud = pud_offset(p4d, vmaddr);
586 VM_BUG_ON(pud_none(*pud));
587 /* large puds cannot yet be handled */
588 if (pud_large(*pud))
589 return -EFAULT;
590 pmd = pmd_offset(pud, vmaddr);
591 VM_BUG_ON(pmd_none(*pmd));
592 /* Are we allowed to use huge pages? */
593 if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
594 return -EFAULT;
595 /* Link gmap segment table entry location to page table. */
596 rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
597 if (rc)
598 return rc;
599 ptl = pmd_lock(mm, pmd);
600 spin_lock(&gmap->guest_table_lock);
601 if (*table == _SEGMENT_ENTRY_EMPTY) {
602 rc = radix_tree_insert(&gmap->host_to_guest,
603 vmaddr >> PMD_SHIFT, table);
604 if (!rc) {
605 if (pmd_large(*pmd)) {
606 *table = (pmd_val(*pmd) &
607 _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
608 | _SEGMENT_ENTRY_GMAP_UC;
609 } else
610 *table = pmd_val(*pmd) &
611 _SEGMENT_ENTRY_HARDWARE_BITS;
612 }
613 } else if (*table & _SEGMENT_ENTRY_PROTECT &&
614 !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
615 unprot = (u64)*table;
616 unprot &= ~_SEGMENT_ENTRY_PROTECT;
617 unprot |= _SEGMENT_ENTRY_GMAP_UC;
618 gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
619 }
620 spin_unlock(&gmap->guest_table_lock);
621 spin_unlock(ptl);
622 radix_tree_preload_end();
623 return rc;
624 }
625
626 /**
627 * gmap_fault - resolve a fault on a guest address
628 * @gmap: pointer to guest mapping meta data structure
629 * @gaddr: guest address
630 * @fault_flags: flags to pass down to handle_mm_fault()
631 *
632 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
633 * if the vm address is already mapped to a different guest segment.
634 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
636 unsigned int fault_flags)
637 {
638 unsigned long vmaddr;
639 int rc;
640 bool unlocked;
641
642 mmap_read_lock(gmap->mm);
643
644 retry:
645 unlocked = false;
646 vmaddr = __gmap_translate(gmap, gaddr);
647 if (IS_ERR_VALUE(vmaddr)) {
648 rc = vmaddr;
649 goto out_up;
650 }
651 if (fixup_user_fault(gmap->mm, vmaddr, fault_flags,
652 &unlocked)) {
653 rc = -EFAULT;
654 goto out_up;
655 }
656 /*
* If fixup_user_fault() unlocked the mmap_lock during fault-in, redo
* __gmap_translate() to avoid racing with a map/unmap_segment.
659 */
660 if (unlocked)
661 goto retry;
662
663 rc = __gmap_link(gmap, gaddr, vmaddr);
664 out_up:
665 mmap_read_unlock(gmap->mm);
666 return rc;
667 }
668 EXPORT_SYMBOL_GPL(gmap_fault);
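
/*
 * Illustrative sketch (not part of the original code): a guest write
 * fault intercepted by the hypervisor can be resolved by faulting the
 * backing page in writable and linking it into the gmap:
 *
 *	rc = gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);
 *	if (rc == -EFAULT)
 *		... inject an addressing exception into the guest ...
 *	else if (rc)
 *		return rc;
 */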
669
670 /*
671 * this function is assumed to be called with mmap_lock held
672 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
674 {
675 unsigned long vmaddr;
676 spinlock_t *ptl;
677 pte_t *ptep;
678
679 /* Find the vm address for the guest address */
680 vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
681 gaddr >> PMD_SHIFT);
682 if (vmaddr) {
683 vmaddr |= gaddr & ~PMD_MASK;
684 /* Get pointer to the page table entry */
685 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
686 if (likely(ptep))
687 ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
688 pte_unmap_unlock(ptep, ptl);
689 }
690 }
691 EXPORT_SYMBOL_GPL(__gmap_zap);
692
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
694 {
695 unsigned long gaddr, vmaddr, size;
696 struct vm_area_struct *vma;
697
698 mmap_read_lock(gmap->mm);
699 for (gaddr = from; gaddr < to;
700 gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
701 /* Find the vm address for the guest address */
702 vmaddr = (unsigned long)
703 radix_tree_lookup(&gmap->guest_to_host,
704 gaddr >> PMD_SHIFT);
705 if (!vmaddr)
706 continue;
707 vmaddr |= gaddr & ~PMD_MASK;
708 /* Find vma in the parent mm */
709 vma = find_vma(gmap->mm, vmaddr);
710 if (!vma)
711 continue;
712 /*
713 * We do not discard pages that are backed by
714 * hugetlbfs, so we don't have to refault them.
715 */
716 if (is_vm_hugetlb_page(vma))
717 continue;
718 size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
719 zap_page_range(vma, vmaddr, size);
720 }
721 mmap_read_unlock(gmap->mm);
722 }
723 EXPORT_SYMBOL_GPL(gmap_discard);
724
725 static LIST_HEAD(gmap_notifier_list);
726 static DEFINE_SPINLOCK(gmap_notifier_lock);
727
728 /**
729 * gmap_register_pte_notifier - register a pte invalidation callback
730 * @nb: pointer to the gmap notifier block
731 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
733 {
734 spin_lock(&gmap_notifier_lock);
735 list_add_rcu(&nb->list, &gmap_notifier_list);
736 spin_unlock(&gmap_notifier_lock);
737 }
738 EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);
739
740 /**
741 * gmap_unregister_pte_notifier - remove a pte invalidation callback
742 * @nb: pointer to the gmap notifier block
743 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
745 {
746 spin_lock(&gmap_notifier_lock);
747 list_del_rcu(&nb->list);
748 spin_unlock(&gmap_notifier_lock);
749 synchronize_rcu();
750 }
751 EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
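
/*
 * Illustrative sketch (not part of the original code): a user of the
 * invalidation callbacks fills in a struct gmap_notifier and registers
 * it once, typically during module initialization.
 * "my_gmap_notifier_call" is a hypothetical handler; it is invoked
 * whenever an armed pte/pmd of any gmap is changed.
 *
 *	static void my_gmap_notifier_call(struct gmap *gmap,
 *					  unsigned long start,
 *					  unsigned long end);
 *
 *	static struct gmap_notifier my_notifier = {
 *		.notifier_call = my_gmap_notifier_call,
 *	};
 *
 *	gmap_register_pte_notifier(&my_notifier);
 *	...
 *	gmap_unregister_pte_notifier(&my_notifier);
 */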
752
753 /**
754 * gmap_call_notifier - call all registered invalidation callbacks
755 * @gmap: pointer to guest mapping meta data structure
756 * @start: start virtual address in the guest address space
757 * @end: end virtual address in the guest address space
758 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
760 unsigned long end)
761 {
762 struct gmap_notifier *nb;
763
764 list_for_each_entry(nb, &gmap_notifier_list, list)
765 nb->notifier_call(gmap, start, end);
766 }
767
768 /**
769 * gmap_table_walk - walk the gmap page tables
770 * @gmap: pointer to guest mapping meta data structure
771 * @gaddr: virtual address in the guest address space
772 * @level: page table level to stop at
773 *
774 * Returns a table entry pointer for the given guest address and @level
* @level=0 : returns a pointer to a page table entry (or NULL)
776 * @level=1 : returns a pointer to a segment table entry (or NULL)
777 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
778 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
779 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
780 *
781 * Returns NULL if the gmap page tables could not be walked to the
782 * requested level.
783 *
784 * Note: Can also be called for shadow gmaps.
785 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
787 unsigned long gaddr, int level)
788 {
789 const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
790 unsigned long *table = gmap->table;
791
792 if (gmap_is_shadow(gmap) && gmap->removed)
793 return NULL;
794
795 if (WARN_ON_ONCE(level > (asce_type >> 2) + 1))
796 return NULL;
797
798 if (asce_type != _ASCE_TYPE_REGION1 &&
799 gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
800 return NULL;
801
802 switch (asce_type) {
803 case _ASCE_TYPE_REGION1:
804 table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
805 if (level == 4)
806 break;
807 if (*table & _REGION_ENTRY_INVALID)
808 return NULL;
809 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
810 fallthrough;
811 case _ASCE_TYPE_REGION2:
812 table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
813 if (level == 3)
814 break;
815 if (*table & _REGION_ENTRY_INVALID)
816 return NULL;
817 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
818 fallthrough;
819 case _ASCE_TYPE_REGION3:
820 table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
821 if (level == 2)
822 break;
823 if (*table & _REGION_ENTRY_INVALID)
824 return NULL;
825 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
826 fallthrough;
827 case _ASCE_TYPE_SEGMENT:
828 table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
829 if (level == 1)
830 break;
831 if (*table & _REGION_ENTRY_INVALID)
832 return NULL;
833 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
834 table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
835 }
836 return table;
837 }
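
/*
 * Illustrative sketch (not part of the original code): the @level
 * argument selects how deep the walk goes. Fetching the segment table
 * entry (level 1) or the page table entry (level 0) for a guest
 * address looks like this:
 *
 *	unsigned long *ste = gmap_table_walk(gmap, gaddr, 1);
 *	unsigned long *pte = gmap_table_walk(gmap, gaddr, 0);
 *
 * Both return NULL if an intermediate table is missing or if @gaddr
 * lies outside the range covered by the ASCE type.
 */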
838
839 /**
840 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
841 * and return the pte pointer
842 * @gmap: pointer to guest mapping meta data structure
843 * @gaddr: virtual address in the guest address space
844 * @ptl: pointer to the spinlock pointer
845 *
846 * Returns a pointer to the locked pte for a guest address, or NULL
847 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
849 spinlock_t **ptl)
850 {
851 unsigned long *table;
852
853 BUG_ON(gmap_is_shadow(gmap));
854 /* Walk the gmap page table, lock and get pte pointer */
855 table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
856 if (!table || *table & _SEGMENT_ENTRY_INVALID)
857 return NULL;
858 return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
859 }
860
861 /**
862 * gmap_pte_op_fixup - force a page in and connect the gmap page table
863 * @gmap: pointer to guest mapping meta data structure
864 * @gaddr: virtual address in the guest address space
865 * @vmaddr: address in the host process address space
866 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
867 *
868 * Returns 0 if the caller can retry __gmap_translate (might fail again),
869 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
870 * up or connecting the gmap page table.
871 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
873 unsigned long vmaddr, int prot)
874 {
875 struct mm_struct *mm = gmap->mm;
876 unsigned int fault_flags;
877 bool unlocked = false;
878
879 BUG_ON(gmap_is_shadow(gmap));
880 fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
881 if (fixup_user_fault(mm, vmaddr, fault_flags, &unlocked))
882 return -EFAULT;
883 if (unlocked)
884 /* lost mmap_lock, caller has to retry __gmap_translate */
885 return 0;
886 /* Connect the page tables */
887 return __gmap_link(gmap, gaddr, vmaddr);
888 }
889
890 /**
891 * gmap_pte_op_end - release the page table lock
892 * @ptl: pointer to the spinlock pointer
893 */
static void gmap_pte_op_end(spinlock_t *ptl)
895 {
896 if (ptl)
897 spin_unlock(ptl);
898 }
899
900 /**
901 * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
902 * and return the pmd pointer
903 * @gmap: pointer to guest mapping meta data structure
904 * @gaddr: virtual address in the guest address space
905 *
906 * Returns a pointer to the pmd for a guest address, or NULL
907 */
static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
909 {
910 pmd_t *pmdp;
911
912 BUG_ON(gmap_is_shadow(gmap));
913 pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
914 if (!pmdp)
915 return NULL;
916
917 /* without huge pages, there is no need to take the table lock */
918 if (!gmap->mm->context.allow_gmap_hpage_1m)
919 return pmd_none(*pmdp) ? NULL : pmdp;
920
921 spin_lock(&gmap->guest_table_lock);
922 if (pmd_none(*pmdp)) {
923 spin_unlock(&gmap->guest_table_lock);
924 return NULL;
925 }
926
927 /* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
928 if (!pmd_large(*pmdp))
929 spin_unlock(&gmap->guest_table_lock);
930 return pmdp;
931 }
932
933 /**
934 * gmap_pmd_op_end - release the guest_table_lock if needed
935 * @gmap: pointer to the guest mapping meta data structure
936 * @pmdp: pointer to the pmd
937 */
static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
939 {
940 if (pmd_large(*pmdp))
941 spin_unlock(&gmap->guest_table_lock);
942 }
943
944 /*
945 * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
946 * @pmdp: pointer to the pmd to be protected
947 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
948 * @bits: notification bits to set
949 *
950 * Returns:
951 * 0 if successfully protected
952 * -EAGAIN if a fixup is needed
953 * -EINVAL if unsupported notifier bits have been specified
954 *
955 * Expected to be called with sg->mm->mmap_lock in read and
956 * guest_table_lock held.
957 */
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
959 pmd_t *pmdp, int prot, unsigned long bits)
960 {
961 int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
962 int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
963 pmd_t new = *pmdp;
964
965 /* Fixup needed */
966 if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
967 return -EAGAIN;
968
969 if (prot == PROT_NONE && !pmd_i) {
970 pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
971 gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
972 }
973
974 if (prot == PROT_READ && !pmd_p) {
975 pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
976 pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
977 gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
978 }
979
980 if (bits & GMAP_NOTIFY_MPROT)
981 pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;
982
983 /* Shadow GMAP protection needs split PMDs */
984 if (bits & GMAP_NOTIFY_SHADOW)
985 return -EINVAL;
986
987 return 0;
988 }
989
990 /*
991 * gmap_protect_pte - remove access rights to memory and set pgste bits
992 * @gmap: pointer to guest mapping meta data structure
993 * @gaddr: virtual address in the guest address space
994 * @pmdp: pointer to the pmd associated with the pte
995 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
996 * @bits: notification bits to set
997 *
998 * Returns 0 if successfully protected, -ENOMEM if out of memory and
999 * -EAGAIN if a fixup is needed.
1000 *
1001 * Expected to be called with sg->mm->mmap_lock in read
1002 */
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
1004 pmd_t *pmdp, int prot, unsigned long bits)
1005 {
1006 int rc;
1007 pte_t *ptep;
1008 spinlock_t *ptl = NULL;
1009 unsigned long pbits = 0;
1010
1011 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
1012 return -EAGAIN;
1013
1014 ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
1015 if (!ptep)
1016 return -ENOMEM;
1017
1018 pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
1019 pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
1020 /* Protect and unlock. */
1021 rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
1022 gmap_pte_op_end(ptl);
1023 return rc;
1024 }
1025
1026 /*
1027 * gmap_protect_range - remove access rights to memory and set pgste bits
1028 * @gmap: pointer to guest mapping meta data structure
1029 * @gaddr: virtual address in the guest address space
1030 * @len: size of area
1031 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
1032 * @bits: pgste notification bits to set
1033 *
1034 * Returns 0 if successfully protected, -ENOMEM if out of memory and
1035 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
1036 *
1037 * Called with sg->mm->mmap_lock in read.
1038 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
1040 unsigned long len, int prot, unsigned long bits)
1041 {
1042 unsigned long vmaddr, dist;
1043 pmd_t *pmdp;
1044 int rc;
1045
1046 BUG_ON(gmap_is_shadow(gmap));
1047 while (len) {
1048 rc = -EAGAIN;
1049 pmdp = gmap_pmd_op_walk(gmap, gaddr);
1050 if (pmdp) {
1051 if (!pmd_large(*pmdp)) {
1052 rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
1053 bits);
1054 if (!rc) {
1055 len -= PAGE_SIZE;
1056 gaddr += PAGE_SIZE;
1057 }
1058 } else {
1059 rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
1060 bits);
1061 if (!rc) {
1062 dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
1063 len = len < dist ? 0 : len - dist;
1064 gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
1065 }
1066 }
1067 gmap_pmd_op_end(gmap, pmdp);
1068 }
1069 if (rc) {
1070 if (rc == -EINVAL)
1071 return rc;
1072
1073 /* -EAGAIN, fixup of userspace mm and gmap */
1074 vmaddr = __gmap_translate(gmap, gaddr);
1075 if (IS_ERR_VALUE(vmaddr))
1076 return vmaddr;
1077 rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
1078 if (rc)
1079 return rc;
1080 }
1081 }
1082 return 0;
1083 }
1084
1085 /**
1086 * gmap_mprotect_notify - change access rights for a range of ptes and
1087 * call the notifier if any pte changes again
1088 * @gmap: pointer to guest mapping meta data structure
1089 * @gaddr: virtual address in the guest address space
1090 * @len: size of area
1091 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
1092 *
1093 * Returns 0 if for each page in the given range a gmap mapping exists,
1094 * the new access rights could be set and the notifier could be armed.
1095 * If the gmap mapping is missing for one or more pages -EFAULT is
1096 * returned. If no memory could be allocated -ENOMEM is returned.
1097 * This function establishes missing page table entries.
1098 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
1100 unsigned long len, int prot)
1101 {
1102 int rc;
1103
1104 if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
1105 return -EINVAL;
1106 if (!MACHINE_HAS_ESOP && prot == PROT_READ)
1107 return -EINVAL;
1108 mmap_read_lock(gmap->mm);
1109 rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
1110 mmap_read_unlock(gmap->mm);
1111 return rc;
1112 }
1113 EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
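
/*
 * Illustrative sketch (not part of the original code): write-protect
 * a single guest page and arm the notifier so that the next guest
 * write to it triggers the registered callbacks:
 *
 *	rc = gmap_mprotect_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE,
 *				  PROT_READ);
 *	if (rc)
 *		... handle -EFAULT, -ENOMEM or -EINVAL ...
 */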
1114
1115 /**
1116 * gmap_read_table - get an unsigned long value from a guest page table using
1117 * absolute addressing, without marking the page referenced.
1118 * @gmap: pointer to guest mapping meta data structure
1119 * @gaddr: virtual address in the guest address space
1120 * @val: pointer to the unsigned long value to return
1121 *
1122 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
1123 * if reading using the virtual address failed. -EINVAL if called on a gmap
1124 * shadow.
1125 *
1126 * Called with gmap->mm->mmap_lock in read.
1127 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
1129 {
1130 unsigned long address, vmaddr;
1131 spinlock_t *ptl;
1132 pte_t *ptep, pte;
1133 int rc;
1134
1135 if (gmap_is_shadow(gmap))
1136 return -EINVAL;
1137
1138 while (1) {
1139 rc = -EAGAIN;
1140 ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
1141 if (ptep) {
1142 pte = *ptep;
1143 if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
1144 address = pte_val(pte) & PAGE_MASK;
1145 address += gaddr & ~PAGE_MASK;
1146 *val = *(unsigned long *) address;
1147 pte_val(*ptep) |= _PAGE_YOUNG;
1148 /* Do *NOT* clear the _PAGE_INVALID bit! */
1149 rc = 0;
1150 }
1151 gmap_pte_op_end(ptl);
1152 }
1153 if (!rc)
1154 break;
1155 vmaddr = __gmap_translate(gmap, gaddr);
1156 if (IS_ERR_VALUE(vmaddr)) {
1157 rc = vmaddr;
1158 break;
1159 }
1160 rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
1161 if (rc)
1162 break;
1163 }
1164 return rc;
1165 }
1166 EXPORT_SYMBOL_GPL(gmap_read_table);
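
/*
 * Illustrative sketch (not part of the original code): a caller such
 * as KVM's guest access code can pick up an eight-byte guest DAT
 * table entry without marking the backing page referenced. Called
 * with gmap->mm->mmap_lock held in read:
 *
 *	unsigned long entry;
 *
 *	rc = gmap_read_table(gmap, gaddr & ~7UL, &entry);
 *	if (rc)
 *		return rc;
 */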
1167
1168 /**
1169 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
1170 * @sg: pointer to the shadow guest address space structure
1171 * @vmaddr: vm address associated with the rmap
1172 * @rmap: pointer to the rmap structure
1173 *
1174 * Called with the sg->guest_table_lock
1175 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
1177 struct gmap_rmap *rmap)
1178 {
1179 void __rcu **slot;
1180
1181 BUG_ON(!gmap_is_shadow(sg));
1182 slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
1183 if (slot) {
1184 rmap->next = radix_tree_deref_slot_protected(slot,
1185 &sg->guest_table_lock);
1186 radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
1187 } else {
1188 rmap->next = NULL;
1189 radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
1190 rmap);
1191 }
1192 }
1193
1194 /**
1195 * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
1196 * @sg: pointer to the shadow guest address space structure
1197 * @raddr: rmap address in the shadow gmap
1198 * @paddr: address in the parent guest address space
1199 * @len: length of the memory area to protect
1200 *
1201 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
1202 * if out of memory and -EFAULT if paddr is invalid.
1203 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
1205 unsigned long paddr, unsigned long len)
1206 {
1207 struct gmap *parent;
1208 struct gmap_rmap *rmap;
1209 unsigned long vmaddr;
1210 spinlock_t *ptl;
1211 pte_t *ptep;
1212 int rc;
1213
1214 BUG_ON(!gmap_is_shadow(sg));
1215 parent = sg->parent;
1216 while (len) {
1217 vmaddr = __gmap_translate(parent, paddr);
1218 if (IS_ERR_VALUE(vmaddr))
1219 return vmaddr;
1220 rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
1221 if (!rmap)
1222 return -ENOMEM;
1223 rmap->raddr = raddr;
1224 rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
1225 if (rc) {
1226 kfree(rmap);
1227 return rc;
1228 }
1229 rc = -EAGAIN;
1230 ptep = gmap_pte_op_walk(parent, paddr, &ptl);
1231 if (ptep) {
1232 spin_lock(&sg->guest_table_lock);
1233 rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
1234 PGSTE_VSIE_BIT);
1235 if (!rc)
1236 gmap_insert_rmap(sg, vmaddr, rmap);
1237 spin_unlock(&sg->guest_table_lock);
1238 gmap_pte_op_end(ptl);
1239 }
1240 radix_tree_preload_end();
1241 if (rc) {
1242 kfree(rmap);
1243 rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
1244 if (rc)
1245 return rc;
1246 continue;
1247 }
1248 paddr += PAGE_SIZE;
1249 len -= PAGE_SIZE;
1250 }
1251 return 0;
1252 }
1253
1254 #define _SHADOW_RMAP_MASK 0x7
1255 #define _SHADOW_RMAP_REGION1 0x5
1256 #define _SHADOW_RMAP_REGION2 0x4
1257 #define _SHADOW_RMAP_REGION3 0x3
1258 #define _SHADOW_RMAP_SEGMENT 0x2
1259 #define _SHADOW_RMAP_PGTABLE 0x1
1260
1261 /**
1262 * gmap_idte_one - invalidate a single region or segment table entry
1263 * @asce: region or segment table *origin* + table-type bits
1264 * @vaddr: virtual address to identify the table entry to flush
1265 *
1266 * The invalid bit of a single region or segment table entry is set
1267 * and the associated TLB entries depending on the entry are flushed.
1268 * The table-type of the @asce identifies the portion of the @vaddr
1269 * that is used as the invalidation index.
1270 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
1272 {
1273 asm volatile(
1274 " .insn rrf,0xb98e0000,%0,%1,0,0"
1275 : : "a" (asce), "a" (vaddr) : "cc", "memory");
1276 }
1277
1278 /**
1279 * gmap_unshadow_page - remove a page from a shadow page table
1280 * @sg: pointer to the shadow guest address space structure
1281 * @raddr: rmap address in the shadow guest address space
1282 *
1283 * Called with the sg->guest_table_lock
1284 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
1286 {
1287 unsigned long *table;
1288
1289 BUG_ON(!gmap_is_shadow(sg));
1290 table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
1291 if (!table || *table & _PAGE_INVALID)
1292 return;
1293 gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
1294 ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
1295 }
1296
1297 /**
1298 * __gmap_unshadow_pgt - remove all entries from a shadow page table
1299 * @sg: pointer to the shadow guest address space structure
1300 * @raddr: rmap address in the shadow guest address space
1301 * @pgt: pointer to the start of a shadow page table
1302 *
1303 * Called with the sg->guest_table_lock
1304 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
1306 unsigned long *pgt)
1307 {
1308 int i;
1309
1310 BUG_ON(!gmap_is_shadow(sg));
1311 for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
1312 pgt[i] = _PAGE_INVALID;
1313 }
1314
1315 /**
1316 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
1317 * @sg: pointer to the shadow guest address space structure
1318 * @raddr: address in the shadow guest address space
1319 *
1320 * Called with the sg->guest_table_lock
1321 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
1323 {
1324 unsigned long sto, *ste, *pgt;
1325 struct page *page;
1326
1327 BUG_ON(!gmap_is_shadow(sg));
1328 ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
1329 if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
1330 return;
1331 gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
1332 sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
1333 gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
1334 pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
1335 *ste = _SEGMENT_ENTRY_EMPTY;
1336 __gmap_unshadow_pgt(sg, raddr, pgt);
1337 /* Free page table */
1338 page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
1339 list_del(&page->lru);
1340 page_table_free_pgste(page);
1341 }
1342
1343 /**
1344 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
1345 * @sg: pointer to the shadow guest address space structure
1346 * @raddr: rmap address in the shadow guest address space
1347 * @sgt: pointer to the start of a shadow segment table
1348 *
1349 * Called with the sg->guest_table_lock
1350 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
1352 unsigned long *sgt)
1353 {
1354 unsigned long *pgt;
1355 struct page *page;
1356 int i;
1357
1358 BUG_ON(!gmap_is_shadow(sg));
1359 for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
1360 if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
1361 continue;
1362 pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
1363 sgt[i] = _SEGMENT_ENTRY_EMPTY;
1364 __gmap_unshadow_pgt(sg, raddr, pgt);
1365 /* Free page table */
1366 page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
1367 list_del(&page->lru);
1368 page_table_free_pgste(page);
1369 }
1370 }
1371
1372 /**
1373 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
1374 * @sg: pointer to the shadow guest address space structure
1375 * @raddr: rmap address in the shadow guest address space
1376 *
1377 * Called with the shadow->guest_table_lock
1378 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
1380 {
1381 unsigned long r3o, *r3e, *sgt;
1382 struct page *page;
1383
1384 BUG_ON(!gmap_is_shadow(sg));
1385 r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
1386 if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
1387 return;
1388 gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
1389 r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
1390 gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
1391 sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
1392 *r3e = _REGION3_ENTRY_EMPTY;
1393 __gmap_unshadow_sgt(sg, raddr, sgt);
1394 /* Free segment table */
1395 page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
1396 list_del(&page->lru);
1397 __free_pages(page, CRST_ALLOC_ORDER);
1398 }
1399
1400 /**
1401 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
1402 * @sg: pointer to the shadow guest address space structure
1403 * @raddr: address in the shadow guest address space
1404 * @r3t: pointer to the start of a shadow region-3 table
1405 *
1406 * Called with the sg->guest_table_lock
1407 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
1409 unsigned long *r3t)
1410 {
1411 unsigned long *sgt;
1412 struct page *page;
1413 int i;
1414
1415 BUG_ON(!gmap_is_shadow(sg));
1416 for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
1417 if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
1418 continue;
1419 sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
1420 r3t[i] = _REGION3_ENTRY_EMPTY;
1421 __gmap_unshadow_sgt(sg, raddr, sgt);
1422 /* Free segment table */
1423 page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
1424 list_del(&page->lru);
1425 __free_pages(page, CRST_ALLOC_ORDER);
1426 }
1427 }
1428
1429 /**
1430 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
1431 * @sg: pointer to the shadow guest address space structure
1432 * @raddr: rmap address in the shadow guest address space
1433 *
1434 * Called with the sg->guest_table_lock
1435 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
1437 {
1438 unsigned long r2o, *r2e, *r3t;
1439 struct page *page;
1440
1441 BUG_ON(!gmap_is_shadow(sg));
1442 r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
1443 if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
1444 return;
1445 gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
1446 r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
1447 gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
1448 r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
1449 *r2e = _REGION2_ENTRY_EMPTY;
1450 __gmap_unshadow_r3t(sg, raddr, r3t);
1451 /* Free region 3 table */
1452 page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
1453 list_del(&page->lru);
1454 __free_pages(page, CRST_ALLOC_ORDER);
1455 }
1456
1457 /**
1458 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
1459 * @sg: pointer to the shadow guest address space structure
1460 * @raddr: rmap address in the shadow guest address space
1461 * @r2t: pointer to the start of a shadow region-2 table
1462 *
1463 * Called with the sg->guest_table_lock
1464 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
1466 unsigned long *r2t)
1467 {
1468 unsigned long *r3t;
1469 struct page *page;
1470 int i;
1471
1472 BUG_ON(!gmap_is_shadow(sg));
1473 for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
1474 if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
1475 continue;
1476 r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
1477 r2t[i] = _REGION2_ENTRY_EMPTY;
1478 __gmap_unshadow_r3t(sg, raddr, r3t);
1479 /* Free region 3 table */
1480 page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
1481 list_del(&page->lru);
1482 __free_pages(page, CRST_ALLOC_ORDER);
1483 }
1484 }
1485
1486 /**
1487 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
1488 * @sg: pointer to the shadow guest address space structure
1489 * @raddr: rmap address in the shadow guest address space
1490 *
1491 * Called with the sg->guest_table_lock
1492 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
1494 {
1495 unsigned long r1o, *r1e, *r2t;
1496 struct page *page;
1497
1498 BUG_ON(!gmap_is_shadow(sg));
1499 r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
1500 if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
1501 return;
1502 gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
1503 r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
1504 gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
1505 r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
1506 *r1e = _REGION1_ENTRY_EMPTY;
1507 __gmap_unshadow_r2t(sg, raddr, r2t);
1508 /* Free region 2 table */
1509 page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
1510 list_del(&page->lru);
1511 __free_pages(page, CRST_ALLOC_ORDER);
1512 }
1513
1514 /**
1515 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
1516 * @sg: pointer to the shadow guest address space structure
1517 * @raddr: rmap address in the shadow guest address space
1518 * @r1t: pointer to the start of a shadow region-1 table
1519 *
1520 * Called with the shadow->guest_table_lock
1521 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
1523 unsigned long *r1t)
1524 {
1525 unsigned long asce, *r2t;
1526 struct page *page;
1527 int i;
1528
1529 BUG_ON(!gmap_is_shadow(sg));
1530 asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
1531 for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
1532 if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
1533 continue;
1534 r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
1535 __gmap_unshadow_r2t(sg, raddr, r2t);
1536 /* Clear entry and flush translation r1t -> r2t */
1537 gmap_idte_one(asce, raddr);
1538 r1t[i] = _REGION1_ENTRY_EMPTY;
1539 /* Free region 2 table */
1540 page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
1541 list_del(&page->lru);
1542 __free_pages(page, CRST_ALLOC_ORDER);
1543 }
1544 }
1545
1546 /**
1547 * gmap_unshadow - remove a shadow page table completely
1548 * @sg: pointer to the shadow guest address space structure
1549 *
1550 * Called with sg->guest_table_lock
1551 */
static void gmap_unshadow(struct gmap *sg)
1553 {
1554 unsigned long *table;
1555
1556 BUG_ON(!gmap_is_shadow(sg));
1557 if (sg->removed)
1558 return;
1559 sg->removed = 1;
1560 gmap_call_notifier(sg, 0, -1UL);
1561 gmap_flush_tlb(sg);
1562 table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
1563 switch (sg->asce & _ASCE_TYPE_MASK) {
1564 case _ASCE_TYPE_REGION1:
1565 __gmap_unshadow_r1t(sg, 0, table);
1566 break;
1567 case _ASCE_TYPE_REGION2:
1568 __gmap_unshadow_r2t(sg, 0, table);
1569 break;
1570 case _ASCE_TYPE_REGION3:
1571 __gmap_unshadow_r3t(sg, 0, table);
1572 break;
1573 case _ASCE_TYPE_SEGMENT:
1574 __gmap_unshadow_sgt(sg, 0, table);
1575 break;
1576 }
1577 }
1578
1579 /**
1580 * gmap_find_shadow - find a specific asce in the list of shadow tables
1581 * @parent: pointer to the parent gmap
1582 * @asce: ASCE for which the shadow table is created
1583 * @edat_level: edat level to be used for the shadow translation
1584 *
1585 * Returns the pointer to a gmap if a shadow table with the given asce is
1586 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
1587 * otherwise NULL
1588 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
1590 int edat_level)
1591 {
1592 struct gmap *sg;
1593
1594 list_for_each_entry(sg, &parent->children, list) {
1595 if (sg->orig_asce != asce || sg->edat_level != edat_level ||
1596 sg->removed)
1597 continue;
1598 if (!sg->initialized)
1599 return ERR_PTR(-EAGAIN);
1600 refcount_inc(&sg->ref_count);
1601 return sg;
1602 }
1603 return NULL;
1604 }
1605
1606 /**
1607 * gmap_shadow_valid - check if a shadow guest address space matches the
1608 * given properties and is still valid
1609 * @sg: pointer to the shadow guest address space structure
1610 * @asce: ASCE for which the shadow table is requested
1611 * @edat_level: edat level to be used for the shadow translation
1612 *
1613 * Returns 1 if the gmap shadow is still valid and matches the given
1614 * properties, the caller can continue using it. Returns 0 otherwise, the
1615 * caller has to request a new shadow gmap in this case.
1616 *
1617 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
1619 {
1620 if (sg->removed)
1621 return 0;
1622 return sg->orig_asce == asce && sg->edat_level == edat_level;
1623 }
1624 EXPORT_SYMBOL_GPL(gmap_shadow_valid);
1625
1626 /**
1627 * gmap_shadow - create/find a shadow guest address space
1628 * @parent: pointer to the parent gmap
1629 * @asce: ASCE for which the shadow table is created
1630 * @edat_level: edat level to be used for the shadow translation
1631 *
1632 * The pages of the top level page table referred by the asce parameter
1633 * will be set to read-only and marked in the PGSTEs of the kvm process.
1634 * The shadow table will be removed automatically on any change to the
1635 * PTE mapping for the source table.
1636 *
1637 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
1638 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
1639 * parent gmap table could not be protected.
1640 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
1642 int edat_level)
1643 {
1644 struct gmap *sg, *new;
1645 unsigned long limit;
1646 int rc;
1647
1648 BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
1649 BUG_ON(gmap_is_shadow(parent));
1650 spin_lock(&parent->shadow_lock);
1651 sg = gmap_find_shadow(parent, asce, edat_level);
1652 spin_unlock(&parent->shadow_lock);
1653 if (sg)
1654 return sg;
1655 /* Create a new shadow gmap */
1656 limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
1657 if (asce & _ASCE_REAL_SPACE)
1658 limit = -1UL;
1659 new = gmap_alloc(limit);
1660 if (!new)
1661 return ERR_PTR(-ENOMEM);
1662 new->mm = parent->mm;
1663 new->parent = gmap_get(parent);
1664 new->orig_asce = asce;
1665 new->edat_level = edat_level;
1666 new->initialized = false;
1667 spin_lock(&parent->shadow_lock);
1668 /* Recheck if another CPU created the same shadow */
1669 sg = gmap_find_shadow(parent, asce, edat_level);
1670 if (sg) {
1671 spin_unlock(&parent->shadow_lock);
1672 gmap_free(new);
1673 return sg;
1674 }
1675 if (asce & _ASCE_REAL_SPACE) {
1676 /* only allow one real-space gmap shadow */
1677 list_for_each_entry(sg, &parent->children, list) {
1678 if (sg->orig_asce & _ASCE_REAL_SPACE) {
1679 spin_lock(&sg->guest_table_lock);
1680 gmap_unshadow(sg);
1681 spin_unlock(&sg->guest_table_lock);
1682 list_del(&sg->list);
1683 gmap_put(sg);
1684 break;
1685 }
1686 }
1687 }
1688 refcount_set(&new->ref_count, 2);
1689 list_add(&new->list, &parent->children);
1690 if (asce & _ASCE_REAL_SPACE) {
1691 /* nothing to protect, return right away */
1692 new->initialized = true;
1693 spin_unlock(&parent->shadow_lock);
1694 return new;
1695 }
1696 spin_unlock(&parent->shadow_lock);
1697 /* protect after insertion, so it will get properly invalidated */
1698 mmap_read_lock(parent->mm);
1699 rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
1700 ((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
1701 PROT_READ, GMAP_NOTIFY_SHADOW);
1702 mmap_read_unlock(parent->mm);
1703 spin_lock(&parent->shadow_lock);
1704 new->initialized = true;
1705 if (rc) {
1706 list_del(&new->list);
1707 gmap_free(new);
1708 new = ERR_PTR(rc);
1709 }
1710 spin_unlock(&parent->shadow_lock);
1711 return new;
1712 }
1713 EXPORT_SYMBOL_GPL(gmap_shadow);
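/*
 * Illustrative sketch (not part of this file): how the documented return
 * values of gmap_shadow() can be handled by a caller. The error handling
 * shown is only indicative; parent, asce and edat_level come from the
 * hypothetical surrounding context.
 *
 *	sg = gmap_shadow(parent, asce, edat_level);
 *	if (IS_ERR(sg))
 *		return PTR_ERR(sg);	// -ENOMEM, -EFAULT, or -EAGAIN (retry later)
 *	...
 *	gmap_put(sg);			// drop the reference when done
 */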
1714
1715 /**
1716 * gmap_shadow_r2t - create an empty shadow region 2 table
1717 * @sg: pointer to the shadow guest address space structure
1718 * @saddr: faulting address in the shadow gmap
1719 * @r2t: parent gmap address of the region 2 table to get shadowed
1720 * @fake: r2t references contiguous guest memory block, not a r2t
1721 *
1722 * The r2t parameter specifies the address of the source table. The
1723 * four pages of the source table are made read-only in the parent gmap
1724 * address space. A write to the source table area @r2t will automatically
1725 * remove the shadow r2 table and all of its descendants.
1726 *
1727 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1728 * shadow table structure is incomplete, -ENOMEM if out of memory and
1729 * -EFAULT if an address in the parent gmap could not be resolved.
1730 *
1731 * Called with sg->mm->mmap_lock in read.
1732 */
1733 int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
1734 int fake)
1735 {
1736 unsigned long raddr, origin, offset, len;
1737 unsigned long *s_r2t, *table;
1738 struct page *page;
1739 int rc;
1740
1741 BUG_ON(!gmap_is_shadow(sg));
1742 /* Allocate a shadow region second table */
1743 page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
1744 if (!page)
1745 return -ENOMEM;
1746 page->index = r2t & _REGION_ENTRY_ORIGIN;
1747 if (fake)
1748 page->index |= GMAP_SHADOW_FAKE_TABLE;
1749 s_r2t = (unsigned long *) page_to_phys(page);
1750 /* Install shadow region second table */
1751 spin_lock(&sg->guest_table_lock);
1752 table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
1753 if (!table) {
1754 rc = -EAGAIN; /* Race with unshadow */
1755 goto out_free;
1756 }
1757 if (!(*table & _REGION_ENTRY_INVALID)) {
1758 rc = 0; /* Already established */
1759 goto out_free;
1760 } else if (*table & _REGION_ENTRY_ORIGIN) {
1761 rc = -EAGAIN; /* Race with shadow */
1762 goto out_free;
1763 }
1764 crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
1765 /* mark as invalid as long as the parent table is not protected */
1766 *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
1767 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
1768 if (sg->edat_level >= 1)
1769 *table |= (r2t & _REGION_ENTRY_PROTECT);
1770 list_add(&page->lru, &sg->crst_list);
1771 if (fake) {
1772 /* nothing to protect for fake tables */
1773 *table &= ~_REGION_ENTRY_INVALID;
1774 spin_unlock(&sg->guest_table_lock);
1775 return 0;
1776 }
1777 spin_unlock(&sg->guest_table_lock);
1778 /* Make r2t read-only in parent gmap page table */
1779 raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
1780 origin = r2t & _REGION_ENTRY_ORIGIN;
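	/*
	 * Descriptive note (added): the table-offset and table-length bits of
	 * @r2t select which of the four 4 KB pages of the 16 KB region table
	 * actually exist; only that part of the source table is protected.
	 */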
1781 offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1782 len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
1783 rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
1784 spin_lock(&sg->guest_table_lock);
1785 if (!rc) {
1786 table = gmap_table_walk(sg, saddr, 4);
1787 if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1788 (unsigned long) s_r2t)
1789 rc = -EAGAIN; /* Race with unshadow */
1790 else
1791 *table &= ~_REGION_ENTRY_INVALID;
1792 } else {
1793 gmap_unshadow_r2t(sg, raddr);
1794 }
1795 spin_unlock(&sg->guest_table_lock);
1796 return rc;
1797 out_free:
1798 spin_unlock(&sg->guest_table_lock);
1799 __free_pages(page, CRST_ALLOC_ORDER);
1800 return rc;
1801 }
1802 EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
1803
1804 /**
1805 * gmap_shadow_r3t - create a shadow region 3 table
1806 * @sg: pointer to the shadow guest address space structure
1807 * @saddr: faulting address in the shadow gmap
1808 * @r3t: parent gmap address of the region 3 table to get shadowed
1809 * @fake: r3t references contiguous guest memory block, not a r3t
1810 *
1811 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1812 * shadow table structure is incomplete, -ENOMEM if out of memory and
1813 * -EFAULT if an address in the parent gmap could not be resolved.
1814 *
1815 * Called with sg->mm->mmap_lock in read.
1816 */
1817 int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
1818 int fake)
1819 {
1820 unsigned long raddr, origin, offset, len;
1821 unsigned long *s_r3t, *table;
1822 struct page *page;
1823 int rc;
1824
1825 BUG_ON(!gmap_is_shadow(sg));
1826 /* Allocate a shadow region third table */
1827 page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
1828 if (!page)
1829 return -ENOMEM;
1830 page->index = r3t & _REGION_ENTRY_ORIGIN;
1831 if (fake)
1832 page->index |= GMAP_SHADOW_FAKE_TABLE;
1833 s_r3t = (unsigned long *) page_to_phys(page);
1834 /* Install shadow region third table */
1835 spin_lock(&sg->guest_table_lock);
1836 table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
1837 if (!table) {
1838 rc = -EAGAIN; /* Race with unshadow */
1839 goto out_free;
1840 }
1841 if (!(*table & _REGION_ENTRY_INVALID)) {
1842 rc = 0; /* Already established */
1843 goto out_free;
1844 } else if (*table & _REGION_ENTRY_ORIGIN) {
1845 rc = -EAGAIN; /* Race with shadow */
1846 goto out_free;
1847 }
1848 crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
1849 /* mark as invalid as long as the parent table is not protected */
1850 *table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
1851 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
1852 if (sg->edat_level >= 1)
1853 *table |= (r3t & _REGION_ENTRY_PROTECT);
1854 list_add(&page->lru, &sg->crst_list);
1855 if (fake) {
1856 /* nothing to protect for fake tables */
1857 *table &= ~_REGION_ENTRY_INVALID;
1858 spin_unlock(&sg->guest_table_lock);
1859 return 0;
1860 }
1861 spin_unlock(&sg->guest_table_lock);
1862 /* Make r3t read-only in parent gmap page table */
1863 raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
1864 origin = r3t & _REGION_ENTRY_ORIGIN;
1865 offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1866 len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
1867 rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
1868 spin_lock(&sg->guest_table_lock);
1869 if (!rc) {
1870 table = gmap_table_walk(sg, saddr, 3);
1871 if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1872 (unsigned long) s_r3t)
1873 rc = -EAGAIN; /* Race with unshadow */
1874 else
1875 *table &= ~_REGION_ENTRY_INVALID;
1876 } else {
1877 gmap_unshadow_r3t(sg, raddr);
1878 }
1879 spin_unlock(&sg->guest_table_lock);
1880 return rc;
1881 out_free:
1882 spin_unlock(&sg->guest_table_lock);
1883 __free_pages(page, CRST_ALLOC_ORDER);
1884 return rc;
1885 }
1886 EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
1887
1888 /**
1889 * gmap_shadow_sgt - create a shadow segment table
1890 * @sg: pointer to the shadow guest address space structure
1891 * @saddr: faulting address in the shadow gmap
1892 * @sgt: parent gmap address of the segment table to get shadowed
1893 * @fake: sgt references contiguous guest memory block, not a sgt
1894 *
1895 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
1896 * shadow table structure is incomplete, -ENOMEM if out of memory and
1897 * -EFAULT if an address in the parent gmap could not be resolved.
1898 *
1899 * Called with sg->mm->mmap_lock in read.
1900 */
1901 int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
1902 int fake)
1903 {
1904 unsigned long raddr, origin, offset, len;
1905 unsigned long *s_sgt, *table;
1906 struct page *page;
1907 int rc;
1908
1909 BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
1910 /* Allocate a shadow segment table */
1911 page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
1912 if (!page)
1913 return -ENOMEM;
1914 page->index = sgt & _REGION_ENTRY_ORIGIN;
1915 if (fake)
1916 page->index |= GMAP_SHADOW_FAKE_TABLE;
1917 s_sgt = (unsigned long *) page_to_phys(page);
1918 /* Install shadow segment table */
1919 spin_lock(&sg->guest_table_lock);
1920 table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
1921 if (!table) {
1922 rc = -EAGAIN; /* Race with unshadow */
1923 goto out_free;
1924 }
1925 if (!(*table & _REGION_ENTRY_INVALID)) {
1926 rc = 0; /* Already established */
1927 goto out_free;
1928 } else if (*table & _REGION_ENTRY_ORIGIN) {
1929 rc = -EAGAIN; /* Race with shadow */
1930 goto out_free;
1931 }
1932 crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
1933 /* mark as invalid as long as the parent table is not protected */
1934 *table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
1935 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
1936 if (sg->edat_level >= 1)
1937 *table |= sgt & _REGION_ENTRY_PROTECT;
1938 list_add(&page->lru, &sg->crst_list);
1939 if (fake) {
1940 /* nothing to protect for fake tables */
1941 *table &= ~_REGION_ENTRY_INVALID;
1942 spin_unlock(&sg->guest_table_lock);
1943 return 0;
1944 }
1945 spin_unlock(&sg->guest_table_lock);
1946 /* Make sgt read-only in parent gmap page table */
1947 raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
1948 origin = sgt & _REGION_ENTRY_ORIGIN;
1949 offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
1950 len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
1951 rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
1952 spin_lock(&sg->guest_table_lock);
1953 if (!rc) {
1954 table = gmap_table_walk(sg, saddr, 2);
1955 if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
1956 (unsigned long) s_sgt)
1957 rc = -EAGAIN; /* Race with unshadow */
1958 else
1959 *table &= ~_REGION_ENTRY_INVALID;
1960 } else {
1961 gmap_unshadow_sgt(sg, raddr);
1962 }
1963 spin_unlock(&sg->guest_table_lock);
1964 return rc;
1965 out_free:
1966 spin_unlock(&sg->guest_table_lock);
1967 __free_pages(page, CRST_ALLOC_ORDER);
1968 return rc;
1969 }
1970 EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
1971
1972 /**
1973 * gmap_shadow_pgt_lookup - find a shadow page table
1974 * @sg: pointer to the shadow guest address space structure
1975 * @saddr: the address in the shadow guest address space
1976 * @pgt: parent gmap address of the shadowed page table (returned)
1977 * @dat_protection: set if the page table is protected by DAT (returned)
1978 * @fake: set if pgt references a contiguous guest memory block, not a pgtable (returned)
1979 *
1980 * Returns 0 if the shadow page table was found and -EAGAIN if the page
1981 * table was not found.
1982 *
1983 * Called with sg->mm->mmap_lock in read.
1984 */
1985 int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
1986 unsigned long *pgt, int *dat_protection,
1987 int *fake)
1988 {
1989 unsigned long *table;
1990 struct page *page;
1991 int rc;
1992
1993 BUG_ON(!gmap_is_shadow(sg));
1994 spin_lock(&sg->guest_table_lock);
1995 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1996 if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
1997 /* Shadow page tables are full pages (pte+pgste) */
1998 page = pfn_to_page(*table >> PAGE_SHIFT);
1999 *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
2000 *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
2001 *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
2002 rc = 0;
2003 } else {
2004 rc = -EAGAIN;
2005 }
2006 spin_unlock(&sg->guest_table_lock);
2007 return rc;
2008
2009 }
2010 EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
2011
2012 /**
2013 * gmap_shadow_pgt - instantiate a shadow page table
2014 * @sg: pointer to the shadow guest address space structure
2015 * @saddr: faulting address in the shadow gmap
2016 * @pgt: parent gmap address of the page table to get shadowed
2017 * @fake: pgt references contiguous guest memory block, not a pgtable
2018 *
2019 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
2020 * shadow table structure is incomplete, -ENOMEM if out of memory and
2021 * -EFAULT if an address in the parent gmap could not be resolved.
2022 *
2023 * Called with sg->mm->mmap_lock in read.
2024 */
2025 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
2026 int fake)
2027 {
2028 unsigned long raddr, origin;
2029 unsigned long *s_pgt, *table;
2030 struct page *page;
2031 int rc;
2032
2033 BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
2034 /* Allocate a shadow page table */
2035 page = page_table_alloc_pgste(sg->mm);
2036 if (!page)
2037 return -ENOMEM;
2038 page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
2039 if (fake)
2040 page->index |= GMAP_SHADOW_FAKE_TABLE;
2041 s_pgt = (unsigned long *) page_to_phys(page);
2042 /* Install shadow page table */
2043 spin_lock(&sg->guest_table_lock);
2044 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
2045 if (!table) {
2046 rc = -EAGAIN; /* Race with unshadow */
2047 goto out_free;
2048 }
2049 if (!(*table & _SEGMENT_ENTRY_INVALID)) {
2050 rc = 0; /* Already established */
2051 goto out_free;
2052 } else if (*table & _SEGMENT_ENTRY_ORIGIN) {
2053 rc = -EAGAIN; /* Race with shadow */
2054 goto out_free;
2055 }
2056 /* mark as invalid as long as the parent table is not protected */
2057 *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
2058 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
2059 list_add(&page->lru, &sg->pt_list);
2060 if (fake) {
2061 /* nothing to protect for fake tables */
2062 *table &= ~_SEGMENT_ENTRY_INVALID;
2063 spin_unlock(&sg->guest_table_lock);
2064 return 0;
2065 }
2066 spin_unlock(&sg->guest_table_lock);
2067 /* Make pgt read-only in parent gmap page table (not the pgste) */
2068 raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
2069 origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
2070 rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
2071 spin_lock(&sg->guest_table_lock);
2072 if (!rc) {
2073 table = gmap_table_walk(sg, saddr, 1);
2074 if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
2075 (unsigned long) s_pgt)
2076 rc = -EAGAIN; /* Race with unshadow */
2077 else
2078 *table &= ~_SEGMENT_ENTRY_INVALID;
2079 } else {
2080 gmap_unshadow_pgt(sg, raddr);
2081 }
2082 spin_unlock(&sg->guest_table_lock);
2083 return rc;
2084 out_free:
2085 spin_unlock(&sg->guest_table_lock);
2086 page_table_free_pgste(page);
2087 return rc;
2088
2089 }
2090 EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
2091
2092 /**
2093 * gmap_shadow_page - create a shadow page mapping
2094 * @sg: pointer to the shadow guest address space structure
2095 * @saddr: faulting address in the shadow gmap
2096 * @pte: pte in parent gmap address space to get shadowed
2097 *
2098 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
2099 * shadow table structure is incomplete, -ENOMEM if out of memory and
2100 * -EFAULT if an address in the parent gmap could not be resolved.
2101 *
2102 * Called with sg->mm->mmap_lock in read.
2103 */
2104 int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
2105 {
2106 struct gmap *parent;
2107 struct gmap_rmap *rmap;
2108 unsigned long vmaddr, paddr;
2109 spinlock_t *ptl;
2110 pte_t *sptep, *tptep;
2111 int prot;
2112 int rc;
2113
2114 BUG_ON(!gmap_is_shadow(sg));
2115 parent = sg->parent;
2116 prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
2117
2118 rmap = kzalloc(sizeof(*rmap), GFP_KERNEL_ACCOUNT);
2119 if (!rmap)
2120 return -ENOMEM;
2121 rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
2122
2123 while (1) {
2124 paddr = pte_val(pte) & PAGE_MASK;
2125 vmaddr = __gmap_translate(parent, paddr);
2126 if (IS_ERR_VALUE(vmaddr)) {
2127 rc = vmaddr;
2128 break;
2129 }
2130 rc = radix_tree_preload(GFP_KERNEL_ACCOUNT);
2131 if (rc)
2132 break;
2133 rc = -EAGAIN;
2134 sptep = gmap_pte_op_walk(parent, paddr, &ptl);
2135 if (sptep) {
2136 spin_lock(&sg->guest_table_lock);
2137 /* Get page table pointer */
2138 tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
2139 if (!tptep) {
2140 spin_unlock(&sg->guest_table_lock);
2141 gmap_pte_op_end(ptl);
2142 radix_tree_preload_end();
2143 break;
2144 }
2145 rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
2146 if (rc > 0) {
2147 /* Success and a new mapping */
2148 gmap_insert_rmap(sg, vmaddr, rmap);
2149 rmap = NULL;
2150 rc = 0;
2151 }
2152 gmap_pte_op_end(ptl);
2153 spin_unlock(&sg->guest_table_lock);
2154 }
2155 radix_tree_preload_end();
2156 if (!rc)
2157 break;
2158 rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
2159 if (rc)
2160 break;
2161 }
2162 kfree(rmap);
2163 return rc;
2164 }
2165 EXPORT_SYMBOL_GPL(gmap_shadow_page);
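/*
 * Illustrative sketch (not part of this file): how the gmap_shadow_*()
 * helpers above fit together when a fault in the shadow address space is
 * resolved. walk_guest_tables() and read_guest_pte() are hypothetical
 * placeholders for the guest DAT walk the caller has to perform (creating
 * missing levels via gmap_shadow_r2t/r3t/sgt/pgt as it goes).
 *
 *	static int resolve_shadow_fault(struct gmap *sg, unsigned long saddr)
 *	{
 *		unsigned long pgt;
 *		int dat_protection, fake;
 *		pte_t pte;
 *		int rc;
 *
 *		rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection, &fake);
 *		if (rc)
 *			rc = walk_guest_tables(sg, saddr, &pgt, &dat_protection, &fake);
 *		if (rc)
 *			return rc;
 *		rc = read_guest_pte(sg->parent, pgt, saddr, fake, &pte);
 *		if (rc)
 *			return rc;
 *		// a set dat_protection bit would additionally be merged into
 *		// the pte's protection before shadowing
 *		return gmap_shadow_page(sg, saddr, pte);
 *	}
 */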
2166
2167 /*
2168 * gmap_shadow_notify - handle notifications for shadow gmap
2169 *
2170 * Called with sg->parent->shadow_lock held.
2171 */
2172 static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
2173 unsigned long gaddr)
2174 {
2175 struct gmap_rmap *rmap, *rnext, *head;
2176 unsigned long start, end, bits, raddr;
2177
2178 BUG_ON(!gmap_is_shadow(sg));
2179
2180 spin_lock(&sg->guest_table_lock);
2181 if (sg->removed) {
2182 spin_unlock(&sg->guest_table_lock);
2183 return;
2184 }
2185 /* Check for top level table */
2186 start = sg->orig_asce & _ASCE_ORIGIN;
2187 end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
2188 if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
2189 gaddr < end) {
2190 /* The complete shadow table has to go */
2191 gmap_unshadow(sg);
2192 spin_unlock(&sg->guest_table_lock);
2193 list_del(&sg->list);
2194 gmap_put(sg);
2195 return;
2196 }
2197 /* Remove the page table tree for one specific entry */
2198 head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
2199 gmap_for_each_rmap_safe(rmap, rnext, head) {
2200 bits = rmap->raddr & _SHADOW_RMAP_MASK;
2201 raddr = rmap->raddr ^ bits;
2202 switch (bits) {
2203 case _SHADOW_RMAP_REGION1:
2204 gmap_unshadow_r2t(sg, raddr);
2205 break;
2206 case _SHADOW_RMAP_REGION2:
2207 gmap_unshadow_r3t(sg, raddr);
2208 break;
2209 case _SHADOW_RMAP_REGION3:
2210 gmap_unshadow_sgt(sg, raddr);
2211 break;
2212 case _SHADOW_RMAP_SEGMENT:
2213 gmap_unshadow_pgt(sg, raddr);
2214 break;
2215 case _SHADOW_RMAP_PGTABLE:
2216 gmap_unshadow_page(sg, raddr);
2217 break;
2218 }
2219 kfree(rmap);
2220 }
2221 spin_unlock(&sg->guest_table_lock);
2222 }
2223
2224 /**
2225 * ptep_notify - call all invalidation callbacks for a specific pte.
2226 * @mm: pointer to the process mm_struct
2227 * @vmaddr: virtual address in the process address space
2228 * @pte: pointer to the page table entry
2229 * @bits: bits from the pgste that caused the notify call
2230 *
2231 * This function is assumed to be called with the page table lock held
2232 * for the pte to notify.
2233 */
2234 void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
2235 pte_t *pte, unsigned long bits)
2236 {
2237 unsigned long offset, gaddr = 0;
2238 unsigned long *table;
2239 struct gmap *gmap, *sg, *next;
2240
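	/*
	 * Descriptive note (added): pte points into a 2 KB page table with 256
	 * entries. The masked byte offset (pte index * 8), multiplied by
	 * PAGE_SIZE / sizeof(pte_t) = 512, yields pte index * PAGE_SIZE, i.e.
	 * the offset of the notified page within its 1 MB segment.
	 */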
2241 offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
2242 offset = offset * (PAGE_SIZE / sizeof(pte_t));
2243 rcu_read_lock();
2244 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2245 spin_lock(&gmap->guest_table_lock);
2246 table = radix_tree_lookup(&gmap->host_to_guest,
2247 vmaddr >> PMD_SHIFT);
2248 if (table)
2249 gaddr = __gmap_segment_gaddr(table) + offset;
2250 spin_unlock(&gmap->guest_table_lock);
2251 if (!table)
2252 continue;
2253
2254 if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
2255 spin_lock(&gmap->shadow_lock);
2256 list_for_each_entry_safe(sg, next,
2257 &gmap->children, list)
2258 gmap_shadow_notify(sg, vmaddr, gaddr);
2259 spin_unlock(&gmap->shadow_lock);
2260 }
2261 if (bits & PGSTE_IN_BIT)
2262 gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
2263 }
2264 rcu_read_unlock();
2265 }
2266 EXPORT_SYMBOL_GPL(ptep_notify);
2267
2268 static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
2269 unsigned long gaddr)
2270 {
2271 pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
2272 gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
2273 }
2274
2275 /**
2276 * gmap_pmdp_xchg - exchange a gmap pmd with another
2277 * @gmap: pointer to the guest address space structure
2278 * @pmdp: pointer to the pmd entry
2279 * @new: replacement entry
2280 * @gaddr: the affected guest address
2281 *
2282 * This function is assumed to be called with the guest_table_lock
2283 * held.
2284 */
2285 static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
2286 unsigned long gaddr)
2287 {
2288 gaddr &= HPAGE_MASK;
2289 pmdp_notify_gmap(gmap, pmdp, gaddr);
2290 pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN;
2291 if (MACHINE_HAS_TLB_GUEST)
2292 __pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
2293 IDTE_GLOBAL);
2294 else if (MACHINE_HAS_IDTE)
2295 __pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
2296 else
2297 __pmdp_csp(pmdp);
2298 *pmdp = new;
2299 }
2300
2301 static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
2302 int purge)
2303 {
2304 pmd_t *pmdp;
2305 struct gmap *gmap;
2306 unsigned long gaddr;
2307
2308 rcu_read_lock();
2309 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2310 spin_lock(&gmap->guest_table_lock);
2311 pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
2312 vmaddr >> PMD_SHIFT);
2313 if (pmdp) {
2314 gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
2315 pmdp_notify_gmap(gmap, pmdp, gaddr);
2316 WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2317 _SEGMENT_ENTRY_GMAP_UC));
2318 if (purge)
2319 __pmdp_csp(pmdp);
2320 pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
2321 }
2322 spin_unlock(&gmap->guest_table_lock);
2323 }
2324 rcu_read_unlock();
2325 }
2326
2327 /**
2328 * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
2329 * flushing
2330 * @mm: pointer to the process mm_struct
2331 * @vmaddr: virtual address in the process address space
2332 */
2333 void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
2334 {
2335 gmap_pmdp_clear(mm, vmaddr, 0);
2336 }
2337 EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
2338
2339 /**
2340 * gmap_pmdp_csp - csp (compare and swap and purge) all affected guest pmd entries
2341 * @mm: pointer to the process mm_struct
2342 * @vmaddr: virtual address in the process address space
2343 */
2344 void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
2345 {
2346 gmap_pmdp_clear(mm, vmaddr, 1);
2347 }
2348 EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
2349
2350 /**
2351 * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry (local TLB flush)
2352 * @mm: pointer to the process mm_struct
2353 * @vmaddr: virtual address in the process address space
2354 */
2355 void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
2356 {
2357 unsigned long *entry, gaddr;
2358 struct gmap *gmap;
2359 pmd_t *pmdp;
2360
2361 rcu_read_lock();
2362 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2363 spin_lock(&gmap->guest_table_lock);
2364 entry = radix_tree_delete(&gmap->host_to_guest,
2365 vmaddr >> PMD_SHIFT);
2366 if (entry) {
2367 pmdp = (pmd_t *)entry;
2368 gaddr = __gmap_segment_gaddr(entry);
2369 pmdp_notify_gmap(gmap, pmdp, gaddr);
2370 WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2371 _SEGMENT_ENTRY_GMAP_UC));
2372 if (MACHINE_HAS_TLB_GUEST)
2373 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
2374 gmap->asce, IDTE_LOCAL);
2375 else if (MACHINE_HAS_IDTE)
2376 __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
2377 *entry = _SEGMENT_ENTRY_EMPTY;
2378 }
2379 spin_unlock(&gmap->guest_table_lock);
2380 }
2381 rcu_read_unlock();
2382 }
2383 EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
2384
2385 /**
2386 * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry (global TLB flush)
2387 * @mm: pointer to the process mm_struct
2388 * @vmaddr: virtual address in the process address space
2389 */
2390 void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
2391 {
2392 unsigned long *entry, gaddr;
2393 struct gmap *gmap;
2394 pmd_t *pmdp;
2395
2396 rcu_read_lock();
2397 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
2398 spin_lock(&gmap->guest_table_lock);
2399 entry = radix_tree_delete(&gmap->host_to_guest,
2400 vmaddr >> PMD_SHIFT);
2401 if (entry) {
2402 pmdp = (pmd_t *)entry;
2403 gaddr = __gmap_segment_gaddr(entry);
2404 pmdp_notify_gmap(gmap, pmdp, gaddr);
2405 WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
2406 _SEGMENT_ENTRY_GMAP_UC));
2407 if (MACHINE_HAS_TLB_GUEST)
2408 __pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
2409 gmap->asce, IDTE_GLOBAL);
2410 else if (MACHINE_HAS_IDTE)
2411 __pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
2412 else
2413 __pmdp_csp(pmdp);
2414 *entry = _SEGMENT_ENTRY_EMPTY;
2415 }
2416 spin_unlock(&gmap->guest_table_lock);
2417 }
2418 rcu_read_unlock();
2419 }
2420 EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
2421
2422 /**
2423 * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
2424 * @gmap: pointer to guest address space
2425 * @pmdp: pointer to the pmd to be tested
2426 * @gaddr: virtual address in the guest address space
2427 *
2428 * This function is assumed to be called with the guest_table_lock
2429 * held.
2430 */
2431 static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
2432 unsigned long gaddr)
2433 {
2434 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
2435 return false;
2436
2437 /* Already protected memory that did not change is clean */
2438 if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
2439 !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
2440 return false;
2441
2442 /* Clear UC indication and reset protection */
2443 pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
2444 gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
2445 return true;
2446 }
2447
2448 /**
2449 * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
2450 * @gmap: pointer to guest address space
2451 * @bitmap: dirty bitmap for this pmd
2452 * @gaddr: virtual address in the guest address space
2453 * @vmaddr: virtual address in the host address space
2454 *
2455 * This function is assumed to be called with the guest_table_lock
2456 * held.
2457 */
2458 void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
2459 unsigned long gaddr, unsigned long vmaddr)
2460 {
2461 int i;
2462 pmd_t *pmdp;
2463 pte_t *ptep;
2464 spinlock_t *ptl;
2465
2466 pmdp = gmap_pmd_op_walk(gmap, gaddr);
2467 if (!pmdp)
2468 return;
2469
2470 if (pmd_large(*pmdp)) {
2471 if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
2472 bitmap_fill(bitmap, _PAGE_ENTRIES);
2473 } else {
2474 for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
2475 ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
2476 if (!ptep)
2477 continue;
2478 if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
2479 set_bit(i, bitmap);
2480 spin_unlock(ptl);
2481 }
2482 }
2483 gmap_pmd_op_end(gmap, pmdp);
2484 }
2485 EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
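/*
 * Illustrative sketch (not part of this file): gmap_sync_dirty_log_pmd()
 * fills a 256-bit bitmap (_PAGE_ENTRIES bits, one per 4 KB page of the 1 MB
 * segment starting at @gaddr). A dirty-log user could consume it roughly
 * like this; mark_guest_page_dirty() is a hypothetical placeholder.
 *
 *	unsigned long bitmap[4] = { 0 };
 *	unsigned int i;
 *
 *	gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
 *	for_each_set_bit(i, bitmap, _PAGE_ENTRIES)
 *		mark_guest_page_dirty(gaddr + i * PAGE_SIZE);
 */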
2486
2487 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
2488 static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
2489 unsigned long end, struct mm_walk *walk)
2490 {
2491 struct vm_area_struct *vma = walk->vma;
2492
2493 split_huge_pmd(vma, pmd, addr);
2494 return 0;
2495 }
2496
2497 static const struct mm_walk_ops thp_split_walk_ops = {
2498 .pmd_entry = thp_split_walk_pmd_entry,
2499 };
2500
2501 static inline void thp_split_mm(struct mm_struct *mm)
2502 {
2503 struct vm_area_struct *vma;
2504
2505 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
2506 vma->vm_flags &= ~VM_HUGEPAGE;
2507 vma->vm_flags |= VM_NOHUGEPAGE;
2508 walk_page_vma(vma, &thp_split_walk_ops, NULL);
2509 }
2510 mm->def_flags |= VM_NOHUGEPAGE;
2511 }
2512 #else
2513 static inline void thp_split_mm(struct mm_struct *mm)
2514 {
2515 }
2516 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
2517
2518 /*
2519 * Remove all empty zero pages from the mapping for lazy refaulting
2520 * - This must be called after mm->context.has_pgste is set, to avoid
2521 * future creation of zero pages
2522 * - This must be called after THP was enabled
2523 */
2524 static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
2525 unsigned long end, struct mm_walk *walk)
2526 {
2527 unsigned long addr;
2528
2529 for (addr = start; addr != end; addr += PAGE_SIZE) {
2530 pte_t *ptep;
2531 spinlock_t *ptl;
2532
2533 ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
2534 if (is_zero_pfn(pte_pfn(*ptep)))
2535 ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
2536 pte_unmap_unlock(ptep, ptl);
2537 }
2538 return 0;
2539 }
2540
2541 static const struct mm_walk_ops zap_zero_walk_ops = {
2542 .pmd_entry = __zap_zero_pages,
2543 };
2544
2545 /*
2546 * switch on pgstes for the current userspace process (for kvm)
2547 */
2548 int s390_enable_sie(void)
2549 {
2550 struct mm_struct *mm = current->mm;
2551
2552 /* Do we have pgstes? if yes, we are done */
2553 if (mm_has_pgste(mm))
2554 return 0;
2555 /* Fail if the page tables are 2K */
2556 if (!mm_alloc_pgste(mm))
2557 return -EINVAL;
2558 mmap_write_lock(mm);
2559 mm->context.has_pgste = 1;
2560 /* split thp mappings and disable thp for future mappings */
2561 thp_split_mm(mm);
2562 walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
2563 mmap_write_unlock(mm);
2564 return 0;
2565 }
2566 EXPORT_SYMBOL_GPL(s390_enable_sie);
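/*
 * Illustrative sketch (not part of this file): s390_enable_sie() is meant to
 * be called once for the current process before the first guest address
 * space is created; the error handling shown is only indicative.
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;	// e.g. -EINVAL if the mm uses 2 KB page tables
 *	gmap = gmap_create(current->mm, limit);
 */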
2567
2568 int gmap_mark_unmergeable(void)
2569 {
2570 struct mm_struct *mm = current->mm;
2571 struct vm_area_struct *vma;
2572 int ret;
2573
2574 for (vma = mm->mmap; vma; vma = vma->vm_next) {
2575 ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
2576 MADV_UNMERGEABLE, &vma->vm_flags);
2577 if (ret)
2578 return ret;
2579 }
2580 mm->def_flags &= ~VM_MERGEABLE;
2581 return 0;
2582 }
2583 EXPORT_SYMBOL_GPL(gmap_mark_unmergeable);
2584
2585 /*
2586 * Enable storage key handling from now on and initialize the storage
2587 * keys with the default key.
2588 */
2589 static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
2590 unsigned long next, struct mm_walk *walk)
2591 {
2592 /* Clear storage key */
2593 ptep_zap_key(walk->mm, addr, pte);
2594 return 0;
2595 }
2596
2597 static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
2598 unsigned long hmask, unsigned long next,
2599 struct mm_walk *walk)
2600 {
2601 pmd_t *pmd = (pmd_t *)pte;
2602 unsigned long start, end;
2603 struct page *page = pmd_page(*pmd);
2604
2605 /*
2606 * The write check makes sure we do not set a key on shared
2607 * memory. This is needed as the walker does not differentiate
2608 * between actual guest memory and the process executable or
2609 * shared libraries.
2610 */
2611 if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
2612 !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
2613 return 0;
2614
2615 start = pmd_val(*pmd) & HPAGE_MASK;
2616 end = start + HPAGE_SIZE - 1;
2617 __storage_key_init_range(start, end);
2618 set_bit(PG_arch_1, &page->flags);
2619 return 0;
2620 }
2621
2622 static const struct mm_walk_ops enable_skey_walk_ops = {
2623 .hugetlb_entry = __s390_enable_skey_hugetlb,
2624 .pte_entry = __s390_enable_skey_pte,
2625 };
2626
2627 int s390_enable_skey(void)
2628 {
2629 struct mm_struct *mm = current->mm;
2630 int rc = 0;
2631
2632 mmap_write_lock(mm);
2633 if (mm_uses_skeys(mm))
2634 goto out_up;
2635
2636 mm->context.uses_skeys = 1;
2637 rc = gmap_mark_unmergeable();
2638 if (rc) {
2639 mm->context.uses_skeys = 0;
2640 goto out_up;
2641 }
2642 walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);
2643
2644 out_up:
2645 mmap_write_unlock(mm);
2646 return rc;
2647 }
2648 EXPORT_SYMBOL_GPL(s390_enable_skey);
2649
2650 /*
2651 * Reset CMMA state, make all pages stable again.
2652 */
2653 static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
2654 unsigned long next, struct mm_walk *walk)
2655 {
2656 ptep_zap_unused(walk->mm, addr, pte, 1);
2657 return 0;
2658 }
2659
2660 static const struct mm_walk_ops reset_cmma_walk_ops = {
2661 .pte_entry = __s390_reset_cmma,
2662 };
2663
2664 void s390_reset_cmma(struct mm_struct *mm)
2665 {
2666 mmap_write_lock(mm);
2667 walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
2668 mmap_write_unlock(mm);
2669 }
2670 EXPORT_SYMBOL_GPL(s390_reset_cmma);
2671
2672 /*
2673 * make inaccessible pages accessible again
2674 */
2675 static int __s390_reset_acc(pte_t *ptep, unsigned long addr,
2676 unsigned long next, struct mm_walk *walk)
2677 {
2678 pte_t pte = READ_ONCE(*ptep);
2679
2680 if (pte_present(pte))
2681 WARN_ON_ONCE(uv_destroy_page(pte_val(pte) & PAGE_MASK));
2682 return 0;
2683 }
2684
2685 static const struct mm_walk_ops reset_acc_walk_ops = {
2686 .pte_entry = __s390_reset_acc,
2687 };
2688
2689 #include <linux/sched/mm.h>
2690 void s390_reset_acc(struct mm_struct *mm)
2691 {
2692 if (!mm_is_protected(mm))
2693 return;
2694 /*
2695 * We might be called during:
2696 * - reset: we walk the pages and clear them
2697 * - close of all kvm file descriptors: we walk the pages and clear them
2698 * - exit of the process on fd closure: the vmas are already gone, do nothing
2699 */
2700 if (!mmget_not_zero(mm))
2701 return;
2702 mmap_read_lock(mm);
2703 walk_page_range(mm, 0, TASK_SIZE, &reset_acc_walk_ops, NULL);
2704 mmap_read_unlock(mm);
2705 mmput(mm);
2706 }
2707 EXPORT_SYMBOL_GPL(s390_reset_acc);
2708