1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Secure pages management: Migration of pages between normal and secure
4 * memory of KVM guests.
5 *
6 * Copyright 2018 Bharata B Rao, IBM Corp. <bharata@linux.ibm.com>
7 */
8
9 /*
10 * A pseries guest can be run as a secure guest on Ultravisor-enabled
11 * POWER platforms. On such platforms, this driver is used to manage
12 * the movement of guest pages between the normal memory managed by the
13 * hypervisor (HV) and the secure memory managed by the Ultravisor (UV).
14 *
15 * The page-in or page-out requests from UV will come to HV as hcalls and
16 * HV will call back into UV via ultracalls to satisfy these page requests.
17 *
18 * Private ZONE_DEVICE memory equal to the amount of secure memory
19 * available in the platform for running secure guests is hotplugged.
20 * Whenever a page belonging to the guest becomes secure, a page from this
21 * private device memory is used to represent and track that secure page
22 * on the HV side. Some pages (like virtio buffers, VPA pages etc.) are
23 * shared between UV and HV. However, such pages aren't represented by
24 * device private memory, and mappings to shared memory exist in both
25 * UV and HV page tables.
26 */
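
/*
 * For orientation: a rough sketch of how the HV hcall handler (in
 * book3s_hv.c) is expected to forward one of these requests to this
 * driver. This is a simplified illustration, not verbatim code; it
 * assumes the usual hcall convention of passing arguments in GPRs 4-6
 * and only allows the call from a secure (MSR_S) context:
 *
 *	case H_SVM_PAGE_IN:
 *		ret = H_UNSUPPORTED;
 *		if (kvmppc_get_srr1(vcpu) & MSR_S)
 *			ret = kvmppc_h_svm_page_in(vcpu->kvm,
 *						   kvmppc_get_gpr(vcpu, 4),
 *						   kvmppc_get_gpr(vcpu, 5),
 *						   kvmppc_get_gpr(vcpu, 6));
 *		break;
 */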
27
28 /*
29 * Notes on locking
30 *
31 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
32 * page-in and page-out requests for the same GPA. Concurrent accesses
33 * can either come via UV (guest vCPUs requesting the same page)
34 * or when HV and the guest simultaneously access the same page.
35 * This mutex serializes the migration of a page from HV (normal) to
36 * UV (secure) and vice versa. So the serialization points are around
37 * the migrate_vma routines and the page-in/out routines.
38 *
39 * The per-guest mutex comes with a cost though. Mainly, it serializes the
40 * fault path, as page-out can occur when HV faults on accessing secure
41 * guest pages. Currently UV issues page-in requests for all the guest
42 * PFNs one at a time during early boot (UV_ESM uvcall), so this is
43 * not a cause for concern. Also, currently the number of page-outs caused
44 * by HV touching secure pages is very low. If and when UV supports
45 * overcommitting, then we might see concurrent guest-driven page-outs.
46 *
47 * Locking order
48 *
49 * 1. kvm->srcu - Protects KVM memslots
50 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
51 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
52 * as sync-points for page-in/out
53 */
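
/*
 * A sketch of that nesting as it appears in the page-in/page-out hcall
 * paths below (illustrative only, mirroring kvmppc_h_svm_page_in()):
 *
 *	srcu_idx = srcu_read_lock(&kvm->srcu);	   // 1. memslots
 *	mmap_read_lock(kvm->mm);		   // 2. find_vma/migrate_vma_*
 *	mutex_lock(&kvm->arch.uvmem_lock);	   // 3. page-in/out sync point
 *	... migrate_vma_setup(), uv_page_in()/uv_page_out() ...
 *	mutex_unlock(&kvm->arch.uvmem_lock);
 *	mmap_read_unlock(kvm->mm);
 *	srcu_read_unlock(&kvm->srcu, srcu_idx);
 */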
54
55 /*
56 * Notes on page size
57 *
58 * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN
59 * and H_SVM_PAGE_OUT hcalls at PAGE_SIZE (64K) granularity. HV tracks
60 * secure GPAs at 64K page size and maintains one device PFN for each
61 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued
62 * for one 64K page at a time.
63 *
64 * HV faulting on secure pages: When HV touches any secure page, it
65 * faults and issues a UV_PAGE_OUT request with 64K page size. Currently
66 * UV splits and remaps the 2MB page if necessary and copies out the
67 * required 64K page contents.
68 *
69 * Shared pages: Whenever the guest shares a secure page, UV will split and
70 * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size.
71 *
72 * HV invalidating a page: When a regular page belonging to a secure
73 * guest gets unmapped, HV informs UV with UV_PAGE_INVAL of 64K
74 * page size. Using 64K page size is correct here because any non-secure
75 * page will essentially be of 64K page size. Splitting by UV during sharing
76 * and page-out ensures this.
77 *
78 * Page fault handling: When HV handles a page fault on a page belonging
79 * to a secure guest, it sends that page to UV with a 64K UV_PAGE_IN request.
80 * Using 64K size is correct here too as UV would have split the 2MB page
81 * into 64K mappings and would have done page-outs earlier.
82 *
83 * In summary, the current secure pages handling code in HV assumes
84 * 64K page size and in fact fails any page-in/page-out requests of
85 * non-64K size upfront. If and when UV starts supporting multiple
86 * page-sizes, we need to break this assumption.
87 */
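
/*
 * Accordingly, the hcall entry points below reject any other page size
 * up front; a sketch of the check used in kvmppc_h_svm_page_in/out():
 *
 *	if (page_shift != PAGE_SHIFT)	// only 64K pages supported for now
 *		return H_P3;
 */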
88
89 #include <linux/pagemap.h>
90 #include <linux/migrate.h>
91 #include <linux/kvm_host.h>
92 #include <linux/ksm.h>
93 #include <linux/of.h>
94 #include <asm/ultravisor.h>
95 #include <asm/mman.h>
96 #include <asm/kvm_ppc.h>
97 #include <asm/kvm_book3s_uvmem.h>
98
99 static struct dev_pagemap kvmppc_uvmem_pgmap;
100 static unsigned long *kvmppc_uvmem_bitmap;
101 static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);
102
103 /*
104 * States of a GFN
105 * ---------------
106 * The GFN can be in one of the following states.
107 *
108 * (a) Secure - The GFN is secure. The GFN is associated with
109 * a Secure VM; the contents of the GFN are not accessible
110 * to the Hypervisor. This GFN can be backed by a secure-PFN,
111 * or can be backed by a normal-PFN with contents encrypted.
112 * The former is true when the GFN is paged into the
113 * ultravisor. The latter is true when the GFN is paged out
114 * of the ultravisor.
115 *
116 * (b) Shared - The GFN is shared. The GFN is associated with
117 * a secure VM. The contents of the GFN are accessible to
118 * the Hypervisor. This GFN is backed by a normal-PFN and its
119 * content is unencrypted.
120 *
121 * (c) Normal - The GFN is normal. The GFN is associated with
122 * a normal VM. The contents of the GFN are accessible to
123 * the Hypervisor. Its content is never encrypted.
124 *
125 * States of a VM.
126 * ---------------
127 *
128 * Normal VM: A VM whose contents are always accessible to
129 * the hypervisor. All its GFNs are normal-GFNs.
130 *
131 * Secure VM: A VM whose contents are not accessible to the
132 * hypervisor without the VM's consent. Its GFNs are
133 * either Shared-GFN or Secure-GFNs.
134 *
135 * Transient VM: A Normal VM that is transitioning to a secure VM.
136 * The transition starts on successful return of
137 * H_SVM_INIT_START, and ends on successful return
138 * of H_SVM_INIT_DONE. This transient VM can have GFNs
139 * in any of the three states, i.e. Secure-GFN, Shared-GFN,
140 * and Normal-GFN. The VM never executes in this state
141 * in supervisor-mode.
142 *
143 * Memory slot State.
144 * -----------------------------
145 * The state of a memory slot mirrors the state of the
146 * VM the memory slot is associated with.
147 *
148 * VM State transition.
149 * --------------------
150 *
151 * A VM always starts in Normal Mode.
152 *
153 * H_SVM_INIT_START moves the VM into transient state. During this
154 * time the Ultravisor may request some of its GFNs to be shared or
155 * secured. So its GFNs can be in one of the three GFN states.
156 *
157 * H_SVM_INIT_DONE moves the VM entirely from transient state to
158 * secure-state. At this point any left-over normal-GFNs are
159 * transitioned to Secure-GFN.
160 *
161 * H_SVM_INIT_ABORT moves the transient VM back to normal VM.
162 * All its GFNs are moved to Normal-GFNs.
163 *
164 * UV_TERMINATE transitions the secure-VM back to normal-VM. All
165 * the secure-GFNs and shared-GFNs are transitioned to normal-GFNs.
166 * Note: The contents of the normal-GFN are undefined at this point.
167 *
168 * GFN state implementation:
169 * -------------------------
170 *
171 * A Secure GFN is associated with a secure-PFN, also called uvmem_pfn,
172 * when the GFN is paged-in. Its pfns[] entry has the KVMPPC_GFN_UVMEM_PFN
173 * flag set, and contains the value of the secure-PFN.
174 * It is associated with a normal-PFN, also called mem_pfn, when
175 * the GFN is paged-out. Its pfns[] entry has the KVMPPC_GFN_MEM_PFN flag set.
176 * The value of the normal-PFN is not tracked.
177 *
178 * A Shared GFN is associated with a normal-PFN. Its pfns[] entry has the
179 * KVMPPC_GFN_SHARED flag set. The value of the normal-PFN
180 * is not tracked.
181 *
182 * A Normal GFN is associated with a normal-PFN. Its pfns[] entry has
183 * no flag set. The value of the normal-PFN is not tracked. (See the illustrative helper after the flag definitions below.)
184 *
185 * Life cycle of a GFN
186 * --------------------
187 *
188 * --------------------------------------------------------------
189 * | | Share | Unshare | SVM |H_SVM_INIT_DONE|
190 * | |operation |operation | abort/ | |
191 * | | | | terminate | |
192 * -------------------------------------------------------------
193 * | | | | | |
194 * | Secure | Shared | Secure |Normal |Secure |
195 * | | | | | |
196 * | Shared | Shared | Secure |Normal |Shared |
197 * | | | | | |
198 * | Normal | Shared | Secure |Normal |Secure |
199 * --------------------------------------------------------------
200 *
201 * Life cycle of a VM
202 * --------------------
203 *
204 * --------------------------------------------------------------------
205 * | | start | H_SVM_ |H_SVM_ |H_SVM_ |UV_SVM_ |
206 * | | VM |INIT_START|INIT_DONE|INIT_ABORT |TERMINATE |
207 * | | | | | | |
208 * --------- ----------------------------------------------------------
209 * | | | | | | |
210 * | Normal | Normal | Transient|Error |Error |Normal |
211 * | | | | | | |
212 * | Secure | Error | Error |Error |Error |Normal |
213 * | | | | | | |
214 * |Transient| N/A | Error |Secure |Normal |Normal |
215 * --------------------------------------------------------------------
216 */
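
/*
 * The VM state described above is tracked in kvm->arch.secure_guest.
 * A sketch of the typical guards used by the hcall handlers below:
 *
 *	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
 *		return H_UNSUPPORTED;	// transition never started
 *	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
 *		return H_STATE;		// e.g. H_SVM_INIT_ABORT after INIT_DONE
 */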
217
218 #define KVMPPC_GFN_UVMEM_PFN (1UL << 63)
219 #define KVMPPC_GFN_MEM_PFN (1UL << 62)
220 #define KVMPPC_GFN_SHARED (1UL << 61)
221 #define KVMPPC_GFN_SECURE (KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN)
222 #define KVMPPC_GFN_FLAG_MASK (KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED)
223 #define KVMPPC_GFN_PFN_MASK (~KVMPPC_GFN_FLAG_MASK)
224
225 struct kvmppc_uvmem_slot {
226 struct list_head list;
227 unsigned long nr_pfns;
228 unsigned long base_pfn;
229 unsigned long *pfns;
230 };
231 struct kvmppc_uvmem_page_pvt {
232 struct kvm *kvm;
233 unsigned long gpa;
234 bool skip_page_out;
235 bool remove_gfn;
236 };
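
/*
 * Illustrative helper (hypothetical; not used by the code below): shows how
 * a pfns[] entry of struct kvmppc_uvmem_slot encodes the GFN states described
 * above. A paged-in secure GFN carries KVMPPC_GFN_UVMEM_PFN plus the device
 * PFN in the low bits; the other states carry only a flag. This mirrors
 * kvmppc_mark_gfn() and kvmppc_gfn_is_uvmem_pfn() below.
 */
static inline bool kvmppc_uvmem_entry_secure(unsigned long entry,
					     unsigned long *uvmem_pfn)
{
	if (entry & KVMPPC_GFN_UVMEM_PFN) {
		/* secure and currently backed by a device (secure) PFN */
		if (uvmem_pfn)
			*uvmem_pfn = entry & KVMPPC_GFN_PFN_MASK;
		return true;
	}
	/* secure but paged out to encrypted normal memory */
	return !!(entry & KVMPPC_GFN_MEM_PFN);
}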
237
238 bool kvmppc_uvmem_available(void)
239 {
240 /*
241 * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
242 * and our data structures have been initialized successfully.
243 */
244 return !!kvmppc_uvmem_bitmap;
245 }
246
247 int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
248 {
249 struct kvmppc_uvmem_slot *p;
250
251 p = kzalloc(sizeof(*p), GFP_KERNEL);
252 if (!p)
253 return -ENOMEM;
254 p->pfns = vzalloc(array_size(slot->npages, sizeof(*p->pfns)));
255 if (!p->pfns) {
256 kfree(p);
257 return -ENOMEM;
258 }
259 p->nr_pfns = slot->npages;
260 p->base_pfn = slot->base_gfn;
261
262 mutex_lock(&kvm->arch.uvmem_lock);
263 list_add(&p->list, &kvm->arch.uvmem_pfns);
264 mutex_unlock(&kvm->arch.uvmem_lock);
265
266 return 0;
267 }
268
269 /*
270 * All device PFNs are already released by the time we come here.
271 */
272 void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
273 {
274 struct kvmppc_uvmem_slot *p, *next;
275
276 mutex_lock(&kvm->arch.uvmem_lock);
277 list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
278 if (p->base_pfn == slot->base_gfn) {
279 vfree(p->pfns);
280 list_del(&p->list);
281 kfree(p);
282 break;
283 }
284 }
285 mutex_unlock(&kvm->arch.uvmem_lock);
286 }
287
288 static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,
289 unsigned long flag, unsigned long uvmem_pfn)
290 {
291 struct kvmppc_uvmem_slot *p;
292
293 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
294 if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
295 unsigned long index = gfn - p->base_pfn;
296
297 if (flag == KVMPPC_GFN_UVMEM_PFN)
298 p->pfns[index] = uvmem_pfn | flag;
299 else
300 p->pfns[index] = flag;
301 return;
302 }
303 }
304 }
305
306 /* mark the GFN as a secure-GFN associated with the device-PFN @uvmem_pfn. */
307 static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,
308 unsigned long uvmem_pfn, struct kvm *kvm)
309 {
310 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
311 }
312
313 /* mark the GFN as secure-GFN associated with a memory-PFN. */
314 static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)
315 {
316 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
317 }
318
319 /* mark the GFN as a shared GFN. */
320 static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)
321 {
322 kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
323 }
324
325 /* mark the GFN as a non-existent GFN. */
326 static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)
327 {
328 kvmppc_mark_gfn(gfn, kvm, 0, 0);
329 }
330
331 /* return true, if the GFN is a secure-GFN backed by a secure-PFN */
332 static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
333 unsigned long *uvmem_pfn)
334 {
335 struct kvmppc_uvmem_slot *p;
336
337 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
338 if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
339 unsigned long index = gfn - p->base_pfn;
340
341 if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {
342 if (uvmem_pfn)
343 *uvmem_pfn = p->pfns[index] &
344 KVMPPC_GFN_PFN_MASK;
345 return true;
346 } else
347 return false;
348 }
349 }
350 return false;
351 }
352
353 /*
354 * Starting from *gfn, search for the next available GFN that is not yet
355 * transitioned to a secure GFN. Return the value of that GFN in *gfn. If a
356 * GFN is found, return true, else return false.
357 *
358 * Must be called with kvm->arch.uvmem_lock held.
359 */
360 static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
361 struct kvm *kvm, unsigned long *gfn)
362 {
363 struct kvmppc_uvmem_slot *p;
364 bool ret = false;
365 unsigned long i;
366
367 list_for_each_entry(p, &kvm->arch.uvmem_pfns, list)
368 if (*gfn >= p->base_pfn && *gfn < p->base_pfn + p->nr_pfns)
369 break;
370 if (!p)
371 return ret;
372 /*
373 * The code below assumes a one-to-one correspondence between
374 * kvmppc_uvmem_slot and memslot.
375 */
376 for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
377 unsigned long index = i - p->base_pfn;
378
379 if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {
380 *gfn = i;
381 ret = true;
382 break;
383 }
384 }
385 return ret;
386 }
387
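/*
 * Mark (or unmark) every VMA backing the memslot as mergeable by KSM.
 * Secure guest pages must not be KSM-merged, so H_SVM_INIT_START disables
 * merging and the delete/error paths re-enable it.
 */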
388 static int kvmppc_memslot_page_merge(struct kvm *kvm,
389 const struct kvm_memory_slot *memslot, bool merge)
390 {
391 unsigned long gfn = memslot->base_gfn;
392 unsigned long end, start = gfn_to_hva(kvm, gfn);
393 int ret = 0;
394 struct vm_area_struct *vma;
395 int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;
396
397 if (kvm_is_error_hva(start))
398 return H_STATE;
399
400 end = start + (memslot->npages << PAGE_SHIFT);
401
402 mmap_write_lock(kvm->mm);
403 do {
404 vma = find_vma_intersection(kvm->mm, start, end);
405 if (!vma) {
406 ret = H_STATE;
407 break;
408 }
409 ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
410 merge_flag, &vma->vm_flags);
411 if (ret) {
412 ret = H_STATE;
413 break;
414 }
415 start = vma->vm_end;
416 } while (end > vma->vm_end);
417
418 mmap_write_unlock(kvm->mm);
419 return ret;
420 }
421
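/*
 * Undo what __kvmppc_uvmem_memslot_create() set up: unregister the memslot
 * with UV, free the pfns[] tracking array and re-enable KSM merging.
 */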
422 static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm,
423 const struct kvm_memory_slot *memslot)
424 {
425 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
426 kvmppc_uvmem_slot_free(kvm, memslot);
427 kvmppc_memslot_page_merge(kvm, memslot, true);
428 }
429
430 static int __kvmppc_uvmem_memslot_create(struct kvm *kvm,
431 const struct kvm_memory_slot *memslot)
432 {
433 int ret = H_PARAMETER;
434
435 if (kvmppc_memslot_page_merge(kvm, memslot, false))
436 return ret;
437
438 if (kvmppc_uvmem_slot_init(kvm, memslot))
439 goto out1;
440
441 ret = uv_register_mem_slot(kvm->arch.lpid,
442 memslot->base_gfn << PAGE_SHIFT,
443 memslot->npages * PAGE_SIZE,
444 0, memslot->id);
445 if (ret < 0) {
446 ret = H_PARAMETER;
447 goto out;
448 }
449 return 0;
450 out:
451 kvmppc_uvmem_slot_free(kvm, memslot);
452 out1:
453 kvmppc_memslot_page_merge(kvm, memslot, true);
454 return ret;
455 }
456
457 unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
458 {
459 struct kvm_memslots *slots;
460 struct kvm_memory_slot *memslot, *m;
461 int ret = H_SUCCESS;
462 int srcu_idx;
463
464 kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;
465
466 if (!kvmppc_uvmem_bitmap)
467 return H_UNSUPPORTED;
468
469 /* Only radix guests can be secure guests */
470 if (!kvm_is_radix(kvm))
471 return H_UNSUPPORTED;
472
473 /* NAK the transition to secure if not enabled */
474 if (!kvm->arch.svm_enabled)
475 return H_AUTHORITY;
476
477 srcu_idx = srcu_read_lock(&kvm->srcu);
478
479 /* register the memslot */
480 slots = kvm_memslots(kvm);
481 kvm_for_each_memslot(memslot, slots) {
482 ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
483 if (ret)
484 break;
485 }
486
487 if (ret) {
488 slots = kvm_memslots(kvm);
489 kvm_for_each_memslot(m, slots) {
490 if (m == memslot)
491 break;
492 __kvmppc_uvmem_memslot_delete(kvm, m);
493 }
494 }
495
496 srcu_read_unlock(&kvm->srcu, srcu_idx);
497 return ret;
498 }
499
500 /*
501 * Provision a new page on the HV side and copy over the contents
502 * from secure memory using the UV_PAGE_OUT uvcall.
503 * Caller must hold kvm->arch.uvmem_lock.
504 */
505 static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
506 unsigned long start,
507 unsigned long end, unsigned long page_shift,
508 struct kvm *kvm, unsigned long gpa)
509 {
510 unsigned long src_pfn, dst_pfn = 0;
511 struct migrate_vma mig;
512 struct page *dpage, *spage;
513 struct kvmppc_uvmem_page_pvt *pvt;
514 unsigned long pfn;
515 int ret = U_SUCCESS;
516
517 memset(&mig, 0, sizeof(mig));
518 mig.vma = vma;
519 mig.start = start;
520 mig.end = end;
521 mig.src = &src_pfn;
522 mig.dst = &dst_pfn;
523 mig.pgmap_owner = &kvmppc_uvmem_pgmap;
524 mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;
525
526 /* The requested page is already paged-out, nothing to do */
527 if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
528 return ret;
529
530 ret = migrate_vma_setup(&mig);
531 if (ret)
532 return -1;
533
534 spage = migrate_pfn_to_page(*mig.src);
535 if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
536 goto out_finalize;
537
538 if (!is_zone_device_page(spage))
539 goto out_finalize;
540
541 dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
542 if (!dpage) {
543 ret = -1;
544 goto out_finalize;
545 }
546
547 lock_page(dpage);
548 pvt = spage->zone_device_data;
549 pfn = page_to_pfn(dpage);
550
551 /*
552 * This function is used in two cases:
553 * - When HV touches a secure page, for which we do UV_PAGE_OUT
554 * - When a secure page is converted to shared page, we *get*
555 * the page to essentially unmap the device page. In this
556 * case we skip page-out.
557 */
558 if (!pvt->skip_page_out)
559 ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
560 gpa, 0, page_shift);
561
562 if (ret == U_SUCCESS)
563 *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
564 else {
565 unlock_page(dpage);
566 __free_page(dpage);
567 goto out_finalize;
568 }
569
570 migrate_vma_pages(&mig);
571
572 out_finalize:
573 migrate_vma_finalize(&mig);
574 return ret;
575 }
576
577 static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
578 unsigned long start, unsigned long end,
579 unsigned long page_shift,
580 struct kvm *kvm, unsigned long gpa)
581 {
582 int ret;
583
584 mutex_lock(&kvm->arch.uvmem_lock);
585 ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
586 mutex_unlock(&kvm->arch.uvmem_lock);
587
588 return ret;
589 }
590
591 /*
592 * Drop device pages that we maintain for the secure guest
593 *
594 * We first mark the pages to be skipped from UV_PAGE_OUT when there
595 * is an HV-side fault on these pages. Next we *get* these pages, forcing
596 * a fault on them, and do fault-time migration to replace the device PTEs in
597 * the QEMU page table with normal PTEs from newly allocated pages.
598 */
599 void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
600 struct kvm *kvm, bool skip_page_out)
601 {
602 int i;
603 struct kvmppc_uvmem_page_pvt *pvt;
604 struct page *uvmem_page;
605 struct vm_area_struct *vma = NULL;
606 unsigned long uvmem_pfn, gfn;
607 unsigned long addr;
608
609 mmap_read_lock(kvm->mm);
610
611 addr = slot->userspace_addr;
612
613 gfn = slot->base_gfn;
614 for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {
615
616 /* Fetch the VMA if addr is not in the latest fetched one */
617 if (!vma || addr >= vma->vm_end) {
618 vma = vma_lookup(kvm->mm, addr);
619 if (!vma) {
620 pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
621 break;
622 }
623 }
624
625 mutex_lock(&kvm->arch.uvmem_lock);
626
627 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
628 uvmem_page = pfn_to_page(uvmem_pfn);
629 pvt = uvmem_page->zone_device_data;
630 pvt->skip_page_out = skip_page_out;
631 pvt->remove_gfn = true;
632
633 if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
634 PAGE_SHIFT, kvm, pvt->gpa))
635 pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
636 pvt->gpa, addr);
637 } else {
638 /* Remove the shared flag if any */
639 kvmppc_gfn_remove(gfn, kvm);
640 }
641
642 mutex_unlock(&kvm->arch.uvmem_lock);
643 }
644
645 mmap_read_unlock(kvm->mm);
646 }
647
648 unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
649 {
650 int srcu_idx;
651 struct kvm_memory_slot *memslot;
652
653 /*
654 * Expect to be called only after INIT_START and before INIT_DONE.
655 * If INIT_DONE was completed, use normal VM termination sequence.
656 */
657 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
658 return H_UNSUPPORTED;
659
660 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
661 return H_STATE;
662
663 srcu_idx = srcu_read_lock(&kvm->srcu);
664
665 kvm_for_each_memslot(memslot, kvm_memslots(kvm))
666 kvmppc_uvmem_drop_pages(memslot, kvm, false);
667
668 srcu_read_unlock(&kvm->srcu, srcu_idx);
669
670 kvm->arch.secure_guest = 0;
671 uv_svm_terminate(kvm->arch.lpid);
672
673 return H_PARAMETER;
674 }
675
676 /*
677 * Get a free device PFN from the pool
678 *
679 * Called when a normal page is moved to secure memory (UV_PAGE_IN). The device
680 * PFN is used to keep track of the secure page on the HV side.
681 *
682 * Called with kvm->arch.uvmem_lock held
683 */
684 static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
685 {
686 struct page *dpage = NULL;
687 unsigned long bit, uvmem_pfn;
688 struct kvmppc_uvmem_page_pvt *pvt;
689 unsigned long pfn_last, pfn_first;
690
691 pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
692 pfn_last = pfn_first +
693 (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);
694
695 spin_lock(&kvmppc_uvmem_bitmap_lock);
696 bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
697 pfn_last - pfn_first);
698 if (bit >= (pfn_last - pfn_first))
699 goto out;
700 bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
701 spin_unlock(&kvmppc_uvmem_bitmap_lock);
702
703 pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
704 if (!pvt)
705 goto out_clear;
706
707 uvmem_pfn = bit + pfn_first;
708 kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);
709
710 pvt->gpa = gpa;
711 pvt->kvm = kvm;
712
713 dpage = pfn_to_page(uvmem_pfn);
714 dpage->zone_device_data = pvt;
715 get_page(dpage);
716 lock_page(dpage);
717 return dpage;
718 out_clear:
719 spin_lock(&kvmppc_uvmem_bitmap_lock);
720 bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
721 out:
722 spin_unlock(&kvmppc_uvmem_bitmap_lock);
723 return NULL;
724 }
725
726 /*
727 * Alloc a PFN from private device memory pool. If @pagein is true,
728 * copy page from normal memory to secure memory using UV_PAGE_IN uvcall.
729 */
730 static int kvmppc_svm_page_in(struct vm_area_struct *vma,
731 unsigned long start,
732 unsigned long end, unsigned long gpa, struct kvm *kvm,
733 unsigned long page_shift,
734 bool pagein)
735 {
736 unsigned long src_pfn, dst_pfn = 0;
737 struct migrate_vma mig;
738 struct page *spage;
739 unsigned long pfn;
740 struct page *dpage;
741 int ret = 0;
742
743 memset(&mig, 0, sizeof(mig));
744 mig.vma = vma;
745 mig.start = start;
746 mig.end = end;
747 mig.src = &src_pfn;
748 mig.dst = &dst_pfn;
749 mig.flags = MIGRATE_VMA_SELECT_SYSTEM;
750
751 ret = migrate_vma_setup(&mig);
752 if (ret)
753 return ret;
754
755 if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
756 ret = -1;
757 goto out_finalize;
758 }
759
760 dpage = kvmppc_uvmem_get_page(gpa, kvm);
761 if (!dpage) {
762 ret = -1;
763 goto out_finalize;
764 }
765
766 if (pagein) {
767 pfn = *mig.src >> MIGRATE_PFN_SHIFT;
768 spage = migrate_pfn_to_page(*mig.src);
769 if (spage) {
770 ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
771 gpa, 0, page_shift);
772 if (ret)
773 goto out_finalize;
774 }
775 }
776
777 *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
778 migrate_vma_pages(&mig);
779 out_finalize:
780 migrate_vma_finalize(&mig);
781 return ret;
782 }
783
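/*
 * Move every GFN of the memslot that has not yet transitioned into secure
 * memory. Called from H_SVM_INIT_DONE and from kvmppc_uvmem_memslot_create()
 * when a memslot is added to an already-secure VM.
 */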
784 static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
785 const struct kvm_memory_slot *memslot)
786 {
787 unsigned long gfn = memslot->base_gfn;
788 struct vm_area_struct *vma;
789 unsigned long start, end;
790 int ret = 0;
791
792 mmap_read_lock(kvm->mm);
793 mutex_lock(&kvm->arch.uvmem_lock);
794 while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
795 ret = H_STATE;
796 start = gfn_to_hva(kvm, gfn);
797 if (kvm_is_error_hva(start))
798 break;
799
800 end = start + (1UL << PAGE_SHIFT);
801 vma = find_vma_intersection(kvm->mm, start, end);
802 if (!vma || vma->vm_start > start || vma->vm_end < end)
803 break;
804
805 ret = kvmppc_svm_page_in(vma, start, end,
806 (gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
807 if (ret) {
808 ret = H_STATE;
809 break;
810 }
811
812 /* relinquish the cpu if needed */
813 cond_resched();
814 }
815 mutex_unlock(&kvm->arch.uvmem_lock);
816 mmap_read_unlock(kvm->mm);
817 return ret;
818 }
819
820 unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
821 {
822 struct kvm_memslots *slots;
823 struct kvm_memory_slot *memslot;
824 int srcu_idx;
825 long ret = H_SUCCESS;
826
827 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
828 return H_UNSUPPORTED;
829
830 /* migrate any unmoved normal pfns to device pfns */
831 srcu_idx = srcu_read_lock(&kvm->srcu);
832 slots = kvm_memslots(kvm);
833 kvm_for_each_memslot(memslot, slots) {
834 ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
835 if (ret) {
836 /*
837 * The pages will remain transitioned.
838 * It is the caller's responsibility to
839 * terminate the VM, which will undo
840 * all state of the VM. Until then
841 * this VM is in an erroneous state.
842 * Its KVMPPC_SECURE_INIT_DONE will
843 * remain unset.
844 */
845 ret = H_STATE;
846 goto out;
847 }
848 }
849
850 kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
851 pr_info("LPID %d went secure\n", kvm->arch.lpid);
852
853 out:
854 srcu_read_unlock(&kvm->srcu, srcu_idx);
855 return ret;
856 }
857
858 /*
859 * Shares the page with HV, thus making it a normal page.
860 *
861 * - If the page is already secure, then provision a new page and share
862 * - If the page is a normal page, share the existing page
863 *
864 * In the former case, uses dev_pagemap_ops.migrate_to_ram handler
865 * to unmap the device page from QEMU's page tables.
866 */
867 static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
868 unsigned long page_shift)
869 {
870
871 int ret = H_PARAMETER;
872 struct page *uvmem_page;
873 struct kvmppc_uvmem_page_pvt *pvt;
874 unsigned long pfn;
875 unsigned long gfn = gpa >> page_shift;
876 int srcu_idx;
877 unsigned long uvmem_pfn;
878
879 srcu_idx = srcu_read_lock(&kvm->srcu);
880 mutex_lock(&kvm->arch.uvmem_lock);
881 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
882 uvmem_page = pfn_to_page(uvmem_pfn);
883 pvt = uvmem_page->zone_device_data;
884 pvt->skip_page_out = true;
885 /*
886 * do not drop the GFN. It is a valid GFN
887 * that is transitioned to a shared GFN.
888 */
889 pvt->remove_gfn = false;
890 }
891
892 retry:
893 mutex_unlock(&kvm->arch.uvmem_lock);
894 pfn = gfn_to_pfn(kvm, gfn);
895 if (is_error_noslot_pfn(pfn))
896 goto out;
897
898 mutex_lock(&kvm->arch.uvmem_lock);
899 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
900 uvmem_page = pfn_to_page(uvmem_pfn);
901 pvt = uvmem_page->zone_device_data;
902 pvt->skip_page_out = true;
903 pvt->remove_gfn = false; /* it continues to be a valid GFN */
904 kvm_release_pfn_clean(pfn);
905 goto retry;
906 }
907
908 if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
909 page_shift)) {
910 kvmppc_gfn_shared(gfn, kvm);
911 ret = H_SUCCESS;
912 }
913 kvm_release_pfn_clean(pfn);
914 mutex_unlock(&kvm->arch.uvmem_lock);
915 out:
916 srcu_read_unlock(&kvm->srcu, srcu_idx);
917 return ret;
918 }
919
920 /*
921 * H_SVM_PAGE_IN: Move page from normal memory to secure memory.
922 *
923 * The H_PAGE_IN_SHARED flag makes the page shared, which means that the same
924 * memory is visible to both UV and HV.
925 */
926 unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
927 unsigned long flags,
928 unsigned long page_shift)
929 {
930 unsigned long start, end;
931 struct vm_area_struct *vma;
932 int srcu_idx;
933 unsigned long gfn = gpa >> page_shift;
934 int ret;
935
936 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
937 return H_UNSUPPORTED;
938
939 if (page_shift != PAGE_SHIFT)
940 return H_P3;
941
942 if (flags & ~H_PAGE_IN_SHARED)
943 return H_P2;
944
945 if (flags & H_PAGE_IN_SHARED)
946 return kvmppc_share_page(kvm, gpa, page_shift);
947
948 ret = H_PARAMETER;
949 srcu_idx = srcu_read_lock(&kvm->srcu);
950 mmap_read_lock(kvm->mm);
951
952 start = gfn_to_hva(kvm, gfn);
953 if (kvm_is_error_hva(start))
954 goto out;
955
956 mutex_lock(&kvm->arch.uvmem_lock);
957 /* Fail the page-in request of an already paged-in page */
958 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
959 goto out_unlock;
960
961 end = start + (1UL << page_shift);
962 vma = find_vma_intersection(kvm->mm, start, end);
963 if (!vma || vma->vm_start > start || vma->vm_end < end)
964 goto out_unlock;
965
966 if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
967 true))
968 goto out_unlock;
969
970 ret = H_SUCCESS;
971
972 out_unlock:
973 mutex_unlock(&kvm->arch.uvmem_lock);
974 out:
975 mmap_read_unlock(kvm->mm);
976 srcu_read_unlock(&kvm->srcu, srcu_idx);
977 return ret;
978 }
979
980
981 /*
982 * Fault handler callback that gets called when HV touches any page that
983 * has been moved to secure memory. We ask UV to give back the page by
984 * issuing a UV_PAGE_OUT uvcall.
985 *
986 * This eventually results in dropping of device PFN and the newly
987 * provisioned page/PFN gets populated in QEMU page tables.
988 */
989 static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
990 {
991 struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;
992
993 if (kvmppc_svm_page_out(vmf->vma, vmf->address,
994 vmf->address + PAGE_SIZE, PAGE_SHIFT,
995 pvt->kvm, pvt->gpa))
996 return VM_FAULT_SIGBUS;
997 else
998 return 0;
999 }
1000
1001 /*
1002 * Release the device PFN back to the pool
1003 *
1004 * Gets called when a secure GFN transitions from a secure-PFN
1005 * to a normal PFN during H_SVM_PAGE_OUT.
1006 * Gets called with kvm->arch.uvmem_lock held.
1007 */
1008 static void kvmppc_uvmem_page_free(struct page *page)
1009 {
1010 unsigned long pfn = page_to_pfn(page) -
1011 (kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
1012 struct kvmppc_uvmem_page_pvt *pvt;
1013
1014 spin_lock(&kvmppc_uvmem_bitmap_lock);
1015 bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
1016 spin_unlock(&kvmppc_uvmem_bitmap_lock);
1017
1018 pvt = page->zone_device_data;
1019 page->zone_device_data = NULL;
1020 if (pvt->remove_gfn)
1021 kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
1022 else
1023 kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
1024 kfree(pvt);
1025 }
1026
1027 static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
1028 .page_free = kvmppc_uvmem_page_free,
1029 .migrate_to_ram = kvmppc_uvmem_migrate_to_ram,
1030 };
1031
1032 /*
1033 * H_SVM_PAGE_OUT: Move page from secure memory to normal memory.
1034 */
1035 unsigned long
1036 kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
1037 unsigned long flags, unsigned long page_shift)
1038 {
1039 unsigned long gfn = gpa >> page_shift;
1040 unsigned long start, end;
1041 struct vm_area_struct *vma;
1042 int srcu_idx;
1043 int ret;
1044
1045 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
1046 return H_UNSUPPORTED;
1047
1048 if (page_shift != PAGE_SHIFT)
1049 return H_P3;
1050
1051 if (flags)
1052 return H_P2;
1053
1054 ret = H_PARAMETER;
1055 srcu_idx = srcu_read_lock(&kvm->srcu);
1056 mmap_read_lock(kvm->mm);
1057 start = gfn_to_hva(kvm, gfn);
1058 if (kvm_is_error_hva(start))
1059 goto out;
1060
1061 end = start + (1UL << page_shift);
1062 vma = find_vma_intersection(kvm->mm, start, end);
1063 if (!vma || vma->vm_start > start || vma->vm_end < end)
1064 goto out;
1065
1066 if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
1067 ret = H_SUCCESS;
1068 out:
1069 mmap_read_unlock(kvm->mm);
1070 srcu_read_unlock(&kvm->srcu, srcu_idx);
1071 return ret;
1072 }
1073
1074 int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
1075 {
1076 unsigned long pfn;
1077 int ret = U_SUCCESS;
1078
1079 pfn = gfn_to_pfn(kvm, gfn);
1080 if (is_error_noslot_pfn(pfn))
1081 return -EFAULT;
1082
1083 mutex_lock(&kvm->arch.uvmem_lock);
1084 if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
1085 goto out;
1086
1087 ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
1088 0, PAGE_SHIFT);
1089 out:
1090 kvm_release_pfn_clean(pfn);
1091 mutex_unlock(&kvm->arch.uvmem_lock);
1092 return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
1093 }
1094
1095 int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new)
1096 {
1097 int ret = __kvmppc_uvmem_memslot_create(kvm, new);
1098
1099 if (!ret)
1100 ret = kvmppc_uv_migrate_mem_slot(kvm, new);
1101
1102 return ret;
1103 }
1104
1105 void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old)
1106 {
1107 __kvmppc_uvmem_memslot_delete(kvm, old);
1108 }
1109
1110 static u64 kvmppc_get_secmem_size(void)
1111 {
1112 struct device_node *np;
1113 int i, len;
1114 const __be32 *prop;
1115 u64 size = 0;
1116
1117 /*
1118 * First try the new ibm,secure-memory nodes, which supersede the
1119 * secure-memory-ranges property.
1120 * If we find some, there is no need to read the deprecated property.
1121 */
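/*
 * An illustrative (hypothetical) node of that form: the reg property uses
 * two address cells and two size cells, so of_read_number(prop + 2, 2)
 * below extracts the size (here 1GB of secure memory at 4GB):
 *
 *	secure-memory@100000000 {
 *		compatible = "ibm,secure-memory";
 *		reg = <0x1 0x00000000 0x0 0x40000000>;
 *	};
 */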
1122 for_each_compatible_node(np, NULL, "ibm,secure-memory") {
1123 prop = of_get_property(np, "reg", &len);
1124 if (!prop)
1125 continue;
1126 size += of_read_number(prop + 2, 2);
1127 }
1128 if (size)
1129 return size;
1130
1131 np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
1132 if (!np)
1133 goto out;
1134
1135 prop = of_get_property(np, "secure-memory-ranges", &len);
1136 if (!prop)
1137 goto out_put;
1138
1139 for (i = 0; i < len / (sizeof(*prop) * 4); i++)
1140 size += of_read_number(prop + (i * 4) + 2, 2);
1141
1142 out_put:
1143 of_node_put(np);
1144 out:
1145 return size;
1146 }
1147
1148 int kvmppc_uvmem_init(void)
1149 {
1150 int ret = 0;
1151 unsigned long size;
1152 struct resource *res;
1153 void *addr;
1154 unsigned long pfn_last, pfn_first;
1155
1156 size = kvmppc_get_secmem_size();
1157 if (!size) {
1158 /*
1159 * Don't fail the initialization of the kvm-hv module if
1160 * the platform doesn't export the ibm,uv-firmware node.
1161 * Let normal guests run on such a PEF-disabled platform.
1162 */
1163 pr_info("KVMPPC-UVMEM: No support for secure guests\n");
1164 goto out;
1165 }
1166
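/*
 * Carve out an unused physical address range to back the device
 * private pages, map struct pages for it via memremap_pages(), and
 * track per-PFN allocation with one bit each in kvmppc_uvmem_bitmap.
 */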
1167 res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
1168 if (IS_ERR(res)) {
1169 ret = PTR_ERR(res);
1170 goto out;
1171 }
1172
1173 kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
1174 kvmppc_uvmem_pgmap.range.start = res->start;
1175 kvmppc_uvmem_pgmap.range.end = res->end;
1176 kvmppc_uvmem_pgmap.nr_range = 1;
1177 kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
1178 /* just one global instance: */
1179 kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
1180 addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
1181 if (IS_ERR(addr)) {
1182 ret = PTR_ERR(addr);
1183 goto out_free_region;
1184 }
1185
1186 pfn_first = res->start >> PAGE_SHIFT;
1187 pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
1188 kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
1189 sizeof(unsigned long), GFP_KERNEL);
1190 if (!kvmppc_uvmem_bitmap) {
1191 ret = -ENOMEM;
1192 goto out_unmap;
1193 }
1194
1195 pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
1196 return ret;
1197 out_unmap:
1198 memunmap_pages(&kvmppc_uvmem_pgmap);
1199 out_free_region:
1200 release_mem_region(res->start, size);
1201 out:
1202 return ret;
1203 }
1204
1205 void kvmppc_uvmem_free(void)
1206 {
1207 if (!kvmppc_uvmem_bitmap)
1208 return;
1209
1210 memunmap_pages(&kvmppc_uvmem_pgmap);
1211 release_mem_region(kvmppc_uvmem_pgmap.range.start,
1212 range_len(&kvmppc_uvmem_pgmap.range));
1213 kfree(kvmppc_uvmem_bitmap);
1214 }
1215