/* Excerpt: lines matching "gpa"; intervening lines of the original file are omitted. */
// SPDX-License-Identifier: GPL-2.0

/*
 * A pseries guest can be run as a secure guest on Ultravisor-enabled
 * POWER platforms.
 *
 * The page-in or page-out requests from UV will come to HV as hcalls and
 * HV will call back into UV via ultracalls to satisfy these page requests.
 */
/*
 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
 * page-in and page-out requests for the same GPA. Concurrent accesses
 * migrate_vma routines and page-in/out routines.
 *
 * The per-guest mutex comes with a cost though. Mainly it serializes the
 * fault path, as page-out can occur when HV faults on accessing secure
 * guest pages. Currently UV issues page-in requests for all the guest
 * not a cause for concern. Also, currently the number of page-outs caused
 * overcommitting, then we might see concurrent guest-driven page-outs.
 *
 * Locking order:
 *
 * 1. kvm->srcu - Protects KVM memslots
 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots, thus acting
 *    as sync-points for page-in/out
 */
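
/*
 * Illustrative sketch (not part of the original file): the hcall handlers
 * below take these locks in exactly this nesting order; e.g.
 * kvmppc_h_svm_page_in() does roughly
 *
 *	srcu_idx = srcu_read_lock(&kvm->srcu);
 *	mmap_read_lock(kvm->mm);
 *	mutex_lock(&kvm->arch.uvmem_lock);
 *	... migrate the page ...
 *	mutex_unlock(&kvm->arch.uvmem_lock);
 *	mmap_read_unlock(kvm->mm);
 *	srcu_read_unlock(&kvm->srcu, srcu_idx);
 */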

/*
 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued
 * page size. Using 64K page size is correct here because any non-secure
 * and page-out ensures this.
 * into 64K mappings and would have done page-outs earlier.
 * 64K page size and in fact fails any page-in/page-out requests of
 * non-64K size upfront. If and when UV starts supporting multiple
 * page sizes, we need to break this assumption.
 */
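
/*
 * Illustrative note (not part of the original file): with a 64K base page
 * size, PAGE_SHIFT is 16, so the hcall handlers below convert between GPA
 * and GFN with plain shifts,
 *
 *	gfn = gpa >> page_shift;	(page_shift == PAGE_SHIFT == 16)
 *	gpa = gfn << PAGE_SHIFT;
 *
 * and any other page_shift is expected to be rejected up front (that check
 * is not visible in this excerpt).
 */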

/*
 * States of a GFN
 * ---------------
 *
 * (a) Secure - The GFN is secure. The GFN is associated with
 *	a Secure VM; the contents of the GFN are not accessible
 *	to the Hypervisor. This GFN can be backed by a secure-PFN,
 *	or can be backed by a normal-PFN with contents encrypted.
 *	The former is true when the GFN is paged-in into the
 *	ultravisor. The latter is true when the GFN is paged-out
 *	of the ultravisor.
 *
 * (b) Shared - The GFN is shared. The GFN is associated with a
 *	normal VM; the contents of the GFN are accessible to the
 *	Hypervisor. This GFN is backed by a normal-PFN and its
 *	content is un-encrypted.
 *
 * (c) Normal - The GFN is normal. The GFN is associated with
 *
 * States of a VM
 * ---------------
 *
 * Normal VM: A VM whose contents are always accessible to
 *	the hypervisor. All its GFNs are normal-GFNs.
 *
 * Secure VM: A VM whose contents are not accessible to the
 *	hypervisor. Its GFNs are
 *	either Shared-GFNs or Secure-GFNs.
 *
 * Transient VM: A VM that is transitioning from normal to secure.
 *	Its GFNs can be
 *	in any of the three states, i.e. Secure-GFN, Shared-GFN,
 *	and Normal-GFN. The VM never executes in this state
 *	in supervisor-mode.
 *
 * VM state transitions
 * --------------------
 *
 * H_SVM_INIT_DONE moves the VM into
 * secure-state. At this point any left-over normal-GFNs are
 * transitioned to Secure-GFNs.
 *
 * H_SVM_INIT_ABORT moves the VM back to normal state.
 * All its GFNs are moved to Normal-GFNs.
 *
 * UV_TERMINATE transitions the secure-VM back to normal-VM. All
 * the secure-GFNs and shared-GFNs are transitioned to normal-GFNs.
 * Note: The contents of the normal-GFNs are undefined at this point.
 *
 * GFN state tracking
 * ------------------
 *
 * Secure GFN is associated with a secure-PFN; also called uvmem_pfn,
 * when the GFN is paged-in. Its pfn[] has the KVMPPC_GFN_UVMEM_PFN flag
 * set, and contains the value of the secure-PFN.
 * It is associated with a normal-PFN; also called mem_pfn, when
 * the GFN is paged-out. Its pfn[] has the KVMPPC_GFN_MEM_PFN flag set.
 * The value of the normal-PFN is not tracked.
 *
 * Shared GFN is associated with a normal-PFN. Its pfn[] has the
 * KVMPPC_GFN_SHARED flag set. The value of the normal-PFN
 * is not tracked.
 *
 * Normal GFN is associated with a normal-PFN. Its pfn[] has
 * no flag set. The value of the normal-PFN is not tracked.
 */

#define KVMPPC_GFN_UVMEM_PFN	(1UL << 63)
#define KVMPPC_GFN_MEM_PFN	(1UL << 62)
#define KVMPPC_GFN_SHARED	(1UL << 61)
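
/*
 * Illustrative note (not part of the original file): a pfn[] entry packs
 * the state flags above together with an optional PFN value. For example,
 * a secure GFN that is currently paged-in and backed by device PFN 0x12345
 * would be recorded as
 *
 *	p->pfns[index] = 0x12345UL | KVMPPC_GFN_UVMEM_PFN;
 *
 * and the backing PFN is recovered by masking the flag bits off again, as
 * kvmppc_gfn_is_uvmem_pfn() does below.
 */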

	unsigned long gpa;	/* member of struct kvmppc_uvmem_page_pvt; see the note below */
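
/*
 * For context (a sketch reconstructed from the fields referenced later in
 * this excerpt, not copied verbatim): the gpa above is a member of the
 * per-page private data that hangs off each device page,
 *
 *	struct kvmppc_uvmem_page_pvt {
 *		struct kvm *kvm;
 *		unsigned long gpa;
 *		bool skip_page_out;
 *		bool remove_gfn;
 *	};
 *
 * which kvmppc_uvmem_get_page() stores in dpage->zone_device_data.
 */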

/* in kvmppc_uvmem_slot_init(): */
		return -ENOMEM;
	p->pfns = vcalloc(slot->npages, sizeof(*p->pfns));
	if (!p->pfns) {
		return -ENOMEM;
	p->nr_pfns = slot->npages;
	p->base_pfn = slot->base_gfn;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_add(&p->list, &kvm->arch.uvmem_pfns);
	mutex_unlock(&kvm->arch.uvmem_lock);
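
/*
 * Illustrative note (not part of the original file): a GFN is located
 * within its slot entry by a plain offset,
 *
 *	index = gfn - p->base_pfn;
 *
 * valid when p->base_pfn <= gfn < p->base_pfn + p->nr_pfns, which is the
 * lookup pattern used by kvmppc_mark_gfn() and kvmppc_gfn_is_uvmem_pfn()
 * below.
 */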

/* in kvmppc_uvmem_slot_free(): */
	mutex_lock(&kvm->arch.uvmem_lock);
	list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
		if (p->base_pfn == slot->base_gfn) {
			vfree(p->pfns);
			list_del(&p->list);
	mutex_unlock(&kvm->arch.uvmem_lock);

/* in kvmppc_mark_gfn(): */
	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (flag == KVMPPC_GFN_UVMEM_PFN)
				p->pfns[index] = uvmem_pfn | flag;
			else
				p->pfns[index] = flag;

/* mark the GFN as secure-GFN associated with the device PFN @uvmem_pfn. */

/* mark the GFN as secure-GFN associated with a memory-PFN. */

/* mark the GFN as a non-existent GFN. */

/* return true if the GFN is a secure-GFN backed by a secure-PFN */

/* in kvmppc_gfn_is_uvmem_pfn(): */
	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {
				*uvmem_pfn = p->pfns[index] &
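
/*
 * Usage sketch (assembled from the call sites later in this excerpt, not
 * verbatim): callers look up the device PFN under the uvmem lock and then
 * reach the per-page private data,
 *
 *	mutex_lock(&kvm->arch.uvmem_lock);
 *	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn))
 *		pvt = pfn_to_page(uvmem_pfn)->zone_device_data;
 *	mutex_unlock(&kvm->arch.uvmem_lock);
 */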

/* Must be called with kvm->arch.uvmem_lock held. */

/* in kvmppc_next_nontransitioned_gfn(): */
	list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
		if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {

	for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
		unsigned long index = i - p->base_pfn;

		if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {

/* in kvmppc_memslot_page_merge(): */
	unsigned long gfn = memslot->base_gfn;

	end = start + (memslot->npages << PAGE_SHIFT);

	mmap_write_lock(kvm->mm);
	do {
		vma = find_vma_intersection(kvm->mm, start, end);

		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
				  merge_flag, &vma->vm_flags);

		start = vma->vm_end;
	} while (end > vma->vm_end);

	mmap_write_unlock(kvm->mm);
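
/*
 * Illustrative note (not part of the original excerpt): merge_flag is an
 * assumption derived from the function's @merge argument, roughly
 *
 *	merge_flag = merge ? MADV_MERGEABLE : MADV_UNMERGEABLE;
 *
 * so that memslots of a secure guest are opted out of KSM merging before
 * their pages are paged in to the ultravisor.
 */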

/* in __kvmppc_uvmem_memslot_delete(): */
	uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);

/* in __kvmppc_uvmem_memslot_create(): */
	ret = uv_register_mem_slot(kvm->arch.lpid,
				   memslot->base_gfn << PAGE_SHIFT,
				   memslot->npages * PAGE_SIZE,
				   0, memslot->id);

/* in kvmppc_h_svm_init_start(): */
	kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

	if (!kvm->arch.svm_enabled)

	srcu_idx = srcu_read_lock(&kvm->srcu);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

/*
 * Caller must hold kvm->arch.uvmem_lock.
 */
static int __kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa, struct page *fault_page)
{
	/* The requested page is already paged-out, nothing to do */
	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))

		return -1;

		ret = -1;

	pvt = spage->zone_device_data;

	/*
	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
	 * - When a secure page is converted to a shared page, we *get*
	 *   the page to essentially unmap the device page. In this
	 *   case we skip page-out.
	 */
	if (!pvt->skip_page_out)
		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
				  gpa, 0, page_shift);
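
/*
 * Sketch of the flow elided above (an assumption based on the generic
 * migrate_vma API, not verbatim from the original): the function drives a
 * one-page migration from the device-private (secure) page back to normal
 * memory,
 *
 *	migrate_vma_setup(&mig);
 *	... allocate the destination page, uv_page_out() the contents
 *	    unless skip_page_out is set ...
 *	migrate_vma_pages(&mig);
 *	migrate_vma_finalize(&mig);
 */
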
static int kvmppc_svm_page_out(struct vm_area_struct *vma, unsigned long start,
		unsigned long end, unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa, struct page *fault_page)
{
	mutex_lock(&kvm->arch.uvmem_lock);
	ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa,
				    fault_page);
	mutex_unlock(&kvm->arch.uvmem_lock);

/* in kvmppc_uvmem_drop_pages(): */
	mmap_read_lock(kvm->mm);

	addr = slot->userspace_addr;

	gfn = slot->base_gfn;
	for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {

		if (!vma || addr >= vma->vm_end) {
			vma = vma_lookup(kvm->mm, addr);

		mutex_lock(&kvm->arch.uvmem_lock);

			pvt = uvmem_page->zone_device_data;
			pvt->skip_page_out = skip_page_out;
			pvt->remove_gfn = true;

					       PAGE_SHIFT, kvm, pvt->gpa, NULL))
				pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
				       pvt->gpa, addr);

		mutex_unlock(&kvm->arch.uvmem_lock);

	mmap_read_unlock(kvm->mm);

/* in kvmppc_h_svm_init_abort(): */
	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)

	srcu_idx = srcu_read_lock(&kvm->srcu);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	kvm->arch.secure_guest = 0;
	uv_svm_terminate(kvm->arch.lpid);

/*
 * Called with kvm->arch.uvmem_lock held.
 */
static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
{
	bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
				  pfn_last - pfn_first);
	if (bit >= (pfn_last - pfn_first))

	bitmap_set(kvmppc_uvmem_bitmap, bit, 1);

	kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);

	pvt->gpa = gpa;
	pvt->kvm = kvm;

	dpage->zone_device_data = pvt;

	bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
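
/*
 * Illustrative note (not part of the original file): the device PFN handed
 * out here is just the allocated bitmap position offset into the hotplugged
 * ZONE_DEVICE range,
 *
 *	uvmem_pfn = bit + pfn_first;
 *	dpage = pfn_to_page(uvmem_pfn);
 *
 * and kvmppc_uvmem_page_free() reverses it with page_to_pfn(page) - pfn_first
 * before clearing the bitmap bit again.
 */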

/* in kvmppc_svm_page_in(): */
		unsigned long end, unsigned long gpa, struct kvm *kvm,

		ret = -1;

	dpage = kvmppc_uvmem_get_page(gpa, kvm);
	if (!dpage) {
		ret = -1;

	ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
			 gpa, 0, page_shift);

/* in kvmppc_uv_migrate_mem_slot(): */
	unsigned long gfn = memslot->base_gfn;

	mmap_read_lock(kvm->mm);
	mutex_lock(&kvm->arch.uvmem_lock);

		end = start + (1UL << PAGE_SHIFT);
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma || vma->vm_start > start || vma->vm_end < end)

	mutex_unlock(&kvm->arch.uvmem_lock);
	mmap_read_unlock(kvm->mm);

/* in kvmppc_h_svm_init_done(): */
	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))

	srcu_idx = srcu_read_lock(&kvm->srcu);

	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
	pr_info("LPID %d went secure\n", kvm->arch.lpid);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

/*
 * - If the page is already secure, then provision a new page and share
 * - If the page is a normal page, share the existing page
 */
static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
				       unsigned long page_shift)
{
	unsigned long gfn = gpa >> page_shift;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	mutex_lock(&kvm->arch.uvmem_lock);

		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;

		pvt->remove_gfn = false;

	mutex_unlock(&kvm->arch.uvmem_lock);

	mutex_lock(&kvm->arch.uvmem_lock);

		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		pvt->remove_gfn = false;	/* it continues to be a valid GFN */

	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,

	mutex_unlock(&kvm->arch.uvmem_lock);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,

	unsigned long gfn = gpa >> page_shift;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))

		return kvmppc_share_page(kvm, gpa, page_shift);

	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);

	mutex_lock(&kvm->arch.uvmem_lock);
	/* Fail the page-in request of an already paged-in page */

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)

	if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,

	mutex_unlock(&kvm->arch.uvmem_lock);

	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);

/* in kvmppc_uvmem_migrate_to_ram(): */
	struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;

	if (kvmppc_svm_page_out(vmf->vma, vmf->address,
				vmf->address + PAGE_SIZE, PAGE_SHIFT,
				pvt->kvm, pvt->gpa, vmf->page))
		return VM_FAULT_SIGBUS;

/*
 * Gets called when a secure GFN transitions from a secure-PFN
 * to a normal-PFN.
 * Gets called with kvm->arch.uvmem_lock held.
 */

/* in kvmppc_uvmem_page_free(): */
	unsigned long pfn = page_to_pfn(page) -

	bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);

	pvt = page->zone_device_data;
	page->zone_device_data = NULL;
	if (pvt->remove_gfn)
		kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	else
		kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
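
/*
 * For context (a reconstruction, not verbatim from the original): the two
 * callbacks above are wired into the device pagemap roughly as
 *
 *	static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
 *		.page_free	= kvmppc_uvmem_page_free,
 *		.migrate_to_ram	= kvmppc_uvmem_migrate_to_ram,
 *	};
 *
 * which kvmppc_uvmem_init() below installs on the hotplugged
 * secure-memory range.
 */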

unsigned long kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,

	unsigned long gfn = gpa >> page_shift;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))

	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)

	if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa, NULL))

	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);

/* in kvmppc_send_page_to_uv(): */
		return -EFAULT;

	mutex_lock(&kvm->arch.uvmem_lock);

	ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,

	mutex_unlock(&kvm->arch.uvmem_lock);
	return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;

/* in kvmppc_get_secmem_size(): */
	/*
	 * First try the new ibm,secure-memory nodes which supersede the
	 * secure-memory-ranges property.
	 */
	for_each_compatible_node(np, NULL, "ibm,secure-memory") {

	np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");

	prop = of_get_property(np, "secure-memory-ranges", &len);

/* in kvmppc_uvmem_init(): */
		/*
		 * Don't fail the initialization of the kvm-hv module if
		 * the platform doesn't export the ibm,uv-firmware node.
		 * Let normal guests run on such PEF-disabled platforms.
		 */
		pr_info("KVMPPC-UVMEM: No support for secure guests\n");

	kvmppc_uvmem_pgmap.range.start = res->start;
	kvmppc_uvmem_pgmap.range.end = res->end;
	kvmppc_uvmem_pgmap.nr_range = 1;
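	/*
	 * Sketch of the elided steps (an assumption, not verbatim from the
	 * original): the pagemap is typed as device-private memory, given
	 * its ops, and then mapped, roughly
	 *
	 *	kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
	 *	kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
	 *	addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
	 */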
	pfn_first = res->start >> PAGE_SHIFT;

	kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
				      sizeof(unsigned long), GFP_KERNEL);
	if (!kvmppc_uvmem_bitmap) {
		ret = -ENOMEM;

	pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);

	release_mem_region(res->start, size);