
Searched refs:pfns (Results 1 – 19 of 19) sorted by relevance

/Linux-v5.4/mm/
hmm.c 261 uint64_t *pfns = range->pfns; in hmm_pfns_bad() local
266 pfns[i] = range->values[HMM_PFN_ERROR]; in hmm_pfns_bad()
289 uint64_t *pfns = range->pfns; in hmm_vma_walk_hole_() local
299 pfns[i] = range->values[HMM_PFN_NONE]; in hmm_vma_walk_hole_()
304 &pfns[i]); in hmm_vma_walk_hole_()
314 uint64_t pfns, uint64_t cpu_flags, in hmm_pte_need_fault() argument
332 pfns = (pfns & range->pfn_flags_mask) | range->default_flags; in hmm_pte_need_fault()
335 if (!(pfns & range->flags[HMM_PFN_VALID])) in hmm_pte_need_fault()
340 if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) { in hmm_pte_need_fault()
341 *write_fault = pfns & range->flags[HMM_PFN_WRITE]; in hmm_pte_need_fault()
[all …]
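
The mm/hmm.c hits above are the heart of the hmm_range_fault() walk: holes and bad entries are reported by writing range->values[HMM_PFN_NONE] or range->values[HMM_PFN_ERROR] into the pfns array, and hmm_pte_need_fault() combines the caller's per-entry request with range->pfn_flags_mask and range->default_flags to decide whether a CPU fault must be raised. Below is a minimal, stand-alone sketch of that decision; the bit positions and the need_fault() helper are invented for the illustration and are not the kernel's definitions.

    /* Hypothetical flag layout; in the kernel these come from range->flags[]. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PFN_VALID (1ULL << 0)
    #define PFN_WRITE (1ULL << 1)
    #define PFN_SHIFT 2                /* low bits carry flags, the rest is the pfn */

    /* Decide, like hmm_pte_need_fault(), whether an access must fault. */
    static void need_fault(uint64_t want, uint64_t have,
                           bool *fault, bool *write_fault)
    {
        *fault = *write_fault = false;
        if (!(want & PFN_VALID))
            return;                    /* caller did not ask for this page */
        if (!(have & PFN_VALID))
            *fault = true;             /* page not present at all */
        if ((want & PFN_WRITE) && !(have & PFN_WRITE))
            *fault = *write_fault = true;  /* present but read-only */
    }

    int main(void)
    {
        uint64_t entry = (0x1234ULL << PFN_SHIFT) | PFN_VALID; /* read-only page */
        bool fault, write_fault;

        need_fault(PFN_VALID | PFN_WRITE, entry, &fault, &write_fault);
        printf("pfn=%#llx fault=%d write_fault=%d\n",
               (unsigned long long)(entry >> PFN_SHIFT), fault, write_fault);
        return 0;
    }
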
sparse.c 241 unsigned long pfns; in subsection_map_init() local
243 pfns = min(nr_pages, PAGES_PER_SECTION in subsection_map_init()
246 subsection_mask_set(ms->usage->subsection_map, pfn, pfns); in subsection_map_init()
249 pfns, subsection_map_index(pfn), in subsection_map_init()
250 subsection_map_index(pfn + pfns - 1)); in subsection_map_init()
252 pfn += pfns; in subsection_map_init()
253 nr_pages -= pfns; in subsection_map_init()
memory_hotplug.c 313 unsigned long pfns; in __add_pages() local
315 pfns = min(nr_pages, PAGES_PER_SECTION in __add_pages()
317 err = sparse_add_section(nid, pfn, pfns, altmap); in __add_pages()
320 pfn += pfns; in __add_pages()
321 nr_pages -= pfns; in __add_pages()
531 unsigned long pfns; in __remove_pages() local
534 pfns = min(nr_pages, PAGES_PER_SECTION in __remove_pages()
536 __remove_section(zone, pfn, pfns, map_offset, altmap); in __remove_pages()
537 pfn += pfns; in __remove_pages()
538 nr_pages -= pfns; in __remove_pages()
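
In the sparse.c and memory_hotplug.c hits, pfns is not an array but a step count: each loop iteration clamps the work to whatever fits before the next memory section boundary, then advances pfn and shrinks nr_pages. A stand-alone sketch of that chunking idiom (the section size here is an assumption, not the kernel's PAGES_PER_SECTION):

    #include <stdio.h>

    #define PAGES_PER_SECTION 32768UL   /* e.g. 128 MiB sections of 4 KiB pages */
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Walk [pfn, pfn + nr_pages) one memory section at a time. */
    static void walk_sections(unsigned long pfn, unsigned long nr_pages)
    {
        while (nr_pages) {
            /* pages remaining before the next section boundary */
            unsigned long pfns = MIN(nr_pages,
                                     PAGES_PER_SECTION - (pfn % PAGES_PER_SECTION));

            printf("section %lu: pfn %lu..%lu (%lu pages)\n",
                   pfn / PAGES_PER_SECTION, pfn, pfn + pfns - 1, pfns);

            pfn += pfns;
            nr_pages -= pfns;
        }
    }

    int main(void)
    {
        walk_sections(30000, 70000);    /* starts mid-section, spans several sections */
        return 0;
    }
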
/Linux-v5.4/drivers/xen/
xlate_mmu.c 193 xen_pfn_t *pfns; member
201 info->pfns[info->idx++] = gfn; in setup_balloon_gfn()
218 xen_pfn_t *pfns; in xen_xlate_map_ballooned_pages() local
230 pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL); in xen_xlate_map_ballooned_pages()
231 if (!pfns) { in xen_xlate_map_ballooned_pages()
240 kfree(pfns); in xen_xlate_map_ballooned_pages()
244 data.pfns = pfns; in xen_xlate_map_ballooned_pages()
254 kfree(pfns); in xen_xlate_map_ballooned_pages()
259 *gfns = pfns; in xen_xlate_map_ballooned_pages()
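
xen_xlate_map_ballooned_pages() shows a common ownership pattern: the pfns array is kcalloc'ed one entry per grant frame, filled as the ballooned pages are set up, freed on every error path, and handed to the caller through *gfns only on success. A simplified userspace sketch of that pattern (types and the per-frame fill step are stand-ins, not the Xen code):

    #include <errno.h>
    #include <stdlib.h>

    typedef unsigned long xen_pfn_t;

    /* stand-in for the real per-frame setup (setup_balloon_gfn() and friends) */
    static int get_gfn(unsigned int i, xen_pfn_t *gfn)
    {
        *gfn = 0x100000UL + i;
        return 0;
    }

    static int map_ballooned_pages(xen_pfn_t **gfns, unsigned int nr_frames)
    {
        xen_pfn_t *pfns = calloc(nr_frames, sizeof(pfns[0]));
        if (!pfns)
            return -ENOMEM;

        for (unsigned int i = 0; i < nr_frames; i++) {
            if (get_gfn(i, &pfns[i])) {
                free(pfns);          /* error: the array never escapes */
                return -EFAULT;
            }
        }

        *gfns = pfns;                /* success: ownership moves to the caller */
        return 0;
    }

    int main(void)
    {
        xen_pfn_t *gfns = NULL;

        if (map_ballooned_pages(&gfns, 4) == 0)
            free(gfns);              /* the caller is responsible for the array now */
        return 0;
    }
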
privcmd.c 733 xen_pfn_t *pfns = NULL; in privcmd_ioctl_mmap_resource() local
752 pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL); in privcmd_ioctl_mmap_resource()
753 if (!pfns) { in privcmd_ioctl_mmap_resource()
773 pfns[i] = pfn + (i % XEN_PFN_PER_PAGE); in privcmd_ioctl_mmap_resource()
784 set_xen_guest_handle(xdata.frame_list, pfns); in privcmd_ioctl_mmap_resource()
804 pfns, kdata.num, (int *)pfns, in privcmd_ioctl_mmap_resource()
814 rc = pfns[i]; in privcmd_ioctl_mmap_resource()
824 kfree(pfns); in privcmd_ioctl_mmap_resource()
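
privcmd_ioctl_mmap_resource() reuses one buffer for two jobs: pfns is first filled with the frame numbers to map, then the same memory (cast to int *) receives the per-frame status codes from the mapping call, which is why the error loop afterwards reads rc = pfns[i]. A simplified sketch of that reuse, with an invented map_frames() helper standing in for the real remap call:

    #include <errno.h>
    #include <stdlib.h>

    typedef unsigned long xen_pfn_t;

    /* invented stand-in: "map" each frame and record a per-frame status */
    static void map_frames(const xen_pfn_t *frames, int *errs, unsigned int num)
    {
        for (unsigned int i = 0; i < num; i++)
            errs[i] = 0;             /* 0 = mapped, negative = per-frame error */
    }

    static int mmap_resource(xen_pfn_t base_pfn, unsigned int num)
    {
        xen_pfn_t *pfns = calloc(num, sizeof(*pfns));
        int rc = 0;

        if (!pfns)
            return -ENOMEM;

        for (unsigned int i = 0; i < num; i++)
            pfns[i] = base_pfn + i;  /* build the frame list */

        /* the same storage doubles as the per-frame result array */
        map_frames(pfns, (int *)pfns, num);

        for (unsigned int i = 0; i < num; i++) {
            int err = ((int *)pfns)[i];
            if (err < 0) {           /* report the first per-frame failure */
                rc = err;
                break;
            }
        }

        free(pfns);
        return rc;
    }

    int main(void) { return mmap_resource(0x1000, 8); }
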
/Linux-v5.4/drivers/virtio/
virtio_balloon.c 109 __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX]; member
144 sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns); in tell_host()
156 __virtio32 pfns[], struct page *page) in set_page_pfns() argument
165 pfns[i] = cpu_to_virtio32(vb->vdev, in set_page_pfns()
177 num = min(num, ARRAY_SIZE(vb->pfns)); in fill_balloon()
202 set_page_pfns(vb, vb->pfns + vb->num_pfns, page); in fill_balloon()
241 num = min(num, ARRAY_SIZE(vb->pfns)); in leak_balloon()
251 set_page_pfns(vb, vb->pfns + vb->num_pfns, page); in leak_balloon()
731 set_page_pfns(vb, vb->pfns, newpage); in virtballoon_migratepage()
739 set_page_pfns(vb, vb->pfns, page); in virtballoon_migratepage()
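
The virtio_balloon hits show why a pfns batch array is needed at all: the balloon protocol speaks in fixed 4 KiB balloon pages, so set_page_pfns() expands a single guest page into several consecutive entries, and fill_balloon()/leak_balloon() cap each round at ARRAY_SIZE(vb->pfns) before telling the host. The stand-alone sketch below mirrors that expansion; the page sizes and array size are assumptions for the example.

    #include <stdint.h>
    #include <stdio.h>

    #define BALLOON_PFN_SHIFT    12                 /* host-side 4 KiB balloon pages */
    #define GUEST_PAGE_SHIFT     16                 /* pretend the guest uses 64 KiB pages */
    #define PFNS_PER_GUEST_PAGE  (1u << (GUEST_PAGE_SHIFT - BALLOON_PFN_SHIFT))
    #define ARRAY_PFNS_MAX       256

    static uint32_t pfns[ARRAY_PFNS_MAX];
    static unsigned int num_pfns;

    /* mirrors set_page_pfns(): record every balloon pfn covering one guest page */
    static void set_page_pfns(uint32_t *dst, unsigned long guest_page_addr)
    {
        uint32_t first = guest_page_addr >> BALLOON_PFN_SHIFT;

        for (unsigned int i = 0; i < PFNS_PER_GUEST_PAGE; i++)
            dst[i] = first + i;
    }

    int main(void)
    {
        /* "inflate" two guest pages into one batch for the host */
        set_page_pfns(pfns + num_pfns, 0x40000);
        num_pfns += PFNS_PER_GUEST_PAGE;
        set_page_pfns(pfns + num_pfns, 0x80000);
        num_pfns += PFNS_PER_GUEST_PAGE;

        printf("batch of %u balloon pfns, first=%u last=%u\n",
               num_pfns, pfns[0], pfns[num_pfns - 1]);
        return 0;
    }
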
/Linux-v5.4/drivers/gpu/drm/nouveau/
nouveau_dmem.c 325 unsigned long pfns[1]; in nouveau_dmem_page_alloc_locked() local
330 ret = nouveau_dmem_pages_alloc(drm, 1, pfns); in nouveau_dmem_page_alloc_locked()
334 page = pfn_to_page(pfns[0]); in nouveau_dmem_page_alloc_locked()
689 page = hmm_device_entry_to_page(range, range->pfns[i]); in nouveau_dmem_convert_pfn()
693 if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) { in nouveau_dmem_convert_pfn()
699 range->pfns[i] = 0; in nouveau_dmem_convert_pfn()
704 range->pfns[i] &= ((1UL << range->pfn_shift) - 1); in nouveau_dmem_convert_pfn()
705 range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift; in nouveau_dmem_convert_pfn()
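
nouveau_dmem_convert_pfn() rewrites each range->pfns[] entry that refers to device-private memory: the low pfn_shift bits (the HMM flags) are preserved and the frame number above them is replaced with the page's address in device memory; entries that are not device-private are simply zeroed. A small stand-alone sketch of that re-encoding, with pfn_shift and PAGE_SHIFT chosen arbitrarily for the example:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PFN_SHIFT  6        /* low 6 bits of each entry hold the HMM flags */

    /* keep the flag bits, replace the frame number with a device address */
    static uint64_t convert_entry(uint64_t entry, uint64_t dev_addr)
    {
        entry &= (1ULL << PFN_SHIFT) - 1;
        entry |= (dev_addr >> PAGE_SHIFT) << PFN_SHIFT;
        return entry;
    }

    int main(void)
    {
        uint64_t before = (0xabcdULL << PFN_SHIFT) | 0x3;  /* CPU pfn plus two flags */
        uint64_t after  = convert_entry(before, 0x200000); /* device address 2 MiB */

        printf("flags kept: %#llx, new pfn: %#llx\n",
               (unsigned long long)(after & ((1ULL << PFN_SHIFT) - 1)),
               (unsigned long long)(after >> PFN_SHIFT));
        return 0;
    }
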
nouveau_svm.c 685 range.pfns = args.phys; in nouveau_svm_fault()
719 !(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_V) || in nouveau_svm_fault()
720 (!(range.pfns[pi] & NVIF_VMM_PFNMAP_V0_W) && in nouveau_svm_fault()
/Linux-v5.4/drivers/iommu/
iova.c 781 unsigned long pfns[IOVA_MAG_SIZE]; member
812 struct iova *iova = private_find_iova(iovad, mag->pfns[i]); in iova_magazine_free_pfns()
842 for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--) in iova_magazine_pop()
847 pfn = mag->pfns[i]; in iova_magazine_pop()
848 mag->pfns[i] = mag->pfns[--mag->size]; in iova_magazine_pop()
857 mag->pfns[mag->size++] = pfn; in iova_magazine_push()
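
The iova.c hits describe a per-CPU "magazine": a small fixed-size stack of recently freed pfns that lets allocations be recycled without touching the rbtree. Pop scans down from the top for an entry that satisfies limit_pfn and fills the hole with the last element so removal stays O(1). A stand-alone model of that structure (sizes and error handling are simplified, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAG_SIZE 128

    struct magazine {
        unsigned long size;
        unsigned long pfns[MAG_SIZE];
    };

    static bool mag_push(struct magazine *mag, unsigned long pfn)
    {
        if (mag->size == MAG_SIZE)
            return false;                       /* full: fall back to the slow path */
        mag->pfns[mag->size++] = pfn;
        return true;
    }

    static bool mag_pop(struct magazine *mag, unsigned long limit_pfn,
                        unsigned long *pfn)
    {
        long i;

        /* search from the top for an entry below the limit */
        for (i = (long)mag->size - 1; i >= 0 && mag->pfns[i] > limit_pfn; i--)
            ;
        if (i < 0)
            return false;

        *pfn = mag->pfns[i];
        mag->pfns[i] = mag->pfns[--mag->size];  /* fill the hole with the last entry */
        return true;
    }

    int main(void)
    {
        struct magazine mag = { 0 };
        unsigned long pfn;

        mag_push(&mag, 0x1000);
        mag_push(&mag, 0x9000);
        if (mag_pop(&mag, 0x4000, &pfn))        /* skips 0x9000, recycles 0x1000 */
            printf("reused pfn %#lx, %lu entries left\n", pfn, mag.size);
        return 0;
    }
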
/Linux-v5.4/drivers/gpu/drm/amd/amdgpu/
amdgpu_ttm.c 796 uint64_t *pfns; in amdgpu_ttm_tt_get_user_pages() local
825 pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL); in amdgpu_ttm_tt_get_user_pages()
826 if (unlikely(!pfns)) { in amdgpu_ttm_tt_get_user_pages()
836 range->pfns = pfns; in amdgpu_ttm_tt_get_user_pages()
857 pages[i] = hmm_device_entry_to_page(range, pfns[i]); in amdgpu_ttm_tt_get_user_pages()
860 i, pfns[i]); in amdgpu_ttm_tt_get_user_pages()
873 kvfree(pfns); in amdgpu_ttm_tt_get_user_pages()
897 WARN_ONCE(!gtt->range || !gtt->range->pfns, in amdgpu_ttm_tt_get_user_pages_done()
904 kvfree(gtt->range->pfns); in amdgpu_ttm_tt_get_user_pages_done()
992 gtt->range->pfns[0])) in amdgpu_ttm_tt_unpin_userptr()
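
amdgpu_ttm_tt_get_user_pages() allocates one pfns slot per ttm page with kvmalloc_array(), attaches it as range->pfns, and after the HMM fault translates every entry back into a struct page pointer; the array is released with kvfree() either on error or later in amdgpu_ttm_tt_get_user_pages_done(). The sketch below captures only that allocate/fill/translate/free shape: ttm_get_user_pages(), hmm_fault() and entry_to_page() are invented stand-ins, and the array's lifetime is collapsed into a single function.

    #include <errno.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct page;                                 /* opaque in this sketch */

    /* stand-in for hmm_range_fault(): pretend every page is present */
    static int hmm_fault(uint64_t *pfns, unsigned long n)
    {
        for (unsigned long i = 0; i < n; i++)
            pfns[i] = i + 100;
        return 0;
    }

    /* stand-in for hmm_device_entry_to_page() */
    static struct page *entry_to_page(uint64_t entry)
    {
        return (struct page *)(uintptr_t)(entry << 12);
    }

    static int ttm_get_user_pages(struct page **pages, unsigned long num_pages)
    {
        uint64_t *pfns = calloc(num_pages, sizeof(*pfns));
        int ret;

        if (!pfns)
            return -ENOMEM;

        ret = hmm_fault(pfns, num_pages);        /* fill pfns for the whole range */
        if (ret)
            goto out_free;

        for (unsigned long i = 0; i < num_pages; i++) {
            pages[i] = entry_to_page(pfns[i]);   /* translate each entry */
            if (!pages[i]) {
                ret = -EFAULT;
                goto out_free;
            }
        }

    out_free:
        free(pfns);                              /* the kernel defers this to *_done() */
        return ret;
    }

    int main(void)
    {
        struct page *pages[8];
        return ttm_get_user_pages(pages, 8);
    }
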
/Linux-v5.4/include/linux/
hmm.h 166 uint64_t *pfns; member
/Linux-v5.4/include/xen/
xen-ops.h 210 int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
/Linux-v5.4/Documentation/vm/
hmm.rst 205 Both functions copy CPU page table entries into their pfns array argument. Each
221 range.pfns = ...;
257 // Use pfns array content to update device page table
289 for each entry in the pfns array.
310 range->pfns[index_of_write] = (1 << 62);
317 Note that HMM will populate the pfns array with write permission for any page
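
The hmm.rst lines tie the driver-facing API to the hmm.c code above: a driver fills in range.start/end/pfns/flags/values, lets the fault path populate the pfns array, and then uses that array to update its device page table; the documentation's example of requesting write access for a single page is the range->pfns[index_of_write] = (1 << 62) line. The userspace model below shows how a per-entry request is combined with default_flags and pfn_flags_mask, which is exactly the expression visible in hmm_pte_need_fault() above; the bit positions follow the documentation example (valid = bit 63, write = bit 62) but are otherwise illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define REQ_VALID (1ULL << 63)
    #define REQ_WRITE (1ULL << 62)

    int main(void)
    {
        uint64_t default_flags  = REQ_VALID;       /* every page must at least be valid */
        uint64_t pfn_flags_mask = REQ_WRITE;       /* per-entry bits the driver may add */
        uint64_t pfns[4] = { 0, 0, REQ_WRITE, 0 }; /* ask for write on entry 2 only */

        for (int i = 0; i < 4; i++) {
            uint64_t want = (pfns[i] & pfn_flags_mask) | default_flags;
            printf("entry %d: needs valid=%d write=%d\n", i,
                   !!(want & REQ_VALID), !!(want & REQ_WRITE));
        }
        return 0;
    }
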
memory-model.rst 195 :c:func:`get_user_pages` service for the given range of pfns. Since the
/Linux-v5.4/arch/sparc/
Kconfig 297 # between a node's start and end pfns, it may not
/Linux-v5.4/Documentation/virt/kvm/
mmu.txt 42 spte shadow pte (referring to pfns)
/Linux-v5.4/arch/s390/
Kconfig 443 # between a node's start and end pfns, it may not
/Linux-v5.4/arch/powerpc/
Kconfig 666 # between a node's start and end pfns, it may not
/Linux-v5.4/arch/x86/
Kconfig 1597 # between a node's start and end pfns, it may not