/Linux-v4.19/drivers/xen/

xlate_mmu.c
    193  xen_pfn_t *pfns;                                               member
    201  info->pfns[info->idx++] = gfn;                                 in setup_balloon_gfn()
    218  xen_pfn_t *pfns;                                               in xen_xlate_map_ballooned_pages() local
    230  pfns = kcalloc(nr_grant_frames, sizeof(pfns[0]), GFP_KERNEL);  in xen_xlate_map_ballooned_pages()
    231  if (!pfns) {                                                   in xen_xlate_map_ballooned_pages()
    240  kfree(pfns);                                                   in xen_xlate_map_ballooned_pages()
    244  data.pfns = pfns;                                              in xen_xlate_map_ballooned_pages()
    254  kfree(pfns);                                                   in xen_xlate_map_ballooned_pages()
    259  *gfns = pfns;                                                  in xen_xlate_map_ballooned_pages()
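All of the xlate_mmu.c hits belong to one pattern: xen_xlate_map_ballooned_pages() allocates one xen_pfn_t slot per grant frame, the setup_balloon_gfn() walker records each ballooned page's frame number at the next free index, and the filled array is handed back to the caller through the gfns out-parameter (line 259). Below is a minimal userspace sketch of that allocate/fill/hand-back shape; frame_ctx, record_frame and map_frames_model are illustrative names, and plain calloc stands in for kcalloc.

#include <stdint.h>
#include <stdlib.h>

typedef uint64_t xen_pfn_t;            /* frame number, mirroring the kernel type */

/* Hypothetical walker context mirroring the idx/pfns pair seen in xlate_mmu.c. */
struct frame_ctx {
    xen_pfn_t *pfns;
    unsigned int idx;
};

/* Model of setup_balloon_gfn(): record one frame at the next free slot. */
static void record_frame(struct frame_ctx *ctx, xen_pfn_t gfn)
{
    ctx->pfns[ctx->idx++] = gfn;
}

/* Model of the allocate/fill/hand-back shape of xen_xlate_map_ballooned_pages():
 * on success the caller owns the returned array via *gfns. */
static int map_frames_model(unsigned int nr_frames, xen_pfn_t **gfns)
{
    struct frame_ctx ctx = { 0 };

    ctx.pfns = calloc(nr_frames, sizeof(ctx.pfns[0]));
    if (!ctx.pfns)
        return -1;

    for (unsigned int i = 0; i < nr_frames; i++)
        record_frame(&ctx, 0x1000 + i);   /* stand-in frame numbers */

    *gfns = ctx.pfns;                     /* ownership passes to the caller */
    return 0;
}

int main(void)
{
    xen_pfn_t *gfns = NULL;

    if (map_frames_model(4, &gfns) == 0)
        free(gfns);                       /* caller frees, as with *gfns above */
    return 0;
}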
privcmd.c
    752  xen_pfn_t *pfns = NULL;                                        in privcmd_ioctl_mmap_resource() local
    771  pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);          in privcmd_ioctl_mmap_resource()
    772  if (!pfns) {                                                   in privcmd_ioctl_mmap_resource()
    791  pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);                        in privcmd_ioctl_mmap_resource()
    802  set_xen_guest_handle(xdata.frame_list, pfns);                  in privcmd_ioctl_mmap_resource()
    829  pfns, kdata.num, (int *)pfns,                                  in privcmd_ioctl_mmap_resource()
    839  rc = pfns[i];                                                  in privcmd_ioctl_mmap_resource()
    849  kfree(pfns);                                                   in privcmd_ioctl_mmap_resource()
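The privcmd_ioctl_mmap_resource() hits build the frame list that is later handed to the hypervisor via set_xen_guest_handle(): when the kernel page size is larger than Xen's 4 KiB frame size, each backing page contributes XEN_PFN_PER_PAGE consecutive frame numbers, which is what pfns[i] = pfn + (i % XEN_PFN_PER_PAGE) on line 791 expresses. Below is a standalone sketch of that sub-page indexing; the 16 KiB page size and the page_to_first_frame() helper are illustrative, not the kernel's page_to_xen_pfn().

#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE   16384u          /* illustrative: a 16 KiB kernel page */
#define XEN_PAGE_SIZE      4096u          /* Xen frames are always 4 KiB */
#define XEN_PFN_PER_PAGE  (MODEL_PAGE_SIZE / XEN_PAGE_SIZE)

typedef uint64_t xen_pfn_t;

/* Illustrative stand-in for page_to_xen_pfn(): first 4 KiB frame of a page. */
static xen_pfn_t page_to_first_frame(unsigned long page_index)
{
    return (xen_pfn_t)page_index * XEN_PFN_PER_PAGE;
}

int main(void)
{
    xen_pfn_t pfns[8];

    /* Same shape as the loop in privcmd_ioctl_mmap_resource(): entry i takes the
     * base frame of page i / XEN_PFN_PER_PAGE plus a sub-page offset. */
    for (unsigned int i = 0; i < 8; i++) {
        xen_pfn_t pfn = page_to_first_frame(i / XEN_PFN_PER_PAGE);
        pfns[i] = pfn + (i % XEN_PFN_PER_PAGE);
    }

    for (unsigned int i = 0; i < 8; i++)
        printf("pfns[%u] = %llu\n", i, (unsigned long long)pfns[i]);
    return 0;
}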
/Linux-v4.19/mm/

hmm.c
    143  memset(&range->pfns[idx], 0, sizeof(*range->pfns) * npages);   in hmm_invalidate_range()
    326  uint64_t *pfns = range->pfns;                                  in hmm_pfns_bad() local
    331  pfns[i] = range->values[HMM_PFN_ERROR];                        in hmm_pfns_bad()
    354  uint64_t *pfns = range->pfns;                                  in hmm_vma_walk_hole_() local
    360  pfns[i] = range->values[HMM_PFN_NONE];                         in hmm_vma_walk_hole_()
    365  &pfns[i]);                                                     in hmm_vma_walk_hole_()
    375  uint64_t pfns, uint64_t cpu_flags,                             in hmm_pte_need_fault() argument
    385  if (!(pfns & range->flags[HMM_PFN_VALID]))                     in hmm_pte_need_fault()
    390  if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {             in hmm_pte_need_fault()
    391  *write_fault = pfns & range->flags[HMM_PFN_WRITE];             in hmm_pte_need_fault()
    [all …]
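The hmm.c hits all revolve around one encoding: each entry of range->pfns is a uint64_t whose meaning is given by the driver-supplied range->flags[] masks (VALID, WRITE, DEVICE_PRIVATE) and range->values[] constants (ERROR, NONE); hmm_pfns_bad() and hmm_vma_walk_hole_() fill whole runs with those special values, and hmm_pte_need_fault() tests the masks to decide whether a fault is required. The sketch below models only that table-driven encoding; the bit positions are made up and the fault check is deliberately simplified, not the kernel's exact policy.

#include <stdint.h>
#include <stdbool.h>

/* Indices into the flags[]/values[] tables, as in include/linux/hmm.h. */
enum { HMM_PFN_VALID, HMM_PFN_WRITE, HMM_PFN_DEVICE_PRIVATE, HMM_PFN_FLAG_MAX };
enum { HMM_PFN_ERROR, HMM_PFN_NONE, HMM_PFN_VALUE_MAX };

/* Driver-chosen encoding: these bit assignments are illustrative only. */
static const uint64_t flags[HMM_PFN_FLAG_MAX] = {
    [HMM_PFN_VALID]          = 1ull << 63,
    [HMM_PFN_WRITE]          = 1ull << 62,
    [HMM_PFN_DEVICE_PRIVATE] = 1ull << 61,
};
static const uint64_t values[HMM_PFN_VALUE_MAX] = {
    [HMM_PFN_ERROR] = 1ull << 60,
    [HMM_PFN_NONE]  = 0,
};

/* Model of hmm_pfns_bad()/hmm_vma_walk_hole_(): mark a run of entries. */
static void fill_run(uint64_t *pfns, unsigned long npages, uint64_t value)
{
    for (unsigned long i = 0; i < npages; i++)
        pfns[i] = value;
}

/* Simplified model of the shape of hmm_pte_need_fault(): "requested" plays the
 * role of the input pfns value, "current_flags" the CPU PTE flags. */
static void need_fault(uint64_t requested, uint64_t current_flags,
                       bool *fault, bool *write_fault)
{
    *fault = *write_fault = false;
    if (!(requested & flags[HMM_PFN_VALID]))
        return;                          /* caller did not ask for this entry */
    if (!(current_flags & flags[HMM_PFN_VALID]))
        *fault = true;
    if ((requested & flags[HMM_PFN_WRITE]) &&
        !(current_flags & flags[HMM_PFN_WRITE]))
        *fault = *write_fault = true;
}

int main(void)
{
    uint64_t pfns[4];
    bool fault, write_fault;

    fill_run(pfns, 4, values[HMM_PFN_NONE]);                 /* a hole: nothing mapped */
    need_fault(flags[HMM_PFN_VALID] | flags[HMM_PFN_WRITE],  /* caller wants RW */
               values[HMM_PFN_NONE], &fault, &write_fault);
    return fault && write_fault ? 0 : 1;
}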
/Linux-v4.19/drivers/virtio/

virtio_balloon.c
     78  __virtio32 pfns[VIRTIO_BALLOON_ARRAY_PFNS_MAX];                member
    113  sg_init_one(&sg, vb->pfns, sizeof(vb->pfns[0]) * vb->num_pfns);  in tell_host()
    125  __virtio32 pfns[], struct page *page)                          in set_page_pfns() argument
    134  pfns[i] = cpu_to_virtio32(vb->vdev,                            in set_page_pfns()
    146  num = min(num, ARRAY_SIZE(vb->pfns));                          in fill_balloon()
    171  set_page_pfns(vb, vb->pfns + vb->num_pfns, page);              in fill_balloon()
    210  num = min(num, ARRAY_SIZE(vb->pfns));                          in leak_balloon()
    220  set_page_pfns(vb, vb->pfns + vb->num_pfns, page);              in leak_balloon()
    478  set_page_pfns(vb, vb->pfns, newpage);                          in virtballoon_migratepage()
    486  set_page_pfns(vb, vb->pfns, page);                             in virtballoon_migratepage()
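In virtio_balloon.c the pfns array is the message buffer toward the host: set_page_pfns() expands every ballooned page into consecutive 4 KiB frame numbers (byte-swapped with cpu_to_virtio32()), and tell_host() queues the whole array as a single scatterlist entry (line 113). Below is a userspace model of that expansion and of the fill_balloon() bookkeeping; the page size, the PFNS_MAX value and the page_to_balloon_pfn() stand-in are illustrative.

#include <stdint.h>
#include <stddef.h>

#define MODEL_PAGE_SIZE              16384u   /* illustrative kernel page size */
#define VIRTIO_BALLOON_PFN_SHIFT         12   /* balloon frames are always 4 KiB */
#define PAGES_PER_PAGE  (MODEL_PAGE_SIZE >> VIRTIO_BALLOON_PFN_SHIFT)
#define PFNS_MAX                        256   /* stand-in for ARRAY_PFNS_MAX */

/* Illustrative stand-in for page_to_balloon_pfn(): first 4 KiB frame of a page. */
static uint32_t page_to_balloon_pfn(unsigned long page_index)
{
    return (uint32_t)(page_index * PAGES_PER_PAGE);
}

/* Model of set_page_pfns(): one page becomes PAGES_PER_PAGE consecutive balloon
 * frame numbers (the kernel additionally converts them with cpu_to_virtio32()). */
static void set_page_pfns_model(uint32_t *pfns, unsigned long page_index)
{
    for (unsigned int i = 0; i < PAGES_PER_PAGE; i++)
        pfns[i] = page_to_balloon_pfn(page_index) + i;
}

/* Model of the fill_balloon() bookkeeping: append pages while the array has
 * room, tracking how many pfn slots are in use. */
static size_t fill_balloon_model(uint32_t *pfns, size_t num_pfns,
                                 const unsigned long *pages, size_t npages)
{
    for (size_t p = 0; p < npages && num_pfns + PAGES_PER_PAGE <= PFNS_MAX; p++) {
        set_page_pfns_model(pfns + num_pfns, pages[p]);
        num_pfns += PAGES_PER_PAGE;
    }
    return num_pfns;
}

int main(void)
{
    uint32_t pfns[PFNS_MAX];
    const unsigned long pages[] = { 7, 8, 9 };   /* illustrative page indices */
    size_t used = fill_balloon_model(pfns, 0, pages, 3);

    /* 3 pages * PAGES_PER_PAGE balloon frames now sit in pfns[0..used-1], ready
     * to be handed over in one go, as tell_host() does with one scatterlist. */
    return used == 3 * PAGES_PER_PAGE ? 0 : 1;
}

Batching the frame numbers this way lets the balloon report many frames per virtqueue kick instead of notifying the host one page at a time.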
/Linux-v4.19/drivers/iommu/

iova.c
    771  unsigned long pfns[IOVA_MAG_SIZE];                             member
    802  struct iova *iova = private_find_iova(iovad, mag->pfns[i]);    in iova_magazine_free_pfns()
    832  for (i = mag->size - 1; mag->pfns[i] > limit_pfn; i--)         in iova_magazine_pop()
    837  pfn = mag->pfns[i];                                            in iova_magazine_pop()
    838  mag->pfns[i] = mag->pfns[--mag->size];                         in iova_magazine_pop()
    847  mag->pfns[mag->size++] = pfn;                                  in iova_magazine_push()
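The iova.c hits are the magazine cache: a small fixed array of recently freed IOVA frame numbers used as a per-CPU cache. iova_magazine_push() appends, and iova_magazine_pop() scans down from the top for an entry that does not exceed limit_pfn and removes it by swapping the last element into its slot, so the array stays dense without shifting. Below is a standalone model of those two operations; the size constant and the bounds handling are illustrative rather than copied from the kernel.

#include <stdbool.h>

#define MAG_SIZE 128   /* illustrative capacity, playing the role of IOVA_MAG_SIZE */

struct magazine {
    unsigned long size;
    unsigned long pfns[MAG_SIZE];
};

/* Model of iova_magazine_push(): append one freed frame number. */
static bool magazine_push(struct magazine *mag, unsigned long pfn)
{
    if (mag->size == MAG_SIZE)
        return false;                    /* full: caller must flush it first */
    mag->pfns[mag->size++] = pfn;
    return true;
}

/* Swap-remove pop, as in iova_magazine_pop(): find an entry <= limit_pfn by
 * scanning from the end, replace it with the last element, shrink. Returns 0
 * when nothing suitable is cached. */
static unsigned long magazine_pop(struct magazine *mag, unsigned long limit_pfn)
{
    if (mag->size == 0)
        return 0;

    long i;
    for (i = (long)mag->size - 1; i >= 0; i--)
        if (mag->pfns[i] <= limit_pfn)
            break;
    if (i < 0)
        return 0;                        /* every cached frame is above the limit */

    unsigned long pfn = mag->pfns[i];
    mag->pfns[i] = mag->pfns[--mag->size];
    return pfn;
}

int main(void)
{
    struct magazine mag = { 0 };

    magazine_push(&mag, 0x200);
    magazine_push(&mag, 0x80);
    magazine_push(&mag, 0x300);

    /* The scan from the top finds 0x80 first; 0x300 exceeds the limit and stays
     * cached, with the array compacted by the swap. */
    unsigned long got = magazine_pop(&mag, 0x280);
    return (got == 0x80 && mag.size == 2) ? 0 : 1;
}

The swap-remove keeps the array dense without a memmove(), so only the scan for an entry that satisfies the limit costs more than constant time.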
/Linux-v4.19/include/linux/

hmm.h
    148  uint64_t *pfns;                                                member

/Linux-v4.19/include/xen/

xen-ops.h
    162  int xen_xlate_map_ballooned_pages(xen_pfn_t **pfns, void **vaddr,
/Linux-v4.19/Documentation/vm/

hmm.rst
    196  hmm_pfn_t *pfns);
    201  hmm_pfn_t *pfns,
    211  Both functions copy CPU page table entries into their pfns array argument. Each
    224  ret = hmm_vma_get_pfns(vma, &range, start, end, pfns);
    233  // Use pfns array content to update device page table
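The hmm.rst excerpt describes the intended calling convention: hmm_vma_get_pfns() snapshots the CPU page table entries into the caller's pfns array, the driver revalidates that snapshot under its own update lock before committing it to the device page table, and retries if an invalidation raced in between. The sketch below follows the prototypes as the documentation quotes them; every function in it is a placeholder written from that description (including the snapshot_still_valid() revalidation check and the lock helpers), not code taken from a real driver.

#include <stdint.h>
#include <stdbool.h>

struct vm_area_struct;                         /* opaque stand-in */
struct hmm_range { unsigned long start, end; };

/* Trivial placeholders so the sketch builds on its own; none of these is the
 * real HMM or driver API. */
static int hmm_vma_get_pfns(struct vm_area_struct *vma, struct hmm_range *range,
                            unsigned long start, unsigned long end, uint64_t *pfns)
{ (void)vma; (void)pfns; range->start = start; range->end = end; return 0; }
static bool snapshot_still_valid(struct hmm_range *range) { (void)range; return true; }
static void take_driver_update_lock(void) {}
static void release_driver_update_lock(void) {}
static void device_update_page_table(unsigned long start, unsigned long end,
                                     const uint64_t *pfns)
{ (void)start; (void)end; (void)pfns; }

/* The snapshot / revalidate / commit loop the documentation describes. */
static int driver_populate_range(struct vm_area_struct *vma,
                                 unsigned long start, unsigned long end,
                                 uint64_t *pfns)
{
    struct hmm_range range;
    int ret;

again:
    ret = hmm_vma_get_pfns(vma, &range, start, end, pfns);   /* snapshot CPU PTEs */
    if (ret)
        return ret;

    take_driver_update_lock();
    if (!snapshot_still_valid(&range)) {     /* an invalidation raced in: retry */
        release_driver_update_lock();
        goto again;
    }

    /* Use pfns array content to update device page table. */
    device_update_page_table(start, end, pfns);

    release_driver_update_lock();
    return 0;
}

int main(void)
{
    uint64_t pfns[16];
    return driver_populate_range((struct vm_area_struct *)0, 0, 16 * 4096, pfns);
}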
/Linux-v4.19/arch/sparc/

Kconfig
    301  # between a node's start and end pfns, it may not

/Linux-v4.19/Documentation/virtual/kvm/

mmu.txt
     42  spte   shadow pte (referring to pfns)

/Linux-v4.19/arch/s390/

Kconfig
    423  # between a node's start and end pfns, it may not

/Linux-v4.19/arch/powerpc/

Kconfig
    651  # between a node's start and end pfns, it may not

/Linux-v4.19/arch/x86/

Kconfig
   1566  # between a node's start and end pfns, it may not