/Linux-v5.15/include/linux/
D | pfn_t.h |
     29  static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)  in __pfn_to_pfn_t() argument
     31  pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };  in __pfn_to_pfn_t()
     37  static inline pfn_t pfn_to_pfn_t(unsigned long pfn)  in pfn_to_pfn_t() argument
     39  return __pfn_to_pfn_t(pfn, 0);  in pfn_to_pfn_t()
     47  static inline bool pfn_t_has_page(pfn_t pfn)  in pfn_t_has_page() argument
     49  return (pfn.val & PFN_MAP) == PFN_MAP || (pfn.val & PFN_DEV) == 0;  in pfn_t_has_page()
     52  static inline unsigned long pfn_t_to_pfn(pfn_t pfn)  in pfn_t_to_pfn() argument
     54  return pfn.val & ~PFN_FLAGS_MASK;  in pfn_t_to_pfn()
     57  static inline struct page *pfn_t_to_page(pfn_t pfn)  in pfn_t_to_page() argument
     59  if (pfn_t_has_page(pfn))  in pfn_t_to_page()
     [all …]

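The pfn_t.h hits above show how a pfn and its flags travel together in one 64-bit value: the flags sit in the bits above the frame number and are masked off again by pfn_t_to_pfn(). A minimal user-space sketch of that encoding follows; the model_ names, the 12-bit flag field, and the two flag bit positions are illustrative stand-ins, not the kernel's actual definitions.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative constants only; the kernel derives its mask and flag bits
     * from PAGE_SHIFT and BITS_PER_LONG_LONG rather than hard-coding them. */
    #define MODEL_PAGE_SHIFT   12
    #define MODEL_FLAGS_MASK   (~0ULL << (64 - MODEL_PAGE_SHIFT))  /* top 12 bits */
    #define MODEL_PFN_DEV      (1ULL << 63)
    #define MODEL_PFN_MAP      (1ULL << 62)

    typedef struct { uint64_t val; } model_pfn_t;

    /* Mirror of __pfn_to_pfn_t(): frame number in the low bits, flags on top. */
    static model_pfn_t model_pfn_to_pfn_t(unsigned long pfn, uint64_t flags)
    {
            model_pfn_t p = { .val = pfn | (flags & MODEL_FLAGS_MASK) };
            return p;
    }

    /* Mirror of pfn_t_to_pfn(): strip the flag bits to recover the raw pfn. */
    static unsigned long model_pfn_t_to_pfn(model_pfn_t p)
    {
            return p.val & ~MODEL_FLAGS_MASK;
    }

    int main(void)
    {
            model_pfn_t p = model_pfn_to_pfn_t(0x12345, MODEL_PFN_DEV | MODEL_PFN_MAP);

            assert(model_pfn_t_to_pfn(p) == 0x12345);   /* flags don't leak into the pfn */
            assert(p.val & MODEL_PFN_DEV);              /* ...but remain recoverable     */
            printf("val=%#llx pfn=%#lx\n",
                   (unsigned long long)p.val, model_pfn_t_to_pfn(p));
            return 0;
    }

Keeping the flags in otherwise unused high bits leaves the value the same size as a raw pfn while still letting helpers such as pfn_t_to_page() branch on them.
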
/Linux-v5.15/mm/
D | page_isolation.c |
     73  unsigned long pfn, buddy_pfn;  in unset_migratetype_isolate() local
     92  pfn = page_to_pfn(page);  in unset_migratetype_isolate()
     93  buddy_pfn = __find_buddy_pfn(pfn, order);  in unset_migratetype_isolate()
     94  buddy = page + (buddy_pfn - pfn);  in unset_migratetype_isolate()
    126  __first_valid_page(unsigned long pfn, unsigned long nr_pages)  in __first_valid_page() argument
    133  page = pfn_to_online_page(pfn + i);  in __first_valid_page()
    185  unsigned long pfn;  in start_isolate_page_range() local
    192  for (pfn = start_pfn;  in start_isolate_page_range()
    193  pfn < end_pfn;  in start_isolate_page_range()
    194  pfn += pageblock_nr_pages) {  in start_isolate_page_range()
    [all …]

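The unset_migratetype_isolate() lines rely on __find_buddy_pfn() to locate the companion of a 2^order-page block; the buddy's pfn is conventionally obtained by flipping bit `order` of the block's pfn. A small stand-alone sketch of that relation, under the assumption that this XOR formulation matches what the helper reduces to:

    #include <assert.h>
    #include <stdio.h>

    /* Toy version of the buddy-pfn computation: two blocks of 2^order pages
     * are buddies when their pfns differ only in bit 'order'. */
    static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
    {
            return pfn ^ (1UL << order);
    }

    int main(void)
    {
            /* An order-3 block starting at pfn 0x1000 pairs with the one at 0x1008. */
            assert(find_buddy_pfn(0x1000, 3) == 0x1008);
            /* The relation is symmetric: the buddy's buddy is the original block. */
            assert(find_buddy_pfn(0x1008, 3) == 0x1000);
            printf("buddy of 0x1000 (order 3) = %#lx\n", find_buddy_pfn(0x1000, 3));
            return 0;
    }
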
D | page_ext.c |
    134  unsigned long pfn = page_to_pfn(page);  in lookup_page_ext() local
    147  index = pfn - round_down(node_start_pfn(page_to_nid(page)),  in lookup_page_ext()
    208  unsigned long pfn = page_to_pfn(page);  in lookup_page_ext() local
    209  struct mem_section *section = __pfn_to_section(pfn);  in lookup_page_ext()
    218  return get_entry(section->page_ext, pfn);  in lookup_page_ext()
    237  static int __meminit init_section_page_ext(unsigned long pfn, int nid)  in init_section_page_ext() argument
    243  section = __pfn_to_section(pfn);  in init_section_page_ext()
    267  pfn &= PAGE_SECTION_MASK;  in init_section_page_ext()
    268  section->page_ext = (void *)base - page_ext_size * pfn;  in init_section_page_ext()
    289  static void __free_page_ext(unsigned long pfn)  in __free_page_ext() argument
    [all …]

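Lines 267-268 store a deliberately biased pointer: base is moved back by page_ext_size times the section's first pfn, so a later lookup can index with the absolute pfn and still land inside the real allocation. A hedged user-space model of that arithmetic follows; the entry size, section geometry, and the shape of get_entry() are assumptions for illustration, and the out-of-object pointer offset mirrors what the kernel does rather than strictly portable C.

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MODEL_EXT_SIZE      8UL      /* bytes of metadata per page (illustrative) */
    #define MODEL_SECTION_PFNS  4096UL   /* pages per section (illustrative)          */

    struct model_section {
            void *page_ext;              /* pre-biased base, as stored at line 268 */
    };

    /* Assumed equivalent of get_entry(): index with the absolute pfn directly. */
    static void *get_entry(void *base, unsigned long pfn)
    {
            return (char *)base + MODEL_EXT_SIZE * pfn;
    }

    int main(void)
    {
            unsigned long section_start = 3 * MODEL_SECTION_PFNS;  /* first pfn of the section */
            void *alloc = calloc(MODEL_SECTION_PFNS, MODEL_EXT_SIZE);

            struct model_section sec = {
                    /* Bias the stored pointer backwards by the section's start pfn... */
                    .page_ext = (char *)alloc - MODEL_EXT_SIZE * section_start,
            };

            /* ...so an absolute pfn lands on the right slot of the real allocation. */
            unsigned long pfn = section_start + 42;
            assert(get_entry(sec.page_ext, pfn) == (char *)alloc + MODEL_EXT_SIZE * 42);

            printf("entry for pfn %lu is slot %lu of its section\n", pfn, 42UL);
            free(alloc);
            return 0;
    }
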
D | memory-failure.c |
    250  static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)  in kill_proc() argument
    257  pfn, t->comm, t->pid);  in kill_proc()
    398  unsigned long pfn, int flags)  in kill_procs() argument
    411  pfn, tk->tsk->comm, tk->tsk->pid);  in kill_procs()
    422  else if (kill_proc(tk, pfn, flags) < 0)  in kill_procs()
    424  pfn, tk->tsk->comm, tk->tsk->pid);  in kill_procs()
    571  unsigned long pfn;  member
    584  unsigned long pfn = 0;  in check_hwpoisoned_entry() local
    587  pfn = pte_pfn(pte);  in check_hwpoisoned_entry()
    592  pfn = hwpoison_entry_to_pfn(swp);  in check_hwpoisoned_entry()
    [all …]

D | sparse.c |
    185  static void subsection_mask_set(unsigned long *map, unsigned long pfn,  in subsection_mask_set() argument
    188  int idx = subsection_map_index(pfn);  in subsection_mask_set()
    189  int end = subsection_map_index(pfn + nr_pages - 1);  in subsection_mask_set()
    194  void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)  in subsection_map_init() argument
    196  int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);  in subsection_map_init()
    197  unsigned long nr, start_sec = pfn_to_section_nr(pfn);  in subsection_map_init()
    207  - (pfn & ~PAGE_SECTION_MASK));  in subsection_map_init()
    209  subsection_mask_set(ms->usage->subsection_map, pfn, pfns);  in subsection_map_init()
    212  pfns, subsection_map_index(pfn),  in subsection_map_init()
    213  subsection_map_index(pfn + pfns - 1));  in subsection_map_init()
    [all …]

D | memory_hotplug.c |
    224  static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,  in check_pfn_span() argument
    242  if (!IS_ALIGNED(pfn, min_align)  in check_pfn_span()
    245  reason, pfn, pfn + nr_pages - 1);  in check_pfn_span()
    256  struct page *pfn_to_online_page(unsigned long pfn)  in pfn_to_online_page() argument
    258  unsigned long nr = pfn_to_section_nr(pfn);  in pfn_to_online_page()
    273  if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))  in pfn_to_online_page()
    276  if (!pfn_section_valid(ms, pfn))  in pfn_to_online_page()
    280  return pfn_to_page(pfn);  in pfn_to_online_page()
    288  pgmap = get_dev_pagemap(pfn, NULL);  in pfn_to_online_page()
    295  return pfn_to_page(pfn);  in pfn_to_online_page()
    [all …]

D | cma.c |
     81  static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,  in cma_clear_bitmap() argument
     87  bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;  in cma_clear_bitmap()
     97  unsigned long base_pfn = cma->base_pfn, pfn;  in cma_activate_area() local
    111  for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {  in cma_activate_area()
    112  WARN_ON_ONCE(!pfn_valid(pfn));  in cma_activate_area()
    113  if (page_zone(pfn_to_page(pfn)) != zone)  in cma_activate_area()
    117  for (pfn = base_pfn; pfn < base_pfn + cma->count;  in cma_activate_area()
    118  pfn += pageblock_nr_pages)  in cma_activate_area()
    119  init_cma_reserved_pageblock(pfn_to_page(pfn));  in cma_activate_area()
    134  for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)  in cma_activate_area()
    [all …]

D | page_idle.c |
     32  static struct page *page_idle_get_page(unsigned long pfn)  in page_idle_get_page() argument
     34  struct page *page = pfn_to_online_page(pfn);  in page_idle_get_page()
    120  unsigned long pfn, end_pfn;  in page_idle_bitmap_read() local
    126  pfn = pos * BITS_PER_BYTE;  in page_idle_bitmap_read()
    127  if (pfn >= max_pfn)  in page_idle_bitmap_read()
    130  end_pfn = pfn + count * BITS_PER_BYTE;  in page_idle_bitmap_read()
    134  for (; pfn < end_pfn; pfn++) {  in page_idle_bitmap_read()
    135  bit = pfn % BITMAP_CHUNK_BITS;  in page_idle_bitmap_read()
    138  page = page_idle_get_page(pfn);  in page_idle_bitmap_read()
    165  unsigned long pfn, end_pfn;  in page_idle_bitmap_write() local
    [all …]

D | page_owner.c |
    252  unsigned long pfn, block_end_pfn;  in pagetypeinfo_showmixedcount_print() local
    259  pfn = zone->zone_start_pfn;  in pagetypeinfo_showmixedcount_print()
    266  for (; pfn < end_pfn; ) {  in pagetypeinfo_showmixedcount_print()
    267  page = pfn_to_online_page(pfn);  in pagetypeinfo_showmixedcount_print()
    269  pfn = ALIGN(pfn + 1, MAX_ORDER_NR_PAGES);  in pagetypeinfo_showmixedcount_print()
    273  block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);  in pagetypeinfo_showmixedcount_print()
    278  for (; pfn < block_end_pfn; pfn++) {  in pagetypeinfo_showmixedcount_print()
    280  page = pfn_to_page(pfn);  in pagetypeinfo_showmixedcount_print()
    290  pfn += (1UL << freepage_order) - 1;  in pagetypeinfo_showmixedcount_print()
    312  pfn = block_end_pfn;  in pagetypeinfo_showmixedcount_print()
    [all …]

/Linux-v5.15/arch/x86/xen/
D | p2m.c |
    126  static inline unsigned p2m_top_index(unsigned long pfn)  in p2m_top_index() argument
    128  BUG_ON(pfn >= MAX_P2M_PFN);  in p2m_top_index()
    129  return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);  in p2m_top_index()
    132  static inline unsigned p2m_mid_index(unsigned long pfn)  in p2m_mid_index() argument
    134  return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;  in p2m_mid_index()
    137  static inline unsigned p2m_index(unsigned long pfn)  in p2m_index() argument
    139  return pfn % P2M_PER_PAGE;  in p2m_index()
    174  static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)  in p2m_init_identity() argument
    179  p2m[i] = IDENTITY_FRAME(pfn + i);  in p2m_init_identity()
    219  unsigned long pfn, mfn;  in xen_build_mfn_list_list() local
    [all …]

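The three p2m_*_index() helpers split a pfn into a position in a three-level table: top page, mid page, and leaf entry. The same arithmetic is reproduced stand-alone below; the per-level fan-out constants are illustrative (the real values follow from PAGE_SIZE and the entry size).

    #include <assert.h>
    #include <stdio.h>

    #define MODEL_P2M_PER_PAGE      512UL   /* entries per leaf page (illustrative)      */
    #define MODEL_P2M_MID_PER_PAGE  512UL   /* leaf pointers per mid page (illustrative) */

    static unsigned long top_index(unsigned long pfn)
    {
            return pfn / (MODEL_P2M_MID_PER_PAGE * MODEL_P2M_PER_PAGE);
    }

    static unsigned long mid_index(unsigned long pfn)
    {
            return (pfn / MODEL_P2M_PER_PAGE) % MODEL_P2M_MID_PER_PAGE;
    }

    static unsigned long leaf_index(unsigned long pfn)
    {
            return pfn % MODEL_P2M_PER_PAGE;
    }

    int main(void)
    {
            unsigned long pfn = 123456789UL;
            unsigned long top = top_index(pfn), mid = mid_index(pfn), idx = leaf_index(pfn);

            /* The three indices recombine into the original pfn. */
            assert((top * MODEL_P2M_MID_PER_PAGE + mid) * MODEL_P2M_PER_PAGE + idx == pfn);
            printf("pfn %lu -> top %lu, mid %lu, idx %lu\n", pfn, top, mid, idx);
            return 0;
    }
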
D | setup.c |
    164  unsigned long __ref xen_chk_extra_mem(unsigned long pfn)  in xen_chk_extra_mem() argument
    169  if (pfn >= xen_extra_mem[i].start_pfn &&  in xen_chk_extra_mem()
    170  pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)  in xen_chk_extra_mem()
    174  return IDENTITY_FRAME(pfn);  in xen_chk_extra_mem()
    182  unsigned long pfn, pfn_s, pfn_e;  in xen_inv_extra_mem() local
    190  for (pfn = pfn_s; pfn < pfn_e; pfn++)  in xen_inv_extra_mem()
    191  set_phys_to_machine(pfn, INVALID_P2M_ENTRY);  in xen_inv_extra_mem()
    257  unsigned long pfn, end;  in xen_set_identity_and_release_chunk() local
    264  for (pfn = start_pfn; pfn < end; pfn++) {  in xen_set_identity_and_release_chunk()
    265  unsigned long mfn = pfn_to_mfn(pfn);  in xen_set_identity_and_release_chunk()
    [all …]

/Linux-v5.15/arch/x86/include/asm/xen/
D | page.h |
     56  extern int xen_alloc_p2m_entry(unsigned long pfn);
     58  extern unsigned long get_phys_to_machine(unsigned long pfn);
     59  extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
     60  extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
    141  static inline unsigned long __pfn_to_mfn(unsigned long pfn)  in __pfn_to_mfn() argument
    145  if (pfn < xen_p2m_size)  in __pfn_to_mfn()
    146  mfn = xen_p2m_addr[pfn];  in __pfn_to_mfn()
    147  else if (unlikely(pfn < xen_max_p2m_pfn))  in __pfn_to_mfn()
    148  return get_phys_to_machine(pfn);  in __pfn_to_mfn()
    150  return IDENTITY_FRAME(pfn);  in __pfn_to_mfn()
    [all …]

/Linux-v5.15/tools/testing/scatterlist/
D | main.c |
     11  unsigned *pfn;  member
     28  #define pfn(...) (unsigned []){ __VA_ARGS__ }  macro
     42  printf(" %x", test->pfn[i]);  in fail()
     56  { -EINVAL, 1, pfn(0), NULL, PAGE_SIZE, 0, 1 },  in main()
     57  { 0, 1, pfn(0), NULL, PAGE_SIZE, PAGE_SIZE + 1, 1 },  in main()
     58  { 0, 1, pfn(0), NULL, PAGE_SIZE, sgmax, 1 },  in main()
     59  { 0, 1, pfn(0), NULL, 1, sgmax, 1 },  in main()
     60  { 0, 2, pfn(0, 1), NULL, 2 * PAGE_SIZE, sgmax, 1 },  in main()
     61  { 0, 2, pfn(1, 0), NULL, 2 * PAGE_SIZE, sgmax, 2 },  in main()
     62  { 0, 3, pfn(0, 1, 2), NULL, 3 * PAGE_SIZE, sgmax, 1 },  in main()
    [all …]

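The pfn(...) macro at line 28 expands to a C99 compound literal, so every row of the test table can carry its own anonymous, variable-length array of page frame numbers. A self-contained sketch of the same idiom; the testcase struct here is invented for the example and is not the layout used by the real test.

    #include <stdio.h>

    /* Same idiom as tools/testing/scatterlist/main.c: expand to an anonymous array. */
    #define pfn(...) (unsigned []){ __VA_ARGS__ }

    struct testcase {
            unsigned num_pages;     /* how many entries the pfn array holds */
            unsigned *pfn;          /* points at the compound literal       */
    };

    int main(void)
    {
            /* Each row carries its own pfn list; no separately named arrays needed. */
            struct testcase tests[] = {
                    { 1, pfn(0) },
                    { 2, pfn(1, 0) },
                    { 3, pfn(0, 1, 2) },
            };

            for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
                    printf("test %u:", i);
                    for (unsigned j = 0; j < tests[i].num_pages; j++)
                            printf(" %x", tests[i].pfn[j]);
                    printf("\n");
            }
            return 0;
    }

The trick keeps the expected pfn list for each case right where the case is declared, which is why the test table at lines 56-62 stays so compact.
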
/Linux-v5.15/arch/arm/xen/
D | p2m.c |
     23  unsigned long pfn;  member
     44  if (new->pfn == entry->pfn)  in xen_add_phys_to_mach_entry()
     47  if (new->pfn < entry->pfn)  in xen_add_phys_to_mach_entry()
     59  __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);  in xen_add_phys_to_mach_entry()
     64  unsigned long __pfn_to_mfn(unsigned long pfn)  in __pfn_to_mfn() argument
     73  if (entry->pfn <= pfn &&  in __pfn_to_mfn()
     74  entry->pfn + entry->nr_pages > pfn) {  in __pfn_to_mfn()
     75  unsigned long mfn = entry->mfn + (pfn - entry->pfn);  in __pfn_to_mfn()
     79  if (pfn < entry->pfn)  in __pfn_to_mfn()
    149  bool __set_phys_to_machine_multi(unsigned long pfn,  in __set_phys_to_machine_multi() argument
    [all …]

/Linux-v5.15/include/trace/events/
D | cma.h |
     13  TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
     16  TP_ARGS(name, pfn, page, count, align),
     20  __field(unsigned long, pfn)
     28  __entry->pfn = pfn;
     36  __entry->pfn,
     44  TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
     47  TP_ARGS(name, pfn, page, count),
     51  __field(unsigned long, pfn)
     58  __entry->pfn = pfn;
     65  __entry->pfn,
    [all …]

D | kmem.h |
    167  __field( unsigned long, pfn )
    172  __entry->pfn = page_to_pfn(page);
    177  pfn_to_page(__entry->pfn),
    178  __entry->pfn,
    189  __field( unsigned long, pfn )
    193  __entry->pfn = page_to_pfn(page);
    197  pfn_to_page(__entry->pfn),
    198  __entry->pfn)
    209  __field( unsigned long, pfn )
    216  __entry->pfn = page ? page_to_pfn(page) : -1UL;
    [all …]

/Linux-v5.15/drivers/gpu/drm/i915/selftests/
D | scatterlist.c |
     48  unsigned long pfn, n;  in expect_pfn_sg() local
     50  pfn = pt->start;  in expect_pfn_sg()
     55  if (page_to_pfn(page) != pfn) {  in expect_pfn_sg()
     57  __func__, who, pfn, page_to_pfn(page));  in expect_pfn_sg()
     70  pfn += npages;  in expect_pfn_sg()
     72  if (pfn != pt->end) {  in expect_pfn_sg()
     74  __func__, who, pt->end, pfn);  in expect_pfn_sg()
     86  unsigned long pfn;  in expect_pfn_sg_page_iter() local
     88  pfn = pt->start;  in expect_pfn_sg_page_iter()
     92  if (page != pfn_to_page(pfn)) {  in expect_pfn_sg_page_iter()
    [all …]

/Linux-v5.15/arch/arm/mach-omap2/
D | io.c |
     71  .pfn = __phys_to_pfn(L3_24XX_PHYS),
     77  .pfn = __phys_to_pfn(L4_24XX_PHYS),
     87  .pfn = __phys_to_pfn(DSP_MEM_2420_PHYS),
     93  .pfn = __phys_to_pfn(DSP_IPI_2420_PHYS),
     99  .pfn = __phys_to_pfn(DSP_MMU_2420_PHYS),
    111  .pfn = __phys_to_pfn(L4_WK_243X_PHYS),
    117  .pfn = __phys_to_pfn(OMAP243X_GPMC_PHYS),
    123  .pfn = __phys_to_pfn(OMAP243X_SDRC_PHYS),
    129  .pfn = __phys_to_pfn(OMAP243X_SMS_PHYS),
    141  .pfn = __phys_to_pfn(L3_34XX_PHYS),
    [all …]

/Linux-v5.15/include/xen/arm/
D | page.h |
     15  #define phys_to_machine_mapping_valid(pfn) (1)  argument
     43  unsigned long __pfn_to_mfn(unsigned long pfn);
     47  static inline unsigned long pfn_to_gfn(unsigned long pfn)  in pfn_to_gfn() argument
     49  return pfn;  in pfn_to_gfn()
     58  static inline unsigned long pfn_to_bfn(unsigned long pfn)  in pfn_to_bfn() argument
     63  mfn = __pfn_to_mfn(pfn);  in pfn_to_bfn()
     68  return pfn;  in pfn_to_bfn()
    103  bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
    104  bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
    107  static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)  in set_phys_to_machine() argument
    [all …]

/Linux-v5.15/include/asm-generic/
D | memory_model.h |
     18  #define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))  argument
     25  #define __pfn_to_page(pfn) (vmemmap + (pfn))  argument
     39  #define __pfn_to_page(pfn) \  argument
     40  ({ unsigned long __pfn = (pfn); \
     50  #define __pfn_to_phys(pfn) PFN_PHYS(pfn)  argument

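Both __pfn_to_page() variants above are plain pointer arithmetic over a (virtually) contiguous array of struct page: the FLATMEM form subtracts the pfn of the first mapped page, the vmemmap form indexes directly. A user-space model of that mapping and its inverse follows; the stub struct page, array size, and offset value are invented for the example.

    #include <assert.h>
    #include <stdio.h>

    struct page { unsigned long flags; };        /* stub standing in for struct page */

    #define MODEL_ARCH_PFN_OFFSET 0x100UL        /* first valid pfn (illustrative)   */

    static struct page model_mem_map[1024];      /* stands in for mem_map / vmemmap  */

    /* FLATMEM-style: mem_map[0] describes ARCH_PFN_OFFSET, so shift the pfn down. */
    static struct page *model_pfn_to_page(unsigned long pfn)
    {
            return model_mem_map + (pfn - MODEL_ARCH_PFN_OFFSET);
    }

    /* ...and the inverse, the way the matching __page_to_pfn() works. */
    static unsigned long model_page_to_pfn(const struct page *page)
    {
            return (unsigned long)(page - model_mem_map) + MODEL_ARCH_PFN_OFFSET;
    }

    int main(void)
    {
            unsigned long pfn = MODEL_ARCH_PFN_OFFSET + 7;
            struct page *page = model_pfn_to_page(pfn);

            assert(page == &model_mem_map[7]);       /* 8th struct page in the array */
            assert(model_page_to_pfn(page) == pfn);  /* round trip is exact          */
            printf("pfn %#lx <-> mem_map[%ld]\n", pfn, (long)(page - model_mem_map));
            return 0;
    }
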
/Linux-v5.15/kernel/power/
D | snapshot.c |
    740  static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,  in memory_bm_find_bit() argument
    749  if (pfn >= zone->start_pfn && pfn < zone->end_pfn)  in memory_bm_find_bit()
    756  if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {  in memory_bm_find_bit()
    778  ((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)  in memory_bm_find_bit()
    782  block_nr = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;  in memory_bm_find_bit()
    797  bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;  in memory_bm_find_bit()
    801  *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;  in memory_bm_find_bit()
    806  static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)  in memory_bm_set_bit() argument
    812  error = memory_bm_find_bit(bm, pfn, &addr, &bit);  in memory_bm_set_bit()
    817  static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)  in mem_bm_set_bit_check() argument
    [all …]

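memory_bm_find_bit() turns a zone-relative pfn into a block number (upper bits) and a bit position inside that block (lower bits), as lines 782 and 801 show. The same split, modelled stand-alone; the block shift chosen here is illustrative, since the kernel ties it to the size of a bitmap page.

    #include <assert.h>
    #include <stdio.h>

    #define MODEL_BM_BLOCK_SHIFT  15UL   /* bits per bitmap block = 32768 (illustrative) */
    #define MODEL_BM_BLOCK_MASK   ((1UL << MODEL_BM_BLOCK_SHIFT) - 1)

    /* Split a pfn into (block, bit) the way memory_bm_find_bit() does. */
    static void split_pfn(unsigned long pfn, unsigned long zone_start,
                          unsigned long *block_nr, unsigned long *bit_nr)
    {
            unsigned long rel = pfn - zone_start;

            *block_nr = rel >> MODEL_BM_BLOCK_SHIFT;   /* which bitmap block   */
            *bit_nr   = rel &  MODEL_BM_BLOCK_MASK;    /* which bit inside it  */
    }

    int main(void)
    {
            unsigned long block, bit;

            split_pfn(0x12345, 0x10000, &block, &bit);
            /* rel = 0x2345 -> block 0, bit 0x2345 */
            assert(block == 0 && bit == 0x2345);

            /* Recombining yields the original pfn again. */
            assert(((block << MODEL_BM_BLOCK_SHIFT) | bit) + 0x10000 == 0x12345);
            printf("pfn 0x12345 -> block %lu, bit %#lx\n", block, bit);
            return 0;
    }
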
/Linux-v5.15/arch/arm/mm/
D | flush.c |
     38  static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)  in flush_pfn_alias() argument
     43  set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));  in flush_pfn_alias()
     52  static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)  in flush_icache_alias() argument
     58  set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));  in flush_icache_alias()
     98  void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)  in flush_cache_page() argument
    101  vivt_flush_cache_page(vma, user_addr, pfn);  in flush_cache_page()
    106  flush_pfn_alias(pfn, user_addr);  in flush_cache_page()
    115  #define flush_pfn_alias(pfn,vaddr) do { } while (0)  argument
    116  #define flush_icache_alias(pfn,vaddr,len) do { } while (0)  argument
    271  unsigned long pfn;  in __sync_icache_dcache() local
    [all …]

D | fault-armv.c |
     37  unsigned long pfn, pte_t *ptep)  in do_adjust_pte() argument
     52  flush_cache_page(vma, address, pfn);  in do_adjust_pte()
     53  outer_flush_range((pfn << PAGE_SHIFT),  in do_adjust_pte()
     54  (pfn << PAGE_SHIFT) + PAGE_SIZE);  in do_adjust_pte()
     89  unsigned long pfn)  in adjust_pte() argument
    124  ret = do_adjust_pte(vma, address, pfn, pte);  in adjust_pte()
    134  unsigned long addr, pte_t *ptep, unsigned long pfn)  in make_coherent() argument
    161  aliases += adjust_pte(mpnt, mpnt->vm_start + offset, pfn);  in make_coherent()
    165  do_adjust_pte(vma, addr, pfn, ptep);  in make_coherent()
    184  unsigned long pfn = pte_pfn(*ptep);  in update_mmu_cache() local
    [all …]

/Linux-v5.15/arch/arm/include/asm/
D | dma-direct.h |
     12  static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)  in pfn_to_dma() argument
     15  pfn = PFN_DOWN(translate_phys_to_dma(dev, PFN_PHYS(pfn)));  in pfn_to_dma()
     16  return (dma_addr_t)__pfn_to_bus(pfn);  in pfn_to_dma()
     21  unsigned long pfn = __bus_to_pfn(addr);  in dma_to_pfn() local
     24  pfn = PFN_DOWN(translate_dma_to_phys(dev, PFN_PHYS(pfn)));  in dma_to_pfn()
     25  return pfn;  in dma_to_pfn()

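pfn_to_dma() and dma_to_pfn() convert between CPU page frame numbers and device-visible bus addresses, with an optional per-device translation in between. The sketch below models the simplest case of a single constant bus offset; the offset, page size, and the assumption that __pfn_to_bus() reduces to such an offset are all illustrative, since the real mapping is platform-specific.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MODEL_PAGE_SHIFT   12UL
    #define MODEL_DMA_OFFSET   0x80000000ULL   /* bus address 0 maps to this CPU phys addr (invented) */

    typedef uint64_t dma_addr_t;

    /* CPU pfn -> bus address: to a physical address, then shift into bus space. */
    static dma_addr_t model_pfn_to_dma(unsigned long pfn)
    {
            uint64_t phys = (uint64_t)pfn << MODEL_PAGE_SHIFT;
            return (dma_addr_t)(phys - MODEL_DMA_OFFSET);
    }

    /* Bus address -> CPU pfn: undo the offset and drop the in-page bits. */
    static unsigned long model_dma_to_pfn(dma_addr_t addr)
    {
            uint64_t phys = (uint64_t)addr + MODEL_DMA_OFFSET;
            return (unsigned long)(phys >> MODEL_PAGE_SHIFT);
    }

    int main(void)
    {
            unsigned long pfn = 0x80123;                  /* phys 0x80123000 in this model */
            dma_addr_t dma = model_pfn_to_dma(pfn);

            assert(dma == 0x123000);                      /* offset removed on the bus side */
            assert(model_dma_to_pfn(dma) == pfn);         /* translation is invertible      */
            printf("pfn %#lx <-> dma %#llx\n", pfn, (unsigned long long)dma);
            return 0;
    }
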
/Linux-v5.15/drivers/gpu/drm/i915/
D | i915_mm.c |
     33  unsigned long pfn;  member
     45  set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));  in remap_pfn()
     46  r->pfn++;  in remap_pfn()
     58  return r->sgt.pfn + (r->sgt.curr >> PAGE_SHIFT);  in sgt_pfn()
     71  r->pfn++; /* track insertions in case we need to unwind later */  in remap_sg()
     91  unsigned long addr, unsigned long pfn, unsigned long size,  in remap_io_mapping() argument
    102  r.pfn = pfn;  in remap_io_mapping()
    108  zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);  in remap_io_mapping()
    145  zap_vma_ptes(vma, addr, r.pfn << PAGE_SHIFT);  in remap_io_sg()
