
Searched refs: page_to_nid (Results 1 – 25 of 45) sorted by relevance

/Linux-v5.10/mm/
page_ext.c 121 base = NODE_DATA(page_to_nid(page))->node_page_ext; in lookup_page_ext()
130 index = pfn - round_down(node_start_pfn(page_to_nid(page)), in lookup_page_ext()
list_lru.c 117 int nid = page_to_nid(virt_to_page(item)); in list_lru_add()
141 int nid = page_to_nid(virt_to_page(item)); in list_lru_del()
hugetlb.c 1027 int nid = page_to_nid(page); in enqueue_huge_page()
1230 if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order)) in free_gigantic_page()
1303 h->nr_huge_pages_node[page_to_nid(page)]--; in update_and_free_page()
1393 int nid = page_to_nid(page); in __free_huge_page()
1675 prep_new_huge_page(h, page, page_to_nid(page)); in alloc_fresh_huge_page()
1771 int nid = page_to_nid(head); in dissolve_free_huge_page()
1856 h->surplus_huge_pages_node[page_to_nid(page)]++; in alloc_surplus_huge_page()
2440 prep_new_huge_page(h, page, page_to_nid(page)); in gather_bootmem_prealloc()
2552 h->free_huge_pages_node[page_to_nid(page)]--; in try_to_free_low()
5579 int old_nid = page_to_nid(oldpage); in move_hugetlb_state()
[all …]
slub.c 1210 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in free_debug_processing()
1809 inc_slabs_node(s, page_to_nid(page), page->objects); in allocate_slab()
1864 dec_slabs_node(s, page_to_nid(page), page->objects); in discard_slab()
2153 struct kmem_cache_node *n = get_node(s, page_to_nid(page)); in deactivate_slab()
2317 n2 = get_node(s, page_to_nid(page)); in unfreeze_partials()
2493 if (node != NUMA_NO_NODE && page_to_nid(page) != node) in node_match()
3004 n = get_node(s, page_to_nid(page)); in __slab_free()
3525 if (page_to_nid(page) != node) { in early_kmem_cache_node_alloc()
4679 node_set(page_to_nid(virt_to_page(track)), l->nodes); in add_location()
4710 node_set(page_to_nid(virt_to_page(track)), l->nodes); in add_location()
[all …]
sparse.c 45 int page_to_nid(const struct page *page) in page_to_nid() function
49 EXPORT_SYMBOL(page_to_nid);
memremap.c 134 nid = page_to_nid(first_page); in pageunmap_range()
huge_memory.c 474 struct pglist_data *pgdat = NODE_DATA(page_to_nid(page)); in get_deferred_split_queue()
484 struct pglist_data *pgdat = NODE_DATA(page_to_nid(page)); in get_deferred_split_queue()
1434 page_nid = page_to_nid(page); in do_huge_pmd_numa_page()
2620 struct pglist_data *pgdata = NODE_DATA(page_to_nid(head)); in split_huge_page_to_list()
2793 memcg_set_shrinker_bit(memcg, page_to_nid(page), in deferred_split_huge_page()
mempolicy.c 453 int nid = page_to_nid(page); in queue_pages_required()
930 err = page_to_nid(p); in lookup_node()
2141 if (page && page_to_nid(page) == nid) { in alloc_page_interleave()
2467 int curnid = page_to_nid(page); in mpol_misplaced()
mprotect.c 112 if (target_node == page_to_nid(page)) in change_pte_range()
slab.c 558 page_node = page_to_nid(page); in cache_free_pfmemalloc()
795 int page_node = page_to_nid(virt_to_page(objp)); in cache_free_alien()
2596 page_node = page_to_nid(page); in cache_grow_begin()
2651 n = get_node(cachep, page_to_nid(page)); in cache_grow_end()
3142 nid = page_to_nid(page); in fallback_alloc()
slob.c 326 if (node != NUMA_NO_NODE && page_to_nid(sp) != node) in slob_alloc()
migrate.c 1553 nid = page_to_nid(page); in alloc_migration_target()
1648 if (page_to_nid(page) == node) in add_page_for_migration()
1826 err = page ? page_to_nid(page) : -ENOENT; in do_pages_stat_array()
khugepaged.c 1306 node = page_to_nid(page); in khugepaged_scan_pmd()
1986 node = page_to_nid(page); in khugepaged_scan_file()
memory-failure.c 288 drop_slab_node(page_to_nid(p)); in shake_page()
vmscan.c 2122 nid = page_to_nid(page); in reclaim_pages()
2126 if (nid == page_to_nid(page)) { in reclaim_pages()
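
A note on the mm/sparse.c hit above (lines 45-49): that is the out-of-line definition of page_to_nid(), used when the node number cannot be stored in page->flags. A minimal sketch of that variant, assuming the NODE_NOT_IN_PAGE_FLAGS configuration where a per-memory-section table maps sections to nodes (a sketch for orientation, not the verbatim v5.10 source):

    #include <linux/mm.h>
    #include <linux/mmzone.h>
    #include <linux/export.h>

    /* One node id per memory section; u8 is enough for small MAX_NUMNODES. */
    static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;

    int page_to_nid(const struct page *page)
    {
        /* page_to_section() decodes the section index from page->flags. */
        return section_to_node_table[page_to_section(page)];
    }
    EXPORT_SYMBOL(page_to_nid);
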
/Linux-v5.10/include/asm-generic/
memory_model.h 46 struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
/Linux-v5.10/arch/m68k/include/asm/
page_mm.h 165 pgdat = &pg_data_map[page_to_nid(__p)]; \
/Linux-v5.10/include/linux/
mm.h 1296 extern int page_to_nid(const struct page *page);
1298 static inline int page_to_nid(const struct page *page) in page_to_nid() function
1373 return page_to_nid(page); /* XXX */ in page_cpupid_xchg_last()
1378 return page_to_nid(page); /* XXX */ in page_cpupid_last()
1444 return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)]; in page_zone()
1449 return NODE_DATA(page_to_nid(page)); in page_pgdat()
mmzone.h 1417 page_to_nid(pfn_to_page(__pfn_to_nid_pfn)); \
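
The include/linux/mm.h hits at lines 1296 and 1298 are the two flavours of the accessor: an extern prototype for the out-of-line sparse.c version above, and a static inline that reads the node id straight out of the page flags. A minimal sketch of the inline flavour, assuming the usual NODES_PGSHIFT/NODES_MASK layout from include/linux/page-flags-layout.h (sketch only; the real v5.10 body also poison-checks the page):

    static inline int page_to_nid(const struct page *page)
    {
        /* The node id is encoded in the upper bits of page->flags. */
        return (int)((page->flags >> NODES_PGSHIFT) & NODES_MASK);
    }

The mm.h hits at lines 1444 and 1449 (page_zone() and page_pgdat()) then turn that node id into the node's zones and pglist_data via NODE_DATA().
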
/Linux-v5.10/kernel/dma/
contiguous.c 360 if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)], in dma_free_contiguous()
/Linux-v5.10/net/core/
page_pool.c 133 if (likely(page_to_nid(page) == pref_nid)) { in page_pool_refill_alloc_cache()
/Linux-v5.10/drivers/net/ethernet/cavium/liquidio/
octeon_network.h 342 unlikely(page_to_nid(pg_info->page) != numa_node_id())) { in recv_buffer_recycle()
/Linux-v5.10/fs/proc/
task_mmu.c 1727 md->node[page_to_nid(page)] += nr_pages; in gather_stats()
1746 nid = page_to_nid(page); in can_gather_numa_stats()
1771 nid = page_to_nid(page); in can_gather_numa_stats_pmd()
/Linux-v5.10/Documentation/vm/
memory-model.rst 96 :c:func:`page_to_nid` is generic as it uses the node number encoded in
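
As the Documentation/vm/memory-model.rst hit notes, page_to_nid() is generic across memory models because the node number is recoverable from the struct page itself (typically encoded in page->flags), so callers never need to know how physical memory is laid out. The pattern running through the hits above is "page -> node id -> per-node state". A hypothetical helper showing that shape (note_page_node() and pages_seen_per_node[] are made up for illustration; the NODE_DATA() step is what page_pgdat() in the mm.h hits already does):

    #include <linux/mm.h>
    #include <linux/mmzone.h>
    #include <linux/atomic.h>

    /* Hypothetical per-node counter, for illustration only. */
    static atomic_long_t pages_seen_per_node[MAX_NUMNODES];

    static void note_page_node(struct page *page)
    {
        int nid = page_to_nid(page);          /* NUMA node backing this page */
        pg_data_t *pgdat = NODE_DATA(nid);    /* that node's pglist_data */

        atomic_long_inc(&pages_seen_per_node[nid]);
        (void)pgdat; /* real callers index per-node caches, lists or counters */
    }
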
/Linux-v5.10/drivers/virt/nitro_enclaves/
ne_misc_dev.c 816 if (ne_enclave->numa_node != page_to_nid(mem_region_page)) { in ne_sanity_check_user_mem_region_page()
