/Linux-v4.19/arch/x86/kernel/ |
D | espfix_64.c | 175 struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0); in init_espfix_ap()
D | espfix_64.c | 187 struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0); in init_espfix_ap()
D | espfix_64.c | 197 stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0)); in init_espfix_ap()
|
D | irq_32.c | 119 irqstk = page_address(alloc_pages_node(cpu_to_node(cpu), in irq_ctx_init()
D | irq_32.c | 124 irqstk = page_address(alloc_pages_node(cpu_to_node(cpu), in irq_ctx_init()
|
/Linux-v4.19/include/linux/ |
D | gfp.h | 494 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, in alloc_pages_node() function
D | gfp.h | 518 alloc_pages_node(numa_node_id(), gfp_mask, order)
|
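The gfp.h entries above are the core of the interface: line 494 is the alloc_pages_node() definition, and nearly every hit in this listing follows the same pattern around it: pick a node id (numa_node_id(), cpu_to_node(cpu), dev_to_node(dev), ...), call alloc_pages_node(), and usually convert the returned struct page to a kernel virtual address with page_address(). The following is a minimal sketch of that pattern, assuming ordinary GFP_KERNEL context; the example_* helper names are hypothetical and do not come from the v4.19 tree.

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical helper: allocate 2^order contiguous, zeroed pages on the
 * given NUMA node and return their kernel virtual address, or NULL on
 * failure. Callers in the listing typically pass cpu_to_node(cpu) or
 * dev_to_node(dev) as nid.
 */
static void *example_alloc_on_node(int nid, unsigned int order)
{
	struct page *page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO, order);

	return page ? page_address(page) : NULL;
}

/* Hypothetical helper: release pages obtained from example_alloc_on_node(). */
static void example_free_on_node(void *addr, unsigned int order)
{
	if (addr)
		__free_pages(virt_to_page(addr), order);
}

For callers that do not care about placement, the alloc_pages() wrapper shown at gfp.h:518 expands to the same call with nid defaulted to numa_node_id().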
/Linux-v4.19/arch/ia64/include/asm/ |
D | thread_info.h | 85 struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_COMP, \
|
/Linux-v4.19/kernel/dma/ |
D | direct.c | 89 page = alloc_pages_node(dev_to_node(dev), gfp, page_order); in dma_direct_alloc()
|
/Linux-v4.19/arch/sparc/kernel/ |
D | iommu.c | 123 page = alloc_pages_node(numa_node, GFP_KERNEL, 0); in iommu_table_init()
D | iommu.c | 134 page = alloc_pages_node(numa_node, GFP_KERNEL, order); in iommu_table_init()
D | iommu.c | 215 page = alloc_pages_node(nid, gfp, order); in dma_4u_alloc_coherent()
|
/Linux-v4.19/drivers/iommu/ |
D | intel-pasid.c | 149 pages = alloc_pages_node(info->iommu->node, in intel_pasid_alloc_table()
|
/Linux-v4.19/net/core/ |
D | page_pool.c | 132 page = alloc_pages_node(pool->p.nid, gfp, pool->p.order); in __page_pool_alloc_pages_slow()
|
/Linux-v4.19/drivers/char/agp/ |
D | sgi-agp.c | 49 page = alloc_pages_node(nid, GFP_KERNEL, 0); in sgi_tioca_alloc_page()
|
/Linux-v4.19/mm/ |
D | percpu-vm.c | 95 *pagep = alloc_pages_node(cpu_to_node(cpu), gfp, 0); in pcpu_alloc_pages()
|
D | sparse-vmemmap.c | 58 page = alloc_pages_node(node, gfp_mask, order); in vmemmap_alloc_block()
|
/Linux-v4.19/kernel/events/ |
D | ring_buffer.c | 533 page = alloc_pages_node(node, PERF_AUX_GFP, order); in rb_alloc_aux_page()
D | ring_buffer.c | 711 page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); in perf_mmap_alloc_page()
|
/Linux-v4.19/arch/powerpc/platforms/cell/ |
D | iommu.c | 323 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size)); in cell_iommu_setup_stab()
D | iommu.c | 348 page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size)); in cell_iommu_alloc_ptab()
D | iommu.c | 522 page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0); in cell_iommu_setup_window()
|
/Linux-v4.19/arch/powerpc/kernel/ |
D | dma.c | 108 page = alloc_pages_node(node, flag, get_order(size)); in __dma_nommu_alloc_coherent()
|
D | iommu.c | 665 page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz)); in iommu_init_table()
D | iommu.c | 860 page = alloc_pages_node(node, flag, order); in iommu_alloc_coherent()
|
/Linux-v4.19/arch/powerpc/platforms/powernv/ |
D | pci-ioda-tce.c | 39 tce_mem = alloc_pages_node(nid, GFP_KERNEL, shift - PAGE_SHIFT); in pnv_alloc_tce_level()
|
/Linux-v4.19/arch/ia64/sn/pci/ |
D | tioca_provider.c | 123 alloc_pages_node(tioca_kern->ca_closest_node, in tioca_gart_init()
|
/Linux-v4.19/arch/powerpc/perf/ |
D | imc-pmu.c | 567 mem_info->vbase = page_address(alloc_pages_node(nid, in core_imc_mem_init()
D | imc-pmu.c | 836 local_mem = page_address(alloc_pages_node(nid, in thread_imc_mem_alloc()
|
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx4/ |
D | icm.c | 103 page = alloc_pages_node(node, gfp_mask, order); in mlx4_alloc_icm_pages()
|
/Linux-v4.19/drivers/misc/sgi-gru/ |
D | grufile.c | 279 page = alloc_pages_node(nid, GFP_KERNEL, order); in gru_init_tables()
|
/Linux-v4.19/drivers/net/ethernet/mellanox/mlx5/core/ |
D | pagealloc.c | 217 page = alloc_pages_node(nid, GFP_HIGHUSER, 0); in alloc_system_page()
|
/Linux-v4.19/drivers/net/ethernet/amd/xgbe/ |
D | xgbe-desc.c | 300 pages = alloc_pages_node(node, gfp, order); in xgbe_alloc_pages()
|
/Linux-v4.19/drivers/hv/ |
D | channel.c | 112 page = alloc_pages_node(cpu_to_node(newchannel->target_cpu), in vmbus_open()
|
/Linux-v4.19/arch/powerpc/sysdev/xive/ |
D | common.c | 1452 pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order); in xive_queue_page_alloc()
|
/Linux-v4.19/kernel/trace/ |
D | ring_buffer.c | 1210 page = alloc_pages_node(cpu_to_node(cpu), mflags, 0); in __rb_allocate_pages()
D | ring_buffer.c | 1292 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0); in rb_allocate_cpu_buffer()
D | ring_buffer.c | 4571 page = alloc_pages_node(cpu_to_node(cpu), in ring_buffer_alloc_read_page()
|