Lines matching refs:gfp_mask in mm/vmalloc.c

Each entry below gives the source line number, the matching line, and the enclosing function; entries where gfp_mask is a formal parameter are tagged "argument".

1560 preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node) in preload_this_cpu_lock() argument
1574 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); in preload_this_cpu_lock()
1589 int node, gfp_t gfp_mask) in alloc_vmap_area() argument
1605 gfp_mask = gfp_mask & GFP_RECLAIM_MASK; in alloc_vmap_area()
1607 va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node); in alloc_vmap_area()
1615 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); in alloc_vmap_area()
1618 preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node); in alloc_vmap_area()
1665 if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit()) in alloc_vmap_area()
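The entries at 1560-1665 are the vmap_area setup path: alloc_vmap_area() strips the caller's mask down to its reclaim-modifier bits (1605), hands the result to the slab allocation (1607) and to preload_this_cpu_lock() (1618), and respects __GFP_NOWARN when rate-limiting the failure message (1665). The preload call exists so the possibly-sleeping slab allocation happens before free_vmap_area_lock is taken, never under it. Below is a minimal sketch of that preload-then-lock pattern; struct item, item_cache, spare, and my_lock are hypothetical names, and GFP_RECLAIM_MASK is defined in mm-internal headers, so this only compiles in that context:

    #include <linux/gfp.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/percpu.h>

    struct item { int payload; };                   /* hypothetical */

    static DEFINE_PER_CPU(struct item *, spare);    /* per-cpu spare object */
    static DEFINE_SPINLOCK(my_lock);
    static struct kmem_cache *item_cache;           /* hypothetical cache */

    static void preload_then_lock(gfp_t gfp_mask, int node)
    {
        struct item *it = NULL;

        /* Allocate while still allowed to sleep; only the reclaim
         * bits of the caller's mask matter for metadata. */
        if (!this_cpu_read(spare))
            it = kmem_cache_alloc_node(item_cache,
                                       gfp_mask & GFP_RECLAIM_MASK, node);

        spin_lock(&my_lock);

        /* Publish the spare; if this CPU already has one, free ours.
         * Either way, nothing allocates while my_lock is held. */
        if (it && this_cpu_cmpxchg(spare, NULL, it))
            kmem_cache_free(item_cache, it);
    }

The matching unlock and the consumption of the spare happen in the caller, as they do for free_vmap_area_lock in the listing.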
1946 static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) in new_vmap_block() argument
1958 gfp_mask & GFP_RECLAIM_MASK, node); in new_vmap_block()
1964 node, gfp_mask); in new_vmap_block()
1982 err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask); in new_vmap_block()
2051 static void *vb_alloc(unsigned long size, gfp_t gfp_mask) in vb_alloc() argument
2098 vaddr = new_vmap_block(order, gfp_mask); in vb_alloc()
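The entries at 1946-2098 are the per-cpu vmap-block fast path: vb_alloc() falls back to new_vmap_block() when no existing block has room (2098), and new_vmap_block() threads the same gfp_mask through the metadata allocation (1958), the address-space reservation (1964), and the xa_insert() that indexes the new block (1982). That last hop matters because XArray insertion can itself allocate internal nodes. A hedged sketch of the pattern; blocks, struct block, and register_block() are illustrative names:

    #include <linux/xarray.h>
    #include <linux/gfp.h>
    #include <linux/slab.h>

    struct block { unsigned long base; };   /* hypothetical payload */

    static DEFINE_XARRAY(blocks);           /* hypothetical index */

    static int register_block(unsigned long idx, struct block *b,
                              gfp_t gfp_mask)
    {
        /* xa_insert() may allocate XArray nodes with gfp_mask, so a
         * GFP_NOFS or GFP_NOWAIT caller keeps its constraint here
         * too; it returns -EBUSY if idx is already occupied. */
        int err = xa_insert(&blocks, idx, b, gfp_mask);

        if (err)
            kfree(b);   /* undo on failure, as new_vmap_block() does */
        return err;
    }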
2479 gfp_t gfp_mask, const void *caller) in __get_vm_area_node() argument
2494 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
2501 va = alloc_vmap_area(size, align, start, end, node, gfp_mask); in __get_vm_area_node()
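__get_vm_area_node() (2479-2501) repeats the split seen in alloc_vmap_area(): the small vm_struct header is allocated with the mask filtered to GFP_RECLAIM_MASK (2494), while the unfiltered mask travels on to alloc_vmap_area() (2501), which filters it again for its own metadata. A condensed sketch of the shape, not the full body (size and guard-page handling, vm setup, and error reporting omitted); get_area() is my name for it, and GFP_RECLAIM_MASK is again mm-internal:

    #include <linux/slab.h>
    #include <linux/err.h>

    static struct vm_struct *get_area(unsigned long size, unsigned long align,
                                      unsigned long start, unsigned long end,
                                      int node, gfp_t gfp_mask)
    {
        struct vm_struct *area;
        struct vmap_area *va;

        /* Header allocation: placement bits such as GFP_DMA are
         * irrelevant here, so only reclaim behaviour is kept. */
        area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
        if (!area)
            return NULL;

        /* The address-space reservation sees the caller's full mask. */
        va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
        if (IS_ERR(va)) {
            kfree(area);
            return NULL;
        }
        return area;
    }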
3005 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, in __vmalloc_area_node() argument
3009 const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO; in __vmalloc_area_node()
3010 bool nofail = gfp_mask & __GFP_NOFAIL; in __vmalloc_area_node()
3020 gfp_mask |= __GFP_NOWARN; in __vmalloc_area_node()
3021 if (!(gfp_mask & (GFP_DMA | GFP_DMA32))) in __vmalloc_area_node()
3022 gfp_mask |= __GFP_HIGHMEM; in __vmalloc_area_node()
3033 warn_alloc(gfp_mask, NULL, in __vmalloc_area_node()
3043 area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN, in __vmalloc_area_node()
3047 if (gfp_mask & __GFP_ACCOUNT) { in __vmalloc_area_node()
3059 warn_alloc(gfp_mask, NULL, in __vmalloc_area_node()
3069 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) in __vmalloc_area_node()
3071 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) in __vmalloc_area_node()
3081 if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO) in __vmalloc_area_node()
3083 else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0) in __vmalloc_area_node()
3087 warn_alloc(gfp_mask, NULL, in __vmalloc_area_node()
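__vmalloc_area_node() (3005-3087) is where most of the mask is interpreted: a nested mask for the page-array allocation is derived by filtering to GFP_RECLAIM_MASK and adding __GFP_ZERO (3009), __GFP_NOFAIL selects retry semantics (3010), __GFP_HIGHMEM is added for the backing pages unless the caller demanded a DMA zone (3021-3022), and __GFP_ACCOUNT switches on memcg accounting of the mapped pages (3047). The checks at 3069-3083 are the subtle part: the page-table allocations done while mapping cannot take a gfp argument, so a GFP_NOFS caller (__GFP_IO set, __GFP_FS clear) or a GFP_NOIO caller (both clear) is honoured by entering the matching memalloc scope around the mapping step. A self-contained sketch of that translation; do_mapping() is a placeholder:

    #include <linux/sched/mm.h>
    #include <linux/gfp.h>

    static int do_mapping(void)
    {
        return 0;   /* placeholder: may allocate page tables with
                     * GFP_KERNEL internally */
    }

    static int map_with_scope(gfp_t gfp_mask)
    {
        unsigned int flags = 0;
        int ret;

        if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
            flags = memalloc_nofs_save();   /* GFP_NOFS caller */
        else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
            flags = memalloc_noio_save();   /* GFP_NOIO caller */

        /* Inside the scope, any GFP_KERNEL allocation implicitly
         * loses __GFP_FS (nofs) or __GFP_IO|__GFP_FS (noio). */
        ret = do_mapping();

        if ((gfp_mask & (__GFP_FS | __GFP_IO)) == __GFP_IO)
            memalloc_nofs_restore(flags);
        else if ((gfp_mask & (__GFP_FS | __GFP_IO)) == 0)
            memalloc_noio_restore(flags);

        return ret;
    }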
3129 unsigned long start, unsigned long end, gfp_t gfp_mask, in __vmalloc_node_range() argument
3144 warn_alloc(gfp_mask, NULL, in __vmalloc_node_range()
3175 gfp_mask, caller); in __vmalloc_node_range()
3177 bool nofail = gfp_mask & __GFP_NOFAIL; in __vmalloc_node_range()
3178 warn_alloc(gfp_mask, NULL, in __vmalloc_node_range()
3205 gfp_mask |= __GFP_SKIP_KASAN_UNPOISON | __GFP_SKIP_ZERO; in __vmalloc_node_range()
3213 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); in __vmalloc_node_range()
3226 if (!want_init_on_free() && want_init_on_alloc(gfp_mask) && in __vmalloc_node_range()
3227 (gfp_mask & __GFP_SKIP_ZERO)) in __vmalloc_node_range()
3241 kmemleak_vmalloc(area, size, gfp_mask); in __vmalloc_node_range()
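__vmalloc_node_range() (3129-3241) orchestrates the whole path: it warns on failure unless __GFP_NOWARN is set (3144, 3178), retries when the caller passed __GFP_NOFAIL (3177-3178), lets hardware tag-based KASAN skip the page allocator's unpoisoning and zeroing (3205), arranges for KASAN to zero during unpoisoning when init_on_alloc zeroing was requested but __GFP_SKIP_ZERO suppressed it (3226-3227), and finally reports the area to kmemleak (3241). Callers that need specific placement use this entry point directly. A hedged example, modelled on how module text has historically been mapped into the MODULES_VADDR..MODULES_END window; the exact flags, export status, and window macros vary by architecture and kernel version:

    #include <linux/vmalloc.h>
    #include <linux/gfp.h>

    static void *alloc_in_module_window(unsigned long size)
    {
        /* __builtin_return_address(0) becomes the "caller" shown in
         * /proc/vmallocinfo for this mapping. */
        return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
                                    GFP_KERNEL, PAGE_KERNEL, 0,
                                    NUMA_NO_NODE,
                                    __builtin_return_address(0));
    }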
3276 gfp_t gfp_mask, int node, const void *caller) in __vmalloc_node() argument
3279 gfp_mask, PAGE_KERNEL, 0, node, caller); in __vmalloc_node()
3290 void *__vmalloc(unsigned long size, gfp_t gfp_mask) in __vmalloc() argument
3292 return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE, in __vmalloc()
3328 void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) in vmalloc_huge() argument
3331 gfp_mask, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP, in vmalloc_huge()
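The last three entries are the public funnels: __vmalloc_node() (3276) fixes the protection to PAGE_KERNEL, __vmalloc() (3290) additionally fixes the node to NUMA_NO_NODE, and vmalloc_huge() (3328) opts into VM_ALLOW_HUGE_VMAP so large allocations may get huge-page mappings. Typical call sites might look like the following; the two functions are hypothetical, but they use only the exported interfaces named in the listing:

    #include <linux/vmalloc.h>
    #include <linux/gfp.h>

    static void *alloc_fs_buffer(unsigned long bytes)
    {
        /* GFP_NOFS is honoured end to end: filtered into the metadata
         * allocations and turned into a memalloc_nofs scope for the
         * page-table side, as the listing above shows. */
        return __vmalloc(bytes, GFP_NOFS | __GFP_ZERO);
    }

    static void *alloc_big_table(unsigned long bytes)
    {
        /* Huge mappings are an optimisation hint, not a guarantee. */
        return vmalloc_huge(bytes, GFP_KERNEL);
    }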