Searched refs: gfp_mask (Results 1 – 25 of 220), sorted by relevance

/Linux-v5.4/include/linux/
gfp.h
490 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
494 __alloc_pages(gfp_t gfp_mask, unsigned int order, int preferred_nid) in __alloc_pages() argument
496 return __alloc_pages_nodemask(gfp_mask, order, preferred_nid, NULL); in __alloc_pages()
504 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order) in __alloc_pages_node() argument
507 VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid)); in __alloc_pages_node()
509 return __alloc_pages(gfp_mask, order, nid); in __alloc_pages_node()
517 static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask, in alloc_pages_node() argument
523 return __alloc_pages_node(nid, gfp_mask, order); in alloc_pages_node()
527 extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);
530 alloc_pages(gfp_t gfp_mask, unsigned int order) in alloc_pages() argument
[all …]
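
The gfp.h hits above are the core entry points of the page allocator; gfp_mask tells the allocator what it may do to satisfy the request (sleep, start I/O, use high memory). Not part of the search results, for context only: a minimal sketch of calling this API, with hypothetical helper names (grab_pages/drop_pages) and illustrative flag choices.

#include <linux/gfp.h>
#include <linux/types.h>

/* Allocate 2^order contiguous pages; GFP_KERNEL may sleep, GFP_ATOMIC may not. */
static struct page *grab_pages(unsigned int order, bool may_sleep)
{
        gfp_t gfp_mask = may_sleep ? GFP_KERNEL : GFP_ATOMIC;

        return alloc_pages(gfp_mask, order);    /* NULL on failure */
}

static void drop_pages(struct page *page, unsigned int order)
{
        __free_pages(page, order);
}
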
cpuset.h
67 extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
69 static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed() argument
72 return __cpuset_node_allowed(node, gfp_mask); in cpuset_node_allowed()
76 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed() argument
78 return __cpuset_node_allowed(zone_to_nid(z), gfp_mask); in __cpuset_zone_allowed()
81 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
84 return __cpuset_zone_allowed(z, gfp_mask); in cpuset_zone_allowed()
207 static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed() argument
212 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed() argument
217 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed() argument
mempool.h
13 typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
36 gfp_t gfp_mask, int node_id);
44 gfp_t gfp_mask, int nid);
48 extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
56 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
77 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
96 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
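
mempool.h wraps an underlying allocator so that a minimum number of elements stays available even when gfp_mask-governed allocation fails. A sketch, assuming a kmalloc-backed pool (mempool_create_kmalloc_pool() is the stock wrapper around the mempool_kmalloc/mempool_kfree helpers listed above; my_pool and the sizes are illustrative):

#include <linux/mempool.h>
#include <linux/slab.h>

static mempool_t *my_pool;

static int my_pool_init(void)
{
        /* Guarantee 4 objects of 128 bytes even under memory pressure. */
        my_pool = mempool_create_kmalloc_pool(4, 128);
        return my_pool ? 0 : -ENOMEM;
}

static void my_pool_use(void)
{
        /* With __GFP_DIRECT_RECLAIM set (as in GFP_KERNEL), mempool_alloc
         * sleeps until an element is returned rather than failing. */
        void *obj = mempool_alloc(my_pool, GFP_KERNEL);

        /* ... use obj ... */
        mempool_free(obj, my_pool);
}
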
pagemap.h
102 return mapping->gfp_mask; in mapping_gfp_mask()
107 gfp_t gfp_mask) in mapping_gfp_constraint() argument
109 return mapping_gfp_mask(mapping) & gfp_mask; in mapping_gfp_constraint()
118 m->gfp_mask = mask; in mapping_set_gfp_mask()
308 pgoff_t offset, gfp_t gfp_mask) in find_or_create_page() argument
312 gfp_mask); in find_or_create_page()
389 pgoff_t index, gfp_t gfp_mask);
608 pgoff_t index, gfp_t gfp_mask);
610 pgoff_t index, gfp_t gfp_mask);
613 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
[all …]
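
The pagemap.h hits show per-mapping allocation policy: every address_space carries its own gfp_mask, and mapping_gfp_constraint() ANDs a caller's mask into it so a filesystem can, for example, forbid __GFP_FS during page-cache fills. A sketch of honoring that policy (cache_page is a hypothetical helper):

#include <linux/pagemap.h>

static struct page *cache_page(struct address_space *mapping, pgoff_t index)
{
        gfp_t gfp_mask = mapping_gfp_constraint(mapping, GFP_KERNEL);

        /* Returns the page locked and referenced, or NULL on failure;
         * the caller must unlock_page() and put_page() when done. */
        return find_or_create_page(mapping, index, gfp_mask);
}
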
page_owner.h
13 unsigned int order, gfp_t gfp_mask);
28 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
31 __set_page_owner(page, order, gfp_mask); in set_page_owner()
59 unsigned int order, gfp_t gfp_mask) in set_page_owner() argument
swap.h
353 gfp_t gfp_mask, nodemask_t *mask);
357 gfp_t gfp_mask,
360 gfp_t gfp_mask, bool noswap,
516 static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) in add_swap_count_continuation() argument
539 gfp_t gfp_mask, struct vm_fault *vmf) in swap_cluster_readahead() argument
544 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, in swapin_readahead() argument
568 gfp_t gfp_mask) in add_to_swap_cache() argument
650 gfp_t gfp_mask);
653 int node, gfp_t gfp_mask) in mem_cgroup_throttle_swaprate() argument
/Linux-v5.4/block/
blk-lib.c
26 sector_t nr_sects, gfp_t gfp_mask, int flags, in __blkdev_issue_discard() argument
63 bio = blk_next_bio(bio, 0, gfp_mask); in __blkdev_issue_discard()
98 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) in blkdev_issue_discard() argument
105 ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags, in blkdev_issue_discard()
132 sector_t nr_sects, gfp_t gfp_mask, struct page *page, in __blkdev_issue_write_same() argument
157 bio = blk_next_bio(bio, 1, gfp_mask); in __blkdev_issue_write_same()
193 sector_t nr_sects, gfp_t gfp_mask, in blkdev_issue_write_same() argument
201 ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page, in blkdev_issue_write_same()
213 sector_t sector, sector_t nr_sects, gfp_t gfp_mask, in __blkdev_issue_write_zeroes() argument
233 bio = blk_next_bio(bio, 0, gfp_mask); in __blkdev_issue_write_zeroes()
[all …]
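
In these blk-lib.c helpers, gfp_mask does not size any data buffers; it only governs the internal blk_next_bio() bio allocations. A sketch of issuing a synchronous discard (discard_range is a hypothetical wrapper):

#include <linux/blkdev.h>

static int discard_range(struct block_device *bdev, sector_t start,
                         sector_t nr_sects)
{
        /* GFP_KERNEL: process context, allocation may sleep. */
        return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL, 0);
}
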
blk-map.c
66 gfp_t gfp_mask, bool copy) in __blk_rq_map_user_iov() argument
73 bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); in __blk_rq_map_user_iov()
75 bio = bio_map_user_iov(q, iter, gfp_mask); in __blk_rq_map_user_iov()
122 const struct iov_iter *iter, gfp_t gfp_mask) in blk_rq_map_user_iov() argument
142 ret =__blk_rq_map_user_iov(rq, map_data, &i, gfp_mask, copy); in blk_rq_map_user_iov()
163 unsigned long len, gfp_t gfp_mask) in blk_rq_map_user() argument
172 return blk_rq_map_user_iov(q, rq, map_data, &i, gfp_mask); in blk_rq_map_user()
222 unsigned int len, gfp_t gfp_mask) in blk_rq_map_kern() argument
237 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); in blk_rq_map_kern()
239 bio = bio_map_kern(q, kbuf, len, gfp_mask); in blk_rq_map_kern()
bio.c
169 struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx, in bvec_alloc() argument
206 bvl = mempool_alloc(pool, gfp_mask); in bvec_alloc()
209 gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO); in bvec_alloc()
223 if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) { in bvec_alloc()
426 struct bio *bio_alloc_bioset(gfp_t gfp_mask, unsigned int nr_iovecs, in bio_alloc_bioset() argument
429 gfp_t saved_gfp = gfp_mask; in bio_alloc_bioset()
442 gfp_mask); in bio_alloc_bioset()
475 gfp_mask &= ~__GFP_DIRECT_RECLAIM; in bio_alloc_bioset()
477 p = mempool_alloc(&bs->bio_pool, gfp_mask); in bio_alloc_bioset()
478 if (!p && gfp_mask != saved_gfp) { in bio_alloc_bioset()
[all …]
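
The bio.c excerpt shows the characteristic retry dance: bio_alloc_bioset() may first try with __GFP_DIRECT_RECLAIM masked off, then retry with the caller's original mask (saved_gfp) so the bio_set's mempool can block; with GFP_KERNEL and a properly sized bio_set the allocation effectively cannot fail. A sketch against the v5.4 signature (make_bio is a hypothetical helper):

#include <linux/bio.h>

static struct bio *make_bio(struct bio_set *bs)
{
        /* One iovec; NULL is only possible for masks that may not reclaim. */
        return bio_alloc_bioset(GFP_KERNEL, 1, bs);
}
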
/Linux-v5.4/mm/
mempool.c
182 gfp_t gfp_mask, int node_id) in mempool_init_node() argument
192 gfp_mask, node_id); in mempool_init_node()
202 element = pool->alloc(gfp_mask, pool->pool_data); in mempool_init_node()
263 gfp_t gfp_mask, int node_id) in mempool_create_node() argument
267 pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id); in mempool_create_node()
272 gfp_mask, node_id)) { in mempool_create_node()
375 void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) in mempool_alloc() argument
382 VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO); in mempool_alloc()
383 might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM); in mempool_alloc()
385 gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ in mempool_alloc()
[all …]
page_owner.c
25 gfp_t gfp_mask; member
164 unsigned int order, gfp_t gfp_mask) in __set_page_owner_handle() argument
173 page_owner->gfp_mask = gfp_mask; in __set_page_owner_handle()
183 gfp_t gfp_mask) in __set_page_owner() argument
191 handle = save_stack(gfp_mask); in __set_page_owner()
192 __set_page_owner_handle(page, page_ext, handle, order, gfp_mask); in __set_page_owner()
235 new_page_owner->gfp_mask = old_page_owner->gfp_mask; in __copy_page_owner()
316 page_owner->gfp_mask); in pagetypeinfo_showmixedcount_print()
354 page_owner->order, page_owner->gfp_mask, in print_page_owner()
355 &page_owner->gfp_mask); in print_page_owner()
[all …]
page_alloc.c
3342 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3346 if (gfp_mask & __GFP_NOFAIL) in __should_fail_alloc_page()
3348 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in __should_fail_alloc_page()
3351 (gfp_mask & __GFP_DIRECT_RECLAIM)) in __should_fail_alloc_page()
3382 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3389 static noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
3391 return __should_fail_alloc_page(gfp_mask, order); in should_fail_alloc_page()
3547 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
3551 if (gfp_mask & __GFP_KSWAPD_RECLAIM) in alloc_flags_nofragment()
3580 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
[all …]
vmscan.c
114 gfp_t gfp_mask; member
593 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, in shrink_slab_memcg() argument
613 .gfp_mask = gfp_mask, in shrink_slab_memcg()
668 static unsigned long shrink_slab_memcg(gfp_t gfp_mask, int nid, in shrink_slab_memcg() argument
695 static unsigned long shrink_slab(gfp_t gfp_mask, int nid, in shrink_slab() argument
710 return shrink_slab_memcg(gfp_mask, nid, memcg, priority); in shrink_slab()
717 .gfp_mask = gfp_mask, in shrink_slab()
1163 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
1164 (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO)); in shrink_page_list()
1290 if (!(sc->gfp_mask & __GFP_IO)) in shrink_page_list()
[all …]
swap_state.c
359 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in __read_swap_cache_async() argument
398 new_page = alloc_page_vma(gfp_mask, vma, addr); in __read_swap_cache_async()
421 err = add_to_swap_cache(new_page, entry, gfp_mask & GFP_KERNEL); in __read_swap_cache_async()
448 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, in read_swap_cache_async() argument
452 struct page *retpage = __read_swap_cache_async(entry, gfp_mask, in read_swap_cache_async()
539 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask, in swap_cluster_readahead() argument
578 gfp_mask, vma, addr, &page_allocated); in swap_cluster_readahead()
594 return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll); in swap_cluster_readahead()
722 static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, in swap_vma_readahead() argument
749 page = __read_swap_cache_async(entry, gfp_mask, vma, in swap_vma_readahead()
[all …]
/Linux-v5.4/fs/btrfs/
ulist.h
48 struct ulist *ulist_alloc(gfp_t gfp_mask);
50 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
52 u64 *old_aux, gfp_t gfp_mask);
57 void **old_aux, gfp_t gfp_mask) in ulist_add_merge_ptr() argument
61 int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask); in ulist_add_merge_ptr()
65 return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask); in ulist_add_merge_ptr()
ulist.c
92 struct ulist *ulist_alloc(gfp_t gfp_mask) in ulist_alloc() argument
94 struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask); in ulist_alloc()
186 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask) in ulist_add() argument
188 return ulist_add_merge(ulist, val, aux, NULL, gfp_mask); in ulist_add()
192 u64 *old_aux, gfp_t gfp_mask) in ulist_add_merge() argument
203 node = kmalloc(sizeof(*node), gfp_mask); in ulist_add_merge()
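
ulist is btrfs's small set-of-unique-u64 helper; both ulist_alloc() and each node added by ulist_add() take the caller's gfp_mask, since backref walking runs in contexts with different reclaim rules. A sketch assuming btrfs-internal context (the "ulist.h" include path and collect_vals are illustrative):

#include "ulist.h"

static int collect_vals(const u64 *vals, int n)
{
        struct ulist *ul = ulist_alloc(GFP_KERNEL);
        int i, ret = 0;

        if (!ul)
                return -ENOMEM;
        for (i = 0; i < n; i++) {
                /* Returns 1 if added, 0 if already present, <0 on error. */
                ret = ulist_add(ul, vals[i], 0 /* aux */, GFP_KERNEL);
                if (ret < 0)
                        break;
        }
        ulist_free(ul);
        return ret < 0 ? ret : 0;
}
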
/Linux-v5.4/fs/nfs/blocklayout/
dev.c
231 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
236 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_simple() argument
242 dev = bl_resolve_deviceid(server, v, gfp_mask); in bl_parse_simple()
353 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_scsi() argument
402 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_slice() argument
407 ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask); in bl_parse_slice()
418 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_concat() argument
431 volumes, v->concat.volumes[i], gfp_mask); in bl_parse_concat()
447 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_stripe() argument
460 volumes, v->stripe.volumes[i], gfp_mask); in bl_parse_stripe()
[all …]
/Linux-v5.4/lib/
generic-radix-tree.c
79 static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask) in genradix_alloc_node() argument
83 node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO); in genradix_alloc_node()
90 kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask); in genradix_alloc_node()
105 gfp_t gfp_mask) in __genradix_ptr_alloc() argument
122 new_node = genradix_alloc_node(gfp_mask); in __genradix_ptr_alloc()
145 new_node = genradix_alloc_node(gfp_mask); in __genradix_ptr_alloc()
218 gfp_t gfp_mask) in __genradix_prealloc() argument
223 if (!__genradix_ptr_alloc(radix, offset, gfp_mask)) in __genradix_prealloc()
scatterlist.c
149 static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask) in sg_kmalloc() argument
161 void *ptr = (void *) __get_free_page(gfp_mask); in sg_kmalloc()
162 kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask); in sg_kmalloc()
166 gfp_mask); in sg_kmalloc()
268 unsigned int nents_first_chunk, gfp_t gfp_mask, in __sg_alloc_table() argument
302 sg = alloc_fn(alloc_size, gfp_mask); in __sg_alloc_table()
355 int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask) in sg_alloc_table() argument
360 NULL, 0, gfp_mask, sg_kmalloc); in sg_alloc_table()
392 gfp_t gfp_mask) in __sg_alloc_table_from_pages() argument
413 ret = sg_alloc_table(sgt, chunks, gfp_mask); in __sg_alloc_table_from_pages()
[all …]
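
In __sg_alloc_table(), gfp_mask feeds the sg_kmalloc() chunk allocations seen above (tables larger than one chunk are chained). A sketch of the common caller pattern (fill_sg_table is a hypothetical helper):

#include <linux/scatterlist.h>

static int fill_sg_table(struct sg_table *sgt, struct page **pages, int n)
{
        struct scatterlist *sg;
        int i, ret;

        ret = sg_alloc_table(sgt, n, GFP_KERNEL);
        if (ret)
                return ret;
        for_each_sg(sgt->sgl, sg, n, i)
                sg_set_page(sg, pages[i], PAGE_SIZE, 0);
        /* Tear down later with sg_free_table(sgt). */
        return 0;
}
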
/Linux-v5.4/drivers/staging/android/ion/
ion_page_pool.c
19 return alloc_pages(pool->gfp_mask, pool->order); in ion_page_pool_alloc_pages()
100 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, in ion_page_pool_shrink() argument
109 high = !!(gfp_mask & __GFP_HIGHMEM); in ion_page_pool_shrink()
134 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order) in ion_page_pool_create() argument
144 pool->gfp_mask = gfp_mask | __GFP_COMP; in ion_page_pool_create()
ion.h
94 int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan);
283 gfp_t gfp_mask; member
288 struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order);
300 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
/Linux-v5.4/drivers/net/ethernet/mellanox/mlx4/
icm.c
99 gfp_t gfp_mask, int node) in mlx4_alloc_icm_pages() argument
103 page = alloc_pages_node(node, gfp_mask, order); in mlx4_alloc_icm_pages()
105 page = alloc_pages(gfp_mask, order); in mlx4_alloc_icm_pages()
115 int order, gfp_t gfp_mask) in mlx4_alloc_icm_coherent() argument
118 &buf->dma_addr, gfp_mask); in mlx4_alloc_icm_coherent()
133 gfp_t gfp_mask, int coherent) in mlx4_alloc_icm() argument
142 BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM)); in mlx4_alloc_icm()
145 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN), in mlx4_alloc_icm()
149 gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); in mlx4_alloc_icm()
162 gfp_mask & ~(__GFP_HIGHMEM | in mlx4_alloc_icm()
[all …]
/Linux-v5.4/fs/ntfs/
malloc.h
28 static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask) in __ntfs_malloc() argument
33 return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM); in __ntfs_malloc()
37 return __vmalloc(size, gfp_mask, PAGE_KERNEL); in __ntfs_malloc()
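
__ntfs_malloc() is an instance of the classic size-dependent fallback: kmalloc for requests up to a page (masking off __GFP_HIGHMEM, since kmalloc memory must be directly mapped), __vmalloc for anything larger. A sketch of the pattern with the three-argument __vmalloc() that v5.4 still has (flex_alloc is a hypothetical name):

#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *flex_alloc(unsigned long size, gfp_t gfp_mask)
{
        if (size <= PAGE_SIZE)
                return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
        return __vmalloc(size, gfp_mask, PAGE_KERNEL);
}

/* Free with kfree() or vfree() accordingly; kvfree() handles both. */
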
/Linux-v5.4/net/sunrpc/auth_gss/
gss_krb5_mech.c
357 context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask) in context_derive_keys_des3() argument
384 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_des3()
474 context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask) in context_derive_keys_new() argument
490 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
505 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
520 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
530 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
540 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
550 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); in context_derive_keys_new()
586 gfp_t gfp_mask) in gss_import_v2_context() argument
[all …]
/Linux-v5.4/drivers/connector/
connector.c
62 gfp_t gfp_mask) in cn_netlink_send_mult() argument
96 skb = nlmsg_new(size, gfp_mask); in cn_netlink_send_mult()
114 gfp_mask); in cn_netlink_send_mult()
116 !gfpflags_allow_blocking(gfp_mask)); in cn_netlink_send_mult()
122 gfp_t gfp_mask) in cn_netlink_send() argument
124 return cn_netlink_send_mult(msg, msg->len, portid, __group, gfp_mask); in cn_netlink_send()
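
cn_netlink_send() forwards gfp_mask to nlmsg_new(), so atomic-context callers must pass GFP_ATOMIC. A sketch of broadcasting a message, assuming process context (announce is a hypothetical helper, and using id->idx as the multicast group follows the usual connector convention):

#include <linux/connector.h>
#include <linux/slab.h>
#include <linux/string.h>

static int announce(const struct cb_id *id, const void *data, u16 len)
{
        struct cn_msg *msg;
        int ret;

        msg = kzalloc(sizeof(*msg) + len, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;
        msg->id = *id;
        msg->len = len;
        memcpy(msg->data, data, len);
        /* portid 0: multicast to everyone listening on this group. */
        ret = cn_netlink_send(msg, 0, id->idx, GFP_KERNEL);
        kfree(msg);
        return ret;
}
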
