Searched refs:gfp_t (Results 1 – 25 of 1307) sorted by relevance

/Linux-v6.1/include/linux/
gfp_types.h
16 typedef unsigned int __bitwise gfp_t;
72 #define __GFP_DMA ((__force gfp_t)___GFP_DMA)
73 #define __GFP_HIGHMEM ((__force gfp_t)___GFP_HIGHMEM)
74 #define __GFP_DMA32 ((__force gfp_t)___GFP_DMA32)
75 #define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
105 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
106 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
107 #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL)
108 #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
109 #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
[all …]
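A minimal sketch of how the flag bits declared in gfp_types.h above are combined into a gfp_t mask; example_alloc() and the module context are illustrative assumptions, not part of the listing:

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/types.h>

/*
 * Illustrative only: gfp_t masks are built by OR-ing behaviour bits such as
 * __GFP_ZERO onto a base mask.  GFP_KERNEL may sleep; GFP_ATOMIC is for
 * contexts where sleeping is not allowed (e.g. interrupt context).
 */
static void *example_alloc(size_t len, bool may_sleep)
{
	gfp_t gfp = may_sleep ? GFP_KERNEL : GFP_ATOMIC;

	return kmalloc(len, gfp | __GFP_ZERO);	/* memory is zeroed on return */
}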
gfp.h
16 static inline int gfp_migratetype(const gfp_t gfp_flags) in gfp_migratetype()
34 static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags) in gfpflags_allow_blocking()
129 static inline enum zone_type gfp_zone(gfp_t flags) in gfp_zone()
147 static inline int gfp_zonelist(gfp_t flags) in gfp_zonelist()
165 static inline struct zonelist *node_zonelist(int nid, gfp_t flags) in node_zonelist()
177 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
179 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
182 unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
187 unsigned long alloc_pages_bulk_array_mempolicy(gfp_t gfp,
193 alloc_pages_bulk_list(gfp_t gfp, unsigned long nr_pages, struct list_head *list) in alloc_pages_bulk_list()
[all …]
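A minimal sketch of the page-allocator entry points from gfp.h above in use; alloc_pages() is the usual wrapper that ends up in __alloc_pages(), and example_page_alloc() is a hypothetical caller:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/printk.h>

static int example_page_alloc(void)
{
	unsigned int order = 2;	/* 2^2 = 4 contiguous pages */
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!page)
		return -ENOMEM;

	pr_info("pages mapped at %p\n", page_address(page));
	__free_pages(page, order);
	return 0;
}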
slab.h
195 void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
361 static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags) in kmalloc_type()
443 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
444 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
446 gfp_t gfpflags) __assume_slab_alignment __malloc;
457 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
468 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
470 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
473 void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
476 void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
[all …]
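A minimal sketch of the slab interfaces from slab.h above, assuming module context; example_obj, example_cache and example_slab_init() are illustrative names:

#include <linux/errno.h>
#include <linux/slab.h>

struct example_obj {
	int id;
	char name[16];
};

static struct kmem_cache *example_cache;

static int example_slab_init(void)
{
	struct example_obj *obj;

	example_cache = kmem_cache_create("example_obj", sizeof(struct example_obj),
					  0, SLAB_HWCACHE_ALIGN, NULL);
	if (!example_cache)
		return -ENOMEM;

	/* The gfp_t decides blocking behaviour; GFP_KERNEL may sleep and reclaim. */
	obj = kmem_cache_alloc(example_cache, GFP_KERNEL);
	if (!obj) {
		kmem_cache_destroy(example_cache);
		return -ENOMEM;
	}

	kmem_cache_free(example_cache, obj);
	return 0;
}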
kmemleak.h
19 gfp_t gfp) __ref;
21 gfp_t gfp) __ref;
23 gfp_t gfp) __ref;
30 extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
33 gfp_t gfp) __ref;
39 gfp_t gfp) in kmemleak_alloc_recursive()
62 gfp_t gfp) in kmemleak_alloc()
67 gfp_t gfp) in kmemleak_alloc_recursive()
71 gfp_t gfp) in kmemleak_alloc_percpu()
75 gfp_t gfp) in kmemleak_vmalloc()
[all …]
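A minimal sketch of the kmemleak hooks above: code that manages memory outside the slab allocator can register regions with kmemleak_alloc() so the leak detector tracks them. Registering a page-allocator region and the example_alloc_tracked() name are illustrative assumptions:

#include <linux/gfp.h>
#include <linux/kmemleak.h>
#include <linux/mm.h>

static void *example_alloc_tracked(unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL, order);
	void *ptr;

	if (!page)
		return NULL;

	ptr = page_address(page);
	/* min_count = 1: report the region if no references to it remain. */
	kmemleak_alloc(ptr, PAGE_SIZE << order, 1, GFP_KERNEL);
	return ptr;
}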
xarray.h
267 #define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ)
268 #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH)
269 #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U)
270 #define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U)
271 #define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U)
272 #define XA_FLAGS_ACCOUNT ((__force gfp_t)32U)
273 #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \
299 gfp_t xa_flags;
352 void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t);
355 void *entry, gfp_t);
[all …]
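A minimal sketch of xa_store() from xarray.h above; the gfp_t argument covers the internal node allocations needed when the array grows (example_xa, example_value and example_xa_use() are illustrative names):

#include <linux/printk.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(example_xa);
static int example_value = 42;

static int example_xa_use(void)
{
	void *old = xa_store(&example_xa, 5, &example_value, GFP_KERNEL);

	if (xa_is_err(old))
		return xa_err(old);

	pr_info("index 5 -> %p\n", xa_load(&example_xa, 5));
	xa_erase(&example_xa, 5);
	return 0;
}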
kasan.h
196 void *object, gfp_t flags, bool init);
198 struct kmem_cache *s, void *object, gfp_t flags, bool init) in kasan_slab_alloc()
206 size_t size, gfp_t flags);
208 const void *object, size_t size, gfp_t flags) in kasan_kmalloc()
216 size_t size, gfp_t flags);
218 size_t size, gfp_t flags) in kasan_kmalloc_large()
226 size_t new_size, gfp_t flags);
228 size_t new_size, gfp_t flags) in kasan_krealloc()
272 gfp_t flags, bool init) in kasan_slab_alloc()
277 size_t size, gfp_t flags) in kasan_kmalloc()
[all …]
mempool.h
13 typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data);
36 gfp_t gfp_mask, int node_id);
44 gfp_t gfp_mask, int nid);
48 extern void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask) __malloc;
56 void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data);
77 void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data);
96 void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data);
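A minimal sketch of a slab-backed mempool built on the mempool_alloc_slab()/mempool_free_slab() helpers above; example_io_cache, example_io_pool, the 256-byte object size and the reserve of 4 elements are illustrative:

#include <linux/errno.h>
#include <linux/mempool.h>
#include <linux/slab.h>

static struct kmem_cache *example_io_cache;
static mempool_t *example_io_pool;

static int example_mempool_init(void)
{
	void *elem;

	example_io_cache = kmem_cache_create("example_io", 256, 0, 0, NULL);
	if (!example_io_cache)
		return -ENOMEM;

	/* Keep at least 4 elements in reserve to guarantee forward progress. */
	example_io_pool = mempool_create(4, mempool_alloc_slab, mempool_free_slab,
					 example_io_cache);
	if (!example_io_pool) {
		kmem_cache_destroy(example_io_cache);
		return -ENOMEM;
	}

	/* With a blocking gfp mask, mempool_alloc() waits for a free element
	 * rather than failing. */
	elem = mempool_alloc(example_io_pool, GFP_KERNEL);
	mempool_free(elem, example_io_pool);
	return 0;
}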
devcoredump.h
56 gfp_t gfp);
59 void *data, size_t datalen, gfp_t gfp,
65 size_t datalen, gfp_t gfp);
68 size_t datalen, gfp_t gfp) in dev_coredumpv()
75 void *data, size_t datalen, gfp_t gfp, in dev_coredumpm()
84 size_t datalen, gfp_t gfp) in dev_coredumpsg()
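A minimal sketch of dev_coredumpv() above, which takes ownership of a vmalloc()ed buffer and frees it once the dump is read or times out; the device pointer and firmware state are assumed to come from the calling driver, and example_emit_coredump() is an illustrative name:

#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void example_emit_coredump(struct device *dev, const void *fw_state,
				  size_t len)
{
	void *buf = vmalloc(len);

	if (!buf)
		return;

	memcpy(buf, fw_state, len);
	dev_coredumpv(dev, buf, len, GFP_KERNEL);	/* buf now owned by devcoredump */
}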
idr.h
32 #define IDR_RT_MARKER (ROOT_IS_IDR | (__force gfp_t) \
112 void idr_preload(gfp_t gfp_mask);
114 int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
116 unsigned long max, gfp_t);
117 int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t);
255 int ida_alloc_range(struct ida *, unsigned int min, unsigned int max, gfp_t);
271 static inline int ida_alloc(struct ida *ida, gfp_t gfp) in ida_alloc()
289 static inline int ida_alloc_min(struct ida *ida, unsigned int min, gfp_t gfp) in ida_alloc_min()
307 static inline int ida_alloc_max(struct ida *ida, unsigned int max, gfp_t gfp) in ida_alloc_max()
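A minimal sketch of ida_alloc()/ida_free() from idr.h above; the gfp_t argument only covers the IDA's internal bitmap allocations (example_ida and example_get_id() are illustrative names):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_get_id(void)
{
	int id = ida_alloc(&example_ida, GFP_KERNEL);

	if (id < 0)
		return id;	/* -ENOMEM or -ENOSPC */

	/* ... use id ... */
	ida_free(&example_ida, id);
	return 0;
}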
cpuset.h
83 extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);
85 static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed()
92 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed()
97 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed()
226 static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask) in cpuset_node_allowed()
231 static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in __cpuset_zone_allowed()
236 static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) in cpuset_zone_allowed()
kmsan.h
68 void kmsan_alloc_page(struct page *page, unsigned int order, gfp_t flags);
99 void kmsan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
118 void kmsan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
253 gfp_t flags) in kmsan_alloc_page()
267 gfp_t flags) in kmsan_slab_alloc()
276 gfp_t flags) in kmsan_kmalloc_large()
vmpressure.h
33 extern void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree,
35 extern void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, int prio);
47 static inline void vmpressure(gfp_t gfp, struct mem_cgroup *memcg, bool tree, in vmpressure()
49 static inline void vmpressure_prio(gfp_t gfp, struct mem_cgroup *memcg, in vmpressure_prio()
fault-inject.h
72 bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order);
74 int should_failslab(struct kmem_cache *s, gfp_t gfpflags);
76 extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags);
78 static inline bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags) in __should_failslab()
posix_acl.h
65 extern struct posix_acl *posix_acl_alloc(int, gfp_t);
66 extern struct posix_acl *posix_acl_from_mode(umode_t, gfp_t);
68 extern int __posix_acl_create(struct posix_acl **, gfp_t, umode_t *);
69 extern int __posix_acl_chmod(struct posix_acl **, gfp_t, umode_t);
76 struct posix_acl *posix_acl_clone(const struct posix_acl *acl, gfp_t flags);
/Linux-v6.1/include/net/sctp/
ulpevent.h
81 gfp_t gfp);
90 gfp_t gfp);
96 gfp_t gfp);
103 gfp_t gfp);
108 gfp_t gfp);
113 __u32 flags, gfp_t gfp);
116 const struct sctp_association *asoc, gfp_t gfp);
120 gfp_t gfp);
124 __u32 indication, gfp_t gfp);
127 const struct sctp_association *asoc, gfp_t gfp);
[all …]
stream_interleave.h
25 int len, __u8 flags, gfp_t gfp);
29 struct sctp_chunk *chunk, gfp_t gfp);
33 struct sctp_chunk *chunk, gfp_t gfp);
34 void (*start_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
35 void (*abort_pd)(struct sctp_ulpq *ulpq, gfp_t gfp);
auth.h
71 struct sctp_shared_key *sctp_auth_shkey_create(__u16 key_id, gfp_t gfp);
73 int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp);
79 gfp_t gfp);
80 int sctp_auth_init_hmacs(struct sctp_endpoint *ep, gfp_t gfp);
94 struct sctp_shared_key *ep_key, gfp_t gfp);
110 int sctp_auth_init(struct sctp_endpoint *ep, gfp_t gfp);
ulpqueue.h
44 int sctp_ulpq_tail_data(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
50 void sctp_ulpq_renege(struct sctp_ulpq *, struct sctp_chunk *, gfp_t);
53 void sctp_ulpq_partial_delivery(struct sctp_ulpq *, gfp_t);
56 void sctp_ulpq_abort_pd(struct sctp_ulpq *, gfp_t);
/Linux-v6.1/net/wireless/
nl80211.h
60 const u8 *buf, size_t len, gfp_t gfp);
67 bool reconnect, gfp_t gfp);
71 bool reconnect, gfp_t gfp);
74 const u8 *addr, gfp_t gfp);
77 const u8 *addr, gfp_t gfp);
81 gfp_t gfp);
84 struct cfg80211_roam_info *info, gfp_t gfp);
95 int key_id, const u8 *tsc, gfp_t gfp);
104 gfp_t gfp);
108 struct cfg80211_rx_info *info, gfp_t gfp);
[all …]
/Linux-v6.1/mm/
swap.h
36 gfp_t gfp, void **shadowp);
46 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
51 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
55 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
57 struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
85 gfp_t gfp_mask, struct vm_fault *vmf) in swap_cluster_readahead()
90 static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask, in swapin_readahead()
124 gfp_t gfp_mask, void **shadowp) in add_to_swap_cache()
/Linux-v6.1/tools/virtio/linux/
kernel.h
54 static inline void *kmalloc(size_t s, gfp_t gfp) in kmalloc()
60 static inline void *kmalloc_array(unsigned n, size_t s, gfp_t gfp) in kmalloc_array()
65 static inline void *kzalloc(size_t s, gfp_t gfp) in kzalloc()
73 static inline void *alloc_pages_exact(size_t s, gfp_t gfp) in alloc_pages_exact()
90 static inline void *krealloc(void *p, size_t s, gfp_t gfp) in krealloc()
96 static inline unsigned long __get_free_page(gfp_t gfp) in __get_free_page()
120 static inline void *krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t gfp) in krealloc_array()
/Linux-v6.1/security/apparmor/include/
label.h
60 gfp_t gfp);
280 bool aa_label_init(struct aa_label *label, int size, gfp_t gfp);
281 struct aa_label *aa_label_alloc(int size, struct aa_proxy *proxy, gfp_t gfp);
301 gfp_t gfp);
304 bool aa_update_label_name(struct aa_ns *ns, struct aa_label *label, gfp_t gfp);
314 int flags, gfp_t gfp);
316 struct aa_label *label, int flags, gfp_t gfp);
318 struct aa_label *label, int flags, gfp_t gfp);
320 struct aa_label *label, int flags, gfp_t gfp);
322 gfp_t gfp);
[all …]
/Linux-v6.1/include/drm/
drm_managed.h
48 void *drmm_kmalloc(struct drm_device *dev, size_t size, gfp_t gfp) __malloc;
60 static inline void *drmm_kzalloc(struct drm_device *dev, size_t size, gfp_t gfp) in drmm_kzalloc()
77 size_t n, size_t size, gfp_t flags) in drmm_kmalloc_array()
99 size_t n, size_t size, gfp_t flags) in drmm_kcalloc()
104 char *drmm_kstrdup(struct drm_device *dev, const char *s, gfp_t gfp);
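A minimal sketch of a drm-managed allocation with drmm_kzalloc() above; the memory is released automatically when the drm_device goes away, so no explicit kfree() is needed (example_state and example_bind() are illustrative names):

#include <drm/drm_device.h>
#include <drm/drm_managed.h>
#include <linux/errno.h>

struct example_state {
	int counter;
};

static int example_bind(struct drm_device *drm)
{
	struct example_state *state = drmm_kzalloc(drm, sizeof(*state), GFP_KERNEL);

	if (!state)
		return -ENOMEM;

	state->counter = 0;
	return 0;
}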
/Linux-v6.1/arch/powerpc/include/asm/
pgalloc.h
8 static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp) in pgtable_gfp_flags()
15 static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp) in pgtable_gfp_flags()
/Linux-v6.1/include/linux/sched/
mm.h
203 static inline gfp_t current_gfp_context(gfp_t flags) in current_gfp_context()
226 extern void fs_reclaim_acquire(gfp_t gfp_mask);
227 extern void fs_reclaim_release(gfp_t gfp_mask);
231 static inline void fs_reclaim_acquire(gfp_t gfp_mask) { } in fs_reclaim_acquire()
232 static inline void fs_reclaim_release(gfp_t gfp_mask) { } in fs_reclaim_release()
242 static inline void memalloc_retry_wait(gfp_t gfp_flags) in memalloc_retry_wait()
269 static inline void might_alloc(gfp_t gfp_mask) in might_alloc()
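A minimal sketch of how current_gfp_context() above interacts with the memalloc scope helpers declared in the same header; example_nofs_alloc() is an illustrative name:

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *example_nofs_alloc(size_t len)
{
	unsigned int flags = memalloc_nofs_save();
	/* Within the scope, current_gfp_context() strips __GFP_FS, so this
	 * GFP_KERNEL allocation will not recurse into filesystem reclaim. */
	void *p = kmalloc(len, GFP_KERNEL);

	memalloc_nofs_restore(flags);
	return p;
}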
