Searched refs: __GFP_IO (results 1 – 23 of 23), sorted by relevance
11    #define __GFP_IO 0x40u    (macro)
24    #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
214   #define __GFP_IO ((__force gfp_t)___GFP_IO)    (macro)
324   #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
328   #define GFP_NOFS (__GFP_RECLAIM | __GFP_IO)
329   #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
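These definitions appear to be the core gfp.h masks: GFP_KERNEL permits both I/O and filesystem recursion during direct reclaim, GFP_NOFS drops only __GFP_FS, and GFP_NOIO (absent from these results because its definition never spells out __GFP_IO) drops both. A minimal sketch of choosing between them; alloc_fs_metadata() is a hypothetical helper, not kernel API:

	#include <linux/gfp.h>
	#include <linux/slab.h>

	/* Hypothetical helper: pick a gfp mask for a metadata allocation.
	 * GFP_NOFS is GFP_KERNEL with __GFP_FS cleared, so direct reclaim
	 * triggered by this allocation cannot re-enter filesystem code and
	 * deadlock on locks the caller already holds. */
	static void *alloc_fs_metadata(size_t size, bool holding_fs_locks)
	{
		gfp_t gfp = holding_fs_locks ? GFP_NOFS : GFP_KERNEL;

		return kmalloc(size, gfp);
	}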
20    respectively __GFP_IO (note the latter implies clearing the first as well) in
35    scope will inherently drop __GFP_FS respectively __GFP_IO from the given
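These two lines read like Documentation/core-api/gfp_mask-from-fs-io.rst, which documents the scoped alternative to passing GFP_NOFS/GFP_NOIO at every call site: enter a scope once and every nested allocation is narrowed automatically. A sketch of the NOIO variant using the memalloc_noio_save()/memalloc_noio_restore() pair:

	#include <linux/sched/mm.h>
	#include <linux/slab.h>

	/* Inside the scope, every allocation implicitly drops __GFP_IO
	 * (and therefore __GFP_FS as well), even when the call site asks
	 * for GFP_KERNEL. */
	static void *alloc_in_io_path(size_t size)
	{
		unsigned int noio_flags = memalloc_noio_save();
		void *p = kmalloc(size, GFP_KERNEL); /* behaves as GFP_NOIO here */

		memalloc_noio_restore(noio_flags);
		return p;
	}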
67 #define NILFS_MDT_GFP (__GFP_RECLAIM | __GFP_IO | __GFP_HIGHMEM)
205   if ((flags & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) {    in ceph_kvmalloc()
207   } else if ((flags & (__GFP_IO | __GFP_FS)) == __GFP_IO) {    in ceph_kvmalloc()
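The ceph_kvmalloc() hit branches on which of __GFP_IO/__GFP_FS the caller allows, because kvmalloc() itself only supports GFP_KERNEL-compatible flags; the restriction has to be expressed as a scope around the call instead. A simplified sketch of that pattern (kvmalloc_with_context() is a hypothetical name):

	#include <linux/mm.h>
	#include <linux/sched/mm.h>

	static void *kvmalloc_with_context(size_t size, gfp_t flags)
	{
		void *p;

		if ((flags & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) {
			p = kvmalloc(size, flags);	/* no restriction needed */
		} else if ((flags & (__GFP_IO | __GFP_FS)) == __GFP_IO) {
			unsigned int nofs = memalloc_nofs_save();
			p = kvmalloc(size, GFP_KERNEL);
			memalloc_nofs_restore(nofs);
		} else {
			unsigned int noio = memalloc_noio_save();
			p = kvmalloc(size, GFP_KERNEL);
			memalloc_noio_restore(noio);
		}
		return p;
	}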
21    #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
27    #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
260 if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) in vmpressure()
389 gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM|__GFP_IO); in mempool_alloc()
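The mempool hit strips __GFP_DIRECT_RECLAIM and __GFP_IO for the first allocation attempt, so the underlying allocator fails fast and mempool_alloc() can fall back to its reserved elements instead of blocking. A sketch of that first pass (first_try_alloc() is a hypothetical wrapper):

	#include <linux/mempool.h>

	static void *first_try_alloc(mempool_t *pool, gfp_t gfp_mask)
	{
		/* No direct reclaim, no I/O: either the allocator has the
		 * memory on hand or we fall through to the pool's reserve. */
		gfp_t gfp_temp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);

		return pool->alloc(gfp_temp, pool->pool_data);
	}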
1411  (PageSwapCache(page) && (sc->gfp_mask & __GFP_IO));    in shrink_page_list()
1548  if (!(sc->gfp_mask & __GFP_IO))    in shrink_page_list()
2142  if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))    in too_many_isolated()
2554 int may_perform_io = gfp_mask & __GFP_IO; in try_to_compact_pages()
255   gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);    in pm_restrict_gfp_mask()
260   if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))    in pm_suspended_storage()
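The two pm_* hits restrict the global gfp_allowed_mask around suspend: once storage devices are frozen, no allocation may depend on I/O to make progress. pm_suspended_storage() lets reclaim code query that state, roughly:

	#include <linux/gfp.h>

	/* Sketch: skip swap-out while storage is suspended.  After
	 * pm_restrict_gfp_mask() has cleared __GFP_IO|__GFP_FS from
	 * gfp_allowed_mask, pm_suspended_storage() returns true. */
	static bool may_write_to_swap(void)
	{
		return !pm_suspended_storage();
	}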
3787 if (!(gfp_mask & __GFP_IO)) in __cgroup_throttle_swaprate()
1532 gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM; in limit_gfp_mask()
2843 return mapping_gfp_mask(vm_file->f_mapping) | __GFP_FS | __GFP_IO; in __get_fault_gfp_mask()
166 flags &= ~(__GFP_IO | __GFP_FS); in current_gfp_context()
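The current_gfp_context() hit is where the scope machinery from the documentation above actually bites: before reclaim decisions are made, the caller's mask is narrowed according to the task's PF_MEMALLOC_* flags. A simplified sketch of that logic:

	#include <linux/sched.h>
	#include <linux/gfp.h>

	/* Simplified: NOIO clears both __GFP_IO and __GFP_FS (no I/O
	 * implies no filesystem recursion); NOFS clears only __GFP_FS. */
	static gfp_t gfp_for_current_scope(gfp_t flags)
	{
		if (current->flags & PF_MEMALLOC_NOIO)
			flags &= ~(__GFP_IO | __GFP_FS);
		else if (current->flags & PF_MEMALLOC_NOFS)
			flags &= ~__GFP_FS;
		return flags;
	}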
10    well as for non __GFP_IO allocations.
19    __GFP_IO allocation requests are made to prevent file system deadlocks.
33 {(unsigned long)__GFP_IO, "__GFP_IO"}, \
783   lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));    in loop_change_fd()
1274  mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));    in loop_configure()
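The loop-device hits clear __GFP_IO|__GFP_FS from the backing file's mapping: writing a loop device's dirty pages back goes through the very filesystem those page-cache allocations would recurse into. A sketch of the mapping-mask dance (restrict_backing_mapping() is a hypothetical name):

	#include <linux/pagemap.h>

	static gfp_t restrict_backing_mapping(struct address_space *mapping)
	{
		gfp_t old = mapping_gfp_mask(mapping);

		/* Page-cache allocations for this mapping may no longer
		 * trigger I/O or fs recursion during reclaim. */
		mapping_set_gfp_mask(mapping, old & ~(__GFP_IO | __GFP_FS));
		return old;	/* restored when the backing file is detached */
	}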
173 return (gfp & ~(__GFP_DIRECT_RECLAIM | __GFP_IO)) | in bvec_alloc_gfp()
1634 gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP; in qib_setup_eagerbufs()
1840 gfp_flags = __GFP_RECLAIM | __GFP_IO | __GFP_COMP; in hfi1_setup_eagerbufs()
1786 cpage = alloc_page(__GFP_NOWARN | __GFP_IO); in f2fs_cache_compressed_page()
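The f2fs hit is an opportunistic allocation: without __GFP_DIRECT_RECLAIM the allocator will not sleep or reclaim, so under memory pressure it fails fast and the compressed page simply isn't cached; __GFP_NOWARN suppresses the allocation-failure splat. A sketch of that best-effort pattern (cache_page_best_effort() is hypothetical):

	#include <linux/gfp.h>

	static struct page *cache_page_best_effort(void)
	{
		struct page *page = alloc_page(__GFP_NOWARN | __GFP_IO);

		if (!page)
			return NULL;	/* best-effort: skip caching under pressure */
		/* ... caller fills the page and adds it to the cache ... */
		return page;
	}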
662 if (sc->gfp_mask & __GFP_IO) in bch_mca_scan()
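The bcache hit shows the common shrinker idiom: only do work that may block on I/O when the reclaim context advertises __GFP_IO. A generic sketch (my_shrink_scan() is hypothetical, not bcache code):

	#include <linux/shrinker.h>

	static unsigned long my_shrink_scan(struct shrinker *shrink,
					    struct shrink_control *sc)
	{
		/* Bail out if the allocation that triggered reclaim
		 * cannot tolerate I/O; the VM will retry elsewhere. */
		if (!(sc->gfp_mask & __GFP_IO))
			return SHRINK_STOP;

		/* ... free objects, possibly issuing writeback ... */
		return 0;
	}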