Searched refs:__GFP_FS (Results 1 – 25 of 37) sorted by relevance
12 #define __GFP_FS 0x80u macro
24 #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
164 flags &= ~(__GFP_IO | __GFP_FS); in current_gfp_context()
166 flags &= ~__GFP_FS; in current_gfp_context()
199 #define __GFP_FS ((__force gfp_t)___GFP_FS) macro
299 #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
304 #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
38 lflags &= ~__GFP_FS; in kmem_flags_convert()
509 if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM)) in xfs_qm_shrink_scan()
1360 mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS))); in xfs_setup_inode()
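The xfs_setup_inode() hit above and the mapping_gfp_constraint() hits further down follow one pattern: the filesystem strips __GFP_FS from the gfp mask used for an inode's page cache, so page-cache allocations for that mapping can never recurse back into the filesystem. A minimal sketch of that pattern using the stock pagemap helpers; the names example_setup_inode() and example_read_meta_page() are hypothetical:

#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>

/* Strip __GFP_FS from the mask used for this inode's page cache. */
static void example_setup_inode(struct inode *inode)
{
	gfp_t gfp_mask = mapping_gfp_mask(inode->i_mapping);

	mapping_set_gfp_mask(inode->i_mapping, gfp_mask & ~__GFP_FS);
}

/* Per-call variant: constrain the mapping's mask for one page read. */
static struct page *example_read_meta_page(struct address_space *mapping,
					   pgoff_t index)
{
	return read_cache_page_gfp(mapping, index,
				   mapping_gfp_constraint(mapping, ~__GFP_FS));
}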
19 The traditional way to avoid this deadlock problem is to clear __GFP_FS
35 scope will inherently drop __GFP_FS respectively __GFP_IO from the given
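Those documentation lines describe the scope API, memalloc_nofs_save()/memalloc_nofs_restore(): instead of passing GFP_NOFS at every call site, code marks the critical section and current_gfp_context() (the hits above) strips __GFP_FS from any allocation made inside it. A minimal sketch, with the hypothetical caller example_fs_critical_section():

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *example_fs_critical_section(size_t size)
{
	unsigned int nofs_flags;
	void *p;

	nofs_flags = memalloc_nofs_save();
	/* GFP_KERNEL behaves as GFP_NOFS while the scope is active. */
	p = kmalloc(size, GFP_KERNEL);
	memalloc_nofs_restore(nofs_flags);

	return p;
}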
21 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
27 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
256 if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) in vmpressure()
1115 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
1818 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) in too_many_isolated()
3214 if (!(gfp_mask & __GFP_FS)) { in throttle_direct_reclaim()
1079 if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) in out_of_memory()
263 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); in pm_restrict_gfp_mask()
268 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) in pm_suspended_storage()
4280 if (!(gfp_mask & __GFP_FS)) in __need_fs_reclaim()
205 if ((flags & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) { in ceph_kvmalloc()
207 } else if ((flags & (__GFP_IO | __GFP_FS)) == __GFP_IO) { in ceph_kvmalloc()
34 {(unsigned long)__GFP_FS, "__GFP_FS"}, \
41 mapping_gfp_constraint(mapping, ~__GFP_FS)); in erofs_get_meta_page()
338 mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); in nilfs_new_inode()
509 mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); in __nilfs_read_inode()
482 if (!(sc->gfp_mask & __GFP_FS)) in ashmem_shrink_scan()
1524 ~__GFP_FS)); in ceph_filemap_fault()
1688 ~__GFP_FS)); in ceph_fill_inline_data()
746 lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_change_fd()
1147 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_configure()
128 if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) { in __fscache_maybe_release_page()
775 gfp |= __GFP_RECLAIM | __GFP_FS; in alloc_private_pages()
727 gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS; in ubifs_do_bulk_read()
538 ~__GFP_FS)); in add_ra_bio_pages()
77 if (!(sc->gfp_mask & __GFP_FS)) in super_cache_scan()
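super_cache_scan(), ashmem_shrink_scan() and xfs_qm_shrink_scan() in the hits above all gate their work on __GFP_FS: if the allocation that triggered reclaim cannot re-enter the filesystem, the shrinker backs off and reports SHRINK_STOP. A minimal sketch of such a scan_objects callback; example_shrink_scan() is hypothetical and frees nothing:

#include <linux/shrinker.h>

static unsigned long example_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	/* Caller's context does not allow FS recursion; try again later. */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	/* ... free up to sc->nr_to_scan objects and return the count ... */
	return 0;
}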
357 if ((gfp_mask & __GFP_FS) == 0) in start_this_handle()