Searched refs: __GFP_FS (Results 1 – 25 of 36) sorted by relevance
12 #define __GFP_FS 0x80u	macro
24 #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
187 flags &= ~(__GFP_IO | __GFP_FS); in current_gfp_context()
189 flags &= ~__GFP_FS; in current_gfp_context()
192 #define __GFP_FS ((__force gfp_t)___GFP_FS)	macro
290 #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
295 #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
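
For context, the composite GFP masks in the hits above differ only in which of __GFP_IO/__GFP_FS they keep. A condensed sketch of how the common masks relate (paraphrased from include/linux/gfp.h; exact bit values vary between kernel versions):

    #define __GFP_IO      ((__force gfp_t)___GFP_IO)
    #define __GFP_FS      ((__force gfp_t)___GFP_FS)
    #define __GFP_RECLAIM ((__force gfp_t)(___GFP_DIRECT_RECLAIM | ___GFP_KSWAPD_RECLAIM))

    #define GFP_KERNEL    (__GFP_RECLAIM | __GFP_IO | __GFP_FS)  /* may recurse into the FS and do I/O */
    #define GFP_NOFS      (__GFP_RECLAIM | __GFP_IO)             /* may do I/O, but no FS re-entry      */
    #define GFP_NOIO      (__GFP_RECLAIM)                        /* neither I/O nor FS re-entry         */
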
19 The traditional way to avoid this deadlock problem is to clear __GFP_FS
35 scope will inherently drop __GFP_FS respectively __GFP_IO from the given
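
The two documentation hits above refer to the scope API that replaces open-coded GFP_NOFS allocations. A minimal usage sketch, assuming a hypothetical filesystem path; only memalloc_nofs_save()/memalloc_nofs_restore() from <linux/sched/mm.h> are real kernel interfaces here:

    #include <linux/sched/mm.h>
    #include <linux/slab.h>

    /* Hypothetical FS path that holds a lock which reclaim could also take. */
    static int example_fs_operation(void)
    {
            unsigned int nofs_flags;
            void *buf;

            nofs_flags = memalloc_nofs_save();      /* enter NOFS scope */

            /*
             * GFP_KERNEL is passed, but current_gfp_context() (see the hit
             * above) strips __GFP_FS inside the scope, so direct reclaim
             * cannot recurse back into the filesystem and deadlock.
             */
            buf = kmalloc(4096, GFP_KERNEL);

            memalloc_nofs_restore(nofs_flags);      /* leave NOFS scope */

            if (!buf)
                    return -ENOMEM;
            kfree(buf);
            return 0;
    }
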
37 lflags &= ~__GFP_FS; in kmem_flags_convert()
505 if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM)) in xfs_qm_shrink_scan()
1305 mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS))); in xfs_setup_inode()
21 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
27 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
256 if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) in vmpressure()
1163 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
1852 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) in too_many_isolated()
3242 if (!(gfp_mask & __GFP_FS)) { in throttle_direct_reclaim()
1075 if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) in out_of_memory()
233 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); in pm_restrict_gfp_mask()
238 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) in pm_suspended_storage()
4073 if (!(gfp_mask & __GFP_FS)) in __need_fs_reclaim()
948 if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page)) in isolate_migratepages_block()
202 if ((flags & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) { in ceph_kvmalloc()
204 } else if ((flags & (__GFP_IO | __GFP_FS)) == __GFP_IO) { in ceph_kvmalloc()
34 {(unsigned long)__GFP_FS, "__GFP_FS"}, \
41 mapping_gfp_constraint(mapping, ~__GFP_FS)); in erofs_get_meta_page()
346 mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); in nilfs_new_inode()
516 mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); in __nilfs_read_inode()
442 if (!(sc->gfp_mask & __GFP_FS)) in ashmem_shrink_scan()
1525 if (!(gfp & __GFP_FS)) { in __try_evict_buffer()
1581 if (sc->gfp_mask & __GFP_FS) in dm_bufio_shrink_scan()
1480 ~__GFP_FS)); in ceph_filemap_fault()
1629 ~__GFP_FS)); in ceph_fill_inline_data()
719 lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_change_fd()
992 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_set_fd()
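
Several hits above (erofs, nilfs, ceph, loop) follow the same pattern: constrain the GFP mask used for an address_space so page-cache allocations cannot re-enter the filesystem. A sketch of both variants; mapping_gfp_constraint(), mapping_gfp_mask(), mapping_set_gfp_mask() and read_cache_page_gfp() are real pagemap interfaces, while the example_* wrappers are illustrative only:

    #include <linux/pagemap.h>

    /* One-off: drop __GFP_FS for a single page-cache read (illustrative wrapper). */
    static struct page *example_get_meta_page(struct address_space *mapping, pgoff_t index)
    {
            return read_cache_page_gfp(mapping, index,
                                       mapping_gfp_constraint(mapping, ~__GFP_FS));
    }

    /* Persistent: clear __GFP_IO/__GFP_FS for every future allocation on the mapping. */
    static void example_restrict_mapping(struct address_space *mapping)
    {
            mapping_set_gfp_mask(mapping,
                                 mapping_gfp_mask(mapping) & ~(__GFP_IO | __GFP_FS));
    }
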
128 if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) { in __fscache_maybe_release_page()
470 ~__GFP_FS)); in add_ra_bio_pages()
727 gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS; in ubifs_do_bulk_read()
77 if (!(sc->gfp_mask & __GFP_FS)) in super_cache_scan()
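
Finally, the shrinker hits (xfs_qm_shrink_scan, ashmem_shrink_scan, dm_bufio_shrink_scan, super_cache_scan) all guard on the caller's GFP mask before touching filesystem state. A minimal illustrative scan callback, assuming a hypothetical example_evict() helper; struct shrink_control and SHRINK_STOP come from <linux/shrinker.h>:

    #include <linux/shrinker.h>

    /* Hypothetical cache eviction helper; stands in for the driver's real logic. */
    static unsigned long example_evict(unsigned long nr_to_scan)
    {
            return 0;       /* nothing freed in this sketch */
    }

    static unsigned long example_shrink_scan(struct shrinker *shrink,
                                             struct shrink_control *sc)
    {
            /*
             * If the allocation that triggered reclaim did not allow FS
             * recursion (no __GFP_FS), bail out rather than risk taking
             * filesystem locks from within reclaim and deadlocking.
             */
            if (!(sc->gfp_mask & __GFP_FS))
                    return SHRINK_STOP;

            return example_evict(sc->nr_to_scan);
    }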