Searched refs:__GFP_FS (Results 1 – 25 of 34) sorted by relevance
12 #define __GFP_FS 0x80u macro
24 #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
159 flags &= ~(__GFP_IO | __GFP_FS); in current_gfp_context()
161 flags &= ~__GFP_FS; in current_gfp_context()
192 #define __GFP_FS ((__force gfp_t)___GFP_FS) macro
290 #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
295 #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
17 The traditional way to avoid this deadlock problem is to clear __GFP_FS
33 scope will inherently drop __GFP_FS respectively __GFP_IO from the given
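(A hedged sketch contrasting the traditional flag clearing with the memalloc_nofs scope API follows after the listing.)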
42 lflags &= ~__GFP_FS; in kmem_flags_convert()
509 if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM)) in xfs_qm_shrink_scan()
1297 mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS))); in xfs_setup_inode()
25 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
31 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
259 if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) in vmpressure()
1147 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
1818 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) in too_many_isolated()
3195 if (!(gfp_mask & __GFP_FS)) { in throttle_direct_reclaim()
1085 if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS)) in out_of_memory()
179 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); in pm_restrict_gfp_mask()
184 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) in pm_suspended_storage()
3717 if (!(gfp_mask & __GFP_FS)) in __need_fs_reclaim()
845 if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page)) in isolate_migratepages_block()
34 {(unsigned long)__GFP_FS, "__GFP_FS"}, \
55 mapping_gfp_constraint(mapping, ~__GFP_FS) | __GFP_NOFAIL); in erofs_get_meta_page()
346 mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); in nilfs_new_inode()
516 mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); in __nilfs_read_inode()
445 if (!(sc->gfp_mask & __GFP_FS)) in ashmem_shrink_scan()
1549 if (!(gfp & __GFP_FS)) { in __try_evict_buffer()
1605 if (sc->gfp_mask & __GFP_FS) in dm_bufio_shrink_scan()
727 lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_change_fd()
957 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_set_fd()
1488 ~__GFP_FS)); in ceph_filemap_fault()
1637 ~__GFP_FS)); in ceph_fill_inline_data()
132 if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) { in __fscache_maybe_release_page()
74 if (!(sc->gfp_mask & __GFP_FS)) in super_cache_scan()
451 ~__GFP_FS)); in add_ra_bio_pages()
739 gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS; in ubifs_do_bulk_read()
303 if ((gfp_mask & __GFP_FS) == 0) in start_this_handle()
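The documentation hits above contrast the traditional approach, clearing __GFP_FS at every allocation site, with the scope API that drops __GFP_FS implicitly for a whole section of code. The sketch below is a minimal illustration, not taken from any file in this listing: memalloc_nofs_save()/memalloc_nofs_restore(), kmalloc(), GFP_KERNEL and GFP_NOFS are real kernel interfaces, while the two wrapper functions are hypothetical names used only for illustration.

/* Hedged sketch; the my_fs_alloc_* wrappers are illustrative only. */
#include <linux/gfp.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>

/* Traditional way: the caller clears __GFP_FS at each allocation site. */
static void *my_fs_alloc_legacy(size_t size)
{
	return kmalloc(size, GFP_NOFS);		/* GFP_KERNEL without __GFP_FS */
}

/*
 * Scope API: every allocation between save and restore implicitly loses
 * __GFP_FS, including allocations made by callees that pass GFP_KERNEL.
 */
static void *my_fs_alloc_scoped(size_t size)
{
	unsigned int flags = memalloc_nofs_save();
	void *p = kmalloc(size, GFP_KERNEL);	/* treated as GFP_NOFS here */

	memalloc_nofs_restore(flags);
	return p;
}

The scope form is why current_gfp_context(), seen in the hits above, masks __GFP_IO/__GFP_FS out of a passed-in gfp_mask: while PF_MEMALLOC_NOFS is set on the task, even a nested GFP_KERNEL allocation behaves as GFP_NOFS.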