Home
last modified time | relevance | path

Searched refs: __GFP_FS (results 1–25 of 36), sorted by relevance

12

/Linux-v5.4/tools/testing/radix-tree/linux/
gfp.h:12 #define __GFP_FS 0x80u (macro)
gfp.h:24 #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
/Linux-v5.4/include/linux/sched/
mm.h:187 flags &= ~(__GFP_IO | __GFP_FS); in current_gfp_context()
mm.h:189 flags &= ~__GFP_FS; in current_gfp_context()
/Linux-v5.4/include/linux/
gfp.h:192 #define __GFP_FS ((__force gfp_t)___GFP_FS) (macro)
gfp.h:290 #define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
gfp.h:295 #define GFP_USER (__GFP_RECLAIM | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
/Linux-v5.4/Documentation/core-api/
gfp_mask-from-fs-io.rst:19 The traditional way to avoid this deadlock problem is to clear __GFP_FS
gfp_mask-from-fs-io.rst:35 scope will inherently drop __GFP_FS respectively __GFP_IO from the given
/Linux-v5.4/fs/xfs/
kmem.h:37 lflags &= ~__GFP_FS; in kmem_flags_convert()
xfs_qm.c:505 if ((sc->gfp_mask & (__GFP_FS|__GFP_DIRECT_RECLAIM)) != (__GFP_FS|__GFP_DIRECT_RECLAIM)) in xfs_qm_shrink_scan()
xfs_iops.c:1305 mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS))); in xfs_setup_inode()
/Linux-v5.4/mm/
internal.h:21 #define GFP_RECLAIM_MASK (__GFP_RECLAIM|__GFP_HIGH|__GFP_IO|__GFP_FS|\
internal.h:27 #define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_RECLAIM|__GFP_IO|__GFP_FS))
vmpressure.c:256 if (!(gfp & (__GFP_HIGHMEM | __GFP_MOVABLE | __GFP_IO | __GFP_FS))) in vmpressure()
vmscan.c:1163 may_enter_fs = (sc->gfp_mask & __GFP_FS) || in shrink_page_list()
vmscan.c:1852 if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) in too_many_isolated()
vmscan.c:3242 if (!(gfp_mask & __GFP_FS)) { in throttle_direct_reclaim()
oom_kill.c:1075 if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) in out_of_memory()
page_alloc.c:233 gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); in pm_restrict_gfp_mask()
page_alloc.c:238 if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) in pm_suspended_storage()
page_alloc.c:4073 if (!(gfp_mask & __GFP_FS)) in __need_fs_reclaim()
compaction.c:948 if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page)) in isolate_migratepages_block()
/Linux-v5.4/net/ceph/
ceph_common.c:202 if ((flags & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) { in ceph_kvmalloc()
ceph_common.c:204 } else if ((flags & (__GFP_IO | __GFP_FS)) == __GFP_IO) { in ceph_kvmalloc()
/Linux-v5.4/include/trace/events/
mmflags.h:34 {(unsigned long)__GFP_FS, "__GFP_FS"}, \
/Linux-v5.4/fs/erofs/
data.c:41 mapping_gfp_constraint(mapping, ~__GFP_FS)); in erofs_get_meta_page()
/Linux-v5.4/fs/nilfs2/
inode.c:346 mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); in nilfs_new_inode()
inode.c:516 mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS)); in __nilfs_read_inode()
/Linux-v5.4/drivers/staging/android/
ashmem.c:442 if (!(sc->gfp_mask & __GFP_FS)) in ashmem_shrink_scan()
/Linux-v5.4/drivers/md/
dm-bufio.c:1525 if (!(gfp & __GFP_FS)) { in __try_evict_buffer()
dm-bufio.c:1581 if (sc->gfp_mask & __GFP_FS) in dm_bufio_shrink_scan()
/Linux-v5.4/fs/ceph/
addr.c:1480 ~__GFP_FS)); in ceph_filemap_fault()
addr.c:1629 ~__GFP_FS)); in ceph_fill_inline_data()
/Linux-v5.4/drivers/block/
loop.c:719 lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_change_fd()
loop.c:992 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_set_fd()
/Linux-v5.4/fs/fscache/
page.c:128 if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) { in __fscache_maybe_release_page()
/Linux-v5.4/fs/btrfs/
compression.c:470 ~__GFP_FS)); in add_ra_bio_pages()
/Linux-v5.4/fs/ubifs/
file.c:727 gfp_t ra_gfp_mask = readahead_gfp_mask(mapping) & ~__GFP_FS; in ubifs_do_bulk_read()
/Linux-v5.4/fs/
super.c:77 if (!(sc->gfp_mask & __GFP_FS)) in super_cache_scan()

12