Lines Matching refs:gfp_mask
3888 static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3894 if (gfp_mask & __GFP_NOFAIL) in __should_fail_alloc_page()
3896 if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) in __should_fail_alloc_page()
3899 (gfp_mask & __GFP_DIRECT_RECLAIM)) in __should_fail_alloc_page()
3903 if (gfp_mask & __GFP_NOWARN) in __should_fail_alloc_page()
3934 static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in __should_fail_alloc_page() argument
3941 noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
3943 return __should_fail_alloc_page(gfp_mask, order); in should_fail_alloc_page()
4049 unsigned int alloc_flags, gfp_t gfp_mask) in zone_watermark_fast() argument
4081 if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost in zone_watermark_fast()
4127 alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask) in alloc_flags_nofragment() argument
4135 alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM); in alloc_flags_nofragment()
4159 static inline unsigned int gfp_to_alloc_flags_cma(gfp_t gfp_mask, in gfp_to_alloc_flags_cma() argument
4163 if (gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE) in gfp_to_alloc_flags_cma()
4174 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
4197 !__cpuset_zone_allowed(zone, gfp_mask)) in get_page_from_freelist()
4247 gfp_mask)) { in get_page_from_freelist()
4269 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
4289 gfp_mask, alloc_flags, ac->migratetype); in get_page_from_freelist()
4291 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
4324 static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask) in warn_alloc_show_mem() argument
4333 if (!(gfp_mask & __GFP_NOMEMALLOC)) in warn_alloc_show_mem()
4337 if (!in_task() || !(gfp_mask & __GFP_DIRECT_RECLAIM)) in warn_alloc_show_mem()
4340 __show_mem(filter, nodemask, gfp_zone(gfp_mask)); in warn_alloc_show_mem()
4343 void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...) in warn_alloc() argument
4349 if ((gfp_mask & __GFP_NOWARN) || in warn_alloc()
4351 ((gfp_mask & __GFP_DMA) && !has_managed_dma())) in warn_alloc()
4358 current->comm, &vaf, gfp_mask, &gfp_mask, in warn_alloc()
4365 warn_alloc_show_mem(gfp_mask, nodemask); in warn_alloc()
4369 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback() argument
4375 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4382 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
4389 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
4396 .gfp_mask = gfp_mask, in __alloc_pages_may_oom()
4420 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & in __alloc_pages_may_oom()
4440 if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE)) in __alloc_pages_may_oom()
4459 WARN_ON_ONCE_GFP(gfp_mask & __GFP_NOFAIL, gfp_mask)) { in __alloc_pages_may_oom()
4466 if (gfp_mask & __GFP_NOFAIL) in __alloc_pages_may_oom()
4467 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
4484 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
4499 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
4516 prep_new_page(page, order, gfp_mask, alloc_flags); in __alloc_pages_direct_compact()
4520 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
4624 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
4664 static bool __need_reclaim(gfp_t gfp_mask) in __need_reclaim() argument
4667 if (!(gfp_mask & __GFP_DIRECT_RECLAIM)) in __need_reclaim()
4674 if (gfp_mask & __GFP_NOLOCKDEP) in __need_reclaim()
4690 void fs_reclaim_acquire(gfp_t gfp_mask) in fs_reclaim_acquire() argument
4692 gfp_mask = current_gfp_context(gfp_mask); in fs_reclaim_acquire()
4694 if (__need_reclaim(gfp_mask)) { in fs_reclaim_acquire()
4695 if (gfp_mask & __GFP_FS) in fs_reclaim_acquire()
4707 void fs_reclaim_release(gfp_t gfp_mask) in fs_reclaim_release() argument
4709 gfp_mask = current_gfp_context(gfp_mask); in fs_reclaim_release()
4711 if (__need_reclaim(gfp_mask)) { in fs_reclaim_release()
4712 if (gfp_mask & __GFP_FS) in fs_reclaim_release()
4745 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
4755 fs_reclaim_acquire(gfp_mask); in __perform_reclaim()
4758 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
4762 fs_reclaim_release(gfp_mask); in __perform_reclaim()
4771 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
4780 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
4785 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
4804 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, in wake_all_kswapds() argument
4817 wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx); in wake_all_kswapds()
4824 gfp_to_alloc_flags(gfp_t gfp_mask) in gfp_to_alloc_flags() argument
4843 (gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM)); in gfp_to_alloc_flags()
4845 if (gfp_mask & __GFP_ATOMIC) { in gfp_to_alloc_flags()
4850 if (!(gfp_mask & __GFP_NOMEMALLOC)) in gfp_to_alloc_flags()
4860 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); in gfp_to_alloc_flags()
4884 static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask) in __gfp_pfmemalloc_flags() argument
4886 if (unlikely(gfp_mask & __GFP_NOMEMALLOC)) in __gfp_pfmemalloc_flags()
4888 if (gfp_mask & __GFP_MEMALLOC) in __gfp_pfmemalloc_flags()
4902 bool gfp_pfmemalloc_allowed(gfp_t gfp_mask) in gfp_pfmemalloc_allowed() argument
4904 return !!__gfp_pfmemalloc_flags(gfp_mask); in gfp_pfmemalloc_allowed()
4918 should_reclaim_retry(gfp_t gfp_mask, unsigned order, in should_reclaim_retry() argument
5023 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
5026 bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM; in __alloc_pages_slowpath()
5043 if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) == in __alloc_pages_slowpath()
5045 gfp_mask &= ~__GFP_ATOMIC; in __alloc_pages_slowpath()
5059 alloc_flags = gfp_to_alloc_flags(gfp_mask); in __alloc_pages_slowpath()
5077 if (cpusets_insane_config() && (gfp_mask & __GFP_HARDWALL)) { in __alloc_pages_slowpath()
5086 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
5092 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
5108 && !gfp_pfmemalloc_allowed(gfp_mask)) { in __alloc_pages_slowpath()
5109 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
5120 if (costly_order && (gfp_mask & __GFP_NORETRY)) { in __alloc_pages_slowpath()
5154 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
5156 reserve_flags = __gfp_pfmemalloc_flags(gfp_mask); in __alloc_pages_slowpath()
5158 alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, reserve_flags) | in __alloc_pages_slowpath()
5173 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
5186 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
5192 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
5198 if (gfp_mask & __GFP_NORETRY) in __alloc_pages_slowpath()
5205 if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL)) in __alloc_pages_slowpath()
5208 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, in __alloc_pages_slowpath()
5234 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
5241 (gfp_mask & __GFP_NOMEMALLOC))) in __alloc_pages_slowpath()
5263 if (gfp_mask & __GFP_NOFAIL) { in __alloc_pages_slowpath()
5268 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask)) in __alloc_pages_slowpath()
5276 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask); in __alloc_pages_slowpath()
5284 WARN_ON_ONCE_GFP(costly_order, gfp_mask); in __alloc_pages_slowpath()
5292 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); in __alloc_pages_slowpath()
5300 warn_alloc(gfp_mask, ac->nodemask, in __alloc_pages_slowpath()
5306 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, in prepare_alloc_pages() argument
5311 ac->highest_zoneidx = gfp_zone(gfp_mask); in prepare_alloc_pages()
5312 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); in prepare_alloc_pages()
5314 ac->migratetype = gfp_migratetype(gfp_mask); in prepare_alloc_pages()
5328 might_alloc(gfp_mask); in prepare_alloc_pages()
5330 if (should_fail_alloc_page(gfp_mask, order)) in prepare_alloc_pages()
5333 *alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, *alloc_flags); in prepare_alloc_pages()
5336 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); in prepare_alloc_pages()
5604 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
5608 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); in __get_free_pages()
5615 unsigned long get_zeroed_page(gfp_t gfp_mask) in get_zeroed_page() argument
5617 return __get_free_pages(gfp_mask | __GFP_ZERO, 0); in get_zeroed_page()
5673 gfp_t gfp_mask) in __page_frag_cache_refill() argument
5676 gfp_t gfp = gfp_mask; in __page_frag_cache_refill()
5679 gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY | in __page_frag_cache_refill()
5681 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, in __page_frag_cache_refill()
5703 unsigned int fragsz, gfp_t gfp_mask, in page_frag_alloc_align() argument
5712 page = __page_frag_cache_refill(nc, gfp_mask); in page_frag_alloc_align()
5822 void *alloc_pages_exact(size_t size, gfp_t gfp_mask) in alloc_pages_exact() argument
5827 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) in alloc_pages_exact()
5828 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); in alloc_pages_exact()
5830 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
5847 void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask) in alloc_pages_exact_nid() argument
5852 if (WARN_ON_ONCE(gfp_mask & (__GFP_COMP | __GFP_HIGHMEM))) in alloc_pages_exact_nid()
5853 gfp_mask &= ~(__GFP_COMP | __GFP_HIGHMEM); in alloc_pages_exact_nid()
5855 p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
9197 .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL, in __alloc_contig_migrate_range()
9237 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) in __alloc_contig_migrate_range()
9267 unsigned migratetype, gfp_t gfp_mask) in alloc_contig_range() argument
9280 .gfp_mask = current_gfp_context(gfp_mask), in alloc_contig_range()
9306 ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask); in alloc_contig_range()
9393 unsigned long nr_pages, gfp_t gfp_mask) in __alloc_contig_pages() argument
9398 gfp_mask); in __alloc_contig_pages()
9450 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, in alloc_contig_pages() argument
9458 zonelist = node_zonelist(nid, gfp_mask); in alloc_contig_pages()
9460 gfp_zone(gfp_mask), nodemask) { in alloc_contig_pages()
9475 gfp_mask); in alloc_contig_pages()
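
Nearly every entry above tests or forwards individual bits of gfp_mask; a compact instance of that pattern is __gfp_pfmemalloc_flags()/gfp_pfmemalloc_allowed() in the listing, where an explicit __GFP_NOMEMALLOC opt-out overrides __GFP_MEMALLOC reserve access. The standalone C sketch below mirrors that bit-test pattern outside the kernel; the gfp_t typedef, the bit positions, and the ALLOC_NO_WATERMARKS value are illustrative placeholders, not the kernel's real definitions.

/*
 * Standalone sketch (not kernel source): mirrors the gfp_mask bit-test
 * pattern seen in the listing above, modelled on __gfp_pfmemalloc_flags().
 * Flag bit positions and the returned alloc flag are placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_NOMEMALLOC    (1u << 0)   /* placeholder bit, not the real value */
#define __GFP_MEMALLOC      (1u << 1)   /* placeholder bit, not the real value */
#define ALLOC_NO_WATERMARKS 0x04        /* placeholder alloc_flags value */

/* Explicit opt-out wins; otherwise __GFP_MEMALLOC grants reserve access. */
static int gfp_pfmemalloc_flags(gfp_t gfp_mask)
{
	if (gfp_mask & __GFP_NOMEMALLOC)
		return 0;
	if (gfp_mask & __GFP_MEMALLOC)
		return ALLOC_NO_WATERMARKS;
	return 0;
}

static bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
{
	return !!gfp_pfmemalloc_flags(gfp_mask);
}

int main(void)
{
	printf("%d\n", gfp_pfmemalloc_allowed(__GFP_MEMALLOC));                    /* 1 */
	printf("%d\n", gfp_pfmemalloc_allowed(__GFP_MEMALLOC | __GFP_NOMEMALLOC)); /* 0 */
	return 0;
}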