Lines Matching +full:oc +full:- +full:delay +full:- +full:us

1 // SPDX-License-Identifier: GPL-2.0-only
38 #include <linux/fault-inject.h>
60 /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
69 * reporting it and marking it "reported" - it only skips notifying
78 * page shuffling (relevant code - e.g., memory onlining - is expected to
81 * Note: No code should rely on this flag for correctness - it's purely
88 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
101 /* UP spin_trylock always succeeds so disable IRQs to prevent re-entrancy. */
123 * Generic helper to look up a per-cpu variable with an embedded spinlock. \
131 spin_lock(&_ret->member); \
140 if (!spin_trylock(&_ret->member)) { \
149 spin_unlock(&ptr->member); \
213 * other index - this ensures that it will be put on the correct CMA freelist.
217 return page->index; in get_pcppage_migratetype()
222 page->index = migratetype; in set_pcppage_migratetype()
234 * 1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
235 * 1G machine -> (16M dma, 784M normal, 224M high)
240 * TBD: should special case ZONE_DMA32 machines here - in those we normally
288 int user_min_free_kbytes = -1;
313 * During boot we initialize deferred pages on-demand, as needed, but once
349 return page_zone(page)->pageblock_flags; in get_pageblock_bitmap()
356 pfn &= (PAGES_PER_SECTION-1); in pfn_to_bitidx()
358 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); in pfn_to_bitidx()
364 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
381 bitidx &= (BITS_PER_LONG-1); in get_pfnblock_flags_mask()
398 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
418 bitidx &= (BITS_PER_LONG-1); in set_pfnblock_flags_mask()
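The bitidx arithmetic above packs a few bits of state per pageblock into an array of longs. The standalone sketch below reproduces that indexing with made-up constants (PAGEBLOCK_ORDER, NR_PAGEBLOCK_BITS) and ignores the section/zone-start offsets the real pfn_to_bitidx() handles; it only illustrates the bit math, it is not the kernel code.

#include <stdio.h>

#define PAGEBLOCK_ORDER    9                 /* assumption: 512 pages (2MB with 4K pages) per pageblock */
#define NR_PAGEBLOCK_BITS  4                 /* assumption: bits of state kept per pageblock */
#define BITS_PER_LONG      (8 * sizeof(long))

/* Bit index of a pfn's pageblock inside the flags bitmap. */
static unsigned long pfn_to_bitidx(unsigned long pfn)
{
	return (pfn >> PAGEBLOCK_ORDER) * NR_PAGEBLOCK_BITS;
}

int main(void)
{
	unsigned long pfn = 0x12345;
	unsigned long bitidx = pfn_to_bitidx(pfn);
	unsigned long word_bitidx = bitidx / BITS_PER_LONG;   /* which long holds the bits */

	bitidx &= (BITS_PER_LONG - 1);                        /* bit offset inside that long */
	printf("pfn 0x%lx -> word %lu, bit %lu\n", pfn, word_bitidx, bitidx);
	return 0;
}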
450 start_pfn = zone->zone_start_pfn; in page_outside_zone_boundaries()
451 sp = zone->spanned_pages; in page_outside_zone_boundaries()
456 pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n", in page_outside_zone_boundaries()
457 pfn, zone_to_nid(zone), zone->name, in page_outside_zone_boundaries()
509 current->comm, page_to_pfn(page)); in bad_page()
568 * Higher-order pages are called "compound pages". They are structured thusly:
573 * in bit 0 of page->compound_head. The rest of the bits is a pointer to the head page.
575 * The first tail page's ->compound_order holds the order of allocation.
576 * This usage means that zero-order pages may not be compound.
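The comment block above describes how a tail page encodes a pointer to its head page with bit 0 set in compound_head. The userspace sketch below illustrates that tagged-pointer trick with a hypothetical fake_page struct; it is only an analogy, not the kernel's compound-page code.

#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	unsigned long compound_head;	/* bit 0 set => remaining bits point to the head page */
};

/* Mark a tail page by storing the head pointer with bit 0 set. */
static void set_tail(struct fake_page *tail, struct fake_page *head)
{
	tail->compound_head = (unsigned long)head | 1UL;
}

static bool is_tail(const struct fake_page *page)
{
	return page->compound_head & 1UL;
}

/* Recover the head page; a head page simply returns itself. */
static struct fake_page *get_head(struct fake_page *page)
{
	if (is_tail(page))
		return (struct fake_page *)(page->compound_head - 1UL);
	return page;
}

int main(void)
{
	struct fake_page pages[4] = { { 0 } };	/* pages[0] plays the head */

	for (int i = 1; i < 4; i++)
		set_tail(&pages[i], &pages[0]);

	printf("head of pages[3] is pages[%ld]\n", (long)(get_head(&pages[3]) - pages));
	return 0;
}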
602 free_the_page(&folio->page, folio_order(folio)); in destroy_large_folio()
614 struct capture_control *capc = current->capture_control; in task_capc()
617 !(current->flags & PF_KTHREAD) && in task_capc()
618 !capc->page && in task_capc()
619 capc->cc->zone == zone ? capc : NULL; in task_capc()
626 if (!capc || order != capc->cc->order) in compaction_capture()
637 * and vice-versa but no more than normal fallback logic which can in compaction_capture()
638 * have trouble finding a high-order free page. in compaction_capture()
643 capc->page = page; in compaction_capture()
665 struct free_area *area = &zone->free_area[order]; in add_to_free_list()
667 list_add(&page->buddy_list, &area->free_list[migratetype]); in add_to_free_list()
668 area->nr_free++; in add_to_free_list()
675 struct free_area *area = &zone->free_area[order]; in add_to_free_list_tail()
677 list_add_tail(&page->buddy_list, &area->free_list[migratetype]); in add_to_free_list_tail()
678 area->nr_free++; in add_to_free_list_tail()
683 * of the list - so the moved pages won't immediately be considered for
689 struct free_area *area = &zone->free_area[order]; in move_to_free_list()
691 list_move_tail(&page->buddy_list, &area->free_list[migratetype]); in move_to_free_list()
701 list_del(&page->buddy_list); in del_page_from_free_list()
704 zone->free_area[order].nr_free--; in del_page_from_free_list()
710 return list_first_entry_or_null(&area->free_list[migratetype], in get_page_from_free_area()
716 * of the next-highest order is free. If it is, it's possible
729 if (order >= MAX_ORDER - 1) in buddy_merge_likely()
733 higher_page = page + (higher_page_pfn - pfn); in buddy_merge_likely()
742 * The concept of a buddy system is to maintain direct-mapped table
760 * -- nyc
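For reference, the pairing rule behind this buddy-system comment: two free blocks of size 2^order are buddies when their starting PFNs differ only in bit 'order', so both the buddy and the merged parent block fall out of simple bit operations. A standalone sketch of that arithmetic (the example PFN is arbitrary; this is not the kernel code):

#include <stdio.h>

/* Buddy of the block that starts at 'pfn' and spans 2^order pages. */
static unsigned long buddy_pfn(unsigned long pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

/* Start of the merged, order+1 block containing both buddies. */
static unsigned long combined_pfn(unsigned long pfn, unsigned int order)
{
	return pfn & ~(1UL << order);
}

int main(void)
{
	unsigned long pfn = 0x1234c0;	/* order-aligned example PFN */
	unsigned int order = 6;		/* a 64-page block */

	printf("block 0x%lx, buddy 0x%lx, merged parent 0x%lx\n",
	       pfn, buddy_pfn(pfn, order), combined_pfn(pfn, order));
	return 0;
}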
775 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); in __free_one_page()
777 VM_BUG_ON(migratetype == -1); in __free_one_page()
781 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); in __free_one_page()
786 __mod_zone_freepage_state(zone, -(1 << order), in __free_one_page()
819 page = page + (combined_pfn - pfn); in __free_one_page()
845 * split_free_page() -- split a free page at split_pfn_offset
850 * Return -ENOENT if the free page is changed, otherwise 0
871 spin_lock_irqsave(&zone->lock, flags); in split_free_page()
874 ret = -ENOENT; in split_free_page()
880 __mod_zone_freepage_state(zone, -(1UL << order), mt); in split_free_page()
893 split_pfn_offset -= (1UL << free_page_order); in split_free_page()
896 split_pfn_offset = (1UL << order) - (pfn - free_page_pfn); in split_free_page()
899 spin_unlock_irqrestore(&zone->lock, flags); in split_free_page()
910 if (unlikely(atomic_read(&page->_mapcount) != -1)) in page_expected_state()
913 if (unlikely((unsigned long)page->mapping | in page_expected_state()
916 page->memcg_data | in page_expected_state()
918 (page->flags & check_flags))) in page_expected_state()
928 if (unlikely(atomic_read(&page->_mapcount) != -1)) in page_bad_reason()
930 if (unlikely(page->mapping != NULL)) in page_bad_reason()
931 bad_reason = "non-NULL mapping"; in page_bad_reason()
934 if (unlikely(page->flags & flags)) { in page_bad_reason()
941 if (unlikely(page->memcg_data)) in page_bad_reason()
974 * We rely on page->lru.next never having bit 0 set, unless the page in free_tail_page_prepare()
975 * is PageTail(). Let's make sure that's true even for poisoned ->lru. in free_tail_page_prepare()
983 switch (page - head_page) { in free_tail_page_prepare()
985 /* the first tail page: these may be in place of ->mapping */ in free_tail_page_prepare()
990 if (unlikely(atomic_read(&folio->_nr_pages_mapped))) { in free_tail_page_prepare()
994 if (unlikely(atomic_read(&folio->_pincount))) { in free_tail_page_prepare()
1001 * the second tail page: ->mapping is in free_tail_page_prepare()
1002 * deferred_list.next -- ignore value. in free_tail_page_prepare()
1006 if (page->mapping != TAIL_MAPPING) { in free_tail_page_prepare()
1022 page->mapping = NULL; in free_tail_page_prepare()
1031 * Tag-based KASAN modes skip pages freed via deferred memory initialization
1033 * 2. For tag-based KASAN modes: the page has a match-all KASAN tag, indicating
1036 * Pages will have match-all tags in the following circumstances:
1053 * on-demand allocation and then freed again before the deferred pages
1101 * avoid checking PageCompound for order-0 pages. in free_pages_prepare()
1120 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; in free_pages_prepare()
1124 page->mapping = NULL; in free_pages_prepare()
1135 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; in free_pages_prepare()
1153 * With hardware tag-based KASAN, memory tags must be set before the in free_pages_prepare()
1196 count = min(pcp->count, count); in free_pcppages_bulk()
1199 pindex = pindex - 1; in free_pcppages_bulk()
1201 spin_lock_irqsave(&zone->lock, flags); in free_pcppages_bulk()
1208 /* Remove pages from lists in a round-robin fashion. */ in free_pcppages_bulk()
1210 if (++pindex > NR_PCP_LISTS - 1) in free_pcppages_bulk()
1212 list = &pcp->lists[pindex]; in free_pcppages_bulk()
1224 list_del(&page->pcp_list); in free_pcppages_bulk()
1225 count -= nr_pages; in free_pcppages_bulk()
1226 pcp->count -= nr_pages; in free_pcppages_bulk()
1239 spin_unlock_irqrestore(&zone->lock, flags); in free_pcppages_bulk()
1249 spin_lock_irqsave(&zone->lock, flags); in free_one_page()
1255 spin_unlock_irqrestore(&zone->lock, flags); in free_one_page()
1276 spin_lock_irqsave(&zone->lock, flags); in __free_pages_ok()
1282 spin_unlock_irqrestore(&zone->lock, flags); in __free_pages_ok()
1299 for (loop = 0; loop < (nr_pages - 1); loop++, p++) { in __free_pages_core()
1307 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); in __free_pages_core()
1338 * Note: the function may return non-NULL struct page even for a page block
1341 * will fall into 2 sub-sections, and the end pfn of the pageblock may be a hole
1354 end_pfn--; in __pageblock_pfn_to_page()
1387 * -- nyc
1395 high--; in expand()
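The expand() fragment above is the splitting half of the buddy scheme: a free block of a higher order is halved repeatedly, each unused half going back on the free list of its own order, until a block of the requested order remains. A rough userspace sketch of that loop, with a made-up PFN and no real free lists:

#include <stdio.h>

static void split_block(unsigned long pfn, unsigned int high, unsigned int low)
{
	unsigned long size = 1UL << high;

	while (high > low) {
		high--;
		size >>= 1;
		printf("return half at pfn 0x%lx to the order-%u free list\n",
		       pfn + size, high);
	}
	printf("hand out block at pfn 0x%lx, order %u\n", pfn, low);
}

int main(void)
{
	split_block(0x40000, 5, 2);	/* split an order-5 block down to order 2 */
	return 0;
}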
1415 if (unlikely(page->flags & __PG_HWPOISON)) { in check_new_page_bad()
1459 /* Skip, if hardware tag-based KASAN is not enabled. */ in should_skip_kasan_unpoison()
1464 * With hardware tag-based KASAN enabled, skip if this has been in should_skip_kasan_unpoison()
1472 /* Don't skip, if hardware tag-based KASAN is not enabled. */ in should_skip_init()
1476 /* For hardware tag-based KASAN, skip if requested. */ in should_skip_init()
1574 area = &(zone->free_area[current_order]); in __rmqueue_smallest()
1597 static int fallbacks[MIGRATE_TYPES][MIGRATE_PCPTYPES - 1] = {
1666 end_pfn = pageblock_end_pfn(pfn) - 1; in move_freepages_block()
1681 int nr_pageblocks = 1 << (start_order - pageblock_order); in change_pageblock_range()
1683 while (nr_pageblocks--) { in change_pageblock_range()
1737 max_boost = mult_frac(zone->_watermark[WMARK_HIGH], in boost_watermark()
1753 zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages, in boost_watermark()
1762 * pageblock to our migratetype and determine how many already-allocated pages
1795 set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in steal_suitable_fallback()
1817 * to MOVABLE pageblock, consider all non-movable pages as in steal_suitable_fallback()
1820 * exact migratetype of non-movable pages. in steal_suitable_fallback()
1824 - (free_pages + movable_pages); in steal_suitable_fallback()
1832 if (free_pages + alike_pages >= (1 << (pageblock_order-1)) || in steal_suitable_fallback()
1854 if (area->nr_free == 0) in find_suitable_fallback()
1855 return -1; in find_suitable_fallback()
1858 for (i = 0; i < MIGRATE_PCPTYPES - 1 ; i++) { in find_suitable_fallback()
1873 return -1; in find_suitable_fallback()
1877 * Reserve a pageblock for exclusive use of high-order atomic allocations if
1887 * Check is race-prone but harmless. in reserve_highatomic_pageblock()
1890 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
1893 spin_lock_irqsave(&zone->lock, flags); in reserve_highatomic_pageblock()
1896 if (zone->nr_reserved_highatomic >= max_managed) in reserve_highatomic_pageblock()
1903 zone->nr_reserved_highatomic += pageblock_nr_pages; in reserve_highatomic_pageblock()
1909 spin_unlock_irqrestore(&zone->lock, flags); in reserve_highatomic_pageblock()
1914 * potentially hurts the reliability of high-order allocations when under
1924 struct zonelist *zonelist = ac->zonelist; in unreserve_highatomic_pageblock()
1932 for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx, in unreserve_highatomic_pageblock()
1933 ac->nodemask) { in unreserve_highatomic_pageblock()
1938 if (!force && zone->nr_reserved_highatomic <= in unreserve_highatomic_pageblock()
1942 spin_lock_irqsave(&zone->lock, flags); in unreserve_highatomic_pageblock()
1944 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
1954 * from highatomic to ac->migratetype. So we should in unreserve_highatomic_pageblock()
1960 * locking could inadvertently allow a per-cpu in unreserve_highatomic_pageblock()
1965 zone->nr_reserved_highatomic -= min( in unreserve_highatomic_pageblock()
1967 zone->nr_reserved_highatomic); in unreserve_highatomic_pageblock()
1971 * Convert to ac->migratetype and avoid the normal in unreserve_highatomic_pageblock()
1979 set_pageblock_migratetype(page, ac->migratetype); in unreserve_highatomic_pageblock()
1980 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
1983 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
1987 spin_unlock_irqrestore(&zone->lock, flags); in unreserve_highatomic_pageblock()
2028 --current_order) { in __rmqueue_fallback()
2029 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2032 if (fallback_mt == -1) in __rmqueue_fallback()
2055 area = &(zone->free_area[current_order]); in __rmqueue_fallback()
2058 if (fallback_mt != -1) in __rmqueue_fallback()
2063 * This should not happen - we already found a suitable fallback in __rmqueue_fallback()
2083 * Call me with the zone->lock already held.
2130 spin_lock_irqsave(&zone->lock, flags); in rmqueue_bulk()
2147 list_add_tail(&page->pcp_list, list); in rmqueue_bulk()
2150 -(1 << order)); in rmqueue_bulk()
2153 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2154 spin_unlock_irqrestore(&zone->lock, flags); in rmqueue_bulk()
2169 batch = READ_ONCE(pcp->batch); in drain_zone_pages()
2170 to_drain = min(pcp->count, batch); in drain_zone_pages()
2172 spin_lock(&pcp->lock); in drain_zone_pages()
2174 spin_unlock(&pcp->lock); in drain_zone_pages()
2186 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in drain_pages_zone()
2187 if (pcp->count) { in drain_pages_zone()
2188 spin_lock(&pcp->lock); in drain_pages_zone()
2189 free_pcppages_bulk(zone, pcp->count, pcp, 0); in drain_pages_zone()
2190 spin_unlock(&pcp->lock); in drain_pages_zone()
2207 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
2224 * not empty. The check for non-emptiness can however race with a free to
2225 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
2268 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in __drain_all_pages()
2269 if (pcp->count) in __drain_all_pages()
2273 pcp = per_cpu_ptr(z->per_cpu_pageset, cpu); in __drain_all_pages()
2274 if (pcp->count) { in __drain_all_pages()
2298 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
2300 * When zone parameter is non-NULL, spill just the single zone's pages.
2323 int batch = READ_ONCE(pcp->batch); in nr_pcp_free()
2325 /* Free everything if batch freeing high-order pages. */ in nr_pcp_free()
2327 return pcp->count; in nr_pcp_free()
2333 /* Leave at least pcp->batch pages on the list */ in nr_pcp_free()
2335 max_nr_free = high - batch; in nr_pcp_free()
2341 batch <<= pcp->free_factor; in nr_pcp_free()
2343 pcp->free_factor++; in nr_pcp_free()
2352 int high = READ_ONCE(pcp->high); in nr_pcp_high()
2357 if (!test_bit(ZONE_RECLAIM_ACTIVE, &zone->flags)) in nr_pcp_high()
2364 return min(READ_ONCE(pcp->batch) << 2, high); in nr_pcp_high()
2377 list_add(&page->pcp_list, &pcp->lists[pindex]); in free_unref_page_commit()
2378 pcp->count += 1 << order; in free_unref_page_commit()
2381 * As high-order pages other than THP's stored on PCP can contribute in free_unref_page_commit()
2386 free_high = (pcp->free_factor && order && order <= PAGE_ALLOC_COSTLY_ORDER); in free_unref_page_commit()
2389 if (pcp->count >= high) { in free_unref_page_commit()
2426 pcp = pcp_spin_trylock(zone->per_cpu_pageset); in free_unref_page()
2437 * Free a list of 0-order pages
2452 list_del(&page->lru); in free_unref_page_list()
2462 list_del(&page->lru); in free_unref_page_list()
2471 list_del(&page->lru); in free_unref_page_list()
2492 pcp = pcp_spin_trylock(zone->per_cpu_pageset); in free_unref_page_list()
2504 * Non-isolated types over MIGRATE_PCPTYPES get added in free_unref_page_list()
2522 * split_page takes a non-compound higher-order page, and splits it into
2523 * n (1<<order) sub-pages: page[0..n]
2524 * Each sub-page must be freed individually.
2552 * emulate a high-order watermark check with a raised order-0 in __isolate_free_page()
2553 * watermark, because we already know our high-order page in __isolate_free_page()
2556 watermark = zone->_watermark[WMARK_MIN] + (1UL << order); in __isolate_free_page()
2560 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
2569 if (order >= pageblock_order - 1) { in __isolate_free_page()
2570 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
2587 * __putback_isolated_page - Return a now-isolated page back where we got it
2600 lockdep_assert_held(&zone->lock); in __putback_isolated_page()
2643 spin_lock_irqsave(&zone->lock, flags); in rmqueue_buddy()
2652 * failing a high-order atomic allocation in the in rmqueue_buddy()
2659 spin_unlock_irqrestore(&zone->lock, flags); in rmqueue_buddy()
2663 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue_buddy()
2665 spin_unlock_irqrestore(&zone->lock, flags); in rmqueue_buddy()
2674 /* Remove page from the per-cpu list, caller must protect the list */
2686 int batch = READ_ONCE(pcp->batch); in __rmqueue_pcplist()
2702 pcp->count += alloced << order; in __rmqueue_pcplist()
2708 list_del(&page->pcp_list); in __rmqueue_pcplist()
2709 pcp->count -= 1 << order; in __rmqueue_pcplist()
2715 /* Lock and remove page from the per-cpu list */
2727 pcp = pcp_spin_trylock(zone->per_cpu_pageset); in rmqueue_pcplist()
2738 pcp->free_factor >>= 1; in rmqueue_pcplist()
2739 list = &pcp->lists[order_to_pindex(migratetype, order)]; in rmqueue_pcplist()
2752 * Use pcplists for THP or "cheap" high-order allocations.
2772 * allocate greater than order-1 page units with __GFP_NOFAIL. in rmqueue()
2789 unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { in rmqueue()
2790 clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); in rmqueue()
2807 long unusable_free = (1 << order) - 1; in __zone_watermark_unusable_free()
2811 * watermark then subtract the high-atomic reserves. This will in __zone_watermark_unusable_free()
2812 * over-estimate the size of the atomic reserve but it avoids a search. in __zone_watermark_unusable_free()
2815 unusable_free += z->nr_reserved_highatomic; in __zone_watermark_unusable_free()
2830 * Return true if free base pages are above 'mark'. For high-order checks it
2831 * will return true if the order-0 watermark is reached and there is at least in __zone_watermark_ok()
2842 /* free_pages may go negative - that's OK */ in __zone_watermark_ok()
2843 free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags); in __zone_watermark_ok()
2851 min -= min / 2; in __zone_watermark_ok()
2854 * Non-blocking allocations (e.g. GFP_ATOMIC) can in __zone_watermark_ok()
2856 * non-blocking allocations requests such as GFP_NOWAIT in __zone_watermark_ok()
2861 min -= min / 4; in __zone_watermark_ok()
2868 * makes during the free path will be small and short-lived. in __zone_watermark_ok()
2871 min -= min / 2; in __zone_watermark_ok()
2875 * Check watermarks for an order-0 allocation request. If these in __zone_watermark_ok()
2876 * are not met, then a high-order request also cannot go ahead in __zone_watermark_ok()
2879 if (free_pages <= min + z->lowmem_reserve[highest_zoneidx]) in __zone_watermark_ok()
2882 /* If this is an order-0 request then the watermark is fine */ in __zone_watermark_ok()
2886 /* For a high-order request, check at least one suitable page is free */ in __zone_watermark_ok()
2888 struct free_area *area = &z->free_area[o]; in __zone_watermark_ok()
2891 if (!area->nr_free) in __zone_watermark_ok()
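A compact sketch of the order-0 part of this watermark test: the request may proceed only if, after discounting the pages it would consume, free memory stays above the watermark plus the zone's lowmem reserve. The numbers below are invented and the high-order free_area scan is omitted; this is not the kernel function.

#include <stdbool.h>
#include <stdio.h>

static bool watermark_ok(long free_pages, unsigned int order,
			 long mark, long lowmem_reserve)
{
	long unusable = (1L << order) - 1;	/* all but one page of the request */

	free_pages -= unusable;
	return free_pages > mark + lowmem_reserve;
}

int main(void)
{
	printf("order-0, 1000 free vs mark 800 + reserve 64: %d\n",
	       watermark_ok(1000, 0, 800, 64));
	printf("order-9, 1000 free vs mark 800 + reserve 64: %d\n",
	       watermark_ok(1000, 9, 800, 64));
	return 0;
}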
2929 * Fast check for order-0 only. If this fails then the reserves in zone_watermark_fast()
2939 /* reserved may over estimate high-atomic reserves. */ in zone_watermark_fast()
2940 usable_free -= min(usable_free, reserved); in zone_watermark_fast()
2941 if (usable_free > mark + z->lowmem_reserve[highest_zoneidx]) in zone_watermark_fast()
2950 * Ignore watermark boosting for __GFP_HIGH order-0 allocations in zone_watermark_fast()
2955 if (unlikely(!order && (alloc_flags & ALLOC_MIN_RESERVE) && z->watermark_boost in zone_watermark_fast()
2957 mark = z->_watermark[WMARK_MIN]; in zone_watermark_fast()
2970 if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark) in zone_watermark_ok_safe()
3020 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume in alloc_flags_nofragment()
3023 BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1); in alloc_flags_nofragment()
3024 if (nr_online_nodes > 1 && !populated_zone(--zone)) in alloc_flags_nofragment()
3063 z = ac->preferred_zoneref; in get_page_from_freelist()
3064 for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx, in get_page_from_freelist()
3065 ac->nodemask) { in get_page_from_freelist()
3084 * exceed the per-node dirty limit in the slowpath in get_page_from_freelist()
3090 * dirty-throttling and the flusher threads. in get_page_from_freelist()
3092 if (ac->spread_dirty_pages) { in get_page_from_freelist()
3093 if (last_pgdat != zone->zone_pgdat) { in get_page_from_freelist()
3094 last_pgdat = zone->zone_pgdat; in get_page_from_freelist()
3095 last_pgdat_dirty_ok = node_dirty_ok(zone->zone_pgdat); in get_page_from_freelist()
3103 zone != ac->preferred_zoneref->zone) { in get_page_from_freelist()
3111 local_nid = zone_to_nid(ac->preferred_zoneref->zone); in get_page_from_freelist()
3120 ac->highest_zoneidx, alloc_flags, in get_page_from_freelist()
3145 !zone_allows_reclaim(ac->preferred_zoneref->zone, zone)) in get_page_from_freelist()
3148 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3159 ac->highest_zoneidx, alloc_flags)) in get_page_from_freelist()
3167 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3168 gfp_mask, alloc_flags, ac->migratetype); in get_page_from_freelist()
3173 * If this is a high-order atomic allocation then check in get_page_from_freelist()
3219 (current->flags & (PF_MEMALLOC | PF_EXITING))) in warn_alloc_show_mem()
3242 current->comm, &vaf, gfp_mask, &gfp_mask, in warn_alloc()
3276 struct oom_control oc = { in __alloc_pages_may_oom() local
3277 .zonelist = ac->zonelist, in __alloc_pages_may_oom()
3278 .nodemask = ac->nodemask, in __alloc_pages_may_oom()
3289 * making progress for us. in __alloc_pages_may_oom()
3311 if (current->flags & PF_DUMPCORE) in __alloc_pages_may_oom()
3327 if (ac->highest_zoneidx < ZONE_NORMAL) in __alloc_pages_may_oom()
3342 if (out_of_memory(&oc) || in __alloc_pages_may_oom()
3347 * Help non-failing allocations by giving them access to memory in __alloc_pages_may_oom()
3366 /* Try memory compaction for high-order allocations before reclaim */
3409 zone->compact_blockskip_flush = false; in __alloc_pages_direct_compact()
3445 * Compaction was skipped due to a lack of free order-0 in should_compact_retry()
3483 (*compact_priority)--; in should_compact_retry()
3516 * Let's give them a good hope and keep retrying while the order-0 in should_compact_retry()
3519 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_compact_retry()
3520 ac->highest_zoneidx, ac->nodemask) { in should_compact_retry()
3522 ac->highest_zoneidx, alloc_flags)) in should_compact_retry()
3540 if (current->flags & PF_MEMALLOC) in __need_reclaim()
3627 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
3628 ac->nodemask); in __perform_reclaim()
3658 * pages are pinned on the per-cpu lists or in high alloc reserves. in __alloc_pages_direct_reclaim()
3679 enum zone_type highest_zoneidx = ac->highest_zoneidx; in wake_all_kswapds()
3681 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx, in wake_all_kswapds()
3682 ac->nodemask) { in wake_all_kswapds()
3685 if (last_pgdat != zone->zone_pgdat) { in wake_all_kswapds()
3687 last_pgdat = zone->zone_pgdat; in wake_all_kswapds()
3727 * Ignore cpuset mems for non-blocking __GFP_HIGH (probably in gfp_to_alloc_flags()
3766 if (in_serving_softirq() && (current->flags & PF_MEMALLOC)) in __gfp_pfmemalloc_flags()
3769 if (current->flags & PF_MEMALLOC) in __gfp_pfmemalloc_flags()
3827 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, in should_reclaim_retry()
3828 ac->highest_zoneidx, ac->nodemask) { in should_reclaim_retry()
3842 ac->highest_zoneidx, alloc_flags, available); in should_reclaim_retry()
3858 if (current->flags & PF_WQ_WORKER) in should_reclaim_retry()
3874 * This assumes that for all allocations, ac->nodemask can come only in check_retry_cpuset()
3879 if (cpusets_enabled() && ac->nodemask && in check_retry_cpuset()
3880 !cpuset_nodemask_valid_mems_allowed(ac->nodemask)) { in check_retry_cpuset()
3881 ac->nodemask = NULL; in check_retry_cpuset()
3932 * there was a cpuset modification and we are retrying - otherwise we in __alloc_pages_slowpath()
3933 * could end up iterating over non-eligible zones endlessly. in __alloc_pages_slowpath()
3935 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, in __alloc_pages_slowpath()
3936 ac->highest_zoneidx, ac->nodemask); in __alloc_pages_slowpath()
3937 if (!ac->preferred_zoneref->zone) in __alloc_pages_slowpath()
3942 * any suitable zone to satisfy the request - e.g. non-movable in __alloc_pages_slowpath()
3946 struct zoneref *z = first_zones_zonelist(ac->zonelist, in __alloc_pages_slowpath()
3947 ac->highest_zoneidx, in __alloc_pages_slowpath()
3949 if (!z->zone) in __alloc_pages_slowpath()
3966 * that we have enough base pages and don't need to reclaim. For non- in __alloc_pages_slowpath()
3967 * movable high-order allocations, do that as well, as compaction will in __alloc_pages_slowpath()
3975 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) in __alloc_pages_slowpath()
3997 * - potentially very expensive because zones are far in __alloc_pages_slowpath()
4000 * - not guaranteed to help because isolate_freepages() in __alloc_pages_slowpath()
4003 * - unlikely to make entire pageblocks free on its in __alloc_pages_slowpath()
4035 ac->nodemask = NULL; in __alloc_pages_slowpath()
4036 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, in __alloc_pages_slowpath()
4037 ac->highest_zoneidx, ac->nodemask); in __alloc_pages_slowpath()
4050 if (current->flags & PF_MEMALLOC) in __alloc_pages_slowpath()
4081 * It doesn't make any sense to retry for the compaction if the order-0 in __alloc_pages_slowpath()
4101 /* Reclaim has failed us, start killing things */ in __alloc_pages_slowpath()
4142 * for somebody to do the work for us in __alloc_pages_slowpath()
4144 WARN_ON_ONCE_GFP(current->flags & PF_MEMALLOC, gfp_mask); in __alloc_pages_slowpath()
4155 * Help non-failing allocations by giving some access to memory in __alloc_pages_slowpath()
4156 * reserves normally used for high priority non-blocking in __alloc_pages_slowpath()
4169 warn_alloc(gfp_mask, ac->nodemask, in __alloc_pages_slowpath()
4180 ac->highest_zoneidx = gfp_zone(gfp_mask); in prepare_alloc_pages()
4181 ac->zonelist = node_zonelist(preferred_nid, gfp_mask); in prepare_alloc_pages()
4182 ac->nodemask = nodemask; in prepare_alloc_pages()
4183 ac->migratetype = gfp_migratetype(gfp_mask); in prepare_alloc_pages()
4191 if (in_task() && !ac->nodemask) in prepare_alloc_pages()
4192 ac->nodemask = &cpuset_current_mems_allowed; in prepare_alloc_pages()
4205 ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE); in prepare_alloc_pages()
4212 ac->preferred_zoneref = first_zones_zonelist(ac->zonelist, in prepare_alloc_pages()
4213 ac->highest_zoneidx, ac->nodemask); in prepare_alloc_pages()
4219 * __alloc_pages_bulk - Allocate a number of order-0 pages to a list or array
4266 if (unlikely(page_array && nr_pages - nr_populated == 0)) in __alloc_pages_bulk()
4274 if (nr_pages - nr_populated == 1) in __alloc_pages_bulk()
4305 if (nr_online_nodes > 1 && zone != ac.preferred_zoneref->zone && in __alloc_pages_bulk()
4306 zone_to_nid(zone) != zone_to_nid(ac.preferred_zoneref->zone)) { in __alloc_pages_bulk()
4327 pcp = pcp_spin_trylock(zone->per_cpu_pageset); in __alloc_pages_bulk()
4332 pcp_list = &pcp->lists[order_to_pindex(ac.migratetype, 0)]; in __alloc_pages_bulk()
4355 list_add(&page->lru, page_list); in __alloc_pages_bulk()
4365 zone_statistics(ac.preferred_zoneref->zone, zone, nr_account); in __alloc_pages_bulk()
4377 list_add(&page->lru, page_list); in __alloc_pages_bulk()
4423 alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp); in __alloc_pages()
4435 * &cpuset_current_mems_allowed to optimize the fast-path attempt. in __alloc_pages()
4491 * __free_pages - Free pages allocated with alloc_pages().
4495 * This function can free multi-page allocations that are not compound
4501 * by put_page() which only frees the first page of a non-compound
4518 while (order-- > 0) in __free_pages()
4535 * An arbitrary-length arbitrary-offset area of memory which resides
4542 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
4555 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; in __page_frag_cache_refill()
4560 nc->va = page ? page_address(page) : NULL; in __page_frag_cache_refill()
4582 if (unlikely(!nc->va)) { in page_frag_alloc_align()
4590 size = nc->size; in page_frag_alloc_align()
4598 nc->pfmemalloc = page_is_pfmemalloc(page); in page_frag_alloc_align()
4599 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; in page_frag_alloc_align()
4600 nc->offset = size; in page_frag_alloc_align()
4603 offset = nc->offset - fragsz; in page_frag_alloc_align()
4605 page = virt_to_page(nc->va); in page_frag_alloc_align()
4607 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) in page_frag_alloc_align()
4610 if (unlikely(nc->pfmemalloc)) { in page_frag_alloc_align()
4617 size = nc->size; in page_frag_alloc_align()
4623 nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1; in page_frag_alloc_align()
4624 offset = size - fragsz; in page_frag_alloc_align()
4639 nc->pagecnt_bias--; in page_frag_alloc_align()
4641 nc->offset = offset; in page_frag_alloc_align()
4643 return nc->va + offset; in page_frag_alloc_align()
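The page_frag_alloc_align() fragments above implement a bump-down allocator: an offset starts at the end of a cached page and is decremented and aligned for every fragment handed out. The userspace sketch below shows only that offset arithmetic; refilling, the pagecnt_bias refcount trick, and pfmemalloc handling are left out, and the names are made up.

#include <stdio.h>
#include <stdlib.h>

struct frag_cache {
	char *va;
	unsigned int size;
	unsigned int offset;
};

static void *frag_alloc(struct frag_cache *nc, unsigned int fragsz,
			unsigned int align)
{
	if (fragsz > nc->offset)
		return NULL;			/* the real code would refill here */

	nc->offset = (nc->offset - fragsz) & ~(align - 1);
	return nc->va + nc->offset;
}

int main(void)
{
	struct frag_cache nc = { .size = 4096, .offset = 4096 };

	nc.va = malloc(nc.size);
	printf("frag A at offset %ld\n", (long)((char *)frag_alloc(&nc, 100, 64) - nc.va));
	printf("frag B at offset %ld\n", (long)((char *)frag_alloc(&nc, 700, 64) - nc.va));
	free(nc.va);
	return 0;
}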
4669 while (page < --last) in make_alloc_exact()
4680 * alloc_pages_exact - allocate an exact number physically-contiguous pages.
4686 * allocate memory in power-of-two pages.
4708 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
4734 * free_pages_exact - release memory allocated via alloc_pages_exact()
4753 * nr_free_zone_pages - count number of pages beyond high watermark
4760 * nr_free_zone_pages = managed_pages - high_pages
4778 sum += size - high; in nr_free_zone_pages()
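The sum above is simply "managed pages beyond the high watermark", accumulated over zones. A toy calculation with invented zone sizes:

#include <stdio.h>

struct fake_zone {
	const char *name;
	unsigned long managed_pages;
	unsigned long high_wmark;
};

int main(void)
{
	struct fake_zone zones[] = {
		{ "DMA32",  262144,  4096 },	/* made-up numbers, in pages */
		{ "Normal", 1835008, 28672 },
	};
	unsigned long sum = 0;

	for (unsigned int i = 0; i < sizeof(zones) / sizeof(zones[0]); i++) {
		unsigned long size = zones[i].managed_pages;
		unsigned long high = zones[i].high_wmark;

		if (size > high)
			sum += size - high;
	}
	printf("pages beyond the high watermarks: %lu\n", sum);
	return 0;
}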
4785 * nr_free_buffer_pages - count number of pages beyond high watermark
4801 zoneref->zone = zone; in zoneref_set_zone()
4802 zoneref->zone_idx = zone_idx(zone); in zoneref_set_zone()
4817 zone_type--; in build_zonerefs_node()
4818 zone = pgdat->node_zones + zone_type; in build_zonerefs_node()
4840 return -EINVAL; in __parse_numa_zonelist_order()
4861 * find_next_best_node - find the next node that should appear in a given node's fallback list
4896 /* Penalize nodes under us ("prefer the next node") */ in find_next_best_node()
4922 * This results in maximum locality--normal zone overflows into local
4923 * DMA zone, if any--but risks exhausting DMA zone.
4931 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; in build_zonelists_in_node_order()
4941 zonerefs->zone = NULL; in build_zonelists_in_node_order()
4942 zonerefs->zone_idx = 0; in build_zonelists_in_node_order()
4953 zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs; in build_thisnode_zonelists()
4956 zonerefs->zone = NULL; in build_thisnode_zonelists()
4957 zonerefs->zone_idx = 0; in build_thisnode_zonelists()
4974 /* NUMA-aware ordering of nodes */ in build_zonelists()
4975 local_node = pgdat->node_id; in build_zonelists()
4983 * distance group to make it round-robin. in build_zonelists()
5015 return zone_to_nid(z->zone); in local_memory_node()
5029 local_node = pgdat->node_id; in build_zonelists()
5031 zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs; in build_zonelists()
5056 zonerefs->zone = NULL; in build_zonelists()
5057 zonerefs->zone_idx = 0; in build_zonelists()
5098 * trying to hold port->lock, for in __build_all_zonelists()
5100 * calling kmalloc(GFP_ATOMIC | __GFP_NOWARN) with port->lock held. in __build_all_zonelists()
5110 * building zonelists is fine - no need to touch other nodes. in __build_all_zonelists()
5112 if (self && !node_online(self->node_id)) { in __build_all_zonelists()
5127 * We now know the "local memory node" for each node-- in __build_all_zonelists()
5129 * Set up numa_mem percpu variable for on-line cpus. During in __build_all_zonelists()
5130 * boot, only the boot cpu should be on-line; we'll init the in __build_all_zonelists()
5131 * secondary cpus' numa_mem as they come on-line. During in __build_all_zonelists()
5132 * node/memory hotplug, we'll fixup all on-line cpus. in __build_all_zonelists()
5161 * (a chicken-egg dilemma). in build_all_zonelists_init()
5191 * more accurate, but expensive to check per-zone. This check is in build_all_zonelists()
5192 * made on memory-hotadd so a system can start with mobility in build_all_zonelists()
5226 * Clamp the batch to a 2^n - 1 value. Having a power in zone_batchsize()
5235 batch = rounddown_pow_of_two(batch + batch/2) - 1; in zone_batchsize()
5249 * can be a significant delay between the individual batches being in zone_batchsize()
5251 * fragmented and becoming unavailable for high-order allocations. in zone_batchsize()
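The clamp mentioned at line 5226 above is "batch = rounddown_pow_of_two(batch + batch/2) - 1" (line 5235), landing batch on a 2^n - 1 value. A standalone sketch of that arithmetic over a few assumed inputs:

#include <stdio.h>

/* Round v (> 0) down to the nearest power of two. */
static unsigned long rounddown_pow_of_two(unsigned long v)
{
	while (v & (v - 1))
		v &= v - 1;		/* clear the lowest set bit until one remains */
	return v;
}

int main(void)
{
	for (unsigned long batch = 4; batch <= 64; batch *= 2) {
		unsigned long clamped = rounddown_pow_of_two(batch + batch / 2) - 1;

		printf("batch %3lu -> clamped %3lu (a 2^n - 1 value)\n", batch, clamped);
	}
	return 0;
}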
5285 * onlined. For memory nodes that have no CPUs, split pcp->high across in zone_highsize()
5307 * pcp->high and pcp->batch values are related and generally batch is lower
5308 * than high. They are also related to pcp->count such that count is lower
5314 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
5316 * pcp->count field on the local CPU with interrupts disabled.
5325 WRITE_ONCE(pcp->batch, batch); in pageset_update()
5326 WRITE_ONCE(pcp->high, high); in pageset_update()
5336 spin_lock_init(&pcp->lock); in per_cpu_pages_init()
5338 INIT_LIST_HEAD(&pcp->lists[pindex]); in per_cpu_pages_init()
5346 pcp->high = BOOT_PAGESET_HIGH; in per_cpu_pages_init()
5347 pcp->batch = BOOT_PAGESET_BATCH; in per_cpu_pages_init()
5348 pcp->free_factor = 0; in per_cpu_pages_init()
5358 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in __zone_set_pageset_high_and_batch()
5364 * Calculate and set new high and batch values for all per-cpu pagesets of a
5374 if (zone->pageset_high == new_high && in zone_set_pageset_high_and_batch()
5375 zone->pageset_batch == new_batch) in zone_set_pageset_high_and_batch()
5378 zone->pageset_high = new_high; in zone_set_pageset_high_and_batch()
5379 zone->pageset_batch = new_batch; in zone_set_pageset_high_and_batch()
5390 zone->per_cpu_zonestats = alloc_percpu(struct per_cpu_zonestat); in setup_zone_pageset()
5392 zone->per_cpu_pageset = alloc_percpu(struct per_cpu_pages); in setup_zone_pageset()
5397 pcp = per_cpu_ptr(zone->per_cpu_pageset, cpu); in setup_zone_pageset()
5398 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in setup_zone_pageset()
5438 memset(pzstats->vm_numa_event, 0, in setup_per_cpu_pageset()
5439 sizeof(pzstats->vm_numa_event)); in setup_per_cpu_pageset()
5444 pgdat->per_cpu_nodestats = in setup_per_cpu_pageset()
5455 zone->per_cpu_pageset = &boot_pageset; in zone_pcp_init()
5456 zone->per_cpu_zonestats = &boot_zonestats; in zone_pcp_init()
5457 zone->pageset_high = BOOT_PAGESET_HIGH; in zone_pcp_init()
5458 zone->pageset_batch = BOOT_PAGESET_BATCH; in zone_pcp_init()
5461 pr_debug(" %s zone: %lu pages, LIFO batch:%u\n", zone->name, in zone_pcp_init()
5462 zone->present_pages, zone_batchsize(zone)); in zone_pcp_init()
5467 atomic_long_add(count, &page_zone(page)->managed_pages); in adjust_managed_page_count()
5496 * Perform a kasan-unchecked memset() since this memory in free_reserved_area()
5564 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
5575 pgdat->totalreserve_pages = 0; in calculate_totalreserve_pages()
5578 struct zone *zone = pgdat->node_zones + i; in calculate_totalreserve_pages()
5584 if (zone->lowmem_reserve[j] > max) in calculate_totalreserve_pages()
5585 max = zone->lowmem_reserve[j]; in calculate_totalreserve_pages()
5594 pgdat->totalreserve_pages += max; in calculate_totalreserve_pages()
5603 * setup_per_zone_lowmem_reserve - called whenever
5614 for (i = 0; i < MAX_NR_ZONES - 1; i++) { in setup_per_zone_lowmem_reserve()
5615 struct zone *zone = &pgdat->node_zones[i]; in setup_per_zone_lowmem_reserve()
5621 struct zone *upper_zone = &pgdat->node_zones[j]; in setup_per_zone_lowmem_reserve()
5626 zone->lowmem_reserve[j] = 0; in setup_per_zone_lowmem_reserve()
5628 zone->lowmem_reserve[j] = managed_pages / ratio; in setup_per_zone_lowmem_reserve()
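The line above gives each lower zone a reserve proportional to the memory sitting in the zones above it, scaled by the lower zone's entry in sysctl_lowmem_reserve_ratio. A toy version with invented zone sizes and ratios, not the kernel code:

#include <stdio.h>

int main(void)
{
	const char *names[] = { "DMA", "DMA32", "Normal" };
	unsigned long managed[] = { 3840, 262144, 1835008 };	/* made-up, in pages */
	unsigned long ratio[]   = { 256, 256, 32 };		/* per-zone divisor */

	for (int i = 0; i < 2; i++) {				/* each lower zone */
		unsigned long upper_pages = 0;

		for (int j = i + 1; j < 3; j++) {		/* the zones above it */
			upper_pages += managed[j];
			printf("%s reserves %lu pages against %s allocations\n",
			       names[i], upper_pages / ratio[i], names[j]);
		}
	}
	return 0;
}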
5639 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); in __setup_per_zone_wmarks()
5653 spin_lock_irqsave(&zone->lock, flags); in __setup_per_zone_wmarks()
5662 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN) in __setup_per_zone_wmarks()
5670 zone->_watermark[WMARK_MIN] = min_pages; in __setup_per_zone_wmarks()
5676 zone->_watermark[WMARK_MIN] = tmp; in __setup_per_zone_wmarks()
5688 zone->watermark_boost = 0; in __setup_per_zone_wmarks()
5689 zone->_watermark[WMARK_LOW] = min_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
5690 zone->_watermark[WMARK_HIGH] = low_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
5691 zone->_watermark[WMARK_PROMO] = high_wmark_pages(zone) + tmp; in __setup_per_zone_wmarks()
5693 spin_unlock_irqrestore(&zone->lock, flags); in __setup_per_zone_wmarks()
5701 * setup_per_zone_wmarks - called when min_free_kbytes changes
5702 * or when memory is hot-{added|removed}
5783 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so in postcore_initcall()
5825 pgdat->min_unmapped_pages = 0; in setup_min_unmapped_ratio()
5828 zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) * in setup_min_unmapped_ratio()
5853 pgdat->min_slab_pages = 0; in setup_min_slab_ratio()
5856 zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) * in setup_min_slab_ratio()
5876 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
5901 * percpu_pagelist_high_fraction - changes the pcp->high for each zone on each
5923 ret = -EINVAL; in percpu_pagelist_high_fraction_sysctl_handler()
6015 /* Usage: See admin-guide/dynamic-debug-howto.rst */
6039 .nid = zone_to_nid(cc->zone), in __alloc_contig_migrate_range()
6045 while (pfn < end || !list_empty(&cc->migratepages)) { in __alloc_contig_migrate_range()
6047 ret = -EINTR; in __alloc_contig_migrate_range()
6051 if (list_empty(&cc->migratepages)) { in __alloc_contig_migrate_range()
6052 cc->nr_migratepages = 0; in __alloc_contig_migrate_range()
6054 if (ret && ret != -EAGAIN) in __alloc_contig_migrate_range()
6056 pfn = cc->migrate_pfn; in __alloc_contig_migrate_range()
6059 ret = -EBUSY; in __alloc_contig_migrate_range()
6063 nr_reclaimed = reclaim_clean_pages_from_list(cc->zone, in __alloc_contig_migrate_range()
6064 &cc->migratepages); in __alloc_contig_migrate_range()
6065 cc->nr_migratepages -= nr_reclaimed; in __alloc_contig_migrate_range()
6067 ret = migrate_pages(&cc->migratepages, alloc_migration_target, in __alloc_contig_migrate_range()
6068 NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE, NULL); in __alloc_contig_migrate_range()
6071 * On -ENOMEM, migrate_pages() bails out right away. It is pointless in __alloc_contig_migrate_range()
6074 if (ret == -ENOMEM) in __alloc_contig_migrate_range()
6080 if (!(cc->gfp_mask & __GFP_NOWARN) && ret == -EBUSY) in __alloc_contig_migrate_range()
6081 alloc_contig_dump_pages(&cc->migratepages); in __alloc_contig_migrate_range()
6082 putback_movable_pages(&cc->migratepages); in __alloc_contig_migrate_range()
6089 * alloc_contig_range() -- tries to allocate given range of pages
6091 * @end: one-past-the-last PFN to allocate
6118 .order = -1, in alloc_contig_range()
6143 * This lets us mark the pageblocks back as in alloc_contig_range()
6156 * In case of -EBUSY, we'd like to know which page causes problem. in alloc_contig_range()
6163 * -EBUSY is not accidentally used or returned to caller. in alloc_contig_range()
6166 if (ret && ret != -EBUSY) in alloc_contig_range()
6183 * We don't have to hold zone->lock here because the pages are in alloc_contig_range()
6212 ret = -EBUSY; in alloc_contig_range()
6219 ret = -EBUSY; in alloc_contig_range()
6225 free_contig_range(outer_start, start - outer_start); in alloc_contig_range()
6227 free_contig_range(end, outer_end - end); in alloc_contig_range()
6270 unsigned long last_pfn = start_pfn + nr_pages - 1; in zone_spans_last_pfn()
6276 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
6307 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
6309 pfn = ALIGN(zone->zone_start_pfn, nr_pages); in alloc_contig_pages()
6319 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
6324 spin_lock_irqsave(&zone->lock, flags); in alloc_contig_pages()
6328 spin_unlock_irqrestore(&zone->lock, flags); in alloc_contig_pages()
6338 for (; nr_pages--; pfn++) { in free_contig_range()
6365 __zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch); in zone_pcp_enable()
6374 if (zone->per_cpu_pageset != &boot_pageset) { in zone_pcp_reset()
6376 pzstats = per_cpu_ptr(zone->per_cpu_zonestats, cpu); in zone_pcp_reset()
6379 free_percpu(zone->per_cpu_pageset); in zone_pcp_reset()
6380 zone->per_cpu_pageset = &boot_pageset; in zone_pcp_reset()
6381 if (zone->per_cpu_zonestats != &boot_zonestats) { in zone_pcp_reset()
6382 free_percpu(zone->per_cpu_zonestats); in zone_pcp_reset()
6383 zone->per_cpu_zonestats = &boot_zonestats; in zone_pcp_reset()
6403 spin_lock_irqsave(&zone->lock, flags); in __offline_isolated_pages()
6431 spin_unlock_irqrestore(&zone->lock, flags); in __offline_isolated_pages()
6444 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
6457 * Break down a higher-order page in sub-pages, and keep our target out of
6468 high--; in break_down_buddy_pages()
6501 spin_lock_irqsave(&zone->lock, flags); in take_page_off_buddy()
6503 struct page *page_head = page - (pfn & ((1 << order) - 1)); in take_page_off_buddy()
6516 __mod_zone_freepage_state(zone, -1, migratetype); in take_page_off_buddy()
6523 spin_unlock_irqrestore(&zone->lock, flags); in take_page_off_buddy()
6538 spin_lock_irqsave(&zone->lock, flags); in put_page_back_buddy()
6546 spin_unlock_irqrestore(&zone->lock, flags); in put_page_back_buddy()
6558 struct zone *zone = &pgdat->node_zones[ZONE_DMA]; in has_managed_dma()
6583 return -EINVAL; in accept_memory_parse()
6609 if (list_empty(&zone->unaccepted_pages)) in try_to_accept_memory_one()
6612 spin_lock_irqsave(&zone->lock, flags); in try_to_accept_memory_one()
6613 page = list_first_entry_or_null(&zone->unaccepted_pages, in try_to_accept_memory_one()
6616 spin_unlock_irqrestore(&zone->lock, flags); in try_to_accept_memory_one()
6620 list_del(&page->lru); in try_to_accept_memory_one()
6621 last = list_empty(&zone->unaccepted_pages); in try_to_accept_memory_one()
6623 __mod_zone_freepage_state(zone, -MAX_ORDER_NR_PAGES, MIGRATE_MOVABLE); in try_to_accept_memory_one()
6624 __mod_zone_page_state(zone, NR_UNACCEPTED, -MAX_ORDER_NR_PAGES); in try_to_accept_memory_one()
6625 spin_unlock_irqrestore(&zone->lock, flags); in try_to_accept_memory_one()
6643 to_accept = high_wmark_pages(zone) - in try_to_accept_memory()
6644 (zone_page_state(zone, NR_FREE_PAGES) - in try_to_accept_memory()
6652 to_accept -= MAX_ORDER_NR_PAGES; in try_to_accept_memory()
6672 spin_lock_irqsave(&zone->lock, flags); in __free_unaccepted()
6673 first = list_empty(&zone->unaccepted_pages); in __free_unaccepted()
6674 list_add_tail(&page->lru, &zone->unaccepted_pages); in __free_unaccepted()
6677 spin_unlock_irqrestore(&zone->lock, flags); in __free_unaccepted()