Lines matching references to 'order' (page allocator cross-reference; each entry gives the source line number, the matching line, and the enclosing function).
194 static void __free_pages_ok(struct page *page, unsigned int order);
569 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
572 int nr_pages = 1 << order; in prep_compound_page()
575 set_compound_order(page, order); in prep_compound_page()
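
The prep_compound_page() entries (lines 569-575) rest on the identity that recurs through this whole listing: an order-n block is 1 << n contiguous pages, i.e. PAGE_SIZE << n bytes. A minimal userspace sketch of that arithmetic, assuming the common 4 KiB PAGE_SIZE:

/* Sketch only: PAGE_SIZE = 4096 is an assumption, not read from the kernel. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        for (unsigned int order = 0; order <= 10; order++)
                printf("order %2u -> %4lu pages, %8lu KiB\n",
                       order, 1UL << order, (PAGE_SIZE << order) >> 10);
        return 0;
}
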
644 unsigned int order, int migratetype) in set_page_guard() argument
651 if (order >= debug_guardpage_minorder()) in set_page_guard()
661 set_page_private(page, order); in set_page_guard()
663 __mod_zone_freepage_state(zone, -(1 << order), migratetype); in set_page_guard()
669 unsigned int order, int migratetype) in clear_page_guard() argument
684 __mod_zone_freepage_state(zone, (1 << order), migratetype); in clear_page_guard()
689 unsigned int order, int migratetype) { return false; } in set_page_guard() argument
691 unsigned int order, int migratetype) {} in clear_page_guard() argument
694 static inline void set_page_order(struct page *page, unsigned int order) in set_page_order() argument
696 set_page_private(page, order); in set_page_order()
720 unsigned int order) in page_is_buddy() argument
722 if (page_is_guard(buddy) && page_order(buddy) == order) { in page_is_buddy()
731 if (PageBuddy(buddy) && page_order(buddy) == order) { in page_is_buddy()
773 struct zone *zone, unsigned int order, in __free_one_page() argument
788 __mod_zone_freepage_state(zone, 1 << order, migratetype); in __free_one_page()
790 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); in __free_one_page()
794 while (order < max_order - 1) { in __free_one_page()
795 buddy_pfn = __find_buddy_pfn(pfn, order); in __free_one_page()
800 if (!page_is_buddy(page, buddy, order)) in __free_one_page()
807 clear_page_guard(zone, buddy, order, migratetype); in __free_one_page()
810 zone->free_area[order].nr_free--; in __free_one_page()
816 order++; in __free_one_page()
830 buddy_pfn = __find_buddy_pfn(pfn, order); in __free_one_page()
844 set_page_order(page, order); in __free_one_page()
854 if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) { in __free_one_page()
858 buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1); in __free_one_page()
861 page_is_buddy(higher_page, higher_buddy, order + 1)) { in __free_one_page()
863 &zone->free_area[order].free_list[migratetype]); in __free_one_page()
868 list_add(&page->lru, &zone->free_area[order].free_list[migratetype]); in __free_one_page()
870 zone->free_area[order].nr_free++; in __free_one_page()
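
The merge loop at lines 794-816 is the core of buddy coalescing: __find_buddy_pfn() computes the sibling block's pfn as pfn ^ (1 << order), and ANDing the two pfns gives the head of the merged order+1 block. A self-contained sketch; find_buddy_pfn here is a local stand-in for the kernel helper:

/* Sketch of the buddy-pfn arithmetic driving the merge loop above. */
#include <stdio.h>

static unsigned long find_buddy_pfn(unsigned long pfn, unsigned int order)
{
        return pfn ^ (1UL << order);    /* flip the bit separating the halves */
}

int main(void)
{
        unsigned long pfn = 0x1c00;     /* head of a naturally aligned block */

        for (unsigned int order = 0; order < 4; order++) {
                unsigned long buddy_pfn = find_buddy_pfn(pfn, order);
                unsigned long combined_pfn = buddy_pfn & pfn; /* merged head */

                printf("order %u: pfn %#lx + buddy %#lx -> order %u at %#lx\n",
                       order, pfn, buddy_pfn, order + 1, combined_pfn);
                pfn = combined_pfn;
        }
        return 0;
}

The XOR works because a free block of order n is naturally aligned, so its buddy differs from it in exactly bit n of the pfn.
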
981 unsigned int order, bool check_free) in free_pages_prepare() argument
987 trace_mm_page_free(page, order); in free_pages_prepare()
993 if (unlikely(order)) { in free_pages_prepare()
997 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
1001 for (i = 1; i < (1 << order); i++) { in free_pages_prepare()
1014 memcg_kmem_uncharge(page, order); in free_pages_prepare()
1022 reset_page_owner(page, order); in free_pages_prepare()
1026 PAGE_SIZE << order); in free_pages_prepare()
1028 PAGE_SIZE << order); in free_pages_prepare()
1030 arch_free_page(page, order); in free_pages_prepare()
1031 kernel_poison_pages(page, 1 << order, 0); in free_pages_prepare()
1032 kernel_map_pages(page, 1 << order, 0); in free_pages_prepare()
1033 kasan_free_pages(page, order); in free_pages_prepare()
1159 unsigned int order, in free_one_page() argument
1167 __free_one_page(page, pfn, zone, order, migratetype); in free_one_page()
1239 static void __free_pages_ok(struct page *page, unsigned int order) in __free_pages_ok() argument
1245 if (!free_pages_prepare(page, order, true)) in __free_pages_ok()
1250 __count_vm_events(PGFREE, 1 << order); in __free_pages_ok()
1251 free_one_page(page_zone(page), page, pfn, order, migratetype); in __free_pages_ok()
1255 static void __init __free_pages_boot_core(struct page *page, unsigned int order) in __free_pages_boot_core() argument
1257 unsigned int nr_pages = 1 << order; in __free_pages_boot_core()
1272 __free_pages(page, order); in __free_pages_boot_core()
1330 unsigned int order) in __free_pages_bootmem() argument
1334 return __free_pages_boot_core(page, order); in __free_pages_bootmem()
1619 deferred_grow_zone(struct zone *zone, unsigned int order) in deferred_grow_zone() argument
1624 unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION); in deferred_grow_zone()
1701 _deferred_grow_zone(struct zone *zone, unsigned int order) in _deferred_grow_zone() argument
1703 return deferred_grow_zone(zone, order); in _deferred_grow_zone()
1883 static bool check_new_pages(struct page *page, unsigned int order) in check_new_pages() argument
1886 for (i = 0; i < (1 << order); i++) { in check_new_pages()
1896 inline void post_alloc_hook(struct page *page, unsigned int order, in post_alloc_hook() argument
1902 arch_alloc_page(page, order); in post_alloc_hook()
1903 kernel_map_pages(page, 1 << order, 1); in post_alloc_hook()
1904 kernel_poison_pages(page, 1 << order, 1); in post_alloc_hook()
1905 kasan_alloc_pages(page, order); in post_alloc_hook()
1906 set_page_owner(page, order, gfp_flags); in post_alloc_hook()
1909 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
1914 post_alloc_hook(page, order, gfp_flags); in prep_new_page()
1917 for (i = 0; i < (1 << order); i++) in prep_new_page()
1920 if (order && (gfp_flags & __GFP_COMP)) in prep_new_page()
1921 prep_compound_page(page, order); in prep_new_page()
1940 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest() argument
1948 for (current_order = order; current_order < MAX_ORDER; ++current_order) { in __rmqueue_smallest()
1957 expand(zone, page, order, current_order, area, migratetype); in __rmqueue_smallest()
1984 unsigned int order) in __rmqueue_cma_fallback() argument
1986 return __rmqueue_smallest(zone, order, MIGRATE_CMA); in __rmqueue_cma_fallback()
1990 unsigned int order) { return NULL; } in __rmqueue_cma_fallback() argument
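
__rmqueue_smallest() (line 1948) scans upward from the requested order until some free list is non-empty, and expand() (line 1957) returns the unused halves to the lower-order free lists. A hypothetical standalone sketch of that split, printing the buddies the kernel would re-list:

/* expand_sketch is a made-up name mirroring the kernel's expand(). */
#include <stdio.h>

static void expand_sketch(unsigned long pfn, unsigned int low, unsigned int high)
{
        unsigned long size = 1UL << high;

        while (high > low) {
                high--;
                size >>= 1;
                /* the upper half becomes a free block of order 'high' */
                printf("re-list buddy at pfn %#lx, order %u\n",
                       pfn + size, high);
        }
}

int main(void)
{
        /* found an order-5 block although only order 2 was requested */
        expand_sketch(0x2000, 2, 5);
        return 0;
}
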
2003 unsigned int order; in move_freepages() local
2045 order = page_order(page); in move_freepages()
2047 &zone->free_area[order].free_list[migratetype]); in move_freepages()
2048 page += 1 << order; in move_freepages()
2049 pages_moved += 1 << order; in move_freepages()
2100 static bool can_steal_fallback(unsigned int order, int start_mt) in can_steal_fallback() argument
2109 if (order >= pageblock_order) in can_steal_fallback()
2112 if (order >= pageblock_order / 2 || in can_steal_fallback()
2205 int find_suitable_fallback(struct free_area *area, unsigned int order, in find_suitable_fallback() argument
2223 if (can_steal_fallback(order, migratetype)) in find_suitable_fallback()
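
The test at lines 2109-2112 decides whether a fallback allocation is allowed to steal a whole pageblock. A simplified sketch, assuming pageblock_order = 9 (the usual value with 2 MiB hugepages) and omitting the migratetype conditions the real can_steal_fallback() also accepts:

/* Assumption: PAGEBLOCK_ORDER = 9; the real check also lets
 * MIGRATE_RECLAIMABLE/MIGRATE_UNMOVABLE requests steal. */
#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER 9

static bool can_steal_sketch(unsigned int order)
{
        if (order >= PAGEBLOCK_ORDER)           /* whole block anyway */
                return true;
        return order >= PAGEBLOCK_ORDER / 2;    /* big enough to justify it */
}

int main(void)
{
        for (unsigned int order = 0; order < 10; order++)
                printf("order %u: %s\n", order,
                       can_steal_sketch(order) ? "may steal" : "no steal");
        return 0;
}
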
2290 int order; in unreserve_highatomic_pageblock() local
2304 for (order = 0; order < MAX_ORDER; order++) { in unreserve_highatomic_pageblock()
2305 struct free_area *area = &(zone->free_area[order]); in unreserve_highatomic_pageblock()
2367 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype) in __rmqueue_fallback() argument
2380 for (current_order = MAX_ORDER - 1; current_order >= order; in __rmqueue_fallback()
2397 && current_order > order) in __rmqueue_fallback()
2406 for (current_order = order; current_order < MAX_ORDER; in __rmqueue_fallback()
2427 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
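
Note the two search directions in __rmqueue_fallback(): the main loop (line 2380) walks down from MAX_ORDER - 1, preferring the largest stealable block to limit long-term fragmentation, while the bottom-up loop (line 2406) serves the movable-request case guarded at line 2397. A sketch of the top-down preference over a made-up free-list table:

#include <stdio.h>

#define MAX_ORDER 11

int main(void)
{
        /* free blocks of another migratetype, indexed by order (made up) */
        int nr_free[MAX_ORDER] = { 9, 4, 0, 2, 0, 0, 1, 0, 0, 0, 0 };
        int order = 2;

        for (int current_order = MAX_ORDER - 1; current_order >= order;
                        current_order--) {
                if (nr_free[current_order]) {
                        printf("steal an order-%d block for an order-%d request\n",
                               current_order, order);
                        break;
                }
        }
        return 0;
}
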
2439 __rmqueue(struct zone *zone, unsigned int order, int migratetype) in __rmqueue() argument
2444 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2447 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2449 if (!page && __rmqueue_fallback(zone, order, migratetype)) in __rmqueue()
2453 trace_mm_page_alloc_zone_locked(page, order, migratetype); in __rmqueue()
2462 static int rmqueue_bulk(struct zone *zone, unsigned int order, in rmqueue_bulk() argument
2470 struct page *page = __rmqueue(zone, order, migratetype); in rmqueue_bulk()
2491 -(1 << order)); in rmqueue_bulk()
2500 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); in rmqueue_bulk()
2684 unsigned int order, t; in mark_free_pages() local
2709 for_each_migratetype_order(order, t) { in mark_free_pages()
2711 &zone->free_area[order].free_list[t], lru) { in mark_free_pages()
2715 for (i = 0; i < (1UL << order); i++) { in mark_free_pages()
2835 void split_page(struct page *page, unsigned int order) in split_page() argument
2842 for (i = 1; i < (1 << order); i++) in split_page()
2844 split_page_owner(page, order); in split_page()
2848 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
2866 watermark = min_wmark_pages(zone) + (1UL << order); in __isolate_free_page()
2870 __mod_zone_freepage_state(zone, -(1UL << order), mt); in __isolate_free_page()
2875 zone->free_area[order].nr_free--; in __isolate_free_page()
2882 if (order >= pageblock_order - 1) { in __isolate_free_page()
2883 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
2894 return 1UL << order; in __isolate_free_page()
2950 struct zone *zone, unsigned int order, in rmqueue_pcplist() argument
2963 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_pcplist()
2975 struct zone *zone, unsigned int order, in rmqueue() argument
2982 if (likely(order == 0)) { in rmqueue()
2983 page = rmqueue_pcplist(preferred_zone, zone, order, in rmqueue()
2992 WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); in rmqueue()
2998 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue()
3000 trace_mm_page_alloc_zone_locked(page, order, migratetype); in rmqueue()
3003 page = __rmqueue(zone, order, migratetype); in rmqueue()
3004 } while (page && check_new_pages(page, order)); in rmqueue()
3008 __mod_zone_freepage_state(zone, -(1 << order), in rmqueue()
3011 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue()
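
rmqueue() (lines 2975-3011) splits into a fast path and the buddy core: order-0 requests (line 2982) come from per-CPU lists filled in batches by rmqueue_bulk() (line 2462); everything larger takes zone->lock and __rmqueue(). A toy model of the batching idea, with a hypothetical refill of 4 pages at a time:

/* Sketch only: BATCH, next_pfn and the helpers are all invented. */
#include <stdio.h>

#define BATCH 4

static unsigned long next_pfn = 0x1000; /* stands in for the buddy core */
static unsigned long pcp[BATCH];
static int pcp_count;

static void refill(void)
{
        /* one "zone->lock" acquisition covers BATCH pages */
        for (pcp_count = 0; pcp_count < BATCH; pcp_count++)
                pcp[pcp_count] = next_pfn++;
}

static unsigned long alloc_order0(void)
{
        if (!pcp_count)
                refill();
        return pcp[--pcp_count];
}

int main(void)
{
        for (int i = 0; i < 6; i++)
                printf("got pfn %#lx\n", alloc_order0());
        return 0;
}
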
3045 static bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
3047 if (order < fail_page_alloc.min_order) in should_fail_alloc_page()
3057 return should_fail(&fail_page_alloc.attr, 1 << order); in should_fail_alloc_page()
3095 static inline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) in should_fail_alloc_page() argument
3108 bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in __zone_watermark_ok() argument
3117 free_pages -= (1 << order) - 1; in __zone_watermark_ok()
3158 if (!order) in __zone_watermark_ok()
3162 for (o = order; o < MAX_ORDER; o++) { in __zone_watermark_ok()
3187 bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, in zone_watermark_ok() argument
3190 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, in zone_watermark_ok()
3194 static inline bool zone_watermark_fast(struct zone *z, unsigned int order, in zone_watermark_fast() argument
3213 if (!order && (free_pages - cma_pages) > mark + z->lowmem_reserve[classzone_idx]) in zone_watermark_fast()
3216 return __zone_watermark_ok(z, order, mark, classzone_idx, alloc_flags, in zone_watermark_fast()
3220 bool zone_watermark_ok_safe(struct zone *z, unsigned int order, in zone_watermark_ok_safe() argument
3228 return __zone_watermark_ok(z, order, mark, classzone_idx, 0, in zone_watermark_ok_safe()
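
__zone_watermark_ok() first discounts the pages the request itself would consume (line 3117), compares against the watermark plus the lowmem reserve, and then, for order > 0, requires a free block of at least the requested order (lines 3158-3162). A simplified sketch that ignores the per-migratetype lists, highatomic reserves and CMA handling of the real check:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ORDER 11

static bool watermark_ok(long free_pages, unsigned int order,
                         long mark, long lowmem_reserve,
                         const unsigned long nr_free[MAX_ORDER])
{
        /* may go negative for huge requests - that is fine */
        free_pages -= (1L << order) - 1;
        if (free_pages <= mark + lowmem_reserve)
                return false;
        if (!order)                             /* any single page will do */
                return true;
        for (unsigned int o = order; o < MAX_ORDER; o++)
                if (nr_free[o])                 /* a large-enough block exists */
                        return true;
        return false;
}

int main(void)
{
        unsigned long nr_free[MAX_ORDER] = { 512, 128, 16, 4, 1 };

        printf("order-3 ok: %d\n", watermark_ok(1024, 3, 64, 32, nr_free));
        printf("order-6 ok: %d\n", watermark_ok(1024, 6, 64, 32, nr_free));
        return 0;
}
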
3250 get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags, in get_page_from_freelist() argument
3300 if (!zone_watermark_fast(zone, order, mark, in get_page_from_freelist()
3310 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3323 ret = node_reclaim(zone->zone_pgdat, gfp_mask, order); in get_page_from_freelist()
3333 if (zone_watermark_ok(zone, order, mark, in get_page_from_freelist()
3342 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3345 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
3351 if (unlikely(order && (alloc_flags & ALLOC_HARDER))) in get_page_from_freelist()
3352 reserve_highatomic_pageblock(page, zone, order); in get_page_from_freelist()
3359 if (_deferred_grow_zone(zone, order)) in get_page_from_freelist()
3431 __alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order, in __alloc_pages_cpuset_fallback() argument
3437 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3444 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3451 __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, in __alloc_pages_may_oom() argument
3459 .order = order, in __alloc_pages_may_oom()
3483 ~__GFP_DIRECT_RECLAIM, order, in __alloc_pages_may_oom()
3492 if (order > PAGE_ALLOC_COSTLY_ORDER) in __alloc_pages_may_oom()
3530 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
3547 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
3554 if (!order) in __alloc_pages_direct_compact()
3558 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, in __alloc_pages_direct_compact()
3571 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
3577 compaction_defer_reset(zone, order, true); in __alloc_pages_direct_compact()
3594 should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, in should_compact_retry() argument
3605 if (!order) in should_compact_retry()
3626 ret = compaction_zonelist_suitable(ac, order, alloc_flags); in should_compact_retry()
3638 if (order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
3650 min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? in should_compact_retry()
3659 trace_compact_retry(order, priority, compact_result, retries, max_retries, ret); in should_compact_retry()
3664 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_compact() argument
3673 should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, in should_compact_retry() argument
3681 if (!order || order > PAGE_ALLOC_COSTLY_ORDER) in should_compact_retry()
3753 __perform_reclaim(gfp_t gfp_mask, unsigned int order, in __perform_reclaim() argument
3769 progress = try_to_free_pages(ac->zonelist, order, gfp_mask, in __perform_reclaim()
3783 __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, in __alloc_pages_direct_reclaim() argument
3790 *did_some_progress = __perform_reclaim(gfp_mask, order, ac); in __alloc_pages_direct_reclaim()
3795 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
3812 static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask, in wake_all_kswapds() argument
3823 wakeup_kswapd(zone, gfp_mask, order, high_zoneidx); in wake_all_kswapds()
3919 should_reclaim_retry(gfp_t gfp_mask, unsigned order, in should_reclaim_retry() argument
3931 if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER) in should_reclaim_retry()
3965 wmark = __zone_watermark_ok(zone, order, min_wmark, in should_reclaim_retry()
3967 trace_reclaim_retry_zone(z, order, reclaimable, in should_reclaim_retry()
4043 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, in __alloc_pages_slowpath() argument
4047 const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER; in __alloc_pages_slowpath()
4064 if (order >= MAX_ORDER) { in __alloc_pages_slowpath()
4102 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4108 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4123 (order > 0 && ac->migratetype != MIGRATE_MOVABLE)) in __alloc_pages_slowpath()
4125 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
4160 wake_all_kswapds(order, gfp_mask, ac); in __alloc_pages_slowpath()
4178 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4191 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4197 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4213 if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags, in __alloc_pages_slowpath()
4224 should_compact_retry(ac, order, alloc_flags, in __alloc_pages_slowpath()
4235 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
4281 WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER); in __alloc_pages_slowpath()
4289 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac); in __alloc_pages_slowpath()
4298 "page allocation failure: order:%u", order); in __alloc_pages_slowpath()
4303 static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order, in prepare_alloc_pages() argument
4326 if (should_fail_alloc_page(gfp_mask, order)) in prepare_alloc_pages()
4354 __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid, in __alloc_pages_nodemask() argument
4364 if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags)) in __alloc_pages_nodemask()
4370 page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac); in __alloc_pages_nodemask()
4390 page = __alloc_pages_slowpath(alloc_mask, order, &ac); in __alloc_pages_nodemask()
4394 unlikely(memcg_kmem_charge(page, gfp_mask, order) != 0)) { in __alloc_pages_nodemask()
4395 __free_pages(page, order); in __alloc_pages_nodemask()
4399 trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype); in __alloc_pages_nodemask()
4410 unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order) in __get_free_pages() argument
4414 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); in __get_free_pages()
4427 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
4430 if (order == 0) in __free_pages()
4433 __free_pages_ok(page, order); in __free_pages()
4439 void free_pages(unsigned long addr, unsigned int order) in free_pages() argument
4443 __free_pages(virt_to_page((void *)addr), order); in free_pages()
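
The free path mirrors the allocation fast path: at line 4430, __free_pages() sends order-0 pages to the per-CPU lists (free_unref_page() in kernels of this vintage) and larger blocks to __free_pages_ok(). A stub sketch of that dispatch; both *_sketch helpers stand in for the kernel calls:

#include <stdio.h>

static void free_unref_page_sketch(void)
{
        puts("order 0 -> per-CPU free list");
}

static void free_pages_ok_sketch(unsigned int order)
{
        printf("order %u -> __free_pages_ok() buddy path\n", order);
}

static void free_dispatch(unsigned int order)
{
        if (order == 0)
                free_unref_page_sketch();
        else
                free_pages_ok_sketch(order);
}

int main(void)
{
        free_dispatch(0);
        free_dispatch(3);
        return 0;
}
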
4486 unsigned int order = compound_order(page); in __page_frag_cache_drain() local
4488 if (order == 0) in __page_frag_cache_drain()
4491 __free_pages_ok(page, order); in __page_frag_cache_drain()
4562 static void *make_alloc_exact(unsigned long addr, unsigned int order, in make_alloc_exact() argument
4566 unsigned long alloc_end = addr + (PAGE_SIZE << order); in make_alloc_exact()
4569 split_page(virt_to_page((void *)addr), order); in make_alloc_exact()
4593 unsigned int order = get_order(size); in alloc_pages_exact() local
4596 addr = __get_free_pages(gfp_mask, order); in alloc_pages_exact()
4597 return make_alloc_exact(addr, order, size); in alloc_pages_exact()
4613 unsigned int order = get_order(size); in alloc_pages_exact_nid() local
4614 struct page *p = alloc_pages_node(nid, gfp_mask, order); in alloc_pages_exact_nid()
4617 return make_alloc_exact((unsigned long)page_address(p), order, size); in alloc_pages_exact_nid()
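
alloc_pages_exact() (lines 4593-4597) rounds the request up to a power-of-two order, and make_alloc_exact() then splits the block and frees every tail page past the requested size. A sketch of the arithmetic only; get_order_sketch is a hypothetical stand-in for get_order() and nothing is really allocated:

/* Assumption: 4 KiB pages (PAGE_SHIFT = 12). */
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

static unsigned int get_order_sketch(unsigned long size)
{
        unsigned int order = 0;

        while ((PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        unsigned long size = 5 * PAGE_SIZE;          /* wants 5 pages */
        unsigned int order = get_order_sketch(size); /* rounds up to order 3 */
        unsigned long used = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        printf("order %u (%lu pages) allocated, tail of %lu pages freed\n",
               order, 1UL << order, (1UL << order) - used);
        return 0;
}
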
5003 unsigned int order; in show_free_areas() local
5013 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
5014 struct free_area *area = &zone->free_area[order]; in show_free_areas()
5017 nr[order] = area->nr_free; in show_free_areas()
5018 total += nr[order] << order; in show_free_areas()
5020 types[order] = 0; in show_free_areas()
5023 types[order] |= 1 << type; in show_free_areas()
5027 for (order = 0; order < MAX_ORDER; order++) { in show_free_areas()
5029 nr[order], K(1UL) << order); in show_free_areas()
5030 if (nr[order]) in show_free_areas()
5031 show_migration_types(types[order]); in show_free_areas()
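
The totals at lines 5017-5018 convert block counts to pages: free_area[order].nr_free counts order-sized blocks, so each contributes nr[order] << order pages. A sketch with dummy counts, assuming the default MAX_ORDER of 11:

#include <stdio.h>

#define MAX_ORDER 11

int main(void)
{
        unsigned long nr[MAX_ORDER] = { 300, 120, 40, 10, 2 }; /* made up */
        unsigned long total = 0;

        for (unsigned int order = 0; order < MAX_ORDER; order++)
                total += nr[order] << order;
        printf("total free pages: %lu\n", total); /* 300+240+160+80+32 = 812 */
        return 0;
}
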
5547 unsigned int order, t; in zone_init_free_lists() local
5548 for_each_migratetype_order(order, t) { in zone_init_free_lists()
5549 INIT_LIST_HEAD(&zone->free_area[order].free_list[t]); in zone_init_free_lists()
5550 zone->free_area[order].nr_free = 0; in zone_init_free_lists()
6144 unsigned int order; in set_pageblock_order() local
6151 order = HUGETLB_PAGE_ORDER; in set_pageblock_order()
6153 order = MAX_ORDER - 1; in set_pageblock_order()
6160 pageblock_order = order; in set_pageblock_order()
7837 unsigned int order; in alloc_contig_range() local
7842 .order = -1, in alloc_contig_range()
7916 order = 0; in alloc_contig_range()
7919 if (++order >= MAX_ORDER) { in alloc_contig_range()
7923 outer_start &= ~0UL << order; in alloc_contig_range()
7927 order = page_order(pfn_to_page(outer_start)); in alloc_contig_range()
7935 if (outer_start + (1UL << order) <= start) in alloc_contig_range()
8025 unsigned int order, i; in __offline_isolated_pages() local
8056 order = page_order(page); in __offline_isolated_pages()
8059 pfn, 1 << order, end_pfn); in __offline_isolated_pages()
8063 zone->free_area[order].nr_free--; in __offline_isolated_pages()
8064 for (i = 0; i < (1 << order); i++) in __offline_isolated_pages()
8066 pfn += (1 << order); in __offline_isolated_pages()
8077 unsigned int order; in is_free_buddy_page() local
8080 for (order = 0; order < MAX_ORDER; order++) { in is_free_buddy_page()
8081 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
8083 if (PageBuddy(page_head) && page_order(page_head) >= order) in is_free_buddy_page()
8088 return order < MAX_ORDER; in is_free_buddy_page()
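
is_free_buddy_page() (lines 8080-8088) probes each order by masking the low order bits of the pfn (line 8081) to reach the head of the naturally aligned block that would contain the page, then asks whether that head is a free buddy of at least that order. A sketch of the alignment step:

#include <stdio.h>

int main(void)
{
        unsigned long pfn = 0x12f7;

        /* same arithmetic as page - (pfn & ((1 << order) - 1)) above */
        for (unsigned int order = 0; order < 5; order++)
                printf("order %u: head pfn %#lx\n",
                       order, pfn & ~((1UL << order) - 1));
        return 0;
}
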
8102 unsigned int order; in set_hwpoison_free_buddy_page() local
8106 for (order = 0; order < MAX_ORDER; order++) { in set_hwpoison_free_buddy_page()
8107 struct page *page_head = page - (pfn & ((1 << order) - 1)); in set_hwpoison_free_buddy_page()
8109 if (PageBuddy(page_head) && page_order(page_head) >= order) { in set_hwpoison_free_buddy_page()