Lines matching refs:page (references to the identifier 'page' in mm/page_alloc.c; the trailing 'argument' and 'local' tags mark lines where 'page' is declared as a function parameter or local variable)
215 static inline int get_pcppage_migratetype(struct page *page) in get_pcppage_migratetype() argument
217 return page->index; in get_pcppage_migratetype()
220 static inline void set_pcppage_migratetype(struct page *page, int migratetype) in set_pcppage_migratetype() argument
222 page->index = migratetype; in set_pcppage_migratetype()
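
Note: while a page sits on a per-CPU (pcp) free list, the two helpers above cache its pageblock migratetype in the otherwise-unused page->index, so the free fast path does not have to re-read the pageblock bitmap. A minimal userspace model of that caching trick (names are illustrative, not kernel API):

    #include <stdio.h>

    struct fake_page {
        unsigned long index;            /* unused while the page is on a pcp list */
    };

    static void set_cached_migratetype(struct fake_page *p, int mt)
    {
        p->index = (unsigned long)mt;   /* cache it when the page is queued */
    }

    static int get_cached_migratetype(const struct fake_page *p)
    {
        return (int)p->index;           /* read it back without a bitmap lookup */
    }

    int main(void)
    {
        struct fake_page p;
        set_cached_migratetype(&p, 1);  /* e.g. a movable page */
        printf("cached migratetype: %d\n", get_cached_migratetype(&p));
        return 0;
    }
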
229 static void __free_pages_ok(struct page *page, unsigned int order,
303 static bool page_contains_unaccepted(struct page *page, unsigned int order);
304 static void accept_page(struct page *page, unsigned int order);
307 static bool __free_unaccepted(struct page *page);
343 static inline unsigned long *get_pageblock_bitmap(const struct page *page, in get_pageblock_bitmap() argument
349 return page_zone(page)->pageblock_flags; in get_pageblock_bitmap()
353 static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn) in pfn_to_bitidx() argument
358 pfn = pfn - pageblock_start_pfn(page_zone(page)->zone_start_pfn); in pfn_to_bitidx()
371 unsigned long get_pfnblock_flags_mask(const struct page *page, in get_pfnblock_flags_mask() argument
378 bitmap = get_pageblock_bitmap(page, pfn); in get_pfnblock_flags_mask()
379 bitidx = pfn_to_bitidx(page, pfn); in get_pfnblock_flags_mask()
391 static __always_inline int get_pfnblock_migratetype(const struct page *page, in get_pfnblock_migratetype() argument
394 return get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK); in get_pfnblock_migratetype()
404 void set_pfnblock_flags_mask(struct page *page, unsigned long flags, in set_pfnblock_flags_mask() argument
415 bitmap = get_pageblock_bitmap(page, pfn); in set_pfnblock_flags_mask()
416 bitidx = pfn_to_bitidx(page, pfn); in set_pfnblock_flags_mask()
420 VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page); in set_pfnblock_flags_mask()
430 void set_pageblock_migratetype(struct page *page, int migratetype) in set_pageblock_migratetype() argument
436 set_pfnblock_flags_mask(page, (unsigned long)migratetype, in set_pageblock_migratetype()
437 page_to_pfn(page), MIGRATETYPE_MASK); in set_pageblock_migratetype()
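
Note: get_pageblock_bitmap(), pfn_to_bitidx(), get_pfnblock_flags_mask() and set_pfnblock_flags_mask() above implement a packed bitfield that stores a few bits per pageblock in zone->pageblock_flags (the kernel's setter updates the containing word with a cmpxchg loop to stay atomic). A userspace sketch of the indexing and masking, with assumed constants:

    #include <stdio.h>

    #define PAGEBLOCK_ORDER 9                    /* assumed: 512 pages per block */
    #define NR_PB_BITS      4                    /* bits stored per pageblock */
    #define BITS_PER_LONG   (8 * sizeof(unsigned long))

    static unsigned long pfn_to_bitidx(unsigned long pfn)
    {
        return (pfn >> PAGEBLOCK_ORDER) * NR_PB_BITS;
    }

    static unsigned long get_pb_flags(const unsigned long *bitmap,
                                      unsigned long pfn, unsigned long mask)
    {
        unsigned long bitidx = pfn_to_bitidx(pfn);
        unsigned long word = bitmap[bitidx / BITS_PER_LONG];
        return (word >> (bitidx % BITS_PER_LONG)) & mask;
    }

    static void set_pb_flags(unsigned long *bitmap, unsigned long pfn,
                             unsigned long flags, unsigned long mask)
    {
        unsigned long bitidx = pfn_to_bitidx(pfn);
        unsigned long shift = bitidx % BITS_PER_LONG;
        unsigned long *word = &bitmap[bitidx / BITS_PER_LONG];
        /* non-atomic here; the kernel uses a try_cmpxchg loop */
        *word = (*word & ~(mask << shift)) | ((flags & mask) << shift);
    }

    int main(void)
    {
        unsigned long bitmap[4] = {0};
        set_pb_flags(bitmap, 1536, 2, 0x7);      /* pageblock 3 <- migratetype 2 */
        printf("block flags: %lu\n", get_pb_flags(bitmap, 1600, 0x7));
        return 0;
    }
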
441 static int page_outside_zone_boundaries(struct zone *zone, struct page *page) in page_outside_zone_boundaries() argument
445 unsigned long pfn = page_to_pfn(page); in page_outside_zone_boundaries()
466 static int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
468 if (page_outside_zone_boundaries(zone, page)) in bad_range()
470 if (zone != page_zone(page)) in bad_range()
476 static inline int __maybe_unused bad_range(struct zone *zone, struct page *page) in bad_range() argument
482 static void bad_page(struct page *page, const char *reason) in bad_page() argument
509 current->comm, page_to_pfn(page)); in bad_page()
510 dump_page(page, reason); in bad_page()
516 page_mapcount_reset(page); /* remove PageBuddy */ in bad_page()
559 static inline void free_the_page(struct page *page, unsigned int order) in free_the_page() argument
562 free_unref_page(page, order); in free_the_page()
564 __free_pages_ok(page, order, FPI_NONE); in free_the_page()
579 void prep_compound_page(struct page *page, unsigned int order) in prep_compound_page() argument
584 __SetPageHead(page); in prep_compound_page()
586 prep_compound_tail(page, i); in prep_compound_page()
588 prep_compound_head(page, order); in prep_compound_page()
602 free_the_page(&folio->page, folio_order(folio)); in destroy_large_folio()
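
Note: prep_compound_page() above marks the first page as the head and initializes every tail so that compound_head() can find the head in O(1) (the real kernel stores the head pointer, with bit 0 set, in each tail's compound_head word). A simplified model of that linkage:

    #include <stdio.h>

    struct fake_page {
        int head;                        /* stands in for PG_head */
        struct fake_page *compound_head; /* tail -> head back-pointer */
    };

    static void prep_compound_page(struct fake_page *pages, unsigned int order)
    {
        unsigned long nr = 1UL << order;

        pages[0].head = 1;               /* __SetPageHead() */
        for (unsigned long i = 1; i < nr; i++)
            pages[i].compound_head = &pages[0];  /* prep_compound_tail() */
    }

    int main(void)
    {
        struct fake_page pages[8] = {{0}};
        prep_compound_page(pages, 3);
        printf("tail 5 -> head? %d\n", pages[5].compound_head == &pages[0]);
        return 0;
    }
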
605 static inline void set_buddy_order(struct page *page, unsigned int order) in set_buddy_order() argument
607 set_page_private(page, order); in set_buddy_order()
608 __SetPageBuddy(page); in set_buddy_order()
618 !capc->page && in task_capc()
623 compaction_capture(struct capture_control *capc, struct page *page, in compaction_capture() argument
643 capc->page = page; in compaction_capture()
654 compaction_capture(struct capture_control *capc, struct page *page, in compaction_capture() argument
662 static inline void add_to_free_list(struct page *page, struct zone *zone, in add_to_free_list() argument
667 list_add(&page->buddy_list, &area->free_list[migratetype]); in add_to_free_list()
672 static inline void add_to_free_list_tail(struct page *page, struct zone *zone, in add_to_free_list_tail() argument
677 list_add_tail(&page->buddy_list, &area->free_list[migratetype]); in add_to_free_list_tail()
686 static inline void move_to_free_list(struct page *page, struct zone *zone, in move_to_free_list() argument
691 list_move_tail(&page->buddy_list, &area->free_list[migratetype]); in move_to_free_list()
694 static inline void del_page_from_free_list(struct page *page, struct zone *zone, in del_page_from_free_list() argument
698 if (page_reported(page)) in del_page_from_free_list()
699 __ClearPageReported(page); in del_page_from_free_list()
701 list_del(&page->buddy_list); in del_page_from_free_list()
702 __ClearPageBuddy(page); in del_page_from_free_list()
703 set_page_private(page, 0); in del_page_from_free_list()
707 static inline struct page *get_page_from_free_area(struct free_area *area, in get_page_from_free_area()
711 struct page, buddy_list); in get_page_from_free_area()
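
Note: the helpers above maintain one doubly-linked free list per (order, migratetype) pair, updating nr_free, the PageBuddy flag, and the order stashed in page_private() as pages come and go. A compact userspace model (MAX_ORDER and the migratetype count are assumptions):

    #include <stdio.h>

    #define MAX_ORDER 10
    #define NR_MT     3                  /* assumed number of migratetypes */

    struct fpage {
        struct fpage *next, *prev;       /* stands in for page->buddy_list */
        unsigned int order;              /* stands in for page_private() */
        int buddy;                       /* stands in for PageBuddy */
    };

    struct free_area {
        struct fpage head;               /* circular list sentinel */
        unsigned long nr_free;
    };

    static struct free_area areas[MAX_ORDER + 1][NR_MT];

    static void area_init(struct free_area *a)
    {
        a->head.next = a->head.prev = &a->head;
        a->nr_free = 0;
    }

    static void add_to_free_list(struct fpage *p, unsigned int order, int mt)
    {
        struct free_area *a = &areas[order][mt];

        p->next = a->head.next; p->prev = &a->head;
        a->head.next->prev = p; a->head.next = p;
        p->order = order; p->buddy = 1;
        a->nr_free++;
    }

    static void del_page_from_free_list(struct fpage *p, unsigned int order, int mt)
    {
        p->prev->next = p->next; p->next->prev = p->prev;
        p->buddy = 0; p->order = 0;      /* __ClearPageBuddy / set_page_private(0) */
        areas[order][mt].nr_free--;
    }

    int main(void)
    {
        struct fpage pg = {0};

        for (int o = 0; o <= MAX_ORDER; o++)
            for (int m = 0; m < NR_MT; m++)
                area_init(&areas[o][m]);
        add_to_free_list(&pg, 3, 1);
        printf("order-3 free: %lu\n", areas[3][1].nr_free);
        del_page_from_free_list(&pg, 3, 1);
        printf("after del: %lu\n", areas[3][1].nr_free);
        return 0;
    }
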
724 struct page *page, unsigned int order) in buddy_merge_likely() argument
727 struct page *higher_page; in buddy_merge_likely()
733 higher_page = page + (higher_page_pfn - pfn); in buddy_merge_likely()
763 static inline void __free_one_page(struct page *page, in __free_one_page() argument
771 struct page *buddy; in __free_one_page()
775 VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page); in __free_one_page()
781 VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page); in __free_one_page()
782 VM_BUG_ON_PAGE(bad_range(zone, page), page); in __free_one_page()
785 if (compaction_capture(capc, page, order, migratetype)) { in __free_one_page()
791 buddy = find_buddy_page_pfn(page, pfn, order, &buddy_pfn); in __free_one_page()
819 page = page + (combined_pfn - pfn); in __free_one_page()
825 set_buddy_order(page, order); in __free_one_page()
832 to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order); in __free_one_page()
835 add_to_free_list_tail(page, zone, order, migratetype); in __free_one_page()
837 add_to_free_list(page, zone, order, migratetype); in __free_one_page()
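
Note: the merge loop in __free_one_page() rests on the buddy identity buddy_pfn = pfn ^ (1 << order), with the merged block starting at combined_pfn = pfn & buddy_pfn (hence the 'page = page + (combined_pfn - pfn)' line above). A runnable sketch of just that loop; buddy_is_free() is an assumed stand-in for the kernel's find_buddy_page_pfn() check:

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_ORDER 10

    /* stand-in: is the buddy block free at exactly this order? */
    static bool buddy_is_free(unsigned long buddy_pfn, unsigned int order)
    {
        (void)buddy_pfn;
        return order < 2;                /* pretend low-order buddies are free */
    }

    static unsigned long merge(unsigned long pfn, unsigned int *orderp)
    {
        unsigned int order = *orderp;

        while (order < MAX_ORDER) {
            unsigned long buddy_pfn = pfn ^ (1UL << order);

            if (!buddy_is_free(buddy_pfn, order))
                break;
            pfn &= buddy_pfn;            /* merged block starts at the lower pfn */
            order++;
        }
        *orderp = order;
        return pfn;
    }

    int main(void)
    {
        unsigned int order = 0;
        unsigned long pfn = merge(5, &order);  /* free pfn 5 at order 0 */

        printf("merged block: pfn %lu, order %u\n", pfn, order);
        return 0;
    }

The XOR trick works because a block and its buddy differ in exactly one pfn bit at each order, which is why freeing is O(log n) with no search.
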
857 int split_free_page(struct page *free_page, in split_free_page()
907 static inline bool page_expected_state(struct page *page, in page_expected_state() argument
910 if (unlikely(atomic_read(&page->_mapcount) != -1)) in page_expected_state()
913 if (unlikely((unsigned long)page->mapping | in page_expected_state()
914 page_ref_count(page) | in page_expected_state()
916 page->memcg_data | in page_expected_state()
918 (page->flags & check_flags))) in page_expected_state()
924 static const char *page_bad_reason(struct page *page, unsigned long flags) in page_bad_reason() argument
928 if (unlikely(atomic_read(&page->_mapcount) != -1)) in page_bad_reason()
930 if (unlikely(page->mapping != NULL)) in page_bad_reason()
932 if (unlikely(page_ref_count(page) != 0)) in page_bad_reason()
934 if (unlikely(page->flags & flags)) { in page_bad_reason()
941 if (unlikely(page->memcg_data)) in page_bad_reason()
947 static void free_page_is_bad_report(struct page *page) in free_page_is_bad_report() argument
949 bad_page(page, in free_page_is_bad_report()
950 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE)); in free_page_is_bad_report()
953 static inline bool free_page_is_bad(struct page *page) in free_page_is_bad() argument
955 if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE))) in free_page_is_bad()
959 free_page_is_bad_report(page); in free_page_is_bad()
968 static int free_tail_page_prepare(struct page *head_page, struct page *page) in free_tail_page_prepare() argument
983 switch (page - head_page) { in free_tail_page_prepare()
987 bad_page(page, "nonzero entire_mapcount"); in free_tail_page_prepare()
991 bad_page(page, "nonzero nr_pages_mapped"); in free_tail_page_prepare()
995 bad_page(page, "nonzero pincount"); in free_tail_page_prepare()
1006 if (page->mapping != TAIL_MAPPING) { in free_tail_page_prepare()
1007 bad_page(page, "corrupted mapping in tail page"); in free_tail_page_prepare()
1012 if (unlikely(!PageTail(page))) { in free_tail_page_prepare()
1013 bad_page(page, "PageTail not set"); in free_tail_page_prepare()
1016 if (unlikely(compound_head(page) != head_page)) { in free_tail_page_prepare()
1017 bad_page(page, "compound_head not consistent"); in free_tail_page_prepare()
1022 page->mapping = NULL; in free_tail_page_prepare()
1023 clear_compound_head(page); in free_tail_page_prepare()
1056 static inline bool should_skip_kasan_poison(struct page *page, fpi_t fpi_flags) in should_skip_kasan_poison() argument
1061 return page_kasan_tag(page) == 0xff; in should_skip_kasan_poison()
1064 static void kernel_init_pages(struct page *page, int numpages) in kernel_init_pages() argument
1071 clear_highpage_kasan_tagged(page + i); in kernel_init_pages()
1075 static __always_inline bool free_pages_prepare(struct page *page, in free_pages_prepare() argument
1079 bool skip_kasan_poison = should_skip_kasan_poison(page, fpi_flags); in free_pages_prepare()
1082 VM_BUG_ON_PAGE(PageTail(page), page); in free_pages_prepare()
1084 trace_mm_page_free(page, order); in free_pages_prepare()
1085 kmsan_free_page(page, order); in free_pages_prepare()
1087 if (unlikely(PageHWPoison(page)) && !order) { in free_pages_prepare()
1092 if (memcg_kmem_online() && PageMemcgKmem(page)) in free_pages_prepare()
1093 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1094 reset_page_owner(page, order); in free_pages_prepare()
1095 page_table_check_free(page, order); in free_pages_prepare()
1104 bool compound = PageCompound(page); in free_pages_prepare()
1107 VM_BUG_ON_PAGE(compound && compound_order(page) != order, page); in free_pages_prepare()
1110 page[1].flags &= ~PAGE_FLAGS_SECOND; in free_pages_prepare()
1113 bad += free_tail_page_prepare(page, page + i); in free_pages_prepare()
1115 if (free_page_is_bad(page + i)) { in free_pages_prepare()
1120 (page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; in free_pages_prepare()
1123 if (PageMappingFlags(page)) in free_pages_prepare()
1124 page->mapping = NULL; in free_pages_prepare()
1125 if (memcg_kmem_online() && PageMemcgKmem(page)) in free_pages_prepare()
1126 __memcg_kmem_uncharge_page(page, order); in free_pages_prepare()
1128 if (free_page_is_bad(page)) in free_pages_prepare()
1134 page_cpupid_reset_last(page); in free_pages_prepare()
1135 page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP; in free_pages_prepare()
1136 reset_page_owner(page, order); in free_pages_prepare()
1137 page_table_check_free(page, order); in free_pages_prepare()
1139 if (!PageHighMem(page)) { in free_pages_prepare()
1140 debug_check_no_locks_freed(page_address(page), in free_pages_prepare()
1142 debug_check_no_obj_freed(page_address(page), in free_pages_prepare()
1146 kernel_poison_pages(page, 1 << order); in free_pages_prepare()
1157 kasan_poison_pages(page, order, init); in free_pages_prepare()
1164 kernel_init_pages(page, 1 << order); in free_pages_prepare()
1171 arch_free_page(page, order); in free_pages_prepare()
1173 debug_pagealloc_unmap_pages(page, 1 << order); in free_pages_prepare()
1190 struct page *page; in free_pcppages_bulk() local
1220 page = list_last_entry(list, struct page, pcp_list); in free_pcppages_bulk()
1221 mt = get_pcppage_migratetype(page); in free_pcppages_bulk()
1224 list_del(&page->pcp_list); in free_pcppages_bulk()
1229 VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); in free_pcppages_bulk()
1232 mt = get_pageblock_migratetype(page); in free_pcppages_bulk()
1234 __free_one_page(page, page_to_pfn(page), zone, order, mt, FPI_NONE); in free_pcppages_bulk()
1235 trace_mm_page_pcpu_drain(page, order, mt); in free_pcppages_bulk()
1243 struct page *page, unsigned long pfn, in free_one_page() argument
1252 migratetype = get_pfnblock_migratetype(page, pfn); in free_one_page()
1254 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in free_one_page()
1258 static void __free_pages_ok(struct page *page, unsigned int order, in __free_pages_ok() argument
1263 unsigned long pfn = page_to_pfn(page); in __free_pages_ok()
1264 struct zone *zone = page_zone(page); in __free_pages_ok()
1266 if (!free_pages_prepare(page, order, fpi_flags)) in __free_pages_ok()
1274 migratetype = get_pfnblock_migratetype(page, pfn); in __free_pages_ok()
1279 migratetype = get_pfnblock_migratetype(page, pfn); in __free_pages_ok()
1281 __free_one_page(page, pfn, zone, order, migratetype, fpi_flags); in __free_pages_ok()
1287 void __free_pages_core(struct page *page, unsigned int order) in __free_pages_core() argument
1290 struct page *p = page; in __free_pages_core()
1307 atomic_long_add(nr_pages, &page_zone(page)->managed_pages); in __free_pages_core()
1309 if (page_contains_unaccepted(page, order)) { in __free_pages_core()
1310 if (order == MAX_ORDER && __free_unaccepted(page)) in __free_pages_core()
1313 accept_page(page, order); in __free_pages_core()
1320 __free_pages_ok(page, order, FPI_TO_TAIL); in __free_pages_core()
1347 struct page *__pageblock_pfn_to_page(unsigned long start_pfn, in __pageblock_pfn_to_page()
1350 struct page *start_page; in __pageblock_pfn_to_page()
1351 struct page *end_page; in __pageblock_pfn_to_page()
1389 static inline void expand(struct zone *zone, struct page *page, in expand() argument
1397 VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]); in expand()
1405 if (set_page_guard(zone, &page[size], high, migratetype)) in expand()
1408 add_to_free_list(&page[size], zone, high, migratetype); in expand()
1409 set_buddy_order(&page[size], high); in expand()
1413 static void check_new_page_bad(struct page *page) in check_new_page_bad() argument
1415 if (unlikely(page->flags & __PG_HWPOISON)) { in check_new_page_bad()
1417 page_mapcount_reset(page); /* remove PageBuddy */ in check_new_page_bad()
1421 bad_page(page, in check_new_page_bad()
1422 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP)); in check_new_page_bad()
1428 static int check_new_page(struct page *page) in check_new_page() argument
1430 if (likely(page_expected_state(page, in check_new_page()
1434 check_new_page_bad(page); in check_new_page()
1438 static inline bool check_new_pages(struct page *page, unsigned int order) in check_new_pages() argument
1442 struct page *p = page + i; in check_new_pages()
1480 inline void post_alloc_hook(struct page *page, unsigned int order, in post_alloc_hook() argument
1488 set_page_private(page, 0); in post_alloc_hook()
1489 set_page_refcounted(page); in post_alloc_hook()
1491 arch_alloc_page(page, order); in post_alloc_hook()
1492 debug_pagealloc_map_pages(page, 1 << order); in post_alloc_hook()
1499 kernel_unpoison_pages(page, 1 << order); in post_alloc_hook()
1514 tag_clear_highpage(page + i); in post_alloc_hook()
1520 kasan_unpoison_pages(page, order, init)) { in post_alloc_hook()
1530 page_kasan_tag_reset(page + i); in post_alloc_hook()
1534 kernel_init_pages(page, 1 << order); in post_alloc_hook()
1536 set_page_owner(page, order, gfp_flags); in post_alloc_hook()
1537 page_table_check_alloc(page, order); in post_alloc_hook()
1540 static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags, in prep_new_page() argument
1543 post_alloc_hook(page, order, gfp_flags); in prep_new_page()
1546 prep_compound_page(page, order); in prep_new_page()
1555 set_page_pfmemalloc(page); in prep_new_page()
1557 clear_page_pfmemalloc(page); in prep_new_page()
1565 struct page *__rmqueue_smallest(struct zone *zone, unsigned int order, in __rmqueue_smallest()
1570 struct page *page; in __rmqueue_smallest() local
1575 page = get_page_from_free_area(area, migratetype); in __rmqueue_smallest()
1576 if (!page) in __rmqueue_smallest()
1578 del_page_from_free_list(page, zone, current_order); in __rmqueue_smallest()
1579 expand(zone, page, order, current_order, migratetype); in __rmqueue_smallest()
1580 set_pcppage_migratetype(page, migratetype); in __rmqueue_smallest()
1581 trace_mm_page_alloc_zone_locked(page, order, migratetype, in __rmqueue_smallest()
1584 return page; in __rmqueue_smallest()
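
Note: __rmqueue_smallest() above scans from the requested order upward; when it takes a larger block, expand() hands the unused halves back to the free lists, one per intermediate order. A sketch with per-order counters standing in for the real lists:

    #include <stdio.h>

    #define MAX_ORDER 10

    static unsigned long nr_free[MAX_ORDER + 1];  /* free blocks per order */

    /* give back the upper half at each order below 'high' (expand()) */
    static void expand(unsigned int low, unsigned int high)
    {
        while (high > low) {
            high--;
            nr_free[high]++;             /* split-off buddy goes back on its list */
        }
    }

    static int rmqueue_smallest(unsigned int order)
    {
        for (unsigned int cur = order; cur <= MAX_ORDER; cur++) {
            if (!nr_free[cur])
                continue;
            nr_free[cur]--;
            expand(order, cur);
            return (int)cur;             /* order the block was taken from */
        }
        return -1;                       /* nothing free */
    }

    int main(void)
    {
        nr_free[MAX_ORDER] = 1;          /* one maximal block */
        printf("order-0 page carved from an order-%d block\n",
               rmqueue_smallest(0));
        for (unsigned int o = 0; o <= MAX_ORDER; o++)
            if (nr_free[o])
                printf("  order %u: %lu free\n", o, nr_free[o]);
        return 0;
    }
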
1604 static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback()
1610 static inline struct page *__rmqueue_cma_fallback(struct zone *zone, in __rmqueue_cma_fallback()
1623 struct page *page; in move_freepages() local
1629 page = pfn_to_page(pfn); in move_freepages()
1630 if (!PageBuddy(page)) { in move_freepages()
1637 (PageLRU(page) || __PageMovable(page))) in move_freepages()
1644 VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page); in move_freepages()
1645 VM_BUG_ON_PAGE(page_zone(page) != zone, page); in move_freepages()
1647 order = buddy_order(page); in move_freepages()
1648 move_to_free_list(page, zone, order, migratetype); in move_freepages()
1656 int move_freepages_block(struct zone *zone, struct page *page, in move_freepages_block() argument
1664 pfn = page_to_pfn(page); in move_freepages_block()
1678 static void change_pageblock_range(struct page *pageblock_page, in change_pageblock_range()
1767 static void steal_suitable_fallback(struct zone *zone, struct page *page, in steal_suitable_fallback() argument
1770 unsigned int current_order = buddy_order(page); in steal_suitable_fallback()
1774 old_block_type = get_pageblock_migratetype(page); in steal_suitable_fallback()
1785 change_pageblock_range(page, current_order, start_type); in steal_suitable_fallback()
1801 free_pages = move_freepages_block(zone, page, start_type, in steal_suitable_fallback()
1834 set_pageblock_migratetype(page, start_type); in steal_suitable_fallback()
1839 move_to_free_list(page, zone, current_order, start_type); in steal_suitable_fallback()
1880 static void reserve_highatomic_pageblock(struct page *page, struct zone *zone) in reserve_highatomic_pageblock() argument
1900 mt = get_pageblock_migratetype(page); in reserve_highatomic_pageblock()
1904 set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC); in reserve_highatomic_pageblock()
1905 move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL); in reserve_highatomic_pageblock()
1928 struct page *page; in unreserve_highatomic_pageblock() local
1946 page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC); in unreserve_highatomic_pageblock()
1947 if (!page) in unreserve_highatomic_pageblock()
1957 if (is_migrate_highatomic_page(page)) { in unreserve_highatomic_pageblock()
1979 set_pageblock_migratetype(page, ac->migratetype); in unreserve_highatomic_pageblock()
1980 ret = move_freepages_block(zone, page, ac->migratetype, in unreserve_highatomic_pageblock()
2010 struct page *page; in __rmqueue_fallback() local
2069 page = get_page_from_free_area(area, fallback_mt); in __rmqueue_fallback()
2071 steal_suitable_fallback(zone, page, alloc_flags, start_migratetype, in __rmqueue_fallback()
2074 trace_mm_page_alloc_extfrag(page, order, current_order, in __rmqueue_fallback()
2085 static __always_inline struct page *
2089 struct page *page; in __rmqueue() local
2100 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2101 if (page) in __rmqueue()
2102 return page; in __rmqueue()
2106 page = __rmqueue_smallest(zone, order, migratetype); in __rmqueue()
2107 if (unlikely(!page)) { in __rmqueue()
2109 page = __rmqueue_cma_fallback(zone, order); in __rmqueue()
2111 if (!page && __rmqueue_fallback(zone, order, migratetype, in __rmqueue()
2115 return page; in __rmqueue()
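
Note: when the wanted migratetype is exhausted, __rmqueue() above falls back: movable requests may be served from the CMA lists, and otherwise __rmqueue_fallback() steals from other migratetypes according to a static preference table (steal_suitable_fallback() may re-type the whole stolen pageblock). A toy version of the table walk; this table is illustrative and the kernel's fallbacks[] array differs in detail:

    #include <stdio.h>

    enum { MT_UNMOVABLE, MT_MOVABLE, MT_RECLAIMABLE, MT_TYPES };

    static const int fallbacks[MT_TYPES][2] = {
        [MT_UNMOVABLE]   = { MT_RECLAIMABLE, MT_MOVABLE },
        [MT_MOVABLE]     = { MT_RECLAIMABLE, MT_UNMOVABLE },
        [MT_RECLAIMABLE] = { MT_UNMOVABLE,   MT_MOVABLE },
    };

    static unsigned long nr_free[MT_TYPES] = { 0, 4, 0 };  /* only movable has pages */

    static int rmqueue_with_fallback(int mt)
    {
        if (nr_free[mt]) { nr_free[mt]--; return mt; }
        for (int i = 0; i < 2; i++) {
            int fb = fallbacks[mt][i];
            if (nr_free[fb]) { nr_free[fb]--; return fb; }
        }
        return -1;                       /* trigger reclaim/compaction instead */
    }

    int main(void)
    {
        printf("allocated from migratetype %d\n",
               rmqueue_with_fallback(MT_UNMOVABLE));
        return 0;
    }
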
2132 struct page *page = __rmqueue(zone, order, migratetype, in rmqueue_bulk() local
2134 if (unlikely(page == NULL)) in rmqueue_bulk()
2147 list_add_tail(&page->pcp_list, list); in rmqueue_bulk()
2148 if (is_migrate_cma(get_pcppage_migratetype(page))) in rmqueue_bulk()
2307 static bool free_unref_page_prepare(struct page *page, unsigned long pfn, in free_unref_page_prepare() argument
2312 if (!free_pages_prepare(page, order, FPI_NONE)) in free_unref_page_prepare()
2315 migratetype = get_pfnblock_migratetype(page, pfn); in free_unref_page_prepare()
2316 set_pcppage_migratetype(page, migratetype); in free_unref_page_prepare()
2368 struct page *page, int migratetype, in free_unref_page_commit() argument
2377 list_add(&page->pcp_list, &pcp->lists[pindex]); in free_unref_page_commit()
2397 void free_unref_page(struct page *page, unsigned int order) in free_unref_page() argument
2402 unsigned long pfn = page_to_pfn(page); in free_unref_page()
2405 if (!free_unref_page_prepare(page, pfn, order)) in free_unref_page()
2415 migratetype = pcpmigratetype = get_pcppage_migratetype(page); in free_unref_page()
2418 free_one_page(page_zone(page), page, pfn, order, migratetype, FPI_NONE); in free_unref_page()
2424 zone = page_zone(page); in free_unref_page()
2428 free_unref_page_commit(zone, pcp, page, pcpmigratetype, order); in free_unref_page()
2431 free_one_page(zone, page, pfn, order, migratetype, FPI_NONE); in free_unref_page()
2442 struct page *page, *next; in free_unref_page_list() local
2449 list_for_each_entry_safe(page, next, list, lru) { in free_unref_page_list()
2450 unsigned long pfn = page_to_pfn(page); in free_unref_page_list()
2451 if (!free_unref_page_prepare(page, pfn, 0)) { in free_unref_page_list()
2452 list_del(&page->lru); in free_unref_page_list()
2460 migratetype = get_pcppage_migratetype(page); in free_unref_page_list()
2462 list_del(&page->lru); in free_unref_page_list()
2463 free_one_page(page_zone(page), page, pfn, 0, migratetype, FPI_NONE); in free_unref_page_list()
2468 list_for_each_entry_safe(page, next, list, lru) { in free_unref_page_list()
2469 struct zone *zone = page_zone(page); in free_unref_page_list()
2471 list_del(&page->lru); in free_unref_page_list()
2472 migratetype = get_pcppage_migratetype(page); in free_unref_page_list()
2495 free_one_page(zone, page, page_to_pfn(page), in free_unref_page_list()
2510 trace_mm_page_free_batched(page); in free_unref_page_list()
2511 free_unref_page_commit(zone, pcp, page, migratetype, 0); in free_unref_page_list()
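
Note: free_unref_page() and free_unref_page_list() above batch frees on a per-CPU list and only take the zone lock when free_pcppages_bulk() flushes a batch to the buddy lists. A toy model of the batching; the thresholds are assumptions, since the real pcp->high and batch values are sized at runtime:

    #include <stdio.h>

    #define PCP_HIGH  8
    #define PCP_BATCH 4

    static int pcp_count;
    static int buddy_count;

    static void free_to_buddy(void) { buddy_count++; }  /* zone lock held here */

    static void free_unref_page(void)
    {
        pcp_count++;                                    /* fast path, no zone lock */
        if (pcp_count >= PCP_HIGH) {
            for (int i = 0; i < PCP_BATCH; i++) {       /* free_pcppages_bulk() */
                free_to_buddy();
                pcp_count--;
            }
        }
    }

    int main(void)
    {
        for (int i = 0; i < 20; i++)
            free_unref_page();
        printf("on pcp list: %d, returned to buddy: %d\n",
               pcp_count, buddy_count);
        return 0;
    }
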
2529 void split_page(struct page *page, unsigned int order) in split_page() argument
2533 VM_BUG_ON_PAGE(PageCompound(page), page); in split_page()
2534 VM_BUG_ON_PAGE(!page_count(page), page); in split_page()
2537 set_page_refcounted(page + i); in split_page()
2538 split_page_owner(page, 1 << order); in split_page()
2539 split_page_memcg(page, 1 << order); in split_page()
2543 int __isolate_free_page(struct page *page, unsigned int order) in __isolate_free_page() argument
2545 struct zone *zone = page_zone(page); in __isolate_free_page()
2546 int mt = get_pageblock_migratetype(page); in __isolate_free_page()
2563 del_page_from_free_list(page, zone, order); in __isolate_free_page()
2570 struct page *endpage = page + (1 << order) - 1; in __isolate_free_page()
2571 for (; page < endpage; page += pageblock_nr_pages) { in __isolate_free_page()
2572 int mt = get_pageblock_migratetype(page); in __isolate_free_page()
2578 set_pageblock_migratetype(page, in __isolate_free_page()
2595 void __putback_isolated_page(struct page *page, unsigned int order, int mt) in __putback_isolated_page() argument
2597 struct zone *zone = page_zone(page); in __putback_isolated_page()
2603 __free_one_page(page, page_to_pfn(page), zone, order, mt, in __putback_isolated_page()
2634 struct page *rmqueue_buddy(struct zone *preferred_zone, struct zone *zone, in rmqueue_buddy()
2638 struct page *page; in rmqueue_buddy() local
2642 page = NULL; in rmqueue_buddy()
2645 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
2646 if (!page) { in rmqueue_buddy()
2647 page = __rmqueue(zone, order, migratetype, alloc_flags); in rmqueue_buddy()
2655 if (!page && (alloc_flags & ALLOC_OOM)) in rmqueue_buddy()
2656 page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); in rmqueue_buddy()
2658 if (!page) { in rmqueue_buddy()
2664 get_pcppage_migratetype(page)); in rmqueue_buddy()
2666 } while (check_new_pages(page, order)); in rmqueue_buddy()
2668 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_buddy()
2671 return page; in rmqueue_buddy()
2676 struct page *__rmqueue_pcplist(struct zone *zone, unsigned int order, in __rmqueue_pcplist()
2682 struct page *page; in __rmqueue_pcplist() local
2707 page = list_first_entry(list, struct page, pcp_list); in __rmqueue_pcplist()
2708 list_del(&page->pcp_list); in __rmqueue_pcplist()
2710 } while (check_new_pages(page, order)); in __rmqueue_pcplist()
2712 return page; in __rmqueue_pcplist()
2716 static struct page *rmqueue_pcplist(struct zone *preferred_zone, in rmqueue_pcplist()
2722 struct page *page; in rmqueue_pcplist() local
2740 page = __rmqueue_pcplist(zone, order, migratetype, alloc_flags, pcp, list); in rmqueue_pcplist()
2743 if (page) { in rmqueue_pcplist()
2744 __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); in rmqueue_pcplist()
2747 return page; in rmqueue_pcplist()
2763 struct page *rmqueue(struct zone *preferred_zone, in rmqueue()
2768 struct page *page; in rmqueue() local
2777 page = rmqueue_pcplist(preferred_zone, zone, order, in rmqueue()
2779 if (likely(page)) in rmqueue()
2783 page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags, in rmqueue()
2794 VM_BUG_ON_PAGE(page && bad_range(zone, page), page); in rmqueue()
2795 return page; in rmqueue()
3047 static struct page *
3066 struct page *page; in get_page_from_freelist() local
3167 page = rmqueue(ac->preferred_zoneref->zone, zone, order, in get_page_from_freelist()
3169 if (page) { in get_page_from_freelist()
3170 prep_new_page(page, order, gfp_mask, alloc_flags); in get_page_from_freelist()
3177 reserve_highatomic_pageblock(page, zone); in get_page_from_freelist()
3179 return page; in get_page_from_freelist()
3252 static inline struct page *
3257 struct page *page; in __alloc_pages_cpuset_fallback() local
3259 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3265 if (!page) in __alloc_pages_cpuset_fallback()
3266 page = get_page_from_freelist(gfp_mask, order, in __alloc_pages_cpuset_fallback()
3269 return page; in __alloc_pages_cpuset_fallback()
3272 static inline struct page *
3283 struct page *page; in __alloc_pages_may_oom() local
3304 page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) & in __alloc_pages_may_oom()
3307 if (page) in __alloc_pages_may_oom()
3351 page = __alloc_pages_cpuset_fallback(gfp_mask, order, in __alloc_pages_may_oom()
3356 return page; in __alloc_pages_may_oom()
3367 static struct page *
3372 struct page *page = NULL; in __alloc_pages_direct_compact() local
3384 prio, &page); in __alloc_pages_direct_compact()
3399 if (page) in __alloc_pages_direct_compact()
3400 prep_new_page(page, order, gfp_mask, alloc_flags); in __alloc_pages_direct_compact()
3403 if (!page) in __alloc_pages_direct_compact()
3404 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_compact()
3406 if (page) { in __alloc_pages_direct_compact()
3407 struct zone *zone = page_zone(page); in __alloc_pages_direct_compact()
3412 return page; in __alloc_pages_direct_compact()
3492 static inline struct page *
3639 static inline struct page *
3644 struct page *page = NULL; in __alloc_pages_direct_reclaim() local
3654 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_direct_reclaim()
3661 if (!page && !drained) { in __alloc_pages_direct_reclaim()
3670 return page; in __alloc_pages_direct_reclaim()
3898 static inline struct page *
3904 struct page *page = NULL; in __alloc_pages_slowpath() local
3960 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
3961 if (page) in __alloc_pages_slowpath()
3977 page = __alloc_pages_direct_compact(gfp_mask, order, in __alloc_pages_slowpath()
3981 if (page) in __alloc_pages_slowpath()
4041 page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac); in __alloc_pages_slowpath()
4042 if (page) in __alloc_pages_slowpath()
4054 page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4056 if (page) in __alloc_pages_slowpath()
4060 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, in __alloc_pages_slowpath()
4062 if (page) in __alloc_pages_slowpath()
4102 page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress); in __alloc_pages_slowpath()
4103 if (page) in __alloc_pages_slowpath()
4161 page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_MIN_RESERVE, ac); in __alloc_pages_slowpath()
4162 if (page) in __alloc_pages_slowpath()
4172 return page; in __alloc_pages_slowpath()
4241 struct page **page_array) in __alloc_pages_bulk()
4243 struct page *page; in __alloc_pages_bulk() local
4341 page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags, in __alloc_pages_bulk()
4343 if (unlikely(!page)) { in __alloc_pages_bulk()
4353 prep_new_page(page, 0, gfp, 0); in __alloc_pages_bulk()
4355 list_add(&page->lru, page_list); in __alloc_pages_bulk()
4357 page_array[nr_populated] = page; in __alloc_pages_bulk()
4374 page = __alloc_pages(gfp, 0, preferred_nid, nodemask); in __alloc_pages_bulk()
4375 if (page) { in __alloc_pages_bulk()
4377 list_add(&page->lru, page_list); in __alloc_pages_bulk()
4379 page_array[nr_populated] = page; in __alloc_pages_bulk()
4390 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid, in __alloc_pages()
4393 struct page *page; in __alloc_pages() local
4426 page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac); in __alloc_pages()
4427 if (likely(page)) in __alloc_pages()
4439 page = __alloc_pages_slowpath(alloc_gfp, order, &ac); in __alloc_pages()
4442 if (memcg_kmem_online() && (gfp & __GFP_ACCOUNT) && page && in __alloc_pages()
4443 unlikely(__memcg_kmem_charge_page(page, gfp, order) != 0)) { in __alloc_pages()
4444 __free_pages(page, order); in __alloc_pages()
4445 page = NULL; in __alloc_pages()
4448 trace_mm_page_alloc(page, order, alloc_gfp, ac.migratetype); in __alloc_pages()
4449 kmsan_alloc_page(page, order, alloc_gfp); in __alloc_pages()
4451 return page; in __alloc_pages()
4458 struct page *page = __alloc_pages(gfp | __GFP_COMP, order, in __folio_alloc() local
4460 struct folio *folio = (struct folio *)page; in __folio_alloc()
4475 struct page *page; in __get_free_pages() local
4477 page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order); in __get_free_pages()
4478 if (!page) in __get_free_pages()
4480 return (unsigned long) page_address(page); in __get_free_pages()
4510 void __free_pages(struct page *page, unsigned int order) in __free_pages() argument
4513 int head = PageHead(page); in __free_pages()
4515 if (put_page_testzero(page)) in __free_pages()
4516 free_the_page(page, order); in __free_pages()
4519 free_the_page(page + (1 << order), order); in __free_pages()
4544 static struct page *__page_frag_cache_refill(struct page_frag_cache *nc, in __page_frag_cache_refill()
4547 struct page *page = NULL; in __page_frag_cache_refill() local
4553 page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, in __page_frag_cache_refill()
4555 nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE; in __page_frag_cache_refill()
4557 if (unlikely(!page)) in __page_frag_cache_refill()
4558 page = alloc_pages_node(NUMA_NO_NODE, gfp, 0); in __page_frag_cache_refill()
4560 nc->va = page ? page_address(page) : NULL; in __page_frag_cache_refill()
4562 return page; in __page_frag_cache_refill()
4565 void __page_frag_cache_drain(struct page *page, unsigned int count) in __page_frag_cache_drain() argument
4567 VM_BUG_ON_PAGE(page_ref_count(page) == 0, page); in __page_frag_cache_drain()
4569 if (page_ref_sub_and_test(page, count)) in __page_frag_cache_drain()
4570 free_the_page(page, compound_order(page)); in __page_frag_cache_drain()
4579 struct page *page; in page_frag_alloc_align() local
4584 page = __page_frag_cache_refill(nc, gfp_mask); in page_frag_alloc_align()
4585 if (!page) in page_frag_alloc_align()
4595 page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE); in page_frag_alloc_align()
4598 nc->pfmemalloc = page_is_pfmemalloc(page); in page_frag_alloc_align()
4605 page = virt_to_page(nc->va); in page_frag_alloc_align()
4607 if (!page_ref_sub_and_test(page, nc->pagecnt_bias)) in page_frag_alloc_align()
4611 free_the_page(page, compound_order(page)); in page_frag_alloc_align()
4620 set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1); in page_frag_alloc_align()
4652 struct page *page = virt_to_head_page(addr); in page_frag_free() local
4654 if (unlikely(put_page_testzero(page))) in page_frag_free()
4655 free_the_page(page, compound_order(page)); in page_frag_free()
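
Note: the page_frag_* functions above carve small fragments out of one large compound page: nc->offset walks downward, and the page's refcount is biased upward once (page_ref_add() of PAGE_FRAG_CACHE_MAX_SIZE) so that each fragment free is a plain decrement; page_ref_sub_and_test() against the remaining bias decides whether an exhausted page can be recycled. A userspace model of that accounting, with malloc standing in for the page and all sizes assumed:

    #include <stdio.h>
    #include <stdlib.h>

    #define FRAG_CACHE_SIZE 32768

    struct frag_cache {
        char *va;
        int offset;                      /* walks downward toward 0 */
        int refs;                        /* biased reference count */
        int bias;                        /* unspent part of the bias */
    };

    static void *frag_alloc(struct frag_cache *nc, int size, int align)
    {
        if (!nc->va) {
            nc->va = malloc(FRAG_CACHE_SIZE);
            if (!nc->va)
                return NULL;
            nc->offset = FRAG_CACHE_SIZE;
            nc->refs = nc->bias = FRAG_CACHE_SIZE + 1;  /* one big up-front bias */
        }
        int off = (nc->offset - size) & ~(align - 1);
        if (off < 0) {
            nc->refs -= nc->bias;        /* kernel: page_ref_sub_and_test(bias) */
            nc->bias = 0;
            if (nc->refs != 0)
                return NULL;             /* frags still out; real code takes a new page */
            /* every fragment was freed: recycle the same buffer */
            nc->refs = nc->bias = FRAG_CACHE_SIZE + 1;
            nc->offset = FRAG_CACHE_SIZE;
            off = (nc->offset - size) & ~(align - 1);
            if (off < 0)
                return NULL;             /* request larger than the whole cache */
        }
        nc->bias--;                      /* per-fragment cost: no atomic needed */
        nc->offset = off;
        return nc->va + off;
    }

    static void frag_free(struct frag_cache *nc)
    {
        if (--nc->refs == 0) {           /* kernel: put_page_testzero() */
            free(nc->va);
            nc->va = NULL;
        }
    }

    int main(void)
    {
        struct frag_cache nc = {0};
        void *a = frag_alloc(&nc, 100, 64);
        void *b = frag_alloc(&nc, 100, 64);

        printf("fragments at offsets %td and %td\n",
               (char *)a - nc.va, (char *)b - nc.va);
        frag_free(&nc);
        frag_free(&nc);
        return 0;
    }

The point of the bias is that the hot allocation path touches only CPU-local state; the shared refcount is hit once per page, not once per fragment.
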
4664 struct page *page = virt_to_page((void *)addr); in make_alloc_exact() local
4665 struct page *last = page + nr; in make_alloc_exact()
4667 split_page_owner(page, 1 << order); in make_alloc_exact()
4668 split_page_memcg(page, 1 << order); in make_alloc_exact()
4669 while (page < --last) in make_alloc_exact()
4672 last = page + (1UL << order); in make_alloc_exact()
4673 for (page += nr; page < last; page++) in make_alloc_exact()
4674 __free_pages_ok(page, 0, FPI_TO_TAIL); in make_alloc_exact()
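
Note: make_alloc_exact() above implements alloc_pages_exact(): round the request up to a power-of-two order, split the block (split_page_owner()/split_page_memcg()), keep the first nr pages, and free the tail pages individually. The order computation and the resulting waste returned to the allocator:

    #include <stdio.h>

    static unsigned int order_for(unsigned long pages)
    {
        unsigned int order = 0;

        while ((1UL << order) < pages)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long nr = 5;                 /* want exactly 5 pages */
        unsigned int order = order_for(nr);   /* -> order 3, an 8-page block */

        printf("allocate order %u (%lu pages), free the %lu tail pages\n",
               order, 1UL << order, (1UL << order) - nr);
        return 0;
    }
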
4722 struct page *p; in alloc_pages_exact_nid()
5465 void adjust_managed_page_count(struct page *page, long count) in adjust_managed_page_count() argument
5467 atomic_long_add(count, &page_zone(page)->managed_pages); in adjust_managed_page_count()
5470 if (PageHighMem(page)) in adjust_managed_page_count()
5484 struct page *page = virt_to_page(pos); in free_reserved_area() local
5494 direct_map_addr = page_address(page); in free_reserved_area()
5503 free_reserved_page(page); in free_reserved_area()
6021 struct page *page; in alloc_contig_dump_pages() local
6024 list_for_each_entry(page, page_list, lru) in alloc_contig_dump_pages()
6025 dump_page(page, "migration failure"); in alloc_contig_dump_pages()
6248 struct page *page; in pfn_range_valid_contig() local
6251 page = pfn_to_online_page(i); in pfn_range_valid_contig()
6252 if (!page) in pfn_range_valid_contig()
6255 if (page_zone(page) != z) in pfn_range_valid_contig()
6258 if (PageReserved(page)) in pfn_range_valid_contig()
6261 if (PageHuge(page)) in pfn_range_valid_contig()
6296 struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask, in alloc_contig_pages()
6339 struct page *page = pfn_to_page(pfn); in free_contig_range() local
6341 count += page_count(page) != 1; in free_contig_range()
6342 __free_page(page); in free_contig_range()
6396 struct page *page; in __offline_isolated_pages() local
6405 page = pfn_to_page(pfn); in __offline_isolated_pages()
6410 if (unlikely(!PageBuddy(page) && PageHWPoison(page))) { in __offline_isolated_pages()
6418 if (PageOffline(page)) { in __offline_isolated_pages()
6419 BUG_ON(page_count(page)); in __offline_isolated_pages()
6420 BUG_ON(PageBuddy(page)); in __offline_isolated_pages()
6425 BUG_ON(page_count(page)); in __offline_isolated_pages()
6426 BUG_ON(!PageBuddy(page)); in __offline_isolated_pages()
6427 order = buddy_order(page); in __offline_isolated_pages()
6428 del_page_from_free_list(page, zone, order); in __offline_isolated_pages()
6438 bool is_free_buddy_page(struct page *page) in is_free_buddy_page() argument
6440 unsigned long pfn = page_to_pfn(page); in is_free_buddy_page()
6444 struct page *page_head = page - (pfn & ((1 << order) - 1)); in is_free_buddy_page()
6460 static void break_down_buddy_pages(struct zone *zone, struct page *page, in break_down_buddy_pages() argument
6461 struct page *target, int low, int high, in break_down_buddy_pages()
6465 struct page *current_buddy, *next_page; in break_down_buddy_pages()
6471 if (target >= &page[size]) { in break_down_buddy_pages()
6472 next_page = page + size; in break_down_buddy_pages()
6473 current_buddy = page; in break_down_buddy_pages()
6475 next_page = page; in break_down_buddy_pages()
6476 current_buddy = page + size; in break_down_buddy_pages()
6478 page = next_page; in break_down_buddy_pages()
6493 bool take_page_off_buddy(struct page *page) in take_page_off_buddy() argument
6495 struct zone *zone = page_zone(page); in take_page_off_buddy()
6496 unsigned long pfn = page_to_pfn(page); in take_page_off_buddy()
6503 struct page *page_head = page - (pfn & ((1 << order) - 1)); in take_page_off_buddy()
6512 break_down_buddy_pages(zone, page_head, page, 0, in take_page_off_buddy()
6514 SetPageHWPoisonTakenOff(page); in take_page_off_buddy()
6530 bool put_page_back_buddy(struct page *page) in put_page_back_buddy() argument
6532 struct zone *zone = page_zone(page); in put_page_back_buddy()
6533 unsigned long pfn = page_to_pfn(page); in put_page_back_buddy()
6535 int migratetype = get_pfnblock_migratetype(page, pfn); in put_page_back_buddy()
6539 if (put_page_testzero(page)) { in put_page_back_buddy()
6540 ClearPageHWPoisonTakenOff(page); in put_page_back_buddy()
6541 __free_one_page(page, pfn, zone, 0, migratetype, FPI_NONE); in put_page_back_buddy()
6542 if (TestClearPageHWPoison(page)) { in put_page_back_buddy()
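
Note: is_free_buddy_page() and take_page_off_buddy() above locate the free block containing a pfn by masking off the low pfn bits at each candidate order (the 'page_head = page - (pfn & ((1 << order) - 1))' lines). A runnable sketch of that walk; head_is_free() is an assumed stand-in for the PageBuddy and buddy_order checks:

    #include <stdio.h>
    #include <stdbool.h>

    #define MAX_ORDER 10

    /* stand-in: is there a free buddy block of this order at head_pfn? */
    static bool head_is_free(unsigned long head_pfn, unsigned int order)
    {
        return head_pfn == 0 && order == 3;   /* pretend pfns 0..7 form a free block */
    }

    int main(void)
    {
        unsigned long pfn = 5;

        for (unsigned int order = 0; order <= MAX_ORDER; order++) {
            unsigned long head_pfn = pfn & ~((1UL << order) - 1);

            if (head_is_free(head_pfn, order)) {
                printf("pfn %lu lies in a free order-%u block at pfn %lu\n",
                       pfn, order, head_pfn);
                return 0;
            }
        }
        printf("pfn %lu is not in any free buddy block\n", pfn);
        return 0;
    }
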
6588 static bool page_contains_unaccepted(struct page *page, unsigned int order) in page_contains_unaccepted() argument
6590 phys_addr_t start = page_to_phys(page); in page_contains_unaccepted()
6596 static void accept_page(struct page *page, unsigned int order) in accept_page() argument
6598 phys_addr_t start = page_to_phys(page); in accept_page()
6606 struct page *page; in try_to_accept_memory_one() local
6613 page = list_first_entry_or_null(&zone->unaccepted_pages, in try_to_accept_memory_one()
6614 struct page, lru); in try_to_accept_memory_one()
6615 if (!page) { in try_to_accept_memory_one()
6620 list_del(&page->lru); in try_to_accept_memory_one()
6627 accept_page(page, MAX_ORDER); in try_to_accept_memory_one()
6629 __free_pages_ok(page, MAX_ORDER, FPI_TO_TAIL); in try_to_accept_memory_one()
6663 static bool __free_unaccepted(struct page *page) in __free_unaccepted() argument
6665 struct zone *zone = page_zone(page); in __free_unaccepted()
6674 list_add_tail(&page->lru, &zone->unaccepted_pages); in __free_unaccepted()
6687 static bool page_contains_unaccepted(struct page *page, unsigned int order) in page_contains_unaccepted() argument
6692 static void accept_page(struct page *page, unsigned int order) in accept_page() argument
6706 static bool __free_unaccepted(struct page *page) in __free_unaccepted() argument
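
Note: the unaccepted-memory helpers above (page_contains_unaccepted(), accept_page(), __free_unaccepted(), try_to_accept_memory_one()) support confidential-computing guests, where memory must be explicitly accepted before first use: whole MAX_ORDER blocks park on zone->unaccepted_pages and are accepted lazily, then freed to the buddy allocator. A toy model of on-demand acceptance, with all sizes and names assumed:

    #include <stdio.h>

    #define NR_BLOCKS 4

    static int unaccepted[NR_BLOCKS] = { 1, 1, 1, 1 };
    static long free_pages;

    static int try_to_accept_memory_one(void)
    {
        for (int i = 0; i < NR_BLOCKS; i++) {
            if (unaccepted[i]) {
                unaccepted[i] = 0;       /* kernel: accept_memory() on the range */
                free_pages += 1024;      /* then the block is freed to the buddy */
                return 1;
            }
        }
        return 0;
    }

    int main(void)
    {
        while (free_pages < 2048 && try_to_accept_memory_one())
            ;
        printf("free pages after acceptance: %ld\n", free_pages);
        return 0;
    }
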