Lines matching full:order in mm/compaction.c

53 #define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))  argument
54 #define block_end_pfn(pfn, order) ALIGN((pfn) + 1, 1UL << (order)) argument
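These two macros round a pfn down to the start of its order-aligned block and up past the block's end. A minimal userspace sketch of the same arithmetic, with round_down() and ALIGN() reimplemented locally for illustration (the kernel versions live in its math/align headers):

    #include <stdio.h>

    /* Local stand-ins for the kernel helpers; both require a power-of-two size. */
    #define round_down(x, y)  ((x) & ~((unsigned long)(y) - 1))
    #define ALIGN(x, a)       (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    #define block_start_pfn(pfn, order)  round_down(pfn, 1UL << (order))
    #define block_end_pfn(pfn, order)    ALIGN((pfn) + 1, 1UL << (order))

    int main(void)
    {
        unsigned long pfn = 0x12345;   /* arbitrary example frame number */
        unsigned int order = 9;        /* e.g. a 2MB pageblock with 4K pages */

        /* Prints the half-open [start, end) range of the order-9 block holding pfn. */
        printf("pfn %#lx lives in block [%#lx, %#lx)\n",
               pfn, block_start_pfn(pfn, order), block_end_pfn(pfn, order));
        return 0;
    }

Note that block_end_pfn() is exclusive: it names the first pfn of the next block.
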
57 * Page order with-respect-to which proactive compaction
87 unsigned int i, order, nr_pages; in split_map_pages() local
94 order = page_private(page); in split_map_pages()
95 nr_pages = 1 << order; in split_map_pages()
97 post_alloc_hook(page, order, __GFP_MOVABLE); in split_map_pages()
98 if (order) in split_map_pages()
99 split_page(page, order); in split_map_pages()
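split_map_pages() recovers the order that the free scanner stashed in page_private (line 655 below) and breaks each captured buddy into 1 << order base pages. A rough userspace model of that bookkeeping, using a simplified fake_page struct rather than the kernel's struct page:

    #include <stdio.h>

    /* Simplified stand-in for struct page: only the private field matters here. */
    struct fake_page {
        unsigned long private;
    };

    static void set_page_private(struct fake_page *page, unsigned long data)
    {
        page->private = data;
    }

    static unsigned long page_private(const struct fake_page *page)
    {
        return page->private;
    }

    int main(void)
    {
        struct fake_page captured = { 0 };
        unsigned int order, nr_pages;

        /* Free scanner: isolated an order-3 buddy, remember how big it was. */
        set_page_private(&captured, 3);

        /* split_map_pages(): read the order back and count the base pages. */
        order = page_private(&captured);
        nr_pages = 1U << order;
        printf("an order-%u free page splits into %u order-0 pages\n",
               order, nr_pages);
        return 0;
    }
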
153 static void defer_compaction(struct zone *zone, int order) in defer_compaction() argument
158 if (order < zone->compact_order_failed) in defer_compaction()
159 zone->compact_order_failed = order; in defer_compaction()
164 trace_mm_compaction_defer_compaction(zone, order); in defer_compaction()
168 static bool compaction_deferred(struct zone *zone, int order) in compaction_deferred() argument
172 if (order < zone->compact_order_failed) in compaction_deferred()
181 trace_mm_compaction_deferred(zone, order); in compaction_deferred()
187 * Update defer tracking counters after successful compaction of given order,
191 void compaction_defer_reset(struct zone *zone, int order, in compaction_defer_reset() argument
198 if (order >= zone->compact_order_failed) in compaction_defer_reset()
199 zone->compact_order_failed = order + 1; in compaction_defer_reset()
201 trace_mm_compaction_defer_reset(zone, order); in compaction_defer_reset()
205 static bool compaction_restarting(struct zone *zone, int order) in compaction_restarting() argument
207 if (order < zone->compact_order_failed) in compaction_restarting()
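The four helpers above implement an exponential backoff: each failed compaction bumps a per-zone defer shift (capped at COMPACT_MAX_DEFER_SHIFT), and later attempts at the failed order or higher are skipped until 1 << shift attempts have been considered. A hedged userspace sketch of that backoff scheme, with a trimmed fake_zone standing in for the real zone fields (details such as trace points and the considered-counter clamp are omitted):

    #include <stdbool.h>
    #include <stdio.h>

    #define COMPACT_MAX_DEFER_SHIFT 6   /* cap: back off by at most 64 attempts */

    /* Local stand-in for the per-zone defer bookkeeping fields. */
    struct fake_zone {
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
        int compact_order_failed;
    };

    /* Called when compaction failed: widen the backoff window. */
    static void defer_compaction(struct fake_zone *zone, int order)
    {
        zone->compact_considered = 0;
        if (++zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
            zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
        if (order < zone->compact_order_failed)
            zone->compact_order_failed = order;
    }

    /* Should this attempt be skipped?  Orders below the failed one never are. */
    static bool compaction_deferred(struct fake_zone *zone, int order)
    {
        unsigned int defer_limit = 1U << zone->compact_defer_shift;

        if (order < zone->compact_order_failed)
            return false;
        if (++zone->compact_considered >= defer_limit)
            return false;          /* window exhausted: try compacting again */
        return true;
    }

    int main(void)
    {
        struct fake_zone zone = { .compact_order_failed = 3 };
        int attempt;

        defer_compaction(&zone, 3);   /* one failure at order 3 */
        defer_compaction(&zone, 3);   /* second failure widens the window to 4 */

        for (attempt = 1; attempt <= 5; attempt++)
            printf("attempt %d at order 3: %s\n", attempt,
                   compaction_deferred(&zone, 3) ? "deferred" : "allowed");
        return 0;
    }

compaction_defer_reset() (lines 191-201 above) is the success path: it zeroes the counters and raises compact_order_failed past the order that just succeeded.
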
286 * released. It is always pointless to compact pages of such order (if they are
596 unsigned int order; in isolate_freepages_block() local
627 const unsigned int order = compound_order(page); in isolate_freepages_block() local
629 if (likely(order <= MAX_ORDER)) { in isolate_freepages_block()
630 blockpfn += (1UL << order) - 1; in isolate_freepages_block()
631 page += (1UL << order) - 1; in isolate_freepages_block()
632 nr_scanned += (1UL << order) - 1; in isolate_freepages_block()
650 /* Found a free page, will break it into order-0 pages */ in isolate_freepages_block()
651 order = buddy_order(page); in isolate_freepages_block()
652 isolated = __isolate_free_page(page, order); in isolate_freepages_block()
655 set_page_private(page, order); in isolate_freepages_block()
742 * is more than pageblock order. In this case, we adjust in isolate_freepages_range()
769 * pageblock_nr_pages for some non-negative n. (Max order in isolate_freepages_range()
881 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
890 * previous order-aligned block, and did not skip it due in isolate_migratepages_block()
898 * We failed to isolate in the previous order-aligned in isolate_migratepages_block()
901 * next_skip_pfn by 1 << order, as low_pfn might have in isolate_migratepages_block()
903 * a compound or a high-order buddy page in the in isolate_migratepages_block()
906 next_skip_pfn = block_end_pfn(low_pfn, cc->order); in isolate_migratepages_block()
990 * Skip if free. We read page order here without zone lock in isolate_migratepages_block()
1000 * a valid page order. Consider only values in the in isolate_migratepages_block()
1001 * valid order range to prevent low_pfn overflow. in isolate_migratepages_block()
1019 const unsigned int order = compound_order(page); in isolate_migratepages_block() local
1021 if (likely(order <= MAX_ORDER)) { in isolate_migratepages_block()
1022 low_pfn += (1UL << order) - 1; in isolate_migratepages_block()
1023 nr_scanned += (1UL << order) - 1; in isolate_migratepages_block()
1206 * instead of migrating, as we cannot form the cc->order buddy in isolate_migratepages_block()
1225 next_skip_pfn += 1UL << cc->order; in isolate_migratepages_block()
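The skip-on-failure logic above rests on one observation: a single page that cannot be isolated poisons its whole cc->order aligned block, because no buddy of that order can be assembled there, so the scanner gives up on the rest of the block. The toy sketch below only illustrates the pfn arithmetic (the aligned boundary from block_end_pfn() and the 1 << order block size), not the full control flow around next_skip_pfn:

    #include <stdio.h>

    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((unsigned long)(a) - 1))
    #define block_end_pfn(pfn, order)  ALIGN((pfn) + 1, 1UL << (order))

    int main(void)
    {
        int order = 4;                       /* pretend cc->order == 4 */
        unsigned long low_pfn = 0x1003;      /* scanner position inside a block */

        /* Isolation failed here: no order-4 buddy can be formed in this block,
         * so the next skip boundary is the end of the block containing low_pfn. */
        unsigned long next_skip_pfn = block_end_pfn(low_pfn, order);

        printf("failed at pfn %#lx: skip to %#lx (block size %lu pfns)\n",
               low_pfn, next_skip_pfn, 1UL << order);
        return 0;
    }
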
1353 * pageblock, so it's not worth to check order for valid range. in suitable_migration_target()
1451 static int next_search_order(struct compact_control *cc, int order) in next_search_order() argument
1453 order--; in next_search_order()
1454 if (order < 0) in next_search_order()
1455 order = cc->order - 1; in next_search_order()
1458 if (order == cc->search_order) { in next_search_order()
1461 cc->search_order = cc->order - 1; in next_search_order()
1465 return order; in next_search_order()
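next_search_order() walks the free-list orders downward from the cached starting point, wraps from 0 back up to cc->order - 1, and signals termination once it comes back around to where it started. A userspace sketch of that cyclic descent, modeled on the lines above with a trimmed compact_control stand-in (only the two fields the helper touches):

    #include <stdio.h>

    /* Trimmed stand-in: only the fields next_search_order() looks at. */
    struct fake_compact_control {
        int order;          /* order the allocation wants */
        int search_order;   /* where the last search started */
    };

    static int next_search_order(struct fake_compact_control *cc, int order)
    {
        order--;
        if (order < 0)
            order = cc->order - 1;      /* wrap below zero back to the top */

        if (order == cc->search_order) {
            /* Came full circle: shift the next starting point and stop. */
            cc->search_order--;
            if (cc->search_order < 0)
                cc->search_order = cc->order - 1;
            return -1;
        }
        return order;
    }

    int main(void)
    {
        struct fake_compact_control cc = { .order = 5, .search_order = 2 };
        int order;

        /* Visits free_area orders 2, 1, 0, 4, 3 and then terminates. */
        for (order = cc.search_order; order >= 0;
             order = next_search_order(&cc, order))
            printf("searching free_area[%d]\n", order);
        return 0;
    }

This is the iteration driven by the fast_isolate_freepages() loop at lines 1509-1511 below.
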
1477 int order; in fast_isolate_freepages() local
1479 /* Full compaction passes in a negative order */ in fast_isolate_freepages()
1480 if (cc->order <= 0) in fast_isolate_freepages()
1504 * Search starts from the last successful isolation order or the next in fast_isolate_freepages()
1505 * order to search after a previous failure in fast_isolate_freepages()
1507 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order); in fast_isolate_freepages()
1509 for (order = cc->search_order; in fast_isolate_freepages()
1510 !page && order >= 0; in fast_isolate_freepages()
1511 order = next_search_order(cc, order)) { in fast_isolate_freepages()
1512 struct free_area *area = &cc->zone->free_area[order]; in fast_isolate_freepages()
1537 cc->search_order = order; in fast_isolate_freepages()
1566 if (__isolate_free_page(page, order)) { in fast_isolate_freepages()
1567 set_page_private(page, order); in fast_isolate_freepages()
1568 nr_isolated = 1 << order; in fast_isolate_freepages()
1576 order = cc->search_order + 1; in fast_isolate_freepages()
1588 * Smaller scan on next order so the total scan is related in fast_isolate_freepages()
1852 int order; in fast_find_migrateblock() local
1879 if (cc->order <= PAGE_ALLOC_COSTLY_ORDER) in fast_find_migrateblock()
1902 for (order = cc->order - 1; in fast_find_migrateblock()
1903 order >= PAGE_ALLOC_COSTLY_ORDER && !found_block && nr_scanned < limit; in fast_find_migrateblock()
1904 order--) { in fast_find_migrateblock()
1905 struct free_area *area = &cc->zone->free_area[order]; in fast_find_migrateblock()
2069 * order == -1 is expected when compacting via
2072 static inline bool is_via_compact_memory(int order) in is_via_compact_memory() argument
2074 return order == -1; in is_via_compact_memory()
2171 unsigned int order; in __compact_finished() local
2214 if (is_via_compact_memory(cc->order)) in __compact_finished()
2228 for (order = cc->order; order <= MAX_ORDER; order++) { in __compact_finished()
2229 struct free_area *area = &cc->zone->free_area[order]; in __compact_finished()
2246 if (find_suitable_fallback(area, order, migratetype, in __compact_finished()
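__compact_finished() declares success as soon as any free-list order from cc->order up to MAX_ORDER can satisfy the request, either directly on the target migratetype or through a suitable fallback. A much-simplified userspace model of that upward scan; free_counts[] is a made-up array standing in for the per-order, per-migratetype free lists, and MAX_ORDER is the assumed default of 10:

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ORDER 10   /* assumed value: the usual default with 4K pages */

    /* Made-up per-order free counts standing in for zone->free_area[]. */
    static unsigned long free_counts[MAX_ORDER + 1] = {
        [0] = 120, [1] = 30, [2] = 4, [5] = 1,   /* nothing at orders 3 and 4 */
    };

    /* Is a block of at least 'want' order already available? */
    static bool compaction_satisfied(unsigned int want)
    {
        unsigned int order;

        for (order = want; order <= MAX_ORDER; order++)
            if (free_counts[order])
                return true;
        return false;
    }

    int main(void)
    {
        printf("order-3 request satisfied: %s\n",
               compaction_satisfied(3) ? "yes" : "no");   /* yes, via order 5 */
        printf("order-6 request satisfied: %s\n",
               compaction_satisfied(6) ? "yes" : "no");   /* no: keep compacting */
        return 0;
    }
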
2271 trace_mm_compaction_finished(cc->zone, cc->order, ret); in compact_finished()
2278 static bool __compaction_suitable(struct zone *zone, int order, in __compaction_suitable() argument
2284 * Watermarks for order-0 must be met for compaction to be able to in __compaction_suitable()
2297 watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? in __compaction_suitable()
2299 watermark += compact_gap(order); in __compaction_suitable()
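Before starting, __compaction_suitable() demands the order-0 watermark plus compact_gap(order) free pages: enough headroom to hold both the pages being migrated and their destinations. A small sketch of that arithmetic, assuming compact_gap() is twice the allocation size (2UL << order, as its kernel comment describes) and using made-up watermark values:

    #include <stdio.h>

    #define PAGE_ALLOC_COSTLY_ORDER 3

    /* Assumed definition: room for the migration sources and their targets. */
    static unsigned long compact_gap(unsigned int order)
    {
        return 2UL << order;
    }

    int main(void)
    {
        unsigned long min_wmark = 1024, low_wmark = 1280;   /* made-up, in pages */
        unsigned int order = 9;                             /* e.g. a THP request */

        /* Costly orders measure against the low watermark, cheap ones against min. */
        unsigned long watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
                                  low_wmark : min_wmark;
        watermark += compact_gap(order);

        printf("order-%u compaction wants %lu free pages (%lu of them as gap)\n",
               order, watermark, compact_gap(order));
        return 0;
    }
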
2307 bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) in compaction_suitable() argument
2312 suitable = __compaction_suitable(zone, order, highest_zoneidx, in compaction_suitable()
2319 * watermarks, but we already failed the high-order watermark check in compaction_suitable()
2332 if (order > PAGE_ALLOC_COSTLY_ORDER) { in compaction_suitable()
2333 int fragindex = fragmentation_index(zone, order); in compaction_suitable()
2345 trace_mm_compaction_suitable(zone, order, compact_result); in compaction_suitable()
2350 bool compaction_zonelist_suitable(struct alloc_context *ac, int order, in compaction_zonelist_suitable() argument
2366 * want to trash just for a single high order allocation which in compaction_zonelist_suitable()
2370 available = zone_reclaimable_pages(zone) / order; in compaction_zonelist_suitable()
2372 if (__compaction_suitable(zone, order, ac->highest_zoneidx, in compaction_zonelist_suitable()
2404 if (!is_via_compact_memory(cc->order)) { in compact_zone()
2410 if (zone_watermark_ok(cc->zone, cc->order, watermark, in compact_zone()
2415 if (!compaction_suitable(cc->zone, cc->order, in compact_zone()
2424 if (compaction_restarting(cc->zone, cc->order)) in compact_zone()
2506 * previous cc->order aligned block. in compact_zone()
2554 if (cc->order == COMPACTION_HPAGE_ORDER) in compact_zone()
2570 * cc->order aligned block where we migrated from? If yes, in compact_zone()
2575 if (cc->order > 0 && last_migrated_pfn) { in compact_zone()
2577 block_start_pfn(cc->migrate_pfn, cc->order); in compact_zone()
2618 static enum compact_result compact_zone_order(struct zone *zone, int order, in compact_zone_order() argument
2625 .order = order, in compact_zone_order()
2626 .search_order = order, in compact_zone_order()
2673 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
2675 * @order: The order of the current allocation
2683 enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, in try_to_compact_pages() argument
2699 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio); in try_to_compact_pages()
2707 && compaction_deferred(zone, order)) { in try_to_compact_pages()
2712 status = compact_zone_order(zone, order, gfp_mask, prio, in try_to_compact_pages()
2724 compaction_defer_reset(zone, order, false); in try_to_compact_pages()
2736 defer_compaction(zone, order); in try_to_compact_pages()
2765 .order = -1, in proactive_compact_node()
2796 .order = -1, in compact_node()
2941 * order is allocatable. in kcompactd_do_work()
2946 .order = pgdat->kcompactd_max_order, in kcompactd_do_work()
2953 trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order, in kcompactd_do_work()
2964 if (compaction_deferred(zone, cc.order)) in kcompactd_do_work()
2968 if (zone_watermark_ok(zone, cc.order, in kcompactd_do_work()
2972 if (!compaction_suitable(zone, cc.order, zoneid)) in kcompactd_do_work()
2982 compaction_defer_reset(zone, cc.order, false); in kcompactd_do_work()
2987 * order >= cc.order. This is ratelimited by the in kcompactd_do_work()
2996 defer_compaction(zone, cc.order); in kcompactd_do_work()
3007 * the requested order/highest_zoneidx in case it was higher/tighter in kcompactd_do_work()
3010 if (pgdat->kcompactd_max_order <= cc.order) in kcompactd_do_work()
3016 void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx) in wakeup_kcompactd() argument
3018 if (!order) in wakeup_kcompactd()
3021 if (pgdat->kcompactd_max_order < order) in wakeup_kcompactd()
3022 pgdat->kcompactd_max_order = order; in wakeup_kcompactd()
3037 trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order, in wakeup_kcompactd()
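wakeup_kcompactd() and kcompactd_do_work() cooperate through a simple high-water latch: wakers raise kcompactd_max_order to the largest order anyone asked for, and the worker clears it once it has serviced a request at least that large (line 3010 above). A userspace sketch of that latch, with a trimmed pgdat stand-in and the actual thread wakeup omitted:

    #include <stdio.h>

    /* Trimmed stand-in for the per-node state kcompactd uses. */
    struct fake_pgdat {
        int kcompactd_max_order;
    };

    /* Caller side: remember the largest order requested since the last run. */
    static void wakeup_kcompactd(struct fake_pgdat *pgdat, int order)
    {
        if (!order)
            return;
        if (pgdat->kcompactd_max_order < order)
            pgdat->kcompactd_max_order = order;
        /* ...then wake the kcompactd thread (not modeled here). */
    }

    /* Worker side: service the latched order, then clear the latch if nothing
     * larger arrived while we were working. */
    static void kcompactd_do_work(struct fake_pgdat *pgdat)
    {
        int order = pgdat->kcompactd_max_order;

        printf("kcompactd compacting for order %d\n", order);
        if (pgdat->kcompactd_max_order <= order)
            pgdat->kcompactd_max_order = 0;
    }

    int main(void)
    {
        struct fake_pgdat pgdat = { 0 };

        wakeup_kcompactd(&pgdat, 3);
        wakeup_kcompactd(&pgdat, 9);   /* a bigger request wins the latch */
        wakeup_kcompactd(&pgdat, 2);   /* a smaller one does not lower it */
        kcompactd_do_work(&pgdat);

        printf("latched order after the run: %d\n", pgdat.kcompactd_max_order);
        return 0;
    }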