Searched refs:zone_managed_pages (Results 1 – 5 of 5) sorted by relevance
28 reserved += zone->present_pages - zone_managed_pages(zone); in show_mem()
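The show_mem() hit above subtracts the managed count from present_pages to count memory a zone holds but never hands to the buddy allocator (memmap, boot-time reservations, and the like). A minimal sketch of that accounting, assuming the usual for_each_populated_zone() iterator; the helper name is hypothetical, not kernel code:

    #include <linux/mmzone.h>

    /* Hypothetical helper: pages that are present in a zone but not
     * managed by the buddy allocator count as "reserved". */
    static unsigned long count_reserved_pages(void)
    {
        struct zone *zone;
        unsigned long reserved = 0;

        for_each_populated_zone(zone)
            reserved += zone->present_pages - zone_managed_pages(zone);

        return reserved;
    }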
2618 if ((pageblock_nr_pages * 4) > zone_managed_pages(zone)) in boost_watermark()
2778 max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages; in reserve_highatomic_pageblock()
5697 unsigned long size = zone_managed_pages(zone); in nr_free_zone_pages()
5796 managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); in si_meminfo_node()
5805 managed_highpages += zone_managed_pages(zone); in si_meminfo_node()
6020 K(zone_managed_pages(zone)), in show_free_areas()
6773 batch = min(zone_managed_pages(zone) >> 10, (1024 * 1024) / PAGE_SIZE); in zone_batchsize()
6830 total_pages = zone_managed_pages(zone) / percpu_pagelist_high_fraction; in zone_highsize()
8294 unsigned long managed_pages = zone_managed_pages(zone); in calculate_totalreserve_pages()
8331 bool clear = !ratio || !zone_managed_pages(zone); in setup_per_zone_lowmem_reserve()
[all …]
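Several of these page-allocator hits use the managed count as a scaling base: reserve_highatomic_pageblock() caps the high-atomic reserve at roughly 1% of the zone, and zone_batchsize() derives the per-CPU list batch from the same count. A hedged paraphrase of those two calculations; the wrapper function and the (void) uses are illustrative only, not the kernel's exact code:

    #include <linux/mmzone.h>
    #include <linux/minmax.h>

    /* Illustrative sketch of how page_alloc scales limits off the
     * zone's managed page count. */
    static void sketch_zone_scaling(struct zone *zone)
    {
        /* highatomic reserve: about 1% of managed pages, rounded up
         * by one pageblock. */
        unsigned long max_managed =
            (zone_managed_pages(zone) / 100) + pageblock_nr_pages;

        /* per-CPU page list batch: ~0.1% of the zone (>> 10), capped
         * at 1 MiB worth of pages. */
        unsigned long batch =
            min(zone_managed_pages(zone) >> 10,
                (unsigned long)((1024 * 1024) / PAGE_SIZE));

        (void)max_managed;
        (void)batch;
    }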
229 mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT); in calculate_normal_threshold()
1672 zone_managed_pages(zone), in zoneinfo_show_print()
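The calculate_normal_threshold() hit converts the zone's managed pages into 128 MiB units before choosing a per-CPU stat threshold: one unit is 2^27 bytes, one page is 2^PAGE_SHIFT bytes, hence the (27 - PAGE_SHIFT) shift. A short illustration of the arithmetic:

    /* With 4 KiB pages (PAGE_SHIFT = 12) the shift is 15, so each
     * unit of "mem" is 2^15 pages = 32768 * 4 KiB = 128 MiB. */
    int mem = zone_managed_pages(zone) >> (27 - PAGE_SHIFT);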
678 static inline unsigned long zone_managed_pages(struct zone *zone) in zone_managed_pages() function
994 return zone_managed_pages(zone); in managed_zone()
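The hit at line 678 is the accessor itself: in recent kernels it is a thin wrapper that atomically reads the zone's managed_pages counter, and managed_zone() simply tests that count for non-zero. A sketch, assuming the current include/linux/mmzone.h layout:

    static inline unsigned long zone_managed_pages(struct zone *zone)
    {
        /* managed_pages is an atomic_long_t so memory hotplug and
         * deferred init can adjust it without holding the zone lock. */
        return (unsigned long)atomic_long_read(&zone->managed_pages);
    }

    /* A zone counts as "managed" if the buddy allocator owns at least
     * one of its pages. */
    static inline bool managed_zone(struct zone *zone)
    {
        return zone_managed_pages(zone);
    }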
1668 mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]); in kfd_fill_mem_info_for_cpu()
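The amdkfd hit sums managed pages over every zone of a NUMA node to report that node's memory size; a hedged sketch of that loop, where the helper name and return type are assumptions rather than the driver's exact code:

    #include <linux/mmzone.h>

    /* Hypothetical helper: total memory of one NUMA node, in bytes,
     * counting only pages the buddy allocator manages. */
    static u64 node_managed_bytes(struct pglist_data *pgdat)
    {
        u64 mem_in_bytes = 0;
        int zone_type;

        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
            mem_in_bytes +=
                zone_managed_pages(&pgdat->node_zones[zone_type]);

        return mem_in_bytes << PAGE_SHIFT;
    }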