| /Linux-v5.4/include/linux/ |
| D | mmzone.h | 27 #define MAX_ORDER 11 macro 29 #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER macro 31 #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) 86 for (order = 0; order < MAX_ORDER; order++) \ 516 struct free_area free_area[MAX_ORDER]; 1144 #if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS 1145 #error Allocator MAX_ORDER exceeds SECTION_SIZE
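|
These mmzone.h hits are the core definition: MAX_ORDER defaults to 11 unless the architecture sets CONFIG_FORCE_MAX_ZONEORDER, and the buddy allocator's largest block is MAX_ORDER_NR_PAGES = 2^(MAX_ORDER - 1) pages. A minimal user-space sketch of that arithmetic, assuming the default MAX_ORDER of 11 and 4 KiB pages (both values are assumptions, not taken from any particular build):

    #include <stdio.h>

    /* Assumed v5.4 defaults; a real build may override either per-arch. */
    #define MAX_ORDER  11
    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long max_order_nr_pages = 1UL << (MAX_ORDER - 1);   /* 1024 pages */
        unsigned long max_block_bytes    = max_order_nr_pages << PAGE_SHIFT;

        printf("largest buddy block: %lu pages (%lu KiB)\n",
               max_order_nr_pages, max_block_bytes >> 10);
        return 0;
    }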
|
| D | slab.h | 247 #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \ 248 (MAX_ORDER + PAGE_SHIFT - 1) : 25) 261 #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1) 274 #define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
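|
The slab.h hits derive the kmalloc limits from the same constant: KMALLOC_SHIFT_MAX is MAX_ORDER + PAGE_SHIFT - 1, and KMALLOC_SHIFT_HIGH is the same value capped at 25. A hedged sketch of what those shifts evaluate to, again assuming MAX_ORDER = 11 and PAGE_SHIFT = 12:

    #include <stdio.h>

    #define MAX_ORDER  11      /* assumed v5.4 default */
    #define PAGE_SHIFT 12      /* assumed 4 KiB pages */

    /* Copied from the slab.h excerpt above. */
    #define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
                                (MAX_ORDER + PAGE_SHIFT - 1) : 25)
    #define KMALLOC_SHIFT_MAX  (MAX_ORDER + PAGE_SHIFT - 1)

    int main(void)
    {
        printf("KMALLOC_SHIFT_HIGH = %d (%lu KiB)\n",
               KMALLOC_SHIFT_HIGH, (1UL << KMALLOC_SHIFT_HIGH) >> 10);
        printf("KMALLOC_SHIFT_MAX  = %d (%lu KiB)\n",
               KMALLOC_SHIFT_MAX,  (1UL << KMALLOC_SHIFT_MAX) >> 10);
        return 0;
    }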
|
| D | pageblock-flags.h | 48 #define pageblock_order (MAX_ORDER-1)
|
| /Linux-v5.4/mm/ |
| D | shuffle.h | 19 #define SHUFFLE_ORDER (MAX_ORDER-1)
|
| D | page_owner.c | 299 if (freepage_order < MAX_ORDER) in pagetypeinfo_showmixedcount_print() 496 if (freepage_order < MAX_ORDER) in read_page_owner() 590 if (order > 0 && order < MAX_ORDER) in init_pages_in_zone()
|
| D | vmstat.c | 1030 for (order = 0; order < MAX_ORDER; order++) { in fill_contig_page_info() 1058 if (WARN_ON_ONCE(order >= MAX_ORDER)) in __fragmentation_index() 1357 for (order = 0; order < MAX_ORDER; ++order) in frag_show_print() 1382 for (order = 0; order < MAX_ORDER; ++order) { in pagetypeinfo_showfree_print() 1422 for (order = 0; order < MAX_ORDER; ++order) in pagetypeinfo_showfree() 2032 for (order = 0; order < MAX_ORDER; ++order) { in unusable_show_print() 2094 for (order = 0; order < MAX_ORDER; ++order) { in extfrag_show_print()
|
| D | page_alloc.c | 735 if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { in debug_guardpage_minorder_setup() 915 max_order = min_t(unsigned int, MAX_ORDER, pageblock_order + 1); in __free_one_page() 954 if (max_order < MAX_ORDER) { in __free_one_page() 990 if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn) in __free_one_page() 1996 if (pageblock_order >= MAX_ORDER) { in init_cma_reserved_pageblock() 2001 __free_pages(p, MAX_ORDER - 1); in init_cma_reserved_pageblock() 2201 for (current_order = order; current_order < MAX_ORDER; ++current_order) { in __rmqueue_smallest() 2577 for (order = 0; order < MAX_ORDER; order++) { in unreserve_highatomic_pageblock() 2661 for (current_order = MAX_ORDER - 1; current_order >= min_order; in __rmqueue_fallback() 2687 for (current_order = order; current_order < MAX_ORDER; in __rmqueue_fallback() [all …]
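|
Several of the page_alloc.c hits (for example line 2201 in __rmqueue_smallest()) show the allocator's basic search loop: start at the requested order and walk upward, never reaching MAX_ORDER itself, since valid orders run from 0 to MAX_ORDER - 1. A small sketch of that pattern with a made-up free-list table (the array contents are purely hypothetical):

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_ORDER 11    /* assumed v5.4 default */

    /* Hypothetical per-order occupancy standing in for zone->free_area[]. */
    static bool free_list_has_block[MAX_ORDER] = { [9] = true };

    /* Mimics the __rmqueue_smallest() walk: try the requested order first,
     * then every larger order, stopping before MAX_ORDER. */
    static int find_smallest_order(unsigned int order)
    {
        unsigned int current_order;

        for (current_order = order; current_order < MAX_ORDER; current_order++)
            if (free_list_has_block[current_order])
                return current_order;
        return -1;      /* no block large enough is free */
    }

    int main(void)
    {
        printf("order-3 request served from order %d\n", find_smallest_order(3));
        return 0;
    }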
|
| D | cma.c | 193 max_t(unsigned long, MAX_ORDER - 1, pageblock_order); in cma_init_reserved_mem() 280 max_t(unsigned long, MAX_ORDER - 1, pageblock_order)); in cma_declare_contiguous()
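|
cma.c here, and contiguous.c and of_reserved_mem.c further down, all compute the same minimum alignment for reserved regions: PAGE_SIZE shifted by whichever of MAX_ORDER - 1 and pageblock_order is larger. With the non-hugetlb default from pageblock-flags.h above (pageblock_order = MAX_ORDER - 1) the two are equal; the concrete numbers below assume 4 KiB pages and MAX_ORDER = 11:

    #include <stdio.h>

    #define PAGE_SHIFT      12                  /* assumed 4 KiB pages */
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define MAX_ORDER       11                  /* assumed v5.4 default */
    #define pageblock_order (MAX_ORDER - 1)     /* default from pageblock-flags.h */

    int main(void)
    {
        unsigned long order = (MAX_ORDER - 1) > pageblock_order ?
                              (MAX_ORDER - 1) : pageblock_order;
        unsigned long align = PAGE_SIZE << order;

        printf("reserved-region alignment: %lu KiB\n", align >> 10);
        return 0;
    }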
|
| D | compaction.c | 583 if (likely(order < MAX_ORDER)) { in isolate_freepages_block() 890 if (freepage_order > 0 && freepage_order < MAX_ORDER) in isolate_migratepages_block() 905 if (likely(order < MAX_ORDER)) in isolate_migratepages_block() 1889 for (order = cc->order; order < MAX_ORDER; order++) { in __compact_finished()
|
| /Linux-v5.4/arch/powerpc/include/asm/ |
| D | fadump-internal.h | 24 max_t(unsigned long, MAX_ORDER - 1, \
|
| /Linux-v5.4/kernel/events/ |
| D | ring_buffer.c | 571 if (order > MAX_ORDER) in rb_alloc_aux_page() 572 order = MAX_ORDER; in rb_alloc_aux_page() 766 if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER) in rb_alloc()
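|
The perf ring-buffer code treats MAX_ORDER as the ceiling for its AUX page allocations: oversized orders are clamped to MAX_ORDER at lines 571-572, and rb_alloc() bails out once order_base_2(size) reaches PAGE_SHIFT + MAX_ORDER at line 766. A rough sketch of that bound with hypothetical numbers, where ceil_log2() stands in for the kernel's order_base_2():

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4 KiB pages */
    #define MAX_ORDER  11   /* assumed v5.4 default */

    /* Rough stand-in for order_base_2(): smallest n with 2^n >= value. */
    static int ceil_log2(unsigned long value)
    {
        int bits = 0;

        while ((1UL << bits) < value)
            bits++;
        return bits;
    }

    int main(void)
    {
        unsigned long size = 16UL << 20;    /* hypothetical 16 MiB buffer */

        if (ceil_log2(size) >= PAGE_SHIFT + MAX_ORDER)
            printf("%lu MiB exceeds the single-allocation limit\n", size >> 20);
        return 0;
    }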
|
| /Linux-v5.4/drivers/gpu/drm/i915/gem/ |
| D | i915_gem_internal.c | 45 max_order = MAX_ORDER; in i915_gem_object_get_pages_internal()
|
| /Linux-v5.4/arch/ia64/mm/ |
| D | hugetlbpage.c | 182 size >= (1UL << PAGE_SHIFT << MAX_ORDER)) { in hugetlb_setup_sz()
|
| /Linux-v5.4/arch/alpha/mm/ |
| D | numa.c | 153 node_min_pfn &= ~((1UL << (MAX_ORDER-1))-1); in setup_memory_node()
|
| /Linux-v5.4/fs/ramfs/ |
| D | file-nommu.c | 73 if (unlikely(order >= MAX_ORDER)) in ramfs_nommu_expand_for_mapping()
|
| /Linux-v5.4/arch/sparc/mm/ |
| D | tsb.c | 405 if (max_tsb_size > (PAGE_SIZE << MAX_ORDER)) in tsb_grow() 406 max_tsb_size = (PAGE_SIZE << MAX_ORDER); in tsb_grow()
|
| /Linux-v5.4/arch/um/kernel/ |
| D | um_arch.c | 309 max_physmem &= ~((1 << (PAGE_SHIFT + MAX_ORDER)) - 1); in linux_main()
|
| /Linux-v5.4/kernel/dma/ |
| D | contiguous.c | 301 phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); in rmem_cma_setup()
|
| /Linux-v5.4/Documentation/admin-guide/kdump/ |
| D | vmcoreinfo.rst | 164 Each zone has a free_area structure array called free_area[MAX_ORDER]. 181 (zone.free_area, MAX_ORDER) 185 free_area ranges. MAX_ORDER is used by the zone buddy allocator.
|
| /Linux-v5.4/drivers/misc/mic/scif/ |
| D | scif_rma.h | 417 if (align && get_order(align) < MAX_ORDER) in scif_zalloc()
|
| /Linux-v5.4/Documentation/networking/ |
| D | packet_mmap.txt | 260 region allocated by __get_free_pages is determined by the MAX_ORDER macro. More 263 PAGE_SIZE << MAX_ORDER 266 In a 2.4/i386 kernel MAX_ORDER is 10 267 In a 2.6/i386 kernel MAX_ORDER is 11 273 /usr/include/linux/mmzone.h to get PAGE_SIZE MAX_ORDER declarations. 319 <max-order> : is the value defined with MAX_ORDER
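|
The packet_mmap.txt excerpt gives the documentation's own upper bound for a PACKET_MMAP block: PAGE_SIZE << MAX_ORDER. Plugging in the values it quotes for a 2.6/i386 kernel (PAGE_SIZE = 4096, MAX_ORDER = 11) gives 8 MiB; a one-liner to check the arithmetic:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_size = 4096;     /* i386 page size quoted in the doc */
        int max_order = 11;                 /* 2.6/i386 value quoted in the doc */

        printf("maximum block size: %lu MiB\n", (page_size << max_order) >> 20);
        return 0;
    }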
|
| /Linux-v5.4/drivers/of/ |
| D | of_reserved_mem.c | 113 max_t(unsigned long, MAX_ORDER - 1, pageblock_order); in __reserved_mem_alloc_size()
|
| /Linux-v5.4/arch/powerpc/mm/book3s64/ |
| D | iommu_api.c | 100 chunk = (1UL << (PAGE_SHIFT + MAX_ORDER - 1)) / in mm_iommu_do_alloc()
|
| /Linux-v5.4/kernel/ |
| D | crash_core.c | 448 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER); in crash_save_vmcoreinfo_init()
|
| /Linux-v5.4/drivers/misc/genwqe/ |
| D | card_utils.c | 210 if (get_order(size) >= MAX_ORDER) in __genwqe_alloc_consistent() 308 if (get_order(sgl->sgl_size) > MAX_ORDER) { in genwqe_alloc_sync_sgl()
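|
Like ramfs and scif above, the genwqe driver rejects requests whose allocation order reaches MAX_ORDER before calling into the allocator. An illustrative user-space version of that guard, with a simplified stand-in for get_order(), the kernel helper that converts a byte count into a buddy order:

    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* assumed 4 KiB pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define MAX_ORDER  11                   /* assumed v5.4 default */

    /* Simplified stand-in for get_order(): smallest order whose block of
     * 2^order pages covers 'size' bytes. */
    static int order_for_size(unsigned long size)
    {
        unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        int order = 0;

        while ((1UL << order) < pages)
            order++;
        return order;
    }

    int main(void)
    {
        unsigned long size = 8UL << 20;     /* hypothetical 8 MiB request */

        if (order_for_size(size) >= MAX_ORDER)
            printf("%lu MiB is too large for one contiguous allocation\n",
                   size >> 20);
        return 0;
    }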
|