Lines matching full:order — the kernel's GFP page-allocation interface (these declarations appear to come from include/linux/gfp.h)

63  * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
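The fallback chain means a request is served from the highest eligible zone with free pages, then drops down the chain. A minimal sketch, assuming a kernel-module context: GFP_HIGHUSER_MOVABLE makes ZONE_MOVABLE eligible, so the allocator tries MOVABLE first, then HIGHMEM, NORMAL, DMA32, DMA.

#include <linux/gfp.h>

static struct page *movable_page_sketch(void)
{
	/* order-0 request; zone selection follows the fallback order above */
	return alloc_pages(GFP_HIGHUSER_MOVABLE, 0);
}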
171 static inline void arch_free_page(struct page *page, int order) { }
174 static inline void arch_alloc_page(struct page *page, int order) { }
177 struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
179 struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
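These two are the core entry points; everything below wraps them. The matched lines cut off mid-signature, so the final parameter is not shown — judging by the NULL passed through at lines 237, 246, and 260, it is a nodemask. A hedged sketch of a direct call:

#include <linux/gfp.h>
#include <linux/topology.h>

static struct page *core_alloc_sketch(void)
{
	/* order-2 = 4 contiguous pages, preferring the local node; the
	 * trailing NULL means no nodemask restriction (an assumption
	 * based on the wrapper calls at lines 237/246/260). */
	return __alloc_pages(GFP_KERNEL, 2, numa_node_id(), NULL);
}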
191 /* Bulk allocate order-0 pages */
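The bulk declaration itself does not contain the word "order", so it is absent from this listing; kernels of this vintage expose the facility through wrappers such as alloc_pages_bulk_array() (an assumption here). A sketch:

#include <linux/gfp.h>

static unsigned long bulk_sketch(struct page **pages, unsigned long nr)
{
	/* Fills pages[] with order-0 pages and returns how many were
	 * actually allocated, which may be fewer than nr. */
	return alloc_pages_bulk_array(GFP_KERNEL, nr, pages);
}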
232 __alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
237 return __alloc_pages(gfp_mask, order, nid, NULL);
241 struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
246 return __folio_alloc(gfp, order, nid, NULL);
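Both node-exact variants pass the request straight through with a NULL nodemask. A sketch, assuming the caller already holds a valid node id (the elided lines around 232 hold sanity checks on nid in mainline, though that is an assumption here):

#include <linux/gfp.h>

static struct page *node_alloc_sketch(int nid)
{
	/* nid must be a real node; NUMA_NO_NODE is not accepted here --
	 * use alloc_pages_node() (line 255) for that case. */
	return __alloc_pages_node(nid, GFP_KERNEL, 0);
}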
255 unsigned int order)
260 return __alloc_pages_node(nid, gfp_mask, order);
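alloc_pages_node() is the forgiving wrapper: per the call at line 260 it funnels into __alloc_pages_node(), and the elided lines around 255 presumably substitute a local node when the caller passes NUMA_NO_NODE (an assumption consistent with mainline). Sketch:

#include <linux/gfp.h>
#include <linux/numa.h>

static struct page *no_node_sketch(void)
{
	/* NUMA_NO_NODE: let the kernel pick the caller's local node */
	return alloc_pages_node(NUMA_NO_NODE, GFP_KERNEL, 1);
}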
264 struct page *alloc_pages(gfp_t gfp, unsigned int order);
265 struct folio *folio_alloc(gfp_t gfp, unsigned order);
266 struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
269 static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
271 return alloc_pages_node(numa_node_id(), gfp_mask, order);
273 static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
275 return __folio_alloc_node(gfp, order, numa_node_id());
277 #define vma_alloc_folio(gfp, order, vma, addr, hugepage) \
278 folio_alloc(gfp, order)
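Lines 269-278 are the !CONFIG_NUMA fallbacks: alloc_pages() and folio_alloc() route to the only node, and vma_alloc_folio() degenerates to folio_alloc(), discarding the vma/addr/hugepage hints. A folio round-trip sketch:

#include <linux/gfp.h>
#include <linux/mm.h>

static void folio_sketch(void)
{
	struct folio *folio = folio_alloc(GFP_KERNEL, 0);

	if (folio)
		folio_put(folio);	/* drop the last reference, freeing it */
}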
289 extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
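__get_free_pages() returns a kernel virtual address rather than a struct page, so it cannot be used with __GFP_HIGHMEM. Sketch (the matching free appears at line 303 below):

#include <linux/gfp.h>

static unsigned long vaddr_sketch(void)
{
	/* order-1 = 2 contiguous pages, addressed virtually and zeroed */
	return __get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
}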
299 #define __get_dma_pages(gfp_mask, order) \
300 __get_free_pages((gfp_mask) | GFP_DMA, (order))
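The macro at lines 299-300 simply ORs in GFP_DMA, restricting the request to ZONE_DMA at the bottom of the fallback chain (typically for legacy devices with narrow addressing limits). Sketch:

#include <linux/gfp.h>

static unsigned long dma_buf_sketch(void)
{
	/* one page guaranteed to lie in ZONE_DMA */
	return __get_dma_pages(GFP_KERNEL, 0);
}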
302 extern void __free_pages(struct page *page, unsigned int order);
303 extern void free_pages(unsigned long addr, unsigned int order);
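The two free routines mirror the two allocation styles: __free_pages() takes the struct page from alloc_pages(), while free_pages() takes the virtual address from __get_free_pages(). In both cases the order passed to free must match the order used to allocate. Sketch:

#include <linux/gfp.h>

static void free_sketch(void)
{
	struct page *page = alloc_pages(GFP_KERNEL, 2);
	unsigned long addr = __get_free_pages(GFP_KERNEL, 2);

	if (page)
		__free_pages(page, 2);	/* free by struct page */
	if (addr)
		free_pages(addr, 2);	/* free by virtual address */
}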