/Linux-v5.4/include/trace/events/ |
D | kmem.h | 281 int alloc_order, int fallback_order, 285 alloc_order, fallback_order, 290 __field( int, alloc_order ) 299 __entry->alloc_order = alloc_order; 310 __entry->alloc_order,
|
/Linux-v5.4/drivers/lightnvm/ |
D | pblk-rb.c | 80 unsigned int alloc_order, order, iter; in pblk_rb_init() local 104 alloc_order = power_size; in pblk_rb_init() 105 if (alloc_order >= max_order) { in pblk_rb_init() 107 iter = (1 << (alloc_order - max_order)); in pblk_rb_init() 109 order = alloc_order; in pblk_rb_init()
|
/Linux-v5.4/Documentation/trace/postprocess/ |
D | trace-pagealloc-postprocess.pl | 242 my ($alloc_order, $fallback_order, $pageblock_order); 254 $alloc_order = $3;
|
/Linux-v5.4/arch/powerpc/sysdev/xive/ |
D | spapr.c | 519 unsigned int alloc_order; in xive_spapr_cleanup_queue() local 528 alloc_order = xive_alloc_order(xive_queue_shift); in xive_spapr_cleanup_queue() 529 free_pages((unsigned long)q->qpage, alloc_order); in xive_spapr_cleanup_queue()
|
D | native.c | 231 unsigned int alloc_order; in xive_native_cleanup_queue() local 238 alloc_order = xive_alloc_order(xive_queue_shift); in xive_native_cleanup_queue() 239 free_pages((unsigned long)q->qpage, alloc_order); in xive_native_cleanup_queue()
|
D | common.c | 1519 unsigned int alloc_order; in xive_queue_page_alloc() local 1523 alloc_order = xive_alloc_order(queue_shift); in xive_queue_page_alloc() 1524 pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order); in xive_queue_page_alloc()
|
/Linux-v5.4/drivers/dma/ioat/ |
D | dma.h | 127 u16 alloc_order; member 310 return 1 << ioat_chan->alloc_order; in ioat_ring_size()
|
D | sysfs.c | 115 return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1); in ring_size_show()
|
D | init.c | 616 const int total_descs = 1 << ioat_chan->alloc_order; in ioat_free_chan_resources() 664 ioat_chan->alloc_order = 0; in ioat_free_chan_resources() 689 return 1 << ioat_chan->alloc_order; in ioat_alloc_chan_resources() 718 ioat_chan->alloc_order = order; in ioat_alloc_chan_resources() 754 return 1 << ioat_chan->alloc_order; in ioat_alloc_chan_resources()
|
/Linux-v5.4/mm/ |
D | vmscan.c | 3798 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order, in kswapd_try_to_sleep() argument 3829 wakeup_kcompactd(pgdat, alloc_order, classzone_idx); in kswapd_try_to_sleep() 3893 unsigned int alloc_order, reclaim_order; in kswapd() local 3922 alloc_order = reclaim_order = pgdat->kswapd_order; in kswapd() 3926 kswapd_try_to_sleep(pgdat, alloc_order, reclaim_order, in kswapd() 3930 alloc_order = reclaim_order = pgdat->kswapd_order; in kswapd() 3955 alloc_order); in kswapd() 3956 reclaim_order = balance_pgdat(pgdat, alloc_order, classzone_idx); in kswapd() 3957 if (reclaim_order < alloc_order) in kswapd()
|
D | page_alloc.c | 2514 unsigned int alloc_order) in reserve_highatomic_pageblock() argument
|
/Linux-v5.4/drivers/net/ethernet/amd/xgbe/ |
D | xgbe-desc.c | 286 struct xgbe_page_alloc *pa, int alloc_order, in xgbe_alloc_pages() argument 295 order = alloc_order; in xgbe_alloc_pages()
|
/Linux-v5.4/drivers/net/ethernet/brocade/bna/ |
D | bnad.h | 253 int alloc_order; member
|
D | bnad.c | 252 unmap_q->alloc_order = -1; in bnad_rxq_alloc_uninit() 271 unmap_q->alloc_order = 0; in bnad_rxq_alloc_init() 275 unmap_q->alloc_order = 0; in bnad_rxq_alloc_init() 279 unmap_q->alloc_order = order; in bnad_rxq_alloc_init() 352 alloc_size = PAGE_SIZE << unmap_q->alloc_order; in bnad_rxq_refill_page() 360 unmap_q->alloc_order); in bnad_rxq_refill_page()
|
/Linux-v5.4/Documentation/trace/ |
D | events-kmem.rst | 107 mm_page_alloc_extfrag page=%p pfn=%lu alloc_order=%d fallback_order=%d pageblock_order=%d alloc_migratetype=%d fallback_migratetype=%d fragmenting=%d change_ownership=%d
|
/Linux-v5.4/arch/powerpc/include/asm/ |
D | opal.h | 266 int64_t opal_xive_alloc_vp_block(uint32_t alloc_order);
|