Lines Matching full:mem (net/core/xdp.c)

41 	BUILD_BUG_ON(sizeof_field(struct xdp_mem_allocator, mem.id) != sizeof(u32)); in xdp_mem_id_hashfn()
54 return xa->mem.id != mem_id; in xdp_mem_id_cmp()
60 .key_offset = offsetof(struct xdp_mem_allocator, mem.id),
61 .key_len = sizeof_field(struct xdp_mem_allocator, mem.id),
76 ida_simple_remove(&mem_id_pool, xa->mem.id); in __xdp_mem_allocator_rcu_free()
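
The .key_offset/.key_len matches above define the key of the ID-to-allocator hashtable, and the BUILD_BUG_ON pins that key to a u32. A minimal sketch of how those pieces combine into rhashtable parameters, assuming the node member and the two helpers named above; the sizing field is illustrative, not the file's exact value.

	/* Sketch only: ties the matched key fields to the two mem.id helpers. */
	static const struct rhashtable_params mem_id_rht_params_sketch = {
		.nelem_hint  = 64,	/* assumption: illustrative sizing */
		.head_offset = offsetof(struct xdp_mem_allocator, node),
		.key_offset  = offsetof(struct xdp_mem_allocator, mem.id),
		.key_len     = sizeof_field(struct xdp_mem_allocator, mem.id),
		.hashfn      = xdp_mem_id_hashfn,
		.obj_cmpfn   = xdp_mem_id_cmp,
	};
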
113 void xdp_unreg_mem_model(struct xdp_mem_info *mem) in xdp_unreg_mem_model() argument
116 int type = mem->type; in xdp_unreg_mem_model()
117 int id = mem->id; in xdp_unreg_mem_model()
119 /* Reset mem info to defaults */ in xdp_unreg_mem_model()
120 mem->id = 0; in xdp_unreg_mem_model()
121 mem->type = 0; in xdp_unreg_mem_model()
142 xdp_unreg_mem_model(&xdp_rxq->mem); in xdp_rxq_info_unreg_mem_model()
270 static struct xdp_mem_allocator *__xdp_reg_mem_model(struct xdp_mem_info *mem, in __xdp_reg_mem_model() argument
282 mem->type = type; in __xdp_reg_mem_model()
311 mem->id = id; in __xdp_reg_mem_model()
312 xdp_alloc->mem = *mem; in __xdp_reg_mem_model()
318 ida_simple_remove(&mem_id_pool, mem->id); in __xdp_reg_mem_model()
319 mem->id = 0; in __xdp_reg_mem_model()
325 page_pool_use_xdp_mem(allocator, mem_allocator_disconnect, mem); in __xdp_reg_mem_model()
336 int xdp_reg_mem_model(struct xdp_mem_info *mem, in xdp_reg_mem_model() argument
341 xdp_alloc = __xdp_reg_mem_model(mem, type, allocator); in xdp_reg_mem_model()
358 xdp_alloc = __xdp_reg_mem_model(&xdp_rxq->mem, type, allocator); in xdp_rxq_info_reg_mem_model()
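
For context on the registration matches above, a hedged driver-side sketch; rxq and its page_pool member are illustrative names, and the queue's xdp_rxq_info is assumed to be registered already via xdp_rxq_info_reg().

	/* Attach the page_pool as this RX queue's XDP memory model. */
	err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
					 rxq->page_pool);
	if (err)
		goto err_free_pool;

	/* ... datapath runs; returned frames go back to rxq->page_pool ... */

	/* Teardown mirrors setup: drop the memory model (this lands in the
	 * xdp_unreg_mem_model() / xdp_rxq_info_unreg_mem_model() hits above)
	 * before the pool itself is destroyed. */
	xdp_rxq_info_unreg_mem_model(&rxq->xdp_rxq);
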
375 void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct, in __xdp_return() argument
380 switch (mem->type) { in __xdp_return()
386 	 * as mem->type knows this is a page_pool page in __xdp_return()
403 WARN(1, "Incorrect XDP memory type (%d) usage", mem->type); in __xdp_return()
420 __xdp_return(page_address(page), &xdpf->mem, false, NULL); in xdp_return_frame()
423 __xdp_return(xdpf->data, &xdpf->mem, false, NULL); in xdp_return_frame()
439 __xdp_return(page_address(page), &xdpf->mem, true, NULL); in xdp_return_frame_rx_napi()
442 __xdp_return(xdpf->data, &xdpf->mem, true, NULL); in xdp_return_frame_rx_napi()
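
The __xdp_return() hits above sit behind the per-frame helpers; which branch runs is decided by the frame's mem.type. A hedged caller-side sketch of a TX-completion loop, with ring and field names illustrative.

	for (i = 0; i < done; i++) {
		struct xdp_frame *xdpf = tx_ring->completed[i];	/* illustrative */

		if (in_own_napi_poll)
			/* May recycle straight into the page_pool's NAPI cache. */
			xdp_return_frame_rx_napi(xdpf);
		else
			xdp_return_frame(xdpf);
	}
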
448 * (identified via the mem.id field) in bulk to optimize
452 * it is full or when mem.id changes.
464 /* bq->xa is not cleared to save lookup, if mem.id same in next bulk */ in xdp_flush_frame_bulk()
473 struct xdp_mem_info *mem = &xdpf->mem; in xdp_return_frame_bulk() local
476 if (mem->type != MEM_TYPE_PAGE_POOL) { in xdp_return_frame_bulk()
483 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); in xdp_return_frame_bulk()
491 if (unlikely(mem->id != xa->mem.id)) { in xdp_return_frame_bulk()
493 bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); in xdp_return_frame_bulk()
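
The comment fragments and hashtable lookups above belong to the bulk return path, which defers frees per mem.id and flushes when the queue fills or the ID changes. A hedged usage sketch of that defer/flush pattern; loop and ring names are illustrative, and rcu_read_lock() is held around the mem.id lookups matched above.

	struct xdp_frame_bulk bq;

	xdp_frame_bulk_init(&bq);

	rcu_read_lock();	/* covers the rhashtable lookup on mem.id */
	for (i = 0; i < done; i++)
		xdp_return_frame_bulk(tx_ring->completed[i], &bq);
	xdp_flush_frame_bulk(&bq);	/* return whatever is still queued */
	rcu_read_unlock();
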
525 __xdp_return(page_address(page), &xdp->rxq->mem, true, xdp); in xdp_return_buff()
528 __xdp_return(xdp->data, &xdp->rxq->mem, true, xdp); in xdp_return_buff()
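
xdp_return_buff() above is the xdp_buff-level counterpart, used while a buffer still belongs to its rxq's memory model. A hedged sketch of one common caller pattern, under the assumption that conversion to an xdp_frame fails and the buffer is handed back.

	xdpf = xdp_convert_buff_to_frame(xdp);
	if (unlikely(!xdpf)) {
		xdp_return_buff(xdp);	/* freed according to xdp->rxq->mem */
		return -ENOMEM;		/* illustrative error handling */
	}
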
533 void __xdp_release_frame(void *data, struct xdp_mem_info *mem) in __xdp_release_frame() argument
539 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); in __xdp_release_frame()
589 xdpf->mem.type = MEM_TYPE_PAGE_ORDER0; in xdp_convert_zc_to_xdp_frame()
707 nxdpf->mem.type = MEM_TYPE_PAGE_ORDER0; in xdpf_clone()
708 nxdpf->mem.id = 0; in xdpf_clone()
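
The last hits reset the memory info on copies: xdp_convert_zc_to_xdp_frame() and xdpf_clone() both back the new frame with a freshly allocated page, so it carries MEM_TYPE_PAGE_ORDER0 with id 0 and is freed as a plain page instead of returning to the original frame's allocator. A hedged caller-side sketch of the clone case; the queuing helper is illustrative.

	struct xdp_frame *copy = xdpf_clone(xdpf);	/* deep copy into a new order-0 page */

	if (copy)
		enqueue_to_second_device(copy);	/* illustrative helper */
	/* Returning the copy later takes the ORDER0 put_page() path in
	 * __xdp_return(); the original xdpf still goes back to its own pool. */
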