Lines matching +full:0 +full:xa in net/core/xdp.c (Linux kernel source)
22 #define REG_STATE_NEW 0x0
23 #define REG_STATE_REGISTERED 0x1
24 #define REG_STATE_UNREGISTERED 0x2
25 #define REG_STATE_UNUSED 0x3
29 #define MEM_ID_MAX 0xFFFE
51 const struct xdp_mem_allocator *xa = ptr; in xdp_mem_id_cmp() local
54 return xa->mem.id != mem_id; in xdp_mem_id_cmp()
71 struct xdp_mem_allocator *xa; in __xdp_mem_allocator_rcu_free() local
73 xa = container_of(rcu, struct xdp_mem_allocator, rcu); in __xdp_mem_allocator_rcu_free()
76 ida_simple_remove(&mem_id_pool, xa->mem.id); in __xdp_mem_allocator_rcu_free()
78 kfree(xa); in __xdp_mem_allocator_rcu_free()
81 static void mem_xa_remove(struct xdp_mem_allocator *xa) in mem_xa_remove() argument
83 trace_mem_disconnect(xa); in mem_xa_remove()
85 if (!rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params)) in mem_xa_remove()
86 call_rcu(&xa->rcu, __xdp_mem_allocator_rcu_free); in mem_xa_remove()
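Lines 71-86 are the classic unlink-then-defer pattern: mem_xa_remove() takes the entry out of the hashtable, and the actual kfree() (plus returning the IDA id) runs only after an RCU grace period, so lockless readers still traversing the table can finish safely. A minimal standalone sketch of the idiom, with hypothetical names (my_entry, my_entry_remove):

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_entry {                       /* hypothetical entry type */
	void *allocator;
	struct rcu_head rcu;
};

static void my_entry_rcu_free(struct rcu_head *rcu)
{
	/* Runs after a grace period: no reader still sees the entry. */
	struct my_entry *e = container_of(rcu, struct my_entry, rcu);

	kfree(e);
}

static void my_entry_remove(struct my_entry *e)
{
	/* Unlink from the lookup structure first (not shown), then
	 * defer the free instead of calling kfree() directly.
	 */
	call_rcu(&e->rcu, my_entry_rcu_free);
}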
91 struct xdp_mem_allocator *xa; in mem_allocator_disconnect() local
100 while ((xa = rhashtable_walk_next(&iter)) && !IS_ERR(xa)) { in mem_allocator_disconnect()
101 if (xa->allocator == allocator) in mem_allocator_disconnect()
102 mem_xa_remove(xa); in mem_allocator_disconnect()
107 } while (xa == ERR_PTR(-EAGAIN)); in mem_allocator_disconnect()
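The loop condition at line 107 handles rhashtable's resize semantics: rhashtable_walk_next() can return ERR_PTR(-EAGAIN) when the table is resized mid-walk, and the caller is expected to restart the pass. A hedged sketch of that iterator protocol, reusing the hypothetical my_entry and my_entry_remove() from the sketch above and assuming the objects live in table ht:

#include <linux/err.h>
#include <linux/rhashtable.h>

static void my_remove_all(struct rhashtable *ht, void *allocator)
{
	struct rhashtable_iter iter;
	struct my_entry *e;

	rhashtable_walk_enter(ht, &iter);
	do {
		rhashtable_walk_start(&iter);

		/* ERR_PTR(-EAGAIN) ends this pass early when the table
		 * resized under us; the outer do-while retries the walk.
		 */
		while ((e = rhashtable_walk_next(&iter)) && !IS_ERR(e)) {
			if (e->allocator == allocator)
				my_entry_remove(e);
		}

		rhashtable_walk_stop(&iter);
	} while (e == ERR_PTR(-EAGAIN));
	rhashtable_walk_exit(&iter);
}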
115 struct xdp_mem_allocator *xa; in xdp_rxq_info_unreg_mem_model() local
120 xdp_rxq->mem.id = 0; in xdp_rxq_info_unreg_mem_model()
121 xdp_rxq->mem.type = 0; in xdp_rxq_info_unreg_mem_model()
128 if (id == 0) in xdp_rxq_info_unreg_mem_model()
133 xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params); in xdp_rxq_info_unreg_mem_model()
134 page_pool_destroy(xa->page_pool); in xdp_rxq_info_unreg_mem_model()
157 memset(xdp_rxq, 0, sizeof(*xdp_rxq)); in xdp_rxq_info_init()
160 /* Returns 0 on success, negative on failure */
186 return 0; in xdp_rxq_info_reg()
208 return 0; in __mem_id_init_hash_table()
215 if (ret < 0) { in __mem_id_init_hash_table()
223 return 0; in __mem_id_init_hash_table()
238 if (id < 0) { in __mem_id_cyclic_get()
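Line 238's error check belongs to the cyclic mem.id allocator: ids come from an IDA capped at MEM_ID_MAX (0xFFFE, line 29), and on -ENOSPC the cursor wraps back to the minimum and the allocation is retried once. A hedged sketch of that wrap-around idiom; ida_simple_get() is the interface of this kernel era, while the cursor handling and bounds here are illustrative, not copied from the file:

#include <linux/idr.h>

static DEFINE_IDA(my_id_pool);    /* hypothetical id pool */
static int my_id_next = 1;        /* cursor for cyclic allocation */

static int my_id_cyclic_get(gfp_t gfp)
{
	int retries = 1;
	int id;

again:
	id = ida_simple_get(&my_id_pool, my_id_next, 0xFFFE, gfp);
	if (id == -ENOSPC && retries--) {
		my_id_next = 1;   /* wrap the cursor, retry once */
		goto again;
	}
	if (id >= 0)
		my_id_next = id + 1;
	return id;
}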
285 return 0; in xdp_rxq_info_reg_mem_model()
293 if (ret < 0) { in xdp_rxq_info_reg_mem_model()
305 if (id < 0) { in xdp_rxq_info_reg_mem_model()
317 xdp_rxq->mem.id = 0; in xdp_rxq_info_reg_mem_model()
328 return 0; in xdp_rxq_info_reg_mem_model()
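Lines 285-328 are the success and rollback paths of xdp_rxq_info_reg_mem_model(). From a driver's point of view the calls pair up roughly as below; a hedged sketch with a hypothetical per-ring struct, assuming a kernel of this vintage where xdp_rxq_info_reg() also takes a napi_id argument (older trees omit it):

#include <net/xdp.h>
#include <net/page_pool.h>

struct my_rx_ring {                     /* hypothetical driver state */
	struct xdp_rxq_info xdp_rxq;
	struct net_device *netdev;
	struct page_pool *pp;
	u32 qid;
};

static int my_ring_xdp_setup(struct my_rx_ring *ring)
{
	int err;

	/* Register the rxq first; mem model registration needs it. */
	err = xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev, ring->qid, 0);
	if (err)
		return err;

	/* Attach a page_pool; this is what allocates the mem.id above. */
	err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
					 MEM_TYPE_PAGE_POOL, ring->pp);
	if (err)
		xdp_rxq_info_unreg(&ring->xdp_rxq);
	return err;
}

static void my_ring_xdp_teardown(struct my_rx_ring *ring)
{
	/* Also unregisters the mem model (cf. lines 115-134 above). */
	xdp_rxq_info_unreg(&ring->xdp_rxq);
}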
345 struct xdp_mem_allocator *xa; in __xdp_return() local
352 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); in __xdp_return()
356 page_pool_put_full_page(xa->page_pool, page, napi_direct); in __xdp_return()
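Lines 345-356 are the MEM_TYPE_PAGE_POOL branch of __xdp_return(): the frame's mem.id is looked up in the rhashtable under RCU to recover the page_pool that owns the page. A simplified, hedged reconstruction of just that branch; the real function also consults xdp_return_frame_no_direct() before trusting napi_direct, and dispatches on the other mem types:

static void my_return_page_pool(void *data, struct xdp_mem_info *mem,
				bool napi_direct)
{
	struct xdp_mem_allocator *xa;
	struct page *page = virt_to_head_page(data);

	rcu_read_lock();
	/* mem->id is the key registered by xdp_rxq_info_reg_mem_model() */
	xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params);
	page_pool_put_full_page(xa->page_pool, page, napi_direct);
	rcu_read_unlock();
}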
401 struct xdp_mem_allocator *xa = bq->xa; in xdp_flush_frame_bulk() local
403 if (unlikely(!xa || !bq->count)) in xdp_flush_frame_bulk()
406 page_pool_put_page_bulk(xa->page_pool, bq->q, bq->count); in xdp_flush_frame_bulk()
407 /* bq->xa is not cleared to save lookup, if mem.id same in next bulk */ in xdp_flush_frame_bulk()
408 bq->count = 0; in xdp_flush_frame_bulk()
417 struct xdp_mem_allocator *xa; in xdp_return_frame_bulk() local
424 xa = bq->xa; in xdp_return_frame_bulk()
425 if (unlikely(!xa)) { in xdp_return_frame_bulk()
426 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); in xdp_return_frame_bulk()
427 bq->count = 0; in xdp_return_frame_bulk()
428 bq->xa = xa; in xdp_return_frame_bulk()
434 if (unlikely(mem->id != xa->mem.id)) { in xdp_return_frame_bulk()
436 bq->xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); in xdp_return_frame_bulk()
451 struct xdp_mem_allocator *xa; in __xdp_release_frame() local
455 xa = rhashtable_lookup(mem_id_ht, &mem->id, mem_id_rht_params); in __xdp_release_frame()
457 if (xa) in __xdp_release_frame()
458 page_pool_release_page(xa->page_pool, page); in __xdp_release_frame()
481 metasize = xdp_data_meta_unsupported(xdp) ? 0 : in xdp_convert_zc_to_xdp_frame()
494 memset(xdpf, 0, sizeof(*xdpf)); in xdp_convert_zc_to_xdp_frame()
502 xdpf->headroom = 0; in xdp_convert_zc_to_xdp_frame()
526 return 0; in xdp_alloc_skb_bulk()
583 memset(skb, 0, offsetof(struct sk_buff, tail)); in xdp_build_skb_from_frame()
612 nxdpf->mem.id = 0; in xdpf_clone()