// SPDX-License-Identifier: GPL-2.0
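/* Excerpts from net/xdp/xsk_buff_pool.c, the AF_XDP (XDP socket) buffer
 * pool. Header includes and a few unrelated hunks are omitted.
 *
 * An xsk_buff_pool owns the xdp_buff_xsk frames carved out of a UMEM and
 * is what a driver allocates RX buffers from in zero-copy mode. The
 * excerpt covers pool lifecycle, device binding, DMA mapping of the UMEM
 * pages, and the buffer allocation fast/slow paths.
 *
 * xp_add_xsk()/xp_del_xsk() below keep the sockets that transmit through
 * this pool on a list that readers walk under RCU while writers take
 * xsk_tx_list_lock.
 */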
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	unsigned long flags;

	if (!xs->tx)
		return;

	spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
	list_del_rcu(&xs->tx_list);
	spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}
void xp_destroy(struct xsk_buff_pool *pool)
{
	if (!pool)
		return;

	kvfree(pool->tx_descs);
	kvfree(pool->heads);
	kvfree(pool);
}

int xp_alloc_tx_descs(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
	pool->tx_descs = kvcalloc(xs->tx->nentries, sizeof(*pool->tx_descs),
				  GFP_KERNEL);
	if (!pool->tx_descs)
		return -ENOMEM;

	return 0;
}
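/* Create a pool for a UMEM: one xdp_buff_xsk head per chunk. In aligned
 * mode every head gets a fixed address (i * chunk_size) up front; in
 * unaligned chunk mode addresses come from the fill ring at run time, so
 * the heads are parked on the free_heads stack instead.
 */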
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
						struct xdp_umem *umem)
{
	bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
	struct xsk_buff_pool *pool;
	struct xdp_buff_xsk *xskb;
	u32 i, entries;

	entries = unaligned ? umem->chunks : 0;
	pool = kvzalloc(struct_size(pool, free_heads, entries), GFP_KERNEL);
	if (!pool)
		goto out;

	pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
	if (!pool->heads)
		goto out;

	if (xs->tx)
		if (xp_alloc_tx_descs(pool, xs))
			goto out;

	pool->chunk_mask = ~((u64)umem->chunk_size - 1);
	pool->addrs_cnt = umem->size;
	pool->heads_cnt = umem->chunks;
	pool->free_heads_cnt = umem->chunks;
	pool->headroom = umem->headroom;
	pool->chunk_size = umem->chunk_size;
	pool->chunk_shift = ffs(umem->chunk_size) - 1;
	pool->unaligned = unaligned;
	pool->frame_len = umem->chunk_size - umem->headroom -
			  XDP_PACKET_HEADROOM;
	pool->umem = umem;
	pool->addrs = umem->addrs;
	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->xsk_tx_list);
	spin_lock_init(&pool->xsk_tx_list_lock);
	spin_lock_init(&pool->cq_lock);
	refcount_set(&pool->users, 1);

	pool->fq = xs->fq_tmp;
	pool->cq = xs->cq_tmp;

	for (i = 0; i < pool->free_heads_cnt; i++) {
		xskb = &pool->heads[i];
		xskb->pool = pool;
		xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
		INIT_LIST_HEAD(&xskb->free_list_node);
		if (pool->unaligned)
			pool->free_heads[i] = xskb;
		else
			xp_init_xskb_addr(xskb, pool, i * pool->chunk_size);
	}

	return pool;

out:
	xp_destroy(pool);
	return NULL;
}
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
	u32 i;

	for (i = 0; i < pool->heads_cnt; i++)
		pool->heads[i].xdp.rxq = rxq;
}

static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (pool->umem->zc) {
		bpf.command = XDP_SETUP_XSK_POOL;
		bpf.xsk.pool = NULL;
		bpf.xsk.queue_id = pool->queue_id;

		err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

		if (err)
			WARN(1, "Failed to disable zero-copy!\n");
	}
}
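/* Bind the pool to one queue of a netdev. Copy mode needs no driver
 * support. For zero-copy the driver must implement ndo_bpf() and
 * ndo_xsk_wakeup(); XDP_SETUP_XSK_POOL hands it the pool, and by the
 * time it returns it is expected to have DMA mapped the UMEM
 * (pool->dma_pages set), typically via xsk_pool_dma_map().
 */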
int xp_assign_dev(struct xsk_buff_pool *pool,
		  struct net_device *netdev, u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xsk_get_pool_from_qid(netdev, queue_id))
		return -EBUSY;

	pool->netdev = netdev;
	pool->queue_id = queue_id;
	err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
	if (err)
		return err;

	if (flags & XDP_USE_NEED_WAKEUP)
		pool->uses_need_wakeup = true;
	pool->cached_need_wakeup = XDP_WAKEUP_TX;

	dev_hold(netdev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if (!netdev->netdev_ops->ndo_bpf ||
	    !netdev->netdev_ops->ndo_xsk_wakeup) {
		err = -EOPNOTSUPP;
		goto err_unreg_pool;
	}

	bpf.command = XDP_SETUP_XSK_POOL;
	bpf.xsk.pool = pool;
	bpf.xsk.queue_id = queue_id;

	err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
	if (err)
		goto err_unreg_pool;

	if (!pool->dma_pages) {
		WARN(1, "Driver did not DMA map zero-copy buffers");
		err = -EINVAL;
		goto err_unreg_xsk;
	}
	pool->umem->zc = true;
	return 0;

err_unreg_xsk:
	xp_disable_drv_zc(pool);
err_unreg_pool:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err) {
		xsk_clear_pool_at_qid(netdev, queue_id);
		dev_put(netdev);
	}
	return err;
}
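/* Variant used when a socket shares an already-bound UMEM but binds it to
 * a different device/queue: the new pool must supply its own fill and
 * completion rings and inherits the zero-copy and need_wakeup settings
 * from the original binding.
 */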
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_sock *umem_xs,
			 struct net_device *dev, u16 queue_id)
{
	u16 flags;
	struct xdp_umem *umem = umem_xs->umem;

	/* One fill and completion ring required for each queue id. */
	if (!pool->fq || !pool->cq)
		return -EINVAL;

	flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
	if (umem_xs->pool->uses_need_wakeup)
		flags |= XDP_USE_NEED_WAKEUP;

	return xp_assign_dev(pool, dev, queue_id, flags);
}

void xp_clear_dev(struct xsk_buff_pool *pool)
{
	if (!pool->netdev)
		return;

	xp_disable_drv_zc(pool);
	xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
	dev_put(pool->netdev);
	pool->netdev = NULL;
}
static void xp_release_deferred(struct work_struct *work)
{
	struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
						  work);

	rtnl_lock();
	xp_clear_dev(pool);
	rtnl_unlock();

	if (pool->fq) {
		xskq_destroy(pool->fq);
		pool->fq = NULL;
	}

	if (pool->cq) {
		xskq_destroy(pool->cq);
		pool->cq = NULL;
	}

	xdp_put_umem(pool->umem, false);
	xp_destroy(pool);
}
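/* Pool reference counting. The final put defers the teardown to a
 * workqueue because unbinding from the driver must run under rtnl_lock()
 * and may sleep.
 */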
void xp_get_pool(struct xsk_buff_pool *pool)
{
	refcount_inc(&pool->users);
}

bool xp_put_pool(struct xsk_buff_pool *pool)
{
	if (!pool)
		return false;

	if (refcount_dec_and_test(&pool->users)) {
		INIT_WORK(&pool->work, xp_release_deferred);
		schedule_work(&pool->work);
		return true;
	}

	return false;
}
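/* DMA mappings are tracked per (UMEM, netdev) in struct xsk_dma_map and
 * refcounted, so sockets that share a UMEM on the same device reuse one
 * mapping instead of mapping every page again.
 */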
static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
	struct xsk_dma_map *dma_map;

	list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
		if (dma_map->netdev == pool->netdev)
			return dma_map;
	}

	return NULL;
}

static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
					     u32 nr_pages, struct xdp_umem *umem)
{
	struct xsk_dma_map *dma_map;

	dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
	if (!dma_map)
		return NULL;

	dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
	if (!dma_map->dma_pages) {
		kfree(dma_map);
		return NULL;
	}

	dma_map->netdev = netdev;
	dma_map->dev = dev;
	dma_map->dma_need_sync = false;
	dma_map->dma_pages_cnt = nr_pages;
	refcount_set(&dma_map->users, 1);
	list_add(&dma_map->list, &umem->xsk_dma_list);
	return dma_map;
}

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
	list_del(&dma_map->list);
	kvfree(dma_map->dma_pages);
	kfree(dma_map);
}
static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
	dma_addr_t *dma;
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = &dma_map->dma_pages[i];
		if (*dma) {
			*dma &= ~XSK_NEXT_PG_CONTIG_MASK;
			dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
					     DMA_BIDIRECTIONAL, attrs);
			*dma = 0;
		}
	}

	xp_destroy_dma_map(dma_map);
}

void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
	struct xsk_dma_map *dma_map;

	if (pool->dma_pages_cnt == 0)
		return;

	dma_map = xp_find_dma_map(pool);
	if (!dma_map) {
		WARN(1, "Could not find dma_map for device");
		return;
	}

	if (!refcount_dec_and_test(&dma_map->users))
		return;

	__xp_dma_unmap(dma_map, attrs);
	kvfree(pool->dma_pages);
	pool->dma_pages_cnt = 0;
	pool->dev = NULL;
}
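/* For unaligned chunk mode: if the next UMEM page is DMA-contiguous with
 * this one, record that in a low bit of the page's (page-aligned) DMA
 * address via XSK_NEXT_PG_CONTIG_MASK, so a frame may straddle a page
 * boundary without needing a second page lookup.
 */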
static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
	u32 i;

	for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
		if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
			dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
		else
			dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
	}
}

static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
	if (!pool->unaligned) {
		u32 i;

		for (i = 0; i < pool->heads_cnt; i++) {
			struct xdp_buff_xsk *xskb = &pool->heads[i];

			xp_init_xskb_dma(xskb, pool, dma_map->dma_pages, xskb->orig_addr);
		}
	}

	pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
	if (!pool->dma_pages)
		return -ENOMEM;

	pool->dev = dma_map->dev;
	pool->dma_pages_cnt = dma_map->dma_pages_cnt;
	pool->dma_need_sync = dma_map->dma_need_sync;
	memcpy(pool->dma_pages, dma_map->dma_pages,
	       pool->dma_pages_cnt * sizeof(*pool->dma_pages));

	return 0;
}
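/* Map all UMEM pages for DMA on behalf of one netdev, or reuse an
 * existing mapping for this (UMEM, netdev) pair. Drivers normally reach
 * this through the xsk_pool_dma_map() wrapper while handling
 * XDP_SETUP_XSK_POOL, roughly (illustrative sketch, not from this file;
 * "rx_ring" is a made-up driver structure, attrs as the driver needs):
 *
 *	err = xsk_pool_dma_map(pool, rx_ring->dev, 0);
 *	if (err)
 *		return err;
 */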
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
	       unsigned long attrs, struct page **pages, u32 nr_pages)
{
	struct xsk_dma_map *dma_map;
	dma_addr_t dma;
	int err;
	u32 i;

	dma_map = xp_find_dma_map(pool);
	if (dma_map) {
		err = xp_init_dma_info(pool, dma_map);
		if (err)
			return err;

		refcount_inc(&dma_map->users);
		return 0;
	}

	dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
	if (!dma_map)
		return -ENOMEM;

	for (i = 0; i < dma_map->dma_pages_cnt; i++) {
		dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL, attrs);
		if (dma_mapping_error(dev, dma)) {
			__xp_dma_unmap(dma_map, attrs);
			return -ENOMEM;
		}
		if (dma_need_sync(dev, dma))
			dma_map->dma_need_sync = true;
		dma_map->dma_pages[i] = dma;
	}

	if (pool->unaligned)
		xp_check_dma_contiguity(dma_map);

	err = xp_init_dma_info(pool, dma_map);
	if (err) {
		__xp_dma_unmap(dma_map, attrs);
		return err;
	}

	return 0;
}
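/* Validation of addresses read from the fill ring. Aligned mode masks the
 * address down to its chunk start; unaligned mode keeps the user-supplied
 * base+offset but must stay inside the UMEM and must not cross into a
 * page that is not DMA-contiguous with the previous one.
 */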
static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
					  u64 addr)
{
	return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_unaligned_extract_addr(*addr);
	if (*addr >= pool->addrs_cnt ||
	    *addr + pool->chunk_size > pool->addrs_cnt ||
	    xp_addr_crosses_non_contig_pg(pool, *addr))
		return false;
	return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
	*addr = xp_aligned_extract_addr(pool, *addr);
	return *addr < pool->addrs_cnt;
}
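/* Slow path: take one address from the fill ring, skipping (and counting)
 * invalid descriptors, and attach it to a free buffer head.
 */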
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;
	u64 addr;
	bool ok;

	if (pool->free_heads_cnt == 0)
		return NULL;

	for (;;) {
		if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
			pool->fq->queue_empty_descs++;
			return NULL;
		}

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
		     xp_check_aligned(pool, &addr);
		if (!ok) {
			pool->fq->invalid_descs++;
			xskq_cons_release(pool->fq);
			continue;
		}
		break;
	}

	if (pool->unaligned) {
		xskb = pool->free_heads[--pool->free_heads_cnt];
		xp_init_xskb_addr(xskb, pool, addr);
		if (pool->dma_pages_cnt)
			xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
	} else {
		xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
	}

	xskq_cons_release(pool->fq);
	return xskb;
}
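/* Allocate a single RX buffer: prefer a recycled one from the free list,
 * otherwise fall back to the fill ring. Resets the data/data_meta
 * pointers and, for non-coherent DMA, syncs the frame for the device.
 */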
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
	struct xdp_buff_xsk *xskb;

	if (!pool->free_list_cnt) {
		xskb = __xp_alloc(pool);
		if (!xskb)
			return NULL;
	} else {
		pool->free_list_cnt--;
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
					free_list_node);
		list_del_init(&xskb->free_list_node);
	}

	xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
	xskb->xdp.data_meta = xskb->xdp.data;

	if (pool->dma_need_sync) {
		dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
						 pool->frame_len,
						 DMA_BIDIRECTIONAL);
	}
	return &xskb->xdp;
}
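/* Batched variant of the fill-ring path: reserve up to @max fill-ring
 * entries and initialize one xdp_buff per valid address. Returns the
 * number of buffers actually produced; invalid descriptors only shrink
 * the result, they do not stop the batch.
 */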
static u32 xp_alloc_new_from_fq(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 i, cached_cons, nb_entries;

	if (max > pool->free_heads_cnt)
		max = pool->free_heads_cnt;
	max = xskq_cons_nb_entries(pool->fq, max);

	cached_cons = pool->fq->cached_cons;
	nb_entries = max;
	i = max;
	while (i--) {
		struct xdp_buff_xsk *xskb;
		u64 addr;
		bool ok;

		__xskq_cons_read_addr_unchecked(pool->fq, cached_cons++, &addr);

		ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
			xp_check_aligned(pool, &addr);
		if (unlikely(!ok)) {
			pool->fq->invalid_descs++;
			nb_entries--;
			continue;
		}

		if (pool->unaligned) {
			xskb = pool->free_heads[--pool->free_heads_cnt];
			xp_init_xskb_addr(xskb, pool, addr);
			if (pool->dma_pages_cnt)
				xp_init_xskb_dma(xskb, pool, pool->dma_pages, addr);
		} else {
			xskb = &pool->heads[xp_aligned_extract_idx(pool, addr)];
		}

		*xdp = &xskb->xdp;
		xdp++;
	}

	xskq_cons_release_n(pool->fq, max);
	return nb_entries;
}

static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 nb_entries)
{
	struct xdp_buff_xsk *xskb;
	u32 i;

	nb_entries = min_t(u32, nb_entries, pool->free_list_cnt);

	i = nb_entries;
	while (i--) {
		xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
		list_del_init(&xskb->free_list_node);

		*xdp = &xskb->xdp;
		xdp++;
	}
	pool->free_list_cnt -= nb_entries;

	return nb_entries;
}
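/* Batch allocation entry point. With non-coherent DMA we fall back to a
 * single xp_alloc() so every frame gets its DMA sync; otherwise recycled
 * buffers are handed out first and the remainder is filled from the fill
 * ring.
 */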
u32 xp_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	u32 nb_entries1 = 0, nb_entries2;

	if (unlikely(pool->dma_need_sync)) {
		struct xdp_buff *buff;

		/* Slow path */
		buff = xp_alloc(pool);
		if (buff)
			*xdp = buff;
		return !!buff;
	}

	if (unlikely(pool->free_list_cnt)) {
		nb_entries1 = xp_alloc_reused(pool, xdp, max);
		if (nb_entries1 == max)
			return nb_entries1;

		max -= nb_entries1;
		xdp += nb_entries1;
	}

	nb_entries2 = xp_alloc_new_from_fq(pool, xdp, max);
	if (!nb_entries2)
		pool->fq->queue_empty_descs++;

	return nb_entries1 + nb_entries2;
}
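/* Driver-side view (illustrative sketch only, not part of this file): the
 * wrappers in <net/xdp_sock_drv.h> forward to the functions above, e.g.
 * when refilling an RX ring ("xdp_buffs", "free_slots" and
 * post_rx_descriptor() are hypothetical driver-local names):
 *
 *	nb_buffs = xsk_buff_alloc_batch(pool, xdp_buffs, free_slots);
 *	for (i = 0; i < nb_buffs; i++)
 *		post_rx_descriptor(xsk_buff_xdp_get_dma(xdp_buffs[i]));
 *
 * xp_can_alloc() lets callers probe whether a future allocation can
 * succeed, and xp_free() (reached via xsk_buff_free()) recycles a buffer
 * onto the pool's free list.
 */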
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	if (pool->free_list_cnt >= count)
		return true;
	return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}

void xp_free(struct xdp_buff_xsk *xskb)
{
	if (!list_empty(&xskb->free_list_node))
		return;

	xskb->pool->free_list_cnt++;
	list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
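/* Translate a raw descriptor address (as found in the TX ring) into a
 * kernel virtual address or a DMA address. In unaligned mode the offset
 * stored in the upper address bits is folded in first.
 */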
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return pool->addrs + addr;
}

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
	addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
	return (pool->dma_pages[addr >> PAGE_SHIFT] &
		~XSK_NEXT_PG_CONTIG_MASK) +
		(addr & ~PAGE_MASK);
}
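/* Slow-path cache maintenance for non-coherent DMA, used by the
 * xsk_buff_dma_sync_for_cpu() / xsk_buff_raw_dma_sync_for_device()
 * wrappers when pool->dma_need_sync is set.
 */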
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
	dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
				      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
}

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
				 size_t size)
{
	dma_sync_single_range_for_device(pool->dev, dma, 0,
					 size, DMA_BIDIRECTIONAL);
}