Lines matching the full identifier q in the Fungible funeth Ethernet driver's Tx path (drivers/net/ethernet/fungible/funeth/funeth_tx.c). Each entry gives the source line number, the matching line, and the enclosing function; "argument" or "local" notes how q is declared in that function.
56 static void *txq_end(const struct funeth_txq *q) in txq_end() argument
58 return (void *)q->hw_wb; in txq_end()
64 static unsigned int txq_to_end(const struct funeth_txq *q, void *p) in txq_to_end() argument
66 return txq_end(q) - p; in txq_to_end()
78 static struct fun_dataop_gl *fun_write_gl(const struct funeth_txq *q, in fun_write_gl() argument
90 i < ngle && txq_to_end(q, gle); i++, gle++) in fun_write_gl()
93 if (txq_to_end(q, gle) == 0) { in fun_write_gl()
94 gle = (struct fun_dataop_gl *)q->desc; in fun_write_gl()
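
fun_write_gl() lays the scatter/gather entries out right behind the request descriptor and, once txq_to_end() reports no room left, wraps to the start of the ring at q->desc. The standalone sketch below models only that wraparound; the ring size, the gather_entry type and the write_gather_entries() helper are illustrative, not the driver's definitions, and a modulo stands in for the driver's txq_to_end() check.

#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 8			/* power of two, like the real SQ */

struct gather_entry {			/* illustrative stand-in for struct fun_dataop_gl */
	uint64_t addr;
	uint32_t len;
};

/* Write @n gather entries starting at ring index @idx, wrapping at the end
 * of the ring the way fun_write_gl() wraps back to q->desc.
 */
static unsigned int write_gather_entries(struct gather_entry *ring,
					 unsigned int idx,
					 const uint64_t *addrs,
					 const uint32_t *lens, unsigned int n)
{
	for (unsigned int i = 0; i < n; i++) {
		ring[idx].addr = addrs[i];
		ring[idx].len = lens[i];
		idx = (idx + 1) % RING_ENTRIES;	/* wrap to the ring start */
	}
	return idx;			/* next free slot */
}

int main(void)
{
	struct gather_entry ring[RING_ENTRIES] = { { 0 } };
	uint64_t addrs[] = { 0x1000, 0x2000, 0x3000 };
	uint32_t lens[] = { 256, 512, 128 };

	/* Start near the end of the ring so the third entry wraps to slot 0. */
	unsigned int next = write_gather_entries(ring, RING_ENTRIES - 2,
						 addrs, lens, 3);

	printf("next free slot: %u\n", next);	/* prints 1 */
	return 0;
}
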
107 static struct sk_buff *fun_tls_tx(struct sk_buff *skb, struct funeth_txq *q, in fun_tls_tx() argument
132 FUN_QSTAT_INC(q, tx_tls_fallback); in fun_tls_tx()
135 FUN_QSTAT_INC(q, tx_tls_drops); in fun_tls_tx()
149 static unsigned int write_pkt_desc(struct sk_buff *skb, struct funeth_txq *q, in write_pkt_desc() argument
153 unsigned int idx = q->prod_cnt & q->mask; in write_pkt_desc()
165 if (unlikely(fun_map_pkt(q->dma_dev, shinfo, skb->data, in write_pkt_desc()
167 FUN_QSTAT_INC(q, tx_map_err); in write_pkt_desc()
171 req = fun_tx_desc_addr(q, idx); in write_pkt_desc()
218 FUN_QSTAT_INC(q, tx_encap_tso); in write_pkt_desc()
236 FUN_QSTAT_INC(q, tx_uso); in write_pkt_desc()
253 FUN_QSTAT_INC(q, tx_tso); in write_pkt_desc()
256 u64_stats_update_begin(&q->syncp); in write_pkt_desc()
257 q->stats.tx_cso += shinfo->gso_segs; in write_pkt_desc()
258 u64_stats_update_end(&q->syncp); in write_pkt_desc()
269 FUN_QSTAT_INC(q, tx_cso); in write_pkt_desc()
277 gle = fun_write_gl(q, req, addrs, lens, ngle); in write_pkt_desc()
290 u64_stats_update_begin(&q->syncp); in write_pkt_desc()
291 q->stats.tx_tls_bytes += tls_len; in write_pkt_desc()
292 q->stats.tx_tls_pkts += 1 + extra_pkts; in write_pkt_desc()
293 u64_stats_update_end(&q->syncp); in write_pkt_desc()
296 u64_stats_update_begin(&q->syncp); in write_pkt_desc()
297 q->stats.tx_bytes += skb->len + extra_bytes; in write_pkt_desc()
298 q->stats.tx_pkts += 1 + extra_pkts; in write_pkt_desc()
299 u64_stats_update_end(&q->syncp); in write_pkt_desc()
301 q->info[idx].skb = skb; in write_pkt_desc()
303 trace_funeth_tx(q, skb->len, idx, req->dataop.ngather); in write_pkt_desc()
311 static unsigned int fun_txq_avail(const struct funeth_txq *q) in fun_txq_avail() argument
313 return q->mask - q->prod_cnt + q->cons_cnt; in fun_txq_avail()
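
fun_txq_avail() computes free descriptors as q->mask - q->prod_cnt + q->cons_cnt, relying on free-running unsigned counters and a power-of-two ring. A minimal userspace model of that arithmetic, with illustrative names, shows why it stays correct across counter wraparound:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Free descriptors in a ring of (mask + 1) entries, computed exactly as
 * fun_txq_avail() does.  prod and cons are free-running counters; unsigned
 * wraparound keeps the result correct even after they overflow.
 */
static uint32_t ring_avail(uint32_t mask, uint32_t prod, uint32_t cons)
{
	return mask - prod + cons;
}

int main(void)
{
	uint32_t mask = 1024 - 1;

	assert(ring_avail(mask, 0, 0) == 1023);		/* empty ring */
	assert(ring_avail(mask, 100, 40) == 963);	/* 60 descriptors in flight */
	/* prod has wrapped past UINT32_MAX while cons has not. */
	assert(ring_avail(mask, 9, UINT32_MAX - 5) == 1008);
	puts("ok");
	return 0;
}
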
317 static void fun_tx_check_stop(struct funeth_txq *q) in fun_tx_check_stop() argument
319 if (likely(fun_txq_avail(q) >= FUNETH_MAX_PKT_DESC)) in fun_tx_check_stop()
322 netif_tx_stop_queue(q->ndq); in fun_tx_check_stop()
329 if (likely(fun_txq_avail(q) < FUNETH_MAX_PKT_DESC)) in fun_tx_check_stop()
330 FUN_QSTAT_INC(q, tx_nstops); in fun_tx_check_stop()
332 netif_tx_start_queue(q->ndq); in fun_tx_check_stop()
338 static bool fun_txq_may_restart(struct funeth_txq *q) in fun_txq_may_restart() argument
340 return fun_txq_avail(q) >= q->mask / 4; in fun_txq_may_restart()
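
Together, fun_tx_check_stop() and fun_txq_may_restart() implement hysteresis: the queue stops as soon as a worst-case packet (FUNETH_MAX_PKT_DESC descriptors) might not fit, but is only restarted once a quarter of the ring (q->mask / 4) is free again. Below is a simplified single-threaded model; MAX_PKT_DESC is a placeholder value, and the driver's real stop path additionally re-checks after a memory barrier to close the race with concurrent reclaim.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PKT_DESC	8	/* placeholder for FUNETH_MAX_PKT_DESC */

struct sq_model {
	uint32_t mask;		/* ndesc - 1, ndesc a power of two */
	uint32_t prod, cons;
	bool stopped;
};

static uint32_t avail(const struct sq_model *sq)
{
	return sq->mask - sq->prod + sq->cons;
}

/* Stop once a worst-case packet no longer fits (as in fun_tx_check_stop). */
static void check_stop(struct sq_model *sq)
{
	if (avail(sq) < MAX_PKT_DESC)
		sq->stopped = true;
}

/* Wake only once a quarter of the ring is free again (fun_txq_may_restart),
 * so the queue does not bounce between stopped and running on every packet.
 */
static void check_wake(struct sq_model *sq)
{
	if (sq->stopped && avail(sq) >= sq->mask / 4)
		sq->stopped = false;
}

int main(void)
{
	struct sq_model sq = { .mask = 63 };

	sq.prod = 60;				/* 60 descriptors in flight */
	check_stop(&sq);
	printf("stopped=%d\n", sq.stopped);	/* 1: only 3 slots free */

	sq.cons = 10;				/* reclaim frees 10 descriptors */
	check_wake(&sq);
	printf("stopped=%d\n", sq.stopped);	/* still 1: 13 free < 15 (mask / 4) */

	sq.cons = 50;
	check_wake(&sq);
	printf("stopped=%d\n", sq.stopped);	/* 0: 53 free >= 15 */
	return 0;
}
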
347 struct funeth_txq *q = fp->txqs[qid]; in fun_start_xmit() local
353 skb = fun_tls_tx(skb, q, &tls_len); in fun_start_xmit()
358 ndesc = write_pkt_desc(skb, q, tls_len); in fun_start_xmit()
364 q->prod_cnt += ndesc; in fun_start_xmit()
365 fun_tx_check_stop(q); in fun_start_xmit()
369 if (__netdev_tx_sent_queue(q->ndq, skb->len, netdev_xmit_more())) in fun_start_xmit()
370 fun_txq_wr_db(q); in fun_start_xmit()
372 FUN_QSTAT_INC(q, tx_more); in fun_start_xmit()
381 fun_txq_wr_db(q); in fun_start_xmit()
386 static u16 txq_hw_head(const struct funeth_txq *q) in txq_hw_head() argument
388 return (u16)be64_to_cpu(*q->hw_wb); in txq_hw_head()
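
txq_hw_head() reads the device's writeback word, which the hardware stores big-endian, and keeps the low 16 bits as the head index. A userspace analogue of that conversion, assuming the glibc/Linux be64toh()/htobe64() helpers in place of be64_to_cpu():

#include <endian.h>	/* be64toh(), htobe64(); glibc/Linux extension */
#include <stdint.h>
#include <stdio.h>

/* The device writes its current head index to a writeback slot in host
 * memory as a big-endian 64-bit word; the driver converts it to CPU
 * endianness and keeps the low 16 bits, as txq_hw_head() does.
 */
static uint16_t hw_head(const volatile uint64_t *hw_wb)
{
	return (uint16_t)be64toh(*hw_wb);
}

int main(void)
{
	/* Simulate the device storing head index 0x1234 in big-endian form. */
	uint64_t wb = htobe64(0x1234);

	printf("head = 0x%x\n", hw_head(&wb));	/* prints 0x1234 */
	return 0;
}
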
394 static unsigned int fun_unmap_pkt(const struct funeth_txq *q, unsigned int idx) in fun_unmap_pkt() argument
396 const struct fun_eth_tx_req *req = fun_tx_desc_addr(q, idx); in fun_unmap_pkt()
402 dma_unmap_single(q->dma_dev, be64_to_cpu(gle->sgl_data), in fun_unmap_pkt()
405 for (gle++; --ngle && txq_to_end(q, gle); gle++) in fun_unmap_pkt()
406 dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data), in fun_unmap_pkt()
410 for (gle = (struct fun_dataop_gl *)q->desc; ngle; ngle--, gle++) in fun_unmap_pkt()
411 dma_unmap_page(q->dma_dev, be64_to_cpu(gle->sgl_data), in fun_unmap_pkt()
424 static bool fun_txq_reclaim(struct funeth_txq *q, int budget) in fun_txq_reclaim() argument
432 for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask; in fun_txq_reclaim()
433 head != reclaim_idx && npkts < limit; head = txq_hw_head(q)) { in fun_txq_reclaim()
442 unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx); in fun_txq_reclaim()
443 struct sk_buff *skb = q->info[reclaim_idx].skb; in fun_txq_reclaim()
445 trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head); in fun_txq_reclaim()
450 reclaim_idx = (reclaim_idx + pkt_desc) & q->mask; in fun_txq_reclaim()
455 q->cons_cnt += ndesc; in fun_txq_reclaim()
456 netdev_tx_completed_queue(q->ndq, npkts, nbytes); in fun_txq_reclaim()
459 if (unlikely(netif_tx_queue_stopped(q->ndq) && in fun_txq_reclaim()
460 fun_txq_may_restart(q))) { in fun_txq_reclaim()
461 netif_tx_wake_queue(q->ndq); in fun_txq_reclaim()
462 FUN_QSTAT_INC(q, tx_nrestarts); in fun_txq_reclaim()
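
fun_txq_reclaim() walks from the software consumer index toward the hardware head, freeing one packet per iteration (a packet may span several descriptors), bounded by the NAPI budget, and only then advances q->cons_cnt and reports the totals for byte-queue-limit accounting. A self-contained model of that loop follows; the field names are illustrative, the hardware head is passed in rather than re-read from the writeback, and the DMA unmapping and skb freeing are elided.

#include <stdint.h>
#include <stdio.h>

#define NDESC 16			/* power of two */

struct txq_model {
	uint32_t mask;			/* NDESC - 1 */
	uint32_t prod, cons;		/* free-running counters */
	uint32_t pkt_desc[NDESC];	/* descriptors used by the packet at this slot */
	uint32_t pkt_len[NDESC];	/* its length, for byte accounting */
};

/* Walk from the consumer index toward the hardware head, reclaiming at most
 * @budget completed packets, in the spirit of fun_txq_reclaim().
 */
static unsigned int txq_reclaim(struct txq_model *q, uint32_t hw_head,
				int budget)
{
	uint32_t reclaim_idx = q->cons & q->mask;
	unsigned int npkts = 0, nbytes = 0, ndesc = 0;

	while (reclaim_idx != hw_head && (int)npkts < budget) {
		uint32_t used = q->pkt_desc[reclaim_idx];

		nbytes += q->pkt_len[reclaim_idx];
		ndesc += used;
		npkts++;
		/* the real driver unmaps DMA and frees the skb here */
		reclaim_idx = (reclaim_idx + used) & q->mask;
	}

	q->cons += ndesc;		/* makes the slots available again */
	printf("reclaimed %u pkts, %u bytes, %u descriptors\n",
	       npkts, nbytes, ndesc);
	return npkts;
}

int main(void)
{
	struct txq_model q = { .mask = NDESC - 1 };

	/* Three pending packets at slots 0, 2 and 3 (2, 1 and 3 descriptors). */
	q.pkt_desc[0] = 2; q.pkt_len[0] = 1500;
	q.pkt_desc[2] = 1; q.pkt_len[2] = 60;
	q.pkt_desc[3] = 3; q.pkt_len[3] = 9000;
	q.prod = 6;

	txq_reclaim(&q, 6 /* hardware head */, 32);
	return 0;
}
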
472 struct funeth_txq *q = irq->txq; in fun_txq_napi_poll() local
475 if (fun_txq_reclaim(q, budget)) in fun_txq_napi_poll()
479 db_val = READ_ONCE(q->irq_db_val) | (q->cons_cnt & q->mask); in fun_txq_napi_poll()
480 writel(db_val, q->db); in fun_txq_napi_poll()
485 static unsigned int fun_xdpq_clean(struct funeth_txq *q, unsigned int budget) in fun_xdpq_clean() argument
489 for (head = txq_hw_head(q), reclaim_idx = q->cons_cnt & q->mask; in fun_xdpq_clean()
490 head != reclaim_idx && npkts < budget; head = txq_hw_head(q)) { in fun_xdpq_clean()
499 unsigned int pkt_desc = fun_unmap_pkt(q, reclaim_idx); in fun_xdpq_clean()
501 xdp_return_frame(q->info[reclaim_idx].xdpf); in fun_xdpq_clean()
503 trace_funeth_tx_free(q, reclaim_idx, pkt_desc, head); in fun_xdpq_clean()
505 reclaim_idx = (reclaim_idx + pkt_desc) & q->mask; in fun_xdpq_clean()
511 q->cons_cnt += ndesc; in fun_xdpq_clean()
515 bool fun_xdp_tx(struct funeth_txq *q, struct xdp_frame *xdpf) in fun_xdp_tx() argument
523 if (fun_txq_avail(q) < FUN_XDP_CLEAN_THRES) in fun_xdp_tx()
524 fun_xdpq_clean(q, FUN_XDP_CLEAN_BATCH); in fun_xdp_tx()
535 if (unlikely(fun_txq_avail(q) < ndesc)) { in fun_xdp_tx()
536 FUN_QSTAT_INC(q, tx_xdp_full); in fun_xdp_tx()
540 if (unlikely(fun_map_pkt(q->dma_dev, si, xdpf->data, xdpf->len, dma, in fun_xdp_tx()
542 FUN_QSTAT_INC(q, tx_map_err); in fun_xdp_tx()
546 idx = q->prod_cnt & q->mask; in fun_xdp_tx()
547 req = fun_tx_desc_addr(q, idx); in fun_xdp_tx()
557 fun_write_gl(q, req, dma, lens, nfrags); in fun_xdp_tx()
559 q->info[idx].xdpf = xdpf; in fun_xdp_tx()
561 u64_stats_update_begin(&q->syncp); in fun_xdp_tx()
562 q->stats.tx_bytes += tot_len; in fun_xdp_tx()
563 q->stats.tx_pkts++; in fun_xdp_tx()
564 u64_stats_update_end(&q->syncp); in fun_xdp_tx()
566 trace_funeth_tx(q, tot_len, idx, nfrags); in fun_xdp_tx()
567 q->prod_cnt += ndesc; in fun_xdp_tx()
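
XDP Tx queues are created without an IRQ (the "XDP" branch of fun_txq_create_dev()), so fun_xdp_tx() reclaims completed frames itself whenever free space drops below FUN_XDP_CLEAN_THRES, and drops the frame (counted as tx_xdp_full) if it still does not fit. Below is a rough standalone model of that clean-on-demand policy; the threshold and batch values are placeholders, and each frame is assumed to use one descriptor.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CLEAN_THRES	16	/* placeholder for FUN_XDP_CLEAN_THRES */
#define CLEAN_BATCH	16	/* placeholder for FUN_XDP_CLEAN_BATCH */

struct xdpq_model {
	uint32_t mask, prod, cons;
	uint32_t hw_head;		/* index the device reports as completed */
};

static uint32_t avail(const struct xdpq_model *q)
{
	return q->mask - q->prod + q->cons;
}

/* Reclaim up to @budget completed frames; a stand-in for fun_xdpq_clean(). */
static void xdpq_clean(struct xdpq_model *q, unsigned int budget)
{
	while ((q->cons & q->mask) != q->hw_head && budget--)
		q->cons++;		/* one descriptor per frame in this model */
}

/* With no completion NAPI driving reclaim, the transmit path cleans a batch
 * itself when space runs low, then gives up if the frame still does not fit.
 */
static bool xdp_tx(struct xdpq_model *q, unsigned int ndesc)
{
	if (avail(q) < CLEAN_THRES)
		xdpq_clean(q, CLEAN_BATCH);
	if (avail(q) < ndesc)
		return false;		/* the driver bumps tx_xdp_full here */
	q->prod += ndesc;
	return true;
}

int main(void)
{
	struct xdpq_model q = { .mask = 63, .prod = 60, .cons = 10,
				.hw_head = 40 };
	bool sent = xdp_tx(&q, 1);

	printf("sent=%d avail=%u\n", sent, avail(&q));
	return 0;
}
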
576 struct funeth_txq *q, **xdpqs; in fun_xdp_xmit_frames() local
590 for (q = xdpqs[q_idx], i = 0; i < n; i++) in fun_xdp_xmit_frames()
591 if (!fun_xdp_tx(q, frames[i])) in fun_xdp_xmit_frames()
595 fun_txq_wr_db(q); in fun_xdp_xmit_frames()
602 static void fun_txq_purge(struct funeth_txq *q) in fun_txq_purge() argument
604 while (q->cons_cnt != q->prod_cnt) { in fun_txq_purge()
605 unsigned int idx = q->cons_cnt & q->mask; in fun_txq_purge()
607 q->cons_cnt += fun_unmap_pkt(q, idx); in fun_txq_purge()
608 dev_kfree_skb_any(q->info[idx].skb); in fun_txq_purge()
610 netdev_tx_reset_queue(q->ndq); in fun_txq_purge()
613 static void fun_xdpq_purge(struct funeth_txq *q) in fun_xdpq_purge() argument
615 while (q->cons_cnt != q->prod_cnt) { in fun_xdpq_purge()
616 unsigned int idx = q->cons_cnt & q->mask; in fun_xdpq_purge()
618 q->cons_cnt += fun_unmap_pkt(q, idx); in fun_xdpq_purge()
619 xdp_return_frame(q->info[idx].xdpf); in fun_xdpq_purge()
630 struct funeth_txq *q; in fun_txq_create_sw() local
638 q = kzalloc_node(sizeof(*q), GFP_KERNEL, numa_node); in fun_txq_create_sw()
639 if (!q) in fun_txq_create_sw()
642 q->dma_dev = &fp->pdev->dev; in fun_txq_create_sw()
643 q->desc = fun_alloc_ring_mem(q->dma_dev, ndesc, FUNETH_SQE_SIZE, in fun_txq_create_sw()
644 sizeof(*q->info), true, numa_node, in fun_txq_create_sw()
645 &q->dma_addr, (void **)&q->info, in fun_txq_create_sw()
646 &q->hw_wb); in fun_txq_create_sw()
647 if (!q->desc) in fun_txq_create_sw()
650 q->netdev = dev; in fun_txq_create_sw()
651 q->mask = ndesc - 1; in fun_txq_create_sw()
652 q->qidx = qidx; in fun_txq_create_sw()
653 q->numa_node = numa_node; in fun_txq_create_sw()
654 u64_stats_init(&q->syncp); in fun_txq_create_sw()
655 q->init_state = FUN_QSTATE_INIT_SW; in fun_txq_create_sw()
656 return q; in fun_txq_create_sw()
659 kfree(q); in fun_txq_create_sw()
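
fun_txq_create_sw() does only host-side setup: it allocates the queue structure and ring memory on the queue's NUMA node, sets q->mask = ndesc - 1 so indices can be derived with a bitwise AND, and leaves device state to fun_txq_create_dev(). A much-simplified sketch of that split, using plain calloc() instead of the kernel allocators and illustrative types in place of the driver's descriptor and info layouts:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct desc {				/* stand-in for a fixed-size Tx descriptor */
	uint8_t raw[64];
};

struct txq_model {
	uint32_t mask;			/* ndesc - 1 */
	uint32_t prod, cons;
	struct desc *desc;		/* descriptor ring */
	void **info;			/* per-slot packet pointer, like q->info */
};

/* Software-only setup: allocate the queue and its rings; the size must be a
 * power of two so `idx = counter & mask` works.  Device state (hardware
 * queue id, doorbell) would be established in a later step.
 */
static struct txq_model *txq_create_sw(uint32_t ndesc)
{
	struct txq_model *q;

	if (!ndesc || (ndesc & (ndesc - 1)))	/* must be a power of two */
		return NULL;

	q = calloc(1, sizeof(*q));
	if (!q)
		return NULL;

	q->desc = calloc(ndesc, sizeof(*q->desc));
	q->info = calloc(ndesc, sizeof(*q->info));
	if (!q->desc || !q->info) {
		free(q->desc);
		free(q->info);
		free(q);
		return NULL;
	}
	q->mask = ndesc - 1;
	return q;
}

int main(void)
{
	struct txq_model *q = txq_create_sw(512);

	printf("%s, mask=%u\n", q ? "allocated" : "failed", q ? q->mask : 0);
	if (q) {
		free(q->desc);
		free(q->info);
		free(q);
	}
	return 0;
}
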
666 static void fun_txq_free_sw(struct funeth_txq *q) in fun_txq_free_sw() argument
668 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_txq_free_sw()
670 fun_free_ring_mem(q->dma_dev, q->mask + 1, FUNETH_SQE_SIZE, true, in fun_txq_free_sw()
671 q->desc, q->dma_addr, q->info); in fun_txq_free_sw()
673 fp->tx_packets += q->stats.tx_pkts; in fun_txq_free_sw()
674 fp->tx_bytes += q->stats.tx_bytes; in fun_txq_free_sw()
675 fp->tx_dropped += q->stats.tx_map_err; in fun_txq_free_sw()
677 kfree(q); in fun_txq_free_sw()
681 int fun_txq_create_dev(struct funeth_txq *q, struct fun_irq *irq) in fun_txq_create_dev() argument
683 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_txq_create_dev()
684 unsigned int irq_idx, ndesc = q->mask + 1; in fun_txq_create_dev()
687 q->irq = irq; in fun_txq_create_dev()
688 *q->hw_wb = 0; in fun_txq_create_dev()
689 q->prod_cnt = 0; in fun_txq_create_dev()
690 q->cons_cnt = 0; in fun_txq_create_dev()
697 q->dma_addr, fp->tx_coal_count, fp->tx_coal_usec, in fun_txq_create_dev()
699 &q->hw_qid, &q->db); in fun_txq_create_dev()
703 err = fun_create_and_bind_tx(fp, q->hw_qid); in fun_txq_create_dev()
706 q->ethid = err; in fun_txq_create_dev()
709 irq->txq = q; in fun_txq_create_dev()
710 q->ndq = netdev_get_tx_queue(q->netdev, q->qidx); in fun_txq_create_dev()
711 q->irq_db_val = FUN_IRQ_SQ_DB(fp->tx_coal_usec, in fun_txq_create_dev()
713 writel(q->irq_db_val, q->db); in fun_txq_create_dev()
716 q->init_state = FUN_QSTATE_INIT_FULL; in fun_txq_create_dev()
717 netif_info(fp, ifup, q->netdev, in fun_txq_create_dev()
719 irq ? "Tx" : "XDP", q->qidx, ndesc, q->hw_qid, irq_idx, in fun_txq_create_dev()
720 q->ethid, q->numa_node); in fun_txq_create_dev()
724 fun_destroy_sq(fp->fdev, q->hw_qid); in fun_txq_create_dev()
726 netdev_err(q->netdev, in fun_txq_create_dev()
728 irq ? "Tx" : "XDP", q->qidx, err); in fun_txq_create_dev()
732 static void fun_txq_free_dev(struct funeth_txq *q) in fun_txq_free_dev() argument
734 struct funeth_priv *fp = netdev_priv(q->netdev); in fun_txq_free_dev()
736 if (q->init_state < FUN_QSTATE_INIT_FULL) in fun_txq_free_dev()
739 netif_info(fp, ifdown, q->netdev, in fun_txq_free_dev()
741 q->irq ? "Tx" : "XDP", q->qidx, q->hw_qid, in fun_txq_free_dev()
742 q->irq ? q->irq->irq_idx : 0, q->ethid); in fun_txq_free_dev()
744 fun_destroy_sq(fp->fdev, q->hw_qid); in fun_txq_free_dev()
745 fun_res_destroy(fp->fdev, FUN_ADMIN_OP_ETH, 0, q->ethid); in fun_txq_free_dev()
747 if (q->irq) { in fun_txq_free_dev()
748 q->irq->txq = NULL; in fun_txq_free_dev()
749 fun_txq_purge(q); in fun_txq_free_dev()
751 fun_xdpq_purge(q); in fun_txq_free_dev()
754 q->init_state = FUN_QSTATE_INIT_SW; in fun_txq_free_dev()
764 struct funeth_txq *q = *qp; in funeth_txq_create() local
767 if (!q) in funeth_txq_create()
768 q = fun_txq_create_sw(dev, qidx, ndesc, irq); in funeth_txq_create()
769 if (!q) in funeth_txq_create()
772 if (q->init_state >= state) in funeth_txq_create()
775 err = fun_txq_create_dev(q, irq); in funeth_txq_create()
778 fun_txq_free_sw(q); in funeth_txq_create()
783 *qp = q; in funeth_txq_create()
790 struct funeth_txq *funeth_txq_free(struct funeth_txq *q, int state) in funeth_txq_free() argument
793 fun_txq_free_dev(q); in funeth_txq_free()
796 fun_txq_free_sw(q); in funeth_txq_free()
797 q = NULL; in funeth_txq_free()
800 return q; in funeth_txq_free()