Lines Matching refs: tq
103 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_stopped() argument
105 return tq->stopped; in vmxnet3_tq_stopped()
110 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_start() argument
112 tq->stopped = false; in vmxnet3_tq_start()
113 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue); in vmxnet3_tq_start()
118 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_wake() argument
120 tq->stopped = false; in vmxnet3_tq_wake()
121 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_wake()
126 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_stop() argument
128 tq->stopped = true; in vmxnet3_tq_stop()
129 tq->num_stop++; in vmxnet3_tq_stop()
130 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_stop()
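The four helpers above keep a software `stopped` flag in lockstep with the netdev subqueue state; the subqueue index is recovered by pointer arithmetic (`tq - adapter->tx_queue`). A minimal userspace sketch of that indexing trick, with invented types standing in for the driver's:

```c
/* Toy model (not the driver's real types) of recovering a queue index
 * by pointer subtraction, as in netif_stop_subqueue(netdev, tq - adapter->tx_queue). */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct txq { bool stopped; unsigned num_stop; };
struct adapter { struct txq tx_queue[4]; };

static void tq_stop(struct txq *tq, struct adapter *a)
{
	tq->stopped = true;
	tq->num_stop++;
	/* stand-in for netif_stop_subqueue(adapter->netdev, tq - a->tx_queue) */
	printf("stop subqueue %td\n", tq - a->tx_queue);
}

int main(void)
{
	struct adapter a = {0};
	tq_stop(&a.tx_queue[2], &a);	/* prints: stop subqueue 2 */
	return 0;
}
```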
329 vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq, in vmxnet3_unmap_pkt() argument
336 BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp); in vmxnet3_unmap_pkt()
337 BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1); in vmxnet3_unmap_pkt()
339 skb = tq->buf_info[eop_idx].skb; in vmxnet3_unmap_pkt()
341 tq->buf_info[eop_idx].skb = NULL; in vmxnet3_unmap_pkt()
343 VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size); in vmxnet3_unmap_pkt()
345 while (tq->tx_ring.next2comp != eop_idx) { in vmxnet3_unmap_pkt()
346 vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp, in vmxnet3_unmap_pkt()
354 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); in vmxnet3_unmap_pkt()
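vmxnet3_unmap_pkt reclaims every descriptor belonging to one packet by advancing one slot past the EOP index and then walking `next2comp` forward, wrapping at the ring size, until the two meet. A runnable sketch of that wrap-around walk (ring size and indices are invented):

```c
/* Userspace sketch of the circular reclaim walk in vmxnet3_unmap_pkt. */
#include <stdio.h>

#define RING_SIZE 8
/* mirrors VMXNET3_INC_RING_IDX_ONLY: advance with wrap, never touch gen */
#define INC_RING_IDX(idx) ((idx) = ((idx) + 1 == RING_SIZE) ? 0 : (idx) + 1)

int main(void)
{
	unsigned next2comp = 6, eop_idx = 1;	/* packet spans slots 6,7,0,1 */
	int reclaimed = 0;

	INC_RING_IDX(eop_idx);			/* stop one past the EOP slot */
	while (next2comp != eop_idx) {
		/* real driver: vmxnet3_unmap_tx_buf(buf_info + next2comp, ...) */
		printf("unmap desc %u\n", next2comp);
		INC_RING_IDX(next2comp);
		reclaimed++;
	}
	printf("reclaimed %d descriptors\n", reclaimed);	/* 4 */
	return 0;
}
```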
364 vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_tx_complete() argument
370 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; in vmxnet3_tq_tx_complete()
371 while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) { in vmxnet3_tq_tx_complete()
378 &gdesc->tcd), tq, adapter->pdev, in vmxnet3_tq_tx_complete()
381 vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring); in vmxnet3_tq_tx_complete()
382 gdesc = tq->comp_ring.base + tq->comp_ring.next2proc; in vmxnet3_tq_tx_complete()
386 spin_lock(&tq->tx_lock); in vmxnet3_tq_tx_complete()
387 if (unlikely(vmxnet3_tq_stopped(tq, adapter) && in vmxnet3_tq_tx_complete()
388 vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) > in vmxnet3_tq_tx_complete()
389 VMXNET3_WAKE_QUEUE_THRESHOLD(tq) && in vmxnet3_tq_tx_complete()
391 vmxnet3_tq_wake(tq, adapter); in vmxnet3_tq_tx_complete()
393 spin_unlock(&tq->tx_lock); in vmxnet3_tq_tx_complete()
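vmxnet3_tq_tx_complete consumes the completion ring with a generation-bit handshake: an entry is valid only while its gen bit matches the consumer's, and the consumer flips its own gen each time `next2proc` wraps, so stale entries from the previous lap are ignored. A simplified model (a plain struct in place of Vmxnet3_TxCompDesc, no endian accessors):

```c
/* Sketch of the gen-bit completion loop and wrap/flip in
 * vmxnet3_comp_ring_adv_next2proc. */
#include <stdio.h>

#define RING_SIZE 4
struct comp { unsigned gen; unsigned eop_idx; };

int main(void)
{
	struct comp ring[RING_SIZE] = { {1, 3}, {1, 5}, {0, 0}, {0, 0} };
	unsigned next2proc = 0, gen = 1, completed = 0;

	while (ring[next2proc].gen == gen) {	/* entry valid for our lap? */
		printf("complete packet ending at desc %u\n",
		       ring[next2proc].eop_idx);
		completed++;
		if (++next2proc == RING_SIZE) {	/* wrap: flip our gen */
			next2proc = 0;
			gen ^= 1;
		}
	}
	printf("%u completions processed\n", completed);	/* 2 */
	return 0;
}
```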
400 vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_cleanup() argument
405 while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) { in vmxnet3_tq_cleanup()
408 tbi = tq->buf_info + tq->tx_ring.next2comp; in vmxnet3_tq_cleanup()
415 vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring); in vmxnet3_tq_cleanup()
419 for (i = 0; i < tq->tx_ring.size; i++) { in vmxnet3_tq_cleanup()
420 BUG_ON(tq->buf_info[i].skb != NULL || in vmxnet3_tq_cleanup()
421 tq->buf_info[i].map_type != VMXNET3_MAP_NONE); in vmxnet3_tq_cleanup()
424 tq->tx_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_cleanup()
425 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; in vmxnet3_tq_cleanup()
427 tq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_cleanup()
428 tq->comp_ring.next2proc = 0; in vmxnet3_tq_cleanup()
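vmxnet3_tq_cleanup drains every slot between `next2comp` and `next2fill`, then asserts (via BUG_ON) that no skb or mapping survives before resetting the indices and generations. A userspace model of that drain-then-verify invariant, with simplified fields and assert() playing the role of BUG_ON:

```c
/* Sketch of the cleanup invariant: drain in-flight slots, verify the
 * whole ring is empty, only then reset indices and generation. */
#include <assert.h>
#include <stddef.h>

#define RING_SIZE 8
#define INIT_GEN 1

struct buf_info { void *skb; int map_type; };	/* 0 means "no mapping" here */

static void tq_cleanup(struct buf_info *bi, unsigned *n2c, unsigned *n2f,
		       unsigned *gen)
{
	while (*n2c != *n2f) {			/* drain in-flight slots */
		bi[*n2c].skb = NULL;
		bi[*n2c].map_type = 0;
		*n2c = (*n2c + 1) % RING_SIZE;
	}
	for (size_t i = 0; i < RING_SIZE; i++)	/* the BUG_ON check */
		assert(bi[i].skb == NULL && bi[i].map_type == 0);
	*n2f = *n2c = 0;			/* fresh ring state */
	*gen = INIT_GEN;
}

int main(void)
{
	struct buf_info bi[RING_SIZE] = {{0}};
	unsigned n2c = 5, n2f = 7, gen = 0;

	bi[5].map_type = bi[6].map_type = 1;	/* two in-flight buffers */
	tq_cleanup(bi, &n2c, &n2f, &gen);
	return 0;
}
```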
433 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_destroy() argument
436 if (tq->tx_ring.base) { in vmxnet3_tq_destroy()
437 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size * in vmxnet3_tq_destroy()
439 tq->tx_ring.base, tq->tx_ring.basePA); in vmxnet3_tq_destroy()
440 tq->tx_ring.base = NULL; in vmxnet3_tq_destroy()
442 if (tq->data_ring.base) { in vmxnet3_tq_destroy()
444 tq->data_ring.size * tq->txdata_desc_size, in vmxnet3_tq_destroy()
445 tq->data_ring.base, tq->data_ring.basePA); in vmxnet3_tq_destroy()
446 tq->data_ring.base = NULL; in vmxnet3_tq_destroy()
448 if (tq->comp_ring.base) { in vmxnet3_tq_destroy()
449 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * in vmxnet3_tq_destroy()
451 tq->comp_ring.base, tq->comp_ring.basePA); in vmxnet3_tq_destroy()
452 tq->comp_ring.base = NULL; in vmxnet3_tq_destroy()
454 if (tq->buf_info) { in vmxnet3_tq_destroy()
456 tq->tx_ring.size * sizeof(tq->buf_info[0]), in vmxnet3_tq_destroy()
457 tq->buf_info, tq->buf_info_pa); in vmxnet3_tq_destroy()
458 tq->buf_info = NULL; in vmxnet3_tq_destroy()
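vmxnet3_tq_destroy frees each ring only if its base pointer is set and NULLs it afterwards, so a repeated destroy, for example from the create error path, is a no-op. A sketch with malloc/free standing in for dma_alloc_coherent/dma_free_coherent (which, unlike free(), must not be handed a NULL buffer):

```c
/* Sketch of the idempotent NULL-check/free/NULL teardown pattern. */
#include <stdlib.h>

struct ring { void *base; };

static void ring_destroy(struct ring *r)
{
	if (r->base) {		/* driver checks before dma_free_coherent */
		free(r->base);
		r->base = NULL;	/* a second destroy becomes a no-op */
	}
}

int main(void)
{
	struct ring r = { malloc(256) };

	ring_destroy(&r);
	ring_destroy(&r);	/* safe: base is already NULL */
	return r.base != NULL;
}
```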
475 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_init() argument
481 memset(tq->tx_ring.base, 0, tq->tx_ring.size * in vmxnet3_tq_init()
483 tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0; in vmxnet3_tq_init()
484 tq->tx_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_init()
486 memset(tq->data_ring.base, 0, in vmxnet3_tq_init()
487 tq->data_ring.size * tq->txdata_desc_size); in vmxnet3_tq_init()
490 memset(tq->comp_ring.base, 0, tq->comp_ring.size * in vmxnet3_tq_init()
492 tq->comp_ring.next2proc = 0; in vmxnet3_tq_init()
493 tq->comp_ring.gen = VMXNET3_INIT_GEN; in vmxnet3_tq_init()
496 memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size); in vmxnet3_tq_init()
497 for (i = 0; i < tq->tx_ring.size; i++) in vmxnet3_tq_init()
498 tq->buf_info[i].map_type = VMXNET3_MAP_NONE; in vmxnet3_tq_init()
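The explicit loop at the end of vmxnet3_tq_init exists because zeroing `buf_info` is not enough: assuming the enum ordering from vmxnet3_int.h, VMXNET3_MAP_NONE is nonzero, so a bare memset leaves every slot at MAP_INVALID rather than "unmapped". A small demonstration under that assumption:

```c
/* Why tq_init loops after the memset: zero is MAP_INVALID, not MAP_NONE
 * (enum values reproduced here on the assumption they match vmxnet3_int.h). */
#include <assert.h>
#include <string.h>

enum map_type { MAP_INVALID = 0, MAP_NONE, MAP_SINGLE, MAP_PAGE };
struct buf_info { enum map_type map_type; };

int main(void)
{
	struct buf_info bi[8];

	memset(bi, 0, sizeof(bi));
	assert(bi[0].map_type == MAP_INVALID);	/* zeroed != "no mapping" */

	for (size_t i = 0; i < 8; i++)		/* the driver's explicit fix-up */
		bi[i].map_type = MAP_NONE;
	assert(bi[0].map_type == MAP_NONE);
	return 0;
}
```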
505 vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, in vmxnet3_tq_create() argument
510 BUG_ON(tq->tx_ring.base || tq->data_ring.base || in vmxnet3_tq_create()
511 tq->comp_ring.base || tq->buf_info); in vmxnet3_tq_create()
513 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
514 tq->tx_ring.size * sizeof(struct Vmxnet3_TxDesc), in vmxnet3_tq_create()
515 &tq->tx_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
516 if (!tq->tx_ring.base) { in vmxnet3_tq_create()
521 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
522 tq->data_ring.size * tq->txdata_desc_size, in vmxnet3_tq_create()
523 &tq->data_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
524 if (!tq->data_ring.base) { in vmxnet3_tq_create()
529 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, in vmxnet3_tq_create()
530 tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc), in vmxnet3_tq_create()
531 &tq->comp_ring.basePA, GFP_KERNEL); in vmxnet3_tq_create()
532 if (!tq->comp_ring.base) { in vmxnet3_tq_create()
537 sz = tq->tx_ring.size * sizeof(tq->buf_info[0]); in vmxnet3_tq_create()
538 tq->buf_info = dma_zalloc_coherent(&adapter->pdev->dev, sz, in vmxnet3_tq_create()
539 &tq->buf_info_pa, GFP_KERNEL); in vmxnet3_tq_create()
540 if (!tq->buf_info) in vmxnet3_tq_create()
546 vmxnet3_tq_destroy(tq, adapter); in vmxnet3_tq_create()
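vmxnet3_tq_create chains four coherent allocations and funnels every failure to a single exit that reuses vmxnet3_tq_destroy, which is safe precisely because destroy NULL-checks each pointer. A compilable sketch of that pattern, with calloc in place of dma_alloc_coherent/dma_zalloc_coherent and invented sizes:

```c
/* Sketch of the alloc-chain-with-single-unwind shape of vmxnet3_tq_create. */
#include <stdlib.h>

struct txq { void *tx_ring, *data_ring, *comp_ring, *buf_info; };

static void tq_destroy(struct txq *q)
{
	free(q->tx_ring);   q->tx_ring = NULL;	/* free(NULL) is a no-op */
	free(q->data_ring); q->data_ring = NULL;
	free(q->comp_ring); q->comp_ring = NULL;
	free(q->buf_info);  q->buf_info = NULL;
}

static int tq_create(struct txq *q)
{
	if (!(q->tx_ring   = calloc(512, 16)))  goto err;
	if (!(q->data_ring = calloc(512, 128))) goto err;
	if (!(q->comp_ring = calloc(512, 16)))  goto err;
	if (!(q->buf_info  = calloc(512, 32)))  goto err;
	return 0;
err:
	tq_destroy(q);	/* frees whatever subset was allocated */
	return -1;
}

int main(void)
{
	struct txq q = {0};

	return tq_create(&q) ? 1 : (tq_destroy(&q), 0);
}
```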
676 struct vmxnet3_tx_queue *tq, struct pci_dev *pdev, in vmxnet3_map_pkt() argument
688 dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
690 ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
695 ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA + in vmxnet3_map_pkt()
696 tq->tx_ring.next2fill * in vmxnet3_map_pkt()
697 tq->txdata_desc_size); in vmxnet3_map_pkt()
701 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
706 tq->tx_ring.next2fill, in vmxnet3_map_pkt()
709 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
712 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
729 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
739 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
740 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); in vmxnet3_map_pkt()
748 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), in vmxnet3_map_pkt()
750 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
751 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
764 tbi = tq->buf_info + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
781 gdesc = tq->tx_ring.base + tq->tx_ring.next2fill; in vmxnet3_map_pkt()
782 BUG_ON(gdesc->txd.gen == tq->tx_ring.gen); in vmxnet3_map_pkt()
790 tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr), in vmxnet3_map_pkt()
792 vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring); in vmxnet3_map_pkt()
793 dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT; in vmxnet3_map_pkt()
804 tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; in vmxnet3_map_pkt()
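Note the inverted generation near the top of vmxnet3_map_pkt: the SOP descriptor is written with `tq->tx_ring.gen ^ 0x1`, so the device treats the whole multi-descriptor chain as not-yet-valid while the remaining fragments are filled with the live gen; flipping the SOP bit back last publishes the packet atomically from the device's point of view. A toy model of that publish-last trick (the shift mirrors VMXNET3_TXD_GEN_SHIFT, but treat the layout as illustrative):

```c
/* Sketch of the SOP gen-inversion used to publish a descriptor chain last. */
#include <stdio.h>

#define GEN_SHIFT 14	/* illustrative stand-in for VMXNET3_TXD_GEN_SHIFT */

struct txd { unsigned long long addr; unsigned dw2; };

int main(void)
{
	unsigned ring_gen = 1;
	struct txd ring[4] = {{0}};

	/* SOP: written with the INVERTED gen, so the device skips the chain */
	ring[0].addr = 0x1000;
	ring[0].dw2  = (ring_gen ^ 0x1) << GEN_SHIFT;

	/* follow-on fragments carry the live gen straight away */
	ring[1].addr = 0x2000;
	ring[1].dw2  = ring_gen << GEN_SHIFT;

	/* publish last: flip the SOP gen once the whole chain is filled
	 * (the driver does this at the end of vmxnet3_tq_xmit) */
	ring[0].dw2 ^= 1u << GEN_SHIFT;

	printf("SOP gen bit now %u\n", (ring[0].dw2 >> GEN_SHIFT) & 1u);
	return 0;
}
```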
839 vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, in vmxnet3_parse_hdr() argument
882 tq->txdata_desc_size, in vmxnet3_parse_hdr()
894 if (unlikely(ctx->copy_size > tq->txdata_desc_size)) { in vmxnet3_parse_hdr()
895 tq->stats.oversized_hdr++; in vmxnet3_parse_hdr()
916 vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, in vmxnet3_copy_hdr() argument
922 tdd = (struct Vmxnet3_TxDataDesc *)((u8 *)tq->data_ring.base + in vmxnet3_copy_hdr()
923 tq->tx_ring.next2fill * in vmxnet3_copy_hdr()
924 tq->txdata_desc_size); in vmxnet3_copy_hdr()
929 ctx->copy_size, tq->tx_ring.next2fill); in vmxnet3_copy_hdr()
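vmxnet3_parse_hdr measures the header block and vmxnet3_copy_hdr memcpys it into the per-descriptor data ring when it fits within `txdata_desc_size` bytes; anything larger bumps the `oversized_hdr` counter and the packet is dropped. A sketch of that fit-or-drop decision with invented sizes:

```c
/* Sketch of the header fit-or-drop check across parse_hdr/copy_hdr. */
#include <stdio.h>
#include <string.h>

#define TXDATA_DESC_SIZE 128	/* illustrative txdata_desc_size */

int main(void)
{
	unsigned char data_ring[4][TXDATA_DESC_SIZE];	/* one slot per txd */
	unsigned char pkt[256] = "ethernet+ip+tcp headers...";
	unsigned copy_size = 54;	/* e.g. eth(14) + ip(20) + tcp(20) */
	unsigned next2fill = 0, oversized_hdr = 0;

	if (copy_size > TXDATA_DESC_SIZE) {	/* the oversized_hdr path */
		oversized_hdr++;
		fprintf(stderr, "oversized_hdr=%u, dropping\n", oversized_hdr);
		return 1;
	}
	memcpy(data_ring[next2fill], pkt, copy_size);	/* vmxnet3_copy_hdr */
	printf("copied %u header bytes into data-ring slot %u\n",
	       copy_size, next2fill);
	return 0;
}
```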
980 vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, in vmxnet3_tq_xmit() argument
1005 tq->stats.drop_tso++; in vmxnet3_tq_xmit()
1008 tq->stats.copy_skb_header++; in vmxnet3_tq_xmit()
1018 tq->stats.drop_too_many_frags++; in vmxnet3_tq_xmit()
1021 tq->stats.linearized++; in vmxnet3_tq_xmit()
1028 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter); in vmxnet3_tq_xmit()
1035 tq->stats.drop_oversized_hdr++; in vmxnet3_tq_xmit()
1043 tq->stats.drop_oversized_hdr++; in vmxnet3_tq_xmit()
1049 tq->stats.drop_hdr_inspect_err++; in vmxnet3_tq_xmit()
1053 spin_lock_irqsave(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1055 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) { in vmxnet3_tq_xmit()
1056 tq->stats.tx_ring_full++; in vmxnet3_tq_xmit()
1060 tq->tx_ring.next2comp, tq->tx_ring.next2fill); in vmxnet3_tq_xmit()
1062 vmxnet3_tq_stop(tq, adapter); in vmxnet3_tq_xmit()
1063 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1068 vmxnet3_copy_hdr(skb, tq, &ctx, adapter); in vmxnet3_tq_xmit()
1071 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter)) in vmxnet3_tq_xmit()
1085 tx_num_deferred = le32_to_cpu(tq->shared->txNumDeferred); in vmxnet3_tq_xmit()
1103 le32_add_cpu(&tq->shared->txNumDeferred, num_pkts); in vmxnet3_tq_xmit()
1130 tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr), in vmxnet3_tq_xmit()
1133 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1135 if (tx_num_deferred >= le32_to_cpu(tq->shared->txThreshold)) { in vmxnet3_tq_xmit()
1136 tq->shared->txNumDeferred = 0; in vmxnet3_tq_xmit()
1138 VMXNET3_REG_TXPROD + tq->qid * 8, in vmxnet3_tq_xmit()
1139 tq->tx_ring.next2fill); in vmxnet3_tq_xmit()
1145 spin_unlock_irqrestore(&tq->tx_lock, flags); in vmxnet3_tq_xmit()
1147 tq->stats.drop_total++; in vmxnet3_tq_xmit()
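The tail of vmxnet3_tq_xmit batches doorbells: each transmit adds its packet count to the shared `txNumDeferred`, and the TXPROD register is written only once the count reaches `txThreshold`, amortizing one expensive MMIO write over several packets. A toy model of that amortization (threshold and names simplified, no endian handling):

```c
/* Sketch of the deferred-doorbell batching at the end of vmxnet3_tq_xmit. */
#include <stdio.h>

static unsigned tx_num_deferred, tx_threshold = 4, next2fill;

static void write_txprod(unsigned v) { printf("doorbell: TXPROD=%u\n", v); }

static void xmit_one(void)
{
	next2fill++;		/* descriptor(s) queued for this packet */
	tx_num_deferred += 1;	/* le32_add_cpu(&tq->shared->txNumDeferred, ...) */
	if (tx_num_deferred >= tx_threshold) {
		tx_num_deferred = 0;
		write_txprod(next2fill);	/* one MMIO write per batch */
	}
}

int main(void)
{
	for (int i = 0; i < 10; i++)
		xmit_one();	/* doorbell fires after packets 4 and 8 */
	return 0;
}
```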
1912 struct vmxnet3_tx_queue *tq = in vmxnet3_poll_rx_only() local
1914 vmxnet3_tq_tx_complete(tq, adapter); in vmxnet3_poll_rx_only()
1937 struct vmxnet3_tx_queue *tq = data; in vmxnet3_msix_tx() local
1938 struct vmxnet3_adapter *adapter = tq->adapter; in vmxnet3_msix_tx()
1941 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
1951 vmxnet3_tq_tx_complete(tq, adapter); in vmxnet3_msix_tx()
1953 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx); in vmxnet3_msix_tx()
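vmxnet3_msix_tx masks its completion interrupt, drains the queue, and unmasks, so a burst of completions costs a single interrupt instead of one per packet (vmxnet3_poll_rx_only similarly piggybacks tx completion onto rx NAPI polling). A stubbed-out sketch of that disable/work/enable shape:

```c
/* Sketch of the mask/drain/unmask interrupt-mitigation pattern. */
#include <stdio.h>

static int intr_masked;

static void disable_intr(void) { intr_masked = 1; }
static void enable_intr(void)  { intr_masked = 0; }
static int  drain_completions(void) { return 5; /* pretend 5 pkts done */ }

static void msix_tx_handler(void)
{
	disable_intr();			/* vmxnet3_disable_intr(adapter, idx) */
	int n = drain_completions();	/* vmxnet3_tq_tx_complete(tq, adapter) */
	enable_intr();			/* vmxnet3_enable_intr(adapter, idx) */
	printf("handled %d completions in one interrupt\n", n);
}

int main(void)
{
	msix_tx_handler();
	return 0;
}
```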
2445 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_setup_driver_shared() local
2448 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA); in vmxnet3_setup_driver_shared()
2449 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA); in vmxnet3_setup_driver_shared()
2450 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA); in vmxnet3_setup_driver_shared()
2451 tqc->ddPA = cpu_to_le64(tq->buf_info_pa); in vmxnet3_setup_driver_shared()
2452 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size); in vmxnet3_setup_driver_shared()
2453 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size); in vmxnet3_setup_driver_shared()
2454 tqc->txDataRingDescSize = cpu_to_le32(tq->txdata_desc_size); in vmxnet3_setup_driver_shared()
2455 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); in vmxnet3_setup_driver_shared()
2459 tqc->intrIdx = tq->comp_ring.intr_idx; in vmxnet3_setup_driver_shared()
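Every multi-byte field vmxnet3_setup_driver_shared writes into the tx queue conf goes through cpu_to_le32/le64 because the device-shared structures live in DMA memory and are defined little-endian, so big-endian hosts must byte-swap on store. A userspace analogue using glibc's htole32/htole64 from <endian.h> (an assumption about the build environment; kernel code uses the cpu_to_le helpers):

```c
/* Sketch of fixed-endian stores into a device-shared config struct. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct tx_queue_conf {		/* simplified stand-in for Vmxnet3_TxQueueConf */
	uint64_t ring_base_pa;
	uint32_t ring_size;
};

int main(void)
{
	struct tx_queue_conf tqc;

	tqc.ring_base_pa = htole64(0x12345000ULL);	/* cpu_to_le64(basePA) */
	tqc.ring_size    = htole32(512);		/* cpu_to_le32(size)   */
	printf("stored size (LE bytes on any host): 0x%08x\n",
	       (unsigned)tqc.ring_size);
	return 0;
}
```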
2828 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_create_queues() local
2829 tq->tx_ring.size = tx_ring_size; in vmxnet3_create_queues()
2830 tq->data_ring.size = tx_ring_size; in vmxnet3_create_queues()
2831 tq->comp_ring.size = tx_ring_size; in vmxnet3_create_queues()
2832 tq->txdata_desc_size = txdata_desc_size; in vmxnet3_create_queues()
2833 tq->shared = &adapter->tqd_start[i].ctrl; in vmxnet3_create_queues()
2834 tq->stopped = true; in vmxnet3_create_queues()
2835 tq->adapter = adapter; in vmxnet3_create_queues()
2836 tq->qid = i; in vmxnet3_create_queues()
2837 err = vmxnet3_tq_create(tq, adapter); in vmxnet3_create_queues()
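vmxnet3_create_queues runs one setup loop per tx queue, seeding identical ring sizes, a per-queue `shared` pointer into the queue-descriptor array, `qid = i`, and `stopped = true` until activation; a creation failure stops the loop so the caller can tear down the queues already built. A simplified, compilable version of the loop:

```c
/* Sketch of the per-queue seeding loop in vmxnet3_create_queues
 * (fields and sizes invented; tq_create stands in for vmxnet3_tq_create). */
#include <stdio.h>

#define NUM_TX_QUEUES 4

struct txq { unsigned ring_size, qid; int stopped; };

static int tq_create(struct txq *q) { (void)q; return 0; /* alloc rings */ }

int main(void)
{
	struct txq tx_queue[NUM_TX_QUEUES];

	for (unsigned i = 0; i < NUM_TX_QUEUES; i++) {
		struct txq *tq = &tx_queue[i];

		tq->ring_size = 512;	/* same sizes for every queue */
		tq->stopped = 1;	/* queues start stopped until activation */
		tq->qid = i;
		if (tq_create(tq)) {
			fprintf(stderr, "queue %u creation failed\n", i);
			return 1;	/* caller unwinds prior queues */
		}
	}
	printf("created %d tx queues\n", NUM_TX_QUEUES);
	return 0;
}
```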