Lines matching refs:tx_ring — each entry shows the source line number, the matched code, and the enclosing function; "argument" and "local" mark how tx_ring is bound in that function.
194 static void igc_clean_tx_ring(struct igc_ring *tx_ring) in igc_clean_tx_ring() argument
196 u16 i = tx_ring->next_to_clean; in igc_clean_tx_ring()
197 struct igc_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_clean_tx_ring()
200 while (i != tx_ring->next_to_use) { in igc_clean_tx_ring()
209 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
213 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
216 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); in igc_clean_tx_ring()
222 tx_desc = IGC_TX_DESC(tx_ring, i); in igc_clean_tx_ring()
229 if (unlikely(i == tx_ring->count)) { in igc_clean_tx_ring()
231 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_ring()
232 tx_desc = IGC_TX_DESC(tx_ring, 0); in igc_clean_tx_ring()
237 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_ring()
245 if (unlikely(i == tx_ring->count)) { in igc_clean_tx_ring()
247 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_ring()
251 if (tx_ring->xsk_pool && xsk_frames) in igc_clean_tx_ring()
252 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_ring()
255 netdev_tx_reset_queue(txring_txq(tx_ring)); in igc_clean_tx_ring()
258 tx_ring->next_to_use = 0; in igc_clean_tx_ring()
259 tx_ring->next_to_clean = 0; in igc_clean_tx_ring()
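
igc_clean_tx_ring() walks the ring from next_to_clean up to next_to_use, unmapping each buffer's DMA as it goes and completing any AF_XDP frames in one xsk_tx_completed() batch at the end, then resets both cursors to zero. A minimal user-space sketch of that walk, with illustrative names rather than the driver's types:

#include <stdint.h>
#include <stdio.h>

struct demo_ring {
	uint16_t count;         /* descriptors in the ring */
	uint16_t next_to_use;   /* producer cursor */
	uint16_t next_to_clean; /* consumer cursor */
};

static void demo_clean_ring(struct demo_ring *ring)
{
	uint16_t i = ring->next_to_clean;

	while (i != ring->next_to_use) {
		printf("release slot %u\n", (unsigned)i); /* unmap/free here */
		if (++i == ring->count)
			i = 0;  /* wrap, as in the i == tx_ring->count checks */
	}
	ring->next_to_use = 0;  /* both cursors restart at slot 0 */
	ring->next_to_clean = 0;
}

int main(void)
{
	struct demo_ring r = { .count = 8, .next_to_use = 2, .next_to_clean = 6 };

	demo_clean_ring(&r);    /* releases slots 6, 7, 0, 1 */
	return 0;
}
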
268 void igc_free_tx_resources(struct igc_ring *tx_ring) in igc_free_tx_resources() argument
270 igc_clean_tx_ring(tx_ring); in igc_free_tx_resources()
272 vfree(tx_ring->tx_buffer_info); in igc_free_tx_resources()
273 tx_ring->tx_buffer_info = NULL; in igc_free_tx_resources()
276 if (!tx_ring->desc) in igc_free_tx_resources()
279 dma_free_coherent(tx_ring->dev, tx_ring->size, in igc_free_tx_resources()
280 tx_ring->desc, tx_ring->dma); in igc_free_tx_resources()
282 tx_ring->desc = NULL; in igc_free_tx_resources()
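
igc_free_tx_resources() encodes a fixed teardown order: quiesce the ring via igc_clean_tx_ring(), free the vmalloc'ed per-buffer metadata, then hand the descriptor memory back to the DMA API, bailing out early if it was never allocated. A minimal sketch of that order using the real kernel helpers; struct my_ring and my_tx_buffer are illustrative stand-ins for the driver's types, and the clean step is elided to keep the sketch self-contained:

#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>

struct my_tx_buffer { void *data; };    /* placeholder per-slot bookkeeping */

/* Illustrative stand-in for struct igc_ring, cut down to the fields the
 * listing touches. */
struct my_ring {
	struct device *dev;             /* device the ring is DMA-mapped for */
	struct net_device *netdev;
	struct my_tx_buffer *tx_buffer_info;
	void *desc;                     /* descriptor ring memory */
	dma_addr_t dma;                 /* its bus address */
	unsigned int size;              /* descriptor ring size in bytes */
	u16 count, next_to_use, next_to_clean, queue_index;
};

static void my_free_tx_resources(struct my_ring *ring)
{
	/* the driver calls igc_clean_tx_ring() first to release in-flight
	 * buffers; elided here */
	vfree(ring->tx_buffer_info);    /* CPU-side metadata array */
	ring->tx_buffer_info = NULL;

	if (!ring->desc)                /* nothing DMA-allocated, e.g. after */
		return;                 /* a failed or partial setup         */

	dma_free_coherent(ring->dev, ring->size, ring->desc, ring->dma);
	ring->desc = NULL;
}
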
296 igc_free_tx_resources(adapter->tx_ring[i]); in igc_free_all_tx_resources()
308 if (adapter->tx_ring[i]) in igc_clean_all_tx_rings()
309 igc_clean_tx_ring(adapter->tx_ring[i]); in igc_clean_all_tx_rings()
318 int igc_setup_tx_resources(struct igc_ring *tx_ring) in igc_setup_tx_resources() argument
320 struct net_device *ndev = tx_ring->netdev; in igc_setup_tx_resources()
321 struct device *dev = tx_ring->dev; in igc_setup_tx_resources()
324 size = sizeof(struct igc_tx_buffer) * tx_ring->count; in igc_setup_tx_resources()
325 tx_ring->tx_buffer_info = vzalloc(size); in igc_setup_tx_resources()
326 if (!tx_ring->tx_buffer_info) in igc_setup_tx_resources()
330 tx_ring->size = tx_ring->count * sizeof(union igc_adv_tx_desc); in igc_setup_tx_resources()
331 tx_ring->size = ALIGN(tx_ring->size, 4096); in igc_setup_tx_resources()
333 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in igc_setup_tx_resources()
334 &tx_ring->dma, GFP_KERNEL); in igc_setup_tx_resources()
336 if (!tx_ring->desc) in igc_setup_tx_resources()
339 tx_ring->next_to_use = 0; in igc_setup_tx_resources()
340 tx_ring->next_to_clean = 0; in igc_setup_tx_resources()
345 vfree(tx_ring->tx_buffer_info); in igc_setup_tx_resources()
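
igc_setup_tx_resources() is the mirror image: vzalloc() a zeroed metadata array, size the descriptor area as count descriptors rounded up to a whole 4 KiB multiple, and allocate it with dma_alloc_coherent(); the vfree at the end of the listing is the failure-path unwind of the first allocation. A sketch reusing struct my_ring from the previous sketch (the 16-byte constant assumes the advanced Tx descriptor layout):

#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>       /* ALIGN() */

static int my_setup_tx_resources(struct my_ring *ring)
{
	int size = sizeof(struct my_tx_buffer) * ring->count;

	ring->tx_buffer_info = vzalloc(size);   /* zeroed metadata array */
	if (!ring->tx_buffer_info)
		return -ENOMEM;

	/* one 16-byte advanced Tx descriptor per slot, rounded up to 4 KiB */
	ring->size = ALIGN(ring->count * 16u, 4096);

	ring->desc = dma_alloc_coherent(ring->dev, ring->size,
					&ring->dma, GFP_KERNEL);
	if (!ring->desc) {
		vfree(ring->tx_buffer_info);    /* unwind, as in the listing */
		ring->tx_buffer_info = NULL;
		return -ENOMEM;
	}

	ring->next_to_use = 0;                  /* empty ring: both cursors */
	ring->next_to_clean = 0;                /* start at slot 0          */
	return 0;
}
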
362 err = igc_setup_tx_resources(adapter->tx_ring[i]); in igc_setup_all_tx_resources()
366 igc_free_tx_resources(adapter->tx_ring[i]); in igc_setup_all_tx_resources()
732 igc_configure_tx_ring(adapter, adapter->tx_ring[i]); in igc_configure_tx()
1016 static void igc_tx_ctxtdesc(struct igc_ring *tx_ring, in igc_tx_ctxtdesc() argument
1022 u16 i = tx_ring->next_to_use; in igc_tx_ctxtdesc()
1024 context_desc = IGC_TX_CTXTDESC(tx_ring, i); in igc_tx_ctxtdesc()
1027 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in igc_tx_ctxtdesc()
1033 if (test_bit(IGC_RING_FLAG_TX_CTX_IDX, &tx_ring->flags)) in igc_tx_ctxtdesc()
1034 mss_l4len_idx |= tx_ring->reg_idx << 4; in igc_tx_ctxtdesc()
1043 if (tx_ring->launchtime_enable) { in igc_tx_ctxtdesc()
1044 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); in igc_tx_ctxtdesc()
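
igc_tx_ctxtdesc() claims the slot at next_to_use for a context descriptor and publishes the advanced cursor with a ternary wrap; the i++ between the two matched lines contains no "tx_ring", so the listing skips it. The index arithmetic, as a runnable demo:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t count = 8;
	uint16_t next_to_use = 7;       /* context descriptor goes in slot 7 */
	uint16_t i = next_to_use;

	i++;                            /* the increment the search omitted */
	next_to_use = (i < count) ? i : 0;      /* wrap past the last slot */

	printf("next_to_use after slot 7: %u\n", (unsigned)next_to_use);
	return 0;                       /* prints 0 */
}
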
1055 static void igc_tx_csum(struct igc_ring *tx_ring, struct igc_tx_buffer *first) in igc_tx_csum() argument
1064 !tx_ring->launchtime_enable) in igc_tx_csum()
1095 igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0); in igc_tx_csum()
1098 static int __igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) in __igc_maybe_stop_tx() argument
1100 struct net_device *netdev = tx_ring->netdev; in __igc_maybe_stop_tx()
1102 netif_stop_subqueue(netdev, tx_ring->queue_index); in __igc_maybe_stop_tx()
1110 if (igc_desc_unused(tx_ring) < size) in __igc_maybe_stop_tx()
1114 netif_wake_subqueue(netdev, tx_ring->queue_index); in __igc_maybe_stop_tx()
1116 u64_stats_update_begin(&tx_ring->tx_syncp2); in __igc_maybe_stop_tx()
1117 tx_ring->tx_stats.restart_queue2++; in __igc_maybe_stop_tx()
1118 u64_stats_update_end(&tx_ring->tx_syncp2); in __igc_maybe_stop_tx()
1123 static inline int igc_maybe_stop_tx(struct igc_ring *tx_ring, const u16 size) in igc_maybe_stop_tx() argument
1125 if (igc_desc_unused(tx_ring) >= size) in igc_maybe_stop_tx()
1127 return __igc_maybe_stop_tx(tx_ring, size); in igc_maybe_stop_tx()
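
__igc_maybe_stop_tx() and its inline wrapper implement the classic lockless stop/wake handshake: the fast path just compares free descriptors against the requested size, while the slow path stops the subqueue, re-checks after a barrier (the smp_mb() between the matched lines has no "tx_ring" in it, hence its absence above), and re-wakes the queue if the completion path freed space in the meantime, counting that as restart_queue2. A sketch reusing struct my_ring; desc_unused() below mirrors the usual igb/igc free-slot formula but is an assumption, not the listed code:

#include <linux/netdevice.h>

/* Free descriptors, keeping one slot unused so a full ring is
 * distinguishable from an empty one. */
static u16 desc_unused(const struct my_ring *ring)
{
	u16 ntc = ring->next_to_clean, ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}

static int my_maybe_stop_tx(struct my_ring *ring, const u16 needed)
{
	if (desc_unused(ring) >= needed)
		return 0;                       /* fast path: enough room */

	netif_stop_subqueue(ring->netdev, ring->queue_index);

	smp_mb();       /* order the stop against the re-check; pairs with
			 * the barrier on the completion side */

	if (desc_unused(ring) < needed)
		return -EBUSY;                  /* genuinely full */

	/* a completion ran between the check and the stop: undo the stop */
	netif_wake_subqueue(ring->netdev, ring->queue_index);
	return 0;
}
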
1160 static void igc_tx_olinfo_status(struct igc_ring *tx_ring, in igc_tx_olinfo_status() argument
1179 static int igc_tx_map(struct igc_ring *tx_ring, in igc_tx_map() argument
1188 u16 i = tx_ring->next_to_use; in igc_tx_map()
1194 tx_desc = IGC_TX_DESC(tx_ring, i); in igc_tx_map()
1196 igc_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len); in igc_tx_map()
1201 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in igc_tx_map()
1206 if (dma_mapping_error(tx_ring->dev, dma)) in igc_tx_map()
1221 if (i == tx_ring->count) { in igc_tx_map()
1222 tx_desc = IGC_TX_DESC(tx_ring, 0); in igc_tx_map()
1240 if (i == tx_ring->count) { in igc_tx_map()
1241 tx_desc = IGC_TX_DESC(tx_ring, 0); in igc_tx_map()
1249 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, in igc_tx_map()
1252 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1259 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in igc_tx_map()
1279 if (i == tx_ring->count) in igc_tx_map()
1282 tx_ring->next_to_use = i; in igc_tx_map()
1285 igc_maybe_stop_tx(tx_ring, DESC_NEEDED); in igc_tx_map()
1287 if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) { in igc_tx_map()
1288 writel(i, tx_ring->tail); in igc_tx_map()
1293 netdev_err(tx_ring->netdev, "TX DMA map failed\n"); in igc_tx_map()
1294 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1299 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_tx_map()
1302 i += tx_ring->count; in igc_tx_map()
1303 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_tx_map()
1307 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_tx_map()
1312 tx_ring->next_to_use = i; in igc_tx_map()
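
igc_tx_map() carries three patterns worth noting: the forward wrap back to IGC_TX_DESC(tx_ring, 0) whenever i hits tx_ring->count, the tail (doorbell) write that is skipped while netdev_xmit_more() promises another skb so back-to-back transmits batch into one write, and the dma_error unwind that steps backwards through already-mapped buffers, using i += tx_ring->count to recover from unsigned underflow. The backward-wrap arithmetic, as a runnable demo:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t count = 256;
	uint16_t i = 0;         /* mapping failed while at the ring's start */

	if (i-- == 0)           /* post-decrement wraps the u16 to 0xffff */
		i += count;     /* 0xffff + 256 (mod 2^16) == count - 1 */

	printf("slot before 0 in a %u-entry ring: %u\n",
	       (unsigned)count, (unsigned)i);
	return 0;               /* prints 255 */
}
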
1317 static int igc_tso(struct igc_ring *tx_ring, in igc_tso() argument
1405 igc_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, in igc_tso()
1412 struct igc_ring *tx_ring) in igc_xmit_frame_ring() argument
1432 if (igc_maybe_stop_tx(tx_ring, count + 3)) { in igc_xmit_frame_ring()
1438 first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; in igc_xmit_frame_ring()
1445 struct igc_adapter *adapter = netdev_priv(tx_ring->netdev); in igc_xmit_frame_ring()
1473 tso = igc_tso(tx_ring, first, &hdr_len); in igc_xmit_frame_ring()
1477 igc_tx_csum(tx_ring, first); in igc_xmit_frame_ring()
1479 igc_tx_map(tx_ring, first, hdr_len); in igc_xmit_frame_ring()
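
igc_xmit_frame_ring() fixes the order of the transmit path: reserve worst-case descriptor space before touching the ring (the count + 3 margin appears to cover a context descriptor plus a gap that keeps tail from catching head), claim the buffer slot at next_to_use, emit the TSO or checksum context descriptor, then map and kick the doorbell. A hedged skeleton of that order; every my_* helper is an illustrative stand-in for the corresponding igc function:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_xmit_frame_ring(struct sk_buff *skb,
				      struct my_ring *ring)
{
	struct my_tx_buffer *first;
	u8 hdr_len = 0;
	int tso;

	/* descriptors for head + frags, plus margin for context/gap */
	if (my_maybe_stop_tx(ring, my_count_descs(skb) + 3))
		return NETDEV_TX_BUSY;          /* queue stopped above */

	first = &ring->tx_buffer_info[ring->next_to_use];

	tso = my_tso(ring, first, &hdr_len);    /* TSO context descriptor */
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		my_tx_csum(ring, first);        /* checksum context desc */

	my_tx_map(ring, first, hdr_len);        /* DMA map, tail write */
	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(skb);                 /* drop, but report OK so */
	return NETDEV_TX_OK;                    /* the stack won't requeue */
}
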
1498 return adapter->tx_ring[r_idx]; in igc_tx_queue_mapping()
2190 return adapter->tx_ring[index]; in igc_xdp_get_tx_ring()
2661 struct igc_ring *tx_ring = q_vector->tx.ring; in igc_clean_tx_irq() local
2662 unsigned int i = tx_ring->next_to_clean; in igc_clean_tx_irq()
2670 tx_buffer = &tx_ring->tx_buffer_info[i]; in igc_clean_tx_irq()
2671 tx_desc = IGC_TX_DESC(tx_ring, i); in igc_clean_tx_irq()
2672 i -= tx_ring->count; in igc_clean_tx_irq()
2701 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
2705 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
2708 netdev_warn_once(tx_ring->netdev, "Unknown Tx buffer type\n"); in igc_clean_tx_irq()
2718 i -= tx_ring->count; in igc_clean_tx_irq()
2719 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_irq()
2720 tx_desc = IGC_TX_DESC(tx_ring, 0); in igc_clean_tx_irq()
2725 igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); in igc_clean_tx_irq()
2733 i -= tx_ring->count; in igc_clean_tx_irq()
2734 tx_buffer = tx_ring->tx_buffer_info; in igc_clean_tx_irq()
2735 tx_desc = IGC_TX_DESC(tx_ring, 0); in igc_clean_tx_irq()
2745 netdev_tx_completed_queue(txring_txq(tx_ring), in igc_clean_tx_irq()
2748 i += tx_ring->count; in igc_clean_tx_irq()
2749 tx_ring->next_to_clean = i; in igc_clean_tx_irq()
2753 if (tx_ring->xsk_pool) { in igc_clean_tx_irq()
2755 xsk_tx_completed(tx_ring->xsk_pool, xsk_frames); in igc_clean_tx_irq()
2756 if (xsk_uses_need_wakeup(tx_ring->xsk_pool)) in igc_clean_tx_irq()
2757 xsk_set_tx_need_wakeup(tx_ring->xsk_pool); in igc_clean_tx_irq()
2758 igc_xdp_xmit_zc(tx_ring); in igc_clean_tx_irq()
2761 if (test_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) { in igc_clean_tx_irq()
2767 clear_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igc_clean_tx_irq()
2773 netdev_err(tx_ring->netdev, in igc_clean_tx_irq()
2785 tx_ring->queue_index, in igc_clean_tx_irq()
2786 rd32(IGC_TDH(tx_ring->reg_idx)), in igc_clean_tx_irq()
2787 readl(tx_ring->tail), in igc_clean_tx_irq()
2788 tx_ring->next_to_use, in igc_clean_tx_irq()
2789 tx_ring->next_to_clean, in igc_clean_tx_irq()
2794 netif_stop_subqueue(tx_ring->netdev, in igc_clean_tx_irq()
2795 tx_ring->queue_index); in igc_clean_tx_irq()
2804 netif_carrier_ok(tx_ring->netdev) && in igc_clean_tx_irq()
2805 igc_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) { in igc_clean_tx_irq()
2810 if (__netif_subqueue_stopped(tx_ring->netdev, in igc_clean_tx_irq()
2811 tx_ring->queue_index) && in igc_clean_tx_irq()
2813 netif_wake_subqueue(tx_ring->netdev, in igc_clean_tx_irq()
2814 tx_ring->queue_index); in igc_clean_tx_irq()
2816 u64_stats_update_begin(&tx_ring->tx_syncp); in igc_clean_tx_irq()
2817 tx_ring->tx_stats.restart_queue++; in igc_clean_tx_irq()
2818 u64_stats_update_end(&tx_ring->tx_syncp); in igc_clean_tx_irq()
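
igc_clean_tx_irq() biases its cursor by the ring size up front (the i -= tx_ring->count right after loading next_to_clean) so the wrap check inside the hot loop is a cheap test against zero followed by another i -= tx_ring->count, instead of a compare against count; the bias is removed with i += tx_ring->count just before storing next_to_clean. The unsigned arithmetic, as a runnable demo:

#include <stdio.h>

int main(void)
{
	unsigned int count = 8;
	unsigned int i = 5;     /* next_to_clean on entry */
	unsigned int step;

	i -= count;             /* bias: wraps to a huge unsigned value */

	for (step = 0; step < 5; step++) {      /* clean five descriptors */
		i++;
		if (!i)                 /* crossed the end of the ring */
			i -= count;     /* re-bias instead of i = 0 */
	}

	i += count;             /* drop the bias before storing the cursor */
	printf("next_to_clean = %u\n", i);      /* prints 2 == (5 + 5) mod 8 */
	return 0;
}
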
3870 adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL; in igc_reset_q_vector()
4258 adapter->tx_ring[j]->reg_idx = j; in igc_cache_ring_register()
4385 adapter->tx_ring[txr_idx] = ring; in igc_alloc_q_vector()
4644 struct igc_ring *ring = adapter->tx_ring[i]; in igc_update_stats()
5396 struct igc_ring *tx_ring = adapter->tx_ring[i]; in igc_watchdog_task() local
5404 if (igc_desc_unused(tx_ring) + 1 < tx_ring->count) { in igc_watchdog_task()
5413 set_bit(IGC_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags); in igc_watchdog_task()
5756 ring = adapter->tx_ring[queue]; in igc_save_launchtime_params()
5839 struct igc_ring *ring = adapter->tx_ring[i]; in igc_tsn_clear_schedule()
5876 struct igc_ring *ring = adapter->tx_ring[i]; in igc_save_qbv_schedule()
5922 ring = adapter->tx_ring[queue]; in igc_save_cbs_params()
5925 if (adapter->tx_ring[i]) in igc_save_cbs_params()
5926 cbs_status[i] = adapter->tx_ring[i]->cbs_enable; in igc_save_cbs_params()