Lines matching refs: tx_queue

23 int ef100_tx_probe(struct efx_tx_queue *tx_queue)  in ef100_tx_probe()  argument
26 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf, in ef100_tx_probe()
27 (tx_queue->ptr_mask + 2) * in ef100_tx_probe()
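
The allocation above is cut off by the match filter, but the visible part sizes the descriptor ring buffer at (ptr_mask + 2) entries. A minimal userspace sketch of just that sizing arithmetic, assuming ptr_mask is the ring size minus one (ring sizes are powers of two) and a 16-byte descriptor; fake_oword_t and txd_buf_bytes() are illustrative stand-ins, not the driver's efx_oword_t or API:

#include <stdio.h>

typedef struct { unsigned char b[16]; } fake_oword_t;   /* stand-in for efx_oword_t */

static size_t txd_buf_bytes(unsigned int ptr_mask)
{
    /* whole ring (ptr_mask + 1 entries) plus one extra descriptor */
    return (size_t)(ptr_mask + 2) * sizeof(fake_oword_t);
}

int main(void)
{
    unsigned int ptr_mask = 512 - 1;    /* 512-entry ring */

    printf("descriptor buffer: %zu bytes\n", txd_buf_bytes(ptr_mask));   /* 8208 */
    return 0;
}
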
32 void ef100_tx_init(struct efx_tx_queue *tx_queue) in ef100_tx_init() argument
35 tx_queue->core_txq = in ef100_tx_init()
36 netdev_get_tx_queue(tx_queue->efx->net_dev, in ef100_tx_init()
37 tx_queue->channel->channel - in ef100_tx_init()
38 tx_queue->efx->tx_channel_offset); in ef100_tx_init()
46 tx_queue->tso_version = 3; in ef100_tx_init()
47 if (efx_mcdi_tx_init(tx_queue)) in ef100_tx_init()
48 netdev_WARN(tx_queue->efx->net_dev, in ef100_tx_init()
49 "failed to initialise TXQ %d\n", tx_queue->queue); in ef100_tx_init()
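
The netdev_get_tx_queue() call above maps this hardware queue onto a core netdev TX queue by subtracting tx_channel_offset from the channel number. A tiny sketch of that index arithmetic; the names mirror the listing, the values are made up:

#include <stdio.h>

int main(void)
{
    unsigned int tx_channel_offset = 4;   /* first TX-capable channel (illustrative) */
    unsigned int channel = 6;             /* this queue's channel number             */
    unsigned int core_txq_index = channel - tx_channel_offset;

    printf("netdev TX queue index: %u\n", core_txq_index);   /* 2 */
    return 0;
}
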
52 static bool ef100_tx_can_tso(struct efx_tx_queue *tx_queue, struct sk_buff *skb) in ef100_tx_can_tso() argument
54 struct efx_nic *efx = tx_queue->efx; in ef100_tx_can_tso()
95 buffer = efx_tx_queue_get_insert_buffer(tx_queue); in ef100_tx_can_tso()
100 ++tx_queue->insert_count; in ef100_tx_can_tso()
118 static efx_oword_t *ef100_tx_desc(struct efx_tx_queue *tx_queue, unsigned int index) in ef100_tx_desc() argument
120 if (likely(tx_queue->txd.buf.addr)) in ef100_tx_desc()
121 return ((efx_oword_t *)tx_queue->txd.buf.addr) + index; in ef100_tx_desc()
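
ef100_tx_desc() resolves a ring index by plain pointer arithmetic over one contiguous descriptor buffer, guarding against a ring that was never allocated. A loose sketch of that lookup pattern; fake_desc_t and ring_desc() are illustrative stand-ins, not driver types:

#include <stdio.h>

typedef struct { unsigned long long w[2]; } fake_desc_t;   /* 16-byte stand-in for efx_oword_t */

static fake_desc_t *ring_desc(fake_desc_t *base, unsigned int index)
{
    return base ? base + index : NULL;   /* NULL base: ring not allocated */
}

int main(void)
{
    fake_desc_t ring[8];

    printf("entry 3 lives at %p\n", (void *)ring_desc(ring, 3));
    printf("unallocated ring gives %p\n", (void *)ring_desc(NULL, 3));
    return 0;
}
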
126 static void ef100_notify_tx_desc(struct efx_tx_queue *tx_queue) in ef100_notify_tx_desc() argument
131 tx_queue->xmit_pending = false; in ef100_notify_tx_desc()
133 if (unlikely(tx_queue->notify_count == tx_queue->write_count)) in ef100_notify_tx_desc()
136 write_ptr = tx_queue->write_count & tx_queue->ptr_mask; in ef100_notify_tx_desc()
139 efx_writed_page(tx_queue->efx, &reg, in ef100_notify_tx_desc()
140 ER_GZ_TX_RING_DOORBELL, tx_queue->queue); in ef100_notify_tx_desc()
141 tx_queue->notify_count = tx_queue->write_count; in ef100_notify_tx_desc()
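
The notify path above coalesces doorbells: nothing is written if no descriptors were added since the last notify, and the ring index handed to hardware is the running write counter masked by ptr_mask. A self-contained userspace model of that logic, with the MMIO doorbell write replaced by a printf and all names (fake_txq, fake_notify) illustrative:

#include <stdio.h>

struct fake_txq {
    unsigned int write_count;    /* descriptors written to the ring  */
    unsigned int notify_count;   /* value at the last doorbell write */
    unsigned int ptr_mask;       /* ring entries - 1                 */
};

static void fake_notify(struct fake_txq *txq)
{
    unsigned int write_ptr;

    if (txq->notify_count == txq->write_count)
        return;                               /* nothing new to tell the NIC */

    write_ptr = txq->write_count & txq->ptr_mask;
    printf("doorbell <- ring index %u\n", write_ptr);   /* stands in for the MMIO write */
    txq->notify_count = txq->write_count;
}

int main(void)
{
    struct fake_txq txq = { .ptr_mask = 511 };

    txq.write_count = 3;
    fake_notify(&txq);        /* rings: index 3 */
    fake_notify(&txq);        /* no-op: already notified */
    txq.write_count = 515;
    fake_notify(&txq);        /* rings: index 3 again, the counter wrapped the ring */
    return 0;
}
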
144 static void ef100_tx_push_buffers(struct efx_tx_queue *tx_queue) in ef100_tx_push_buffers() argument
146 ef100_notify_tx_desc(tx_queue); in ef100_tx_push_buffers()
147 ++tx_queue->pushes; in ef100_tx_push_buffers()
239 static void ef100_tx_make_descriptors(struct efx_tx_queue *tx_queue, in ef100_tx_make_descriptors() argument
243 unsigned int old_write_count = tx_queue->write_count; in ef100_tx_make_descriptors()
249 unsigned int nr_descs = tx_queue->insert_count - old_write_count; in ef100_tx_make_descriptors()
264 write_ptr = new_write_count & tx_queue->ptr_mask; in ef100_tx_make_descriptors()
265 buffer = &tx_queue->buffer[write_ptr]; in ef100_tx_make_descriptors()
266 txd = ef100_tx_desc(tx_queue, write_ptr); in ef100_tx_make_descriptors()
270 tx_queue->packet_write_count = new_write_count; in ef100_tx_make_descriptors()
274 ef100_make_send_desc(tx_queue->efx, skb, in ef100_tx_make_descriptors()
280 ef100_make_tso_desc(tx_queue->efx, skb, in ef100_tx_make_descriptors()
294 } while (new_write_count != tx_queue->insert_count); in ef100_tx_make_descriptors()
298 tx_queue->write_count = new_write_count; in ef100_tx_make_descriptors()
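
ef100_tx_make_descriptors() walks the counters from the last written descriptor up to insert_count, deriving each ring slot as counter & ptr_mask so the walk wraps naturally at the end of the ring. A small model of that loop shape only; the actual descriptor formatting done by the listed ef100_make_send_desc()/ef100_make_tso_desc() calls is omitted:

#include <stdio.h>

int main(void)
{
    unsigned int ptr_mask = 7;          /* tiny 8-entry ring for clarity */
    unsigned int write_count = 6;       /* last state pushed to the ring */
    unsigned int insert_count = 10;     /* 4 new buffers mapped          */
    unsigned int nr_descs = insert_count - write_count;

    printf("writing %u descriptors\n", nr_descs);
    do {
        unsigned int write_ptr = write_count & ptr_mask;

        printf("  slot %u\n", write_ptr);   /* prints 6, 7, 0, 1: wraps at the mask */
        ++write_count;
    } while (write_count != insert_count);
    return 0;
}
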
309 void ef100_tx_write(struct efx_tx_queue *tx_queue) in ef100_tx_write() argument
311 ef100_tx_make_descriptors(tx_queue, NULL, 0); in ef100_tx_write()
312 ef100_tx_push_buffers(tx_queue); in ef100_tx_write()
321 struct efx_tx_queue *tx_queue = in ef100_ev_tx() local
323 unsigned int tx_index = (tx_queue->read_count + tx_done - 1) & in ef100_ev_tx()
324 tx_queue->ptr_mask; in ef100_ev_tx()
326 efx_xmit_done(tx_queue, tx_index); in ef100_ev_tx()
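
The TX completion event carries a count of completed descriptors, and the listed lines turn it into the ring index of the last completed entry: (read_count + tx_done - 1) & ptr_mask. The same arithmetic with made-up values:

#include <stdio.h>

int main(void)
{
    unsigned int ptr_mask = 511;
    unsigned int read_count = 510;   /* oldest descriptor still awaiting completion */
    unsigned int tx_done = 4;        /* completions reported by the event           */
    unsigned int tx_index = (read_count + tx_done - 1) & ptr_mask;

    printf("last completed ring index: %u\n", tx_index);   /* (510 + 3) & 511 = 1 */
    return 0;
}
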
336 int ef100_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) in ef100_enqueue_skb() argument
338 unsigned int old_insert_count = tx_queue->insert_count; in ef100_enqueue_skb()
339 struct efx_nic *efx = tx_queue->efx; in ef100_enqueue_skb()
345 if (!tx_queue->buffer || !tx_queue->ptr_mask) { in ef100_enqueue_skb()
354 if (segments && !ef100_tx_can_tso(tx_queue, skb)) { in ef100_enqueue_skb()
355 rc = efx_tx_tso_fallback(tx_queue, skb); in ef100_enqueue_skb()
356 tx_queue->tso_fallbacks++; in ef100_enqueue_skb()
364 rc = efx_tx_map_data(tx_queue, skb, segments); in ef100_enqueue_skb()
367 ef100_tx_make_descriptors(tx_queue, skb, segments); in ef100_enqueue_skb()
369 fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); in ef100_enqueue_skb()
373 netif_tx_stop_queue(tx_queue->core_txq); in ef100_enqueue_skb()
379 efx_for_each_channel_tx_queue(txq2, tx_queue->channel) in ef100_enqueue_skb()
381 fill_level = efx_channel_tx_old_fill_level(tx_queue->channel); in ef100_enqueue_skb()
383 netif_tx_start_queue(tx_queue->core_txq); in ef100_enqueue_skb()
386 tx_queue->xmit_pending = true; in ef100_enqueue_skb()
392 if (__netdev_tx_sent_queue(tx_queue->core_txq, skb->len, xmit_more) || in ef100_enqueue_skb()
393 tx_queue->write_count - tx_queue->notify_count > 255) in ef100_enqueue_skb()
394 ef100_tx_push_buffers(tx_queue); in ef100_enqueue_skb()
397 tx_queue->tso_bursts++; in ef100_enqueue_skb()
398 tx_queue->tso_packets += segments; in ef100_enqueue_skb()
399 tx_queue->tx_packets += segments; in ef100_enqueue_skb()
401 tx_queue->tx_packets++; in ef100_enqueue_skb()
406 efx_enqueue_unwind(tx_queue, old_insert_count); in ef100_enqueue_skb()
415 if (tx_queue->xmit_pending && !xmit_more) in ef100_enqueue_skb()
416 ef100_tx_push_buffers(tx_queue); in ef100_enqueue_skb()
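
At the tail of ef100_enqueue_skb(), the doorbell is deferred while the stack is batching packets, but buffers are pushed anyway once __netdev_tx_sent_queue() asks for a flush or more than 255 descriptors are pending since the last notify (and again on the final listed lines if an earlier skb left xmit_pending set). A sketch of that decision only, with the BQL/xmit_more outcome folded into a single boolean; should_push() and stack_wants_flush are illustrative names:

#include <stdbool.h>
#include <stdio.h>

/* stack_wants_flush folds together what __netdev_tx_sent_queue() decides
 * from BQL state and xmit_more in the real driver */
static bool should_push(bool stack_wants_flush, unsigned int write_count,
                        unsigned int notify_count)
{
    return stack_wants_flush || (write_count - notify_count > 255);
}

int main(void)
{
    printf("%d\n", should_push(false, 100, 0));   /* 0: keep batching under xmit_more    */
    printf("%d\n", should_push(false, 300, 0));   /* 1: too many un-notified descriptors */
    printf("%d\n", should_push(true, 10, 0));     /* 1: stack asked for a flush          */
    return 0;
}
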