Lines Matching refs:tx_queue

33 static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue,  in efx_tx_get_copy_buffer()  argument
36 unsigned int index = efx_tx_queue_get_insert_index(tx_queue); in efx_tx_get_copy_buffer()
38 &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)]; in efx_tx_get_copy_buffer()
43 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, in efx_tx_get_copy_buffer()
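
The shift on line 38 is the copy-buffer lookup: several small per-descriptor copy buffers are packed into each DMA-coherent page allocated on line 43. A minimal sketch of that index math, assuming PAGE_SHIFT = 12 and EFX_TX_CB_ORDER = 7 (the real constants live in the driver's headers):

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define EFX_TX_CB_ORDER 7    /* assumption: log2 of the per-descriptor copy buffer size */

    int main(void)
    {
            unsigned int index = 100;    /* insert index into the descriptor ring */
            unsigned int page = index >> (PAGE_SHIFT - EFX_TX_CB_ORDER);
            unsigned int offset = (index << EFX_TX_CB_ORDER) & ((1u << PAGE_SHIFT) - 1);

            /* 2^(12-7) = 32 copy buffers fit in one 4 KiB page */
            printf("index %u -> cb_page[%u], byte offset %u\n", index, page, offset);
            return 0;
    }

With 128-byte buffers, descriptor 100 lands in cb_page[3] at offset 512, i.e. buffer 100 % 32 = 4 within that page.
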
51 u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, in efx_tx_get_copy_buffer_limited() argument
56 return efx_tx_get_copy_buffer(tx_queue, buffer); in efx_tx_get_copy_buffer_limited()
59 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, in efx_dequeue_buffer() argument
65 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; in efx_dequeue_buffer()
82 if (tx_queue->timestamping && in efx_dequeue_buffer()
83 (tx_queue->completed_timestamp_major || in efx_dequeue_buffer()
84 tx_queue->completed_timestamp_minor)) { in efx_dequeue_buffer()
88 efx_ptp_nic_to_kernel_time(tx_queue); in efx_dequeue_buffer()
91 tx_queue->completed_timestamp_major = 0; in efx_dequeue_buffer()
92 tx_queue->completed_timestamp_minor = 0; in efx_dequeue_buffer()
95 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, in efx_dequeue_buffer()
97 tx_queue->queue, tx_queue->read_count); in efx_dequeue_buffer()
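
Lines 82-92 are the tail of the buffer release path: if the queue is timestamping, the latched completion timestamp is converted to kernel time and attached to the skb, then the major/minor words are cleared so the next completion starts clean. The unmap step elided between lines 65 and 82 follows the usual DMA pattern; the field and flag names below (dma_addr, unmap_len, EFX_TX_BUF_MAP_SINGLE) are quoted from memory and should be treated as assumptions:

    if (buffer->unmap_len) {
            /* head-of-skb data was mapped with dma_map_single(),
             * fragments with dma_map_page(); unmap accordingly */
            if (buffer->flags & EFX_TX_BUF_MAP_SINGLE)
                    dma_unmap_single(dma_dev, buffer->dma_addr,
                                     buffer->unmap_len, DMA_TO_DEVICE);
            else
                    dma_unmap_page(dma_dev, buffer->dma_addr,
                                   buffer->unmap_len, DMA_TO_DEVICE);
            buffer->unmap_len = 0;
    }
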
164 static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue, in efx_enqueue_skb_copy() argument
174 buffer = efx_tx_queue_get_insert_buffer(tx_queue); in efx_enqueue_skb_copy()
176 copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer); in efx_enqueue_skb_copy()
187 ++tx_queue->insert_count; in efx_enqueue_skb_copy()
285 static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, in efx_enqueue_skb_pio() argument
289 efx_tx_queue_get_insert_buffer(tx_queue); in efx_enqueue_skb_pio()
290 u8 __iomem *piobuf = tx_queue->piobuf; in efx_enqueue_skb_pio()
305 efx_skb_copy_bits_to_pio(tx_queue->efx, skb, in efx_enqueue_skb_pio()
307 efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf); in efx_enqueue_skb_pio()
315 __iowrite64_copy(tx_queue->piobuf, skb->data, in efx_enqueue_skb_pio()
328 tx_queue->piobuf_offset); in efx_enqueue_skb_pio()
329 ++tx_queue->insert_count; in efx_enqueue_skb_pio()
334 static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, in efx_tx_map_chunk() argument
338 const struct efx_nic_type *nic_type = tx_queue->efx->type; in efx_tx_map_chunk()
344 buffer = efx_tx_queue_get_insert_buffer(tx_queue); in efx_tx_map_chunk()
345 dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len); in efx_tx_map_chunk()
352 ++tx_queue->insert_count; in efx_tx_map_chunk()
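
efx_tx_map_chunk() turns one DMA mapping into as many descriptors as the NIC needs: line 345 asks the NIC type how much of the remaining length one descriptor may carry, and line 352 advances the free-running insert pointer once per chunk. A runnable sketch of that loop, with a flat 4096-byte limit standing in for nic_type->tx_limit_len():

    #include <stdio.h>

    #define TX_LIMIT 4096    /* assumption: per-descriptor length limit */

    static unsigned int tx_limit_len(unsigned long long dma_addr, unsigned int len)
    {
            /* the real hook may also split at device-specific boundaries */
            return len > TX_LIMIT ? TX_LIMIT : len;
    }

    int main(void)
    {
            unsigned long long dma_addr = 0x1000;
            unsigned int len = 10000, insert_count = 0;

            do {
                    unsigned int dma_len = tx_limit_len(dma_addr, len);

                    printf("desc %u: addr 0x%llx len %u\n", insert_count, dma_addr, dma_len);
                    dma_addr += dma_len;
                    len -= dma_len;
                    ++insert_count;    /* one ring entry per chunk */
            } while (len);
            return 0;
    }
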
360 static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, in efx_tx_map_data() argument
363 struct efx_nic *efx = tx_queue->efx; in efx_tx_map_data()
391 tx_queue->tso_long_headers++; in efx_tx_map_data()
392 efx_tx_map_chunk(tx_queue, dma_addr, header_len); in efx_tx_map_data()
403 buffer = efx_tx_map_chunk(tx_queue, dma_addr, len); in efx_tx_map_data()
438 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, in efx_enqueue_unwind() argument
446 while (tx_queue->insert_count != insert_count) { in efx_enqueue_unwind()
447 --tx_queue->insert_count; in efx_enqueue_unwind()
448 buffer = __efx_tx_queue_get_insert_buffer(tx_queue); in efx_enqueue_unwind()
449 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); in efx_enqueue_unwind()
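
Lines 446-449 are the entire failure rollback: because insert_count is free-running, unwinding a half-built packet is just decrementing back to the snapshot passed in by efx_enqueue_skb() and releasing each descriptor on the way. Reassembled with comments, as a sketch:

    /* walk insert_count back to the caller's snapshot */
    while (tx_queue->insert_count != insert_count) {
            --tx_queue->insert_count;
            /* double-underscore variant: reads insert_count directly,
             * applying ptr_mask to index the ring array */
            buffer = __efx_tx_queue_get_insert_buffer(tx_queue);
            efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
    }
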
462 static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, in efx_tx_tso_fallback() argument
478 efx_enqueue_skb(tx_queue, skb); in efx_tx_tso_fallback()
501 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) in efx_enqueue_skb() argument
503 unsigned int old_insert_count = tx_queue->insert_count; in efx_enqueue_skb()
520 EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso); in efx_enqueue_skb()
521 rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped); in efx_enqueue_skb()
523 rc = efx_tx_tso_fallback(tx_queue, skb); in efx_enqueue_skb()
524 tx_queue->tso_fallbacks++; in efx_enqueue_skb()
532 efx_nic_may_tx_pio(tx_queue)) { in efx_enqueue_skb()
534 if (efx_enqueue_skb_pio(tx_queue, skb)) in efx_enqueue_skb()
536 tx_queue->pio_packets++; in efx_enqueue_skb()
541 if (efx_enqueue_skb_copy(tx_queue, skb)) in efx_enqueue_skb()
543 tx_queue->cb_packets++; in efx_enqueue_skb()
548 if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments))) in efx_enqueue_skb()
551 efx_tx_maybe_stop_queue(tx_queue); in efx_enqueue_skb()
554 if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) { in efx_enqueue_skb()
555 struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); in efx_enqueue_skb()
564 efx_nic_push_buffers(tx_queue); in efx_enqueue_skb()
566 tx_queue->xmit_more_available = xmit_more; in efx_enqueue_skb()
570 tx_queue->tso_bursts++; in efx_enqueue_skb()
571 tx_queue->tso_packets += segments; in efx_enqueue_skb()
572 tx_queue->tx_packets += segments; in efx_enqueue_skb()
574 tx_queue->tx_packets++; in efx_enqueue_skb()
581 efx_enqueue_unwind(tx_queue, old_insert_count); in efx_enqueue_skb()
589 struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); in efx_enqueue_skb()
594 efx_nic_push_buffers(tx_queue); in efx_enqueue_skb()
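
Line 554 is where byte-queue-limits accounting and the doorbell meet: __netdev_tx_sent_queue() returns true when the descriptors must be pushed immediately, either because the caller did not set xmit_more or because BQL just stopped the queue. A sketch of the decision, assuming the partner queue's deferred work is flushed at the same time, as lines 555 and 564 suggest:

    if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) {
            struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue);

            /* a doorbell deferred on the partner queue rides along */
            if (txq2->xmit_more_available)
                    efx_nic_push_buffers(txq2);
            efx_nic_push_buffers(tx_queue);
    } else {
            /* defer the doorbell; remember the debt for a later flush */
            tx_queue->xmit_more_available = xmit_more;
    }
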
605 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, in efx_dequeue_buffers() argument
610 struct efx_nic *efx = tx_queue->efx; in efx_dequeue_buffers()
613 stop_index = (index + 1) & tx_queue->ptr_mask; in efx_dequeue_buffers()
614 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; in efx_dequeue_buffers()
617 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; in efx_dequeue_buffers()
623 tx_queue->queue, read_ptr); in efx_dequeue_buffers()
628 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); in efx_dequeue_buffers()
630 ++tx_queue->read_count; in efx_dequeue_buffers()
631 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; in efx_dequeue_buffers()
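
The masking on lines 613-631 works because the ring size is a power of two and the counters are free-running: read_count and insert_count only wrap modulo 2^32, so the fill level is a plain subtraction and ptr_mask recovers the array slot. A demonstration of why wrap-around is harmless:

    #include <stdio.h>

    int main(void)
    {
            unsigned int ptr_mask = 7;                  /* 8-entry ring */
            unsigned int insert_count = 0xfffffffe;     /* about to wrap */
            unsigned int read_count   = 0xfffffffc;

            printf("fill level: %u\n", insert_count - read_count);     /* 2 */
            insert_count += 3;                          /* wraps past zero */
            printf("fill level: %u\n", insert_count - read_count);     /* still correct: 5 */
            printf("insert slot: %u\n", insert_count & ptr_mask);      /* 1 */
            return 0;
    }
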
648 struct efx_tx_queue *tx_queue; in efx_hard_start_xmit() local
665 tx_queue = efx_get_tx_queue(efx, index, type); in efx_hard_start_xmit()
667 return efx_enqueue_skb(tx_queue, skb); in efx_hard_start_xmit()
670 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) in efx_init_tx_queue_core_txq() argument
672 struct efx_nic *efx = tx_queue->efx; in efx_init_tx_queue_core_txq()
675 tx_queue->core_txq = in efx_init_tx_queue_core_txq()
677 tx_queue->queue / EFX_TXQ_TYPES + in efx_init_tx_queue_core_txq()
678 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? in efx_init_tx_queue_core_txq()
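
Lines 677-678 map a hardware queue number onto a core netdev queue: the low-priority queues of all channels come first, then the HIGHPRI ones. A worked example, assuming the older layout of EFX_TXQ_TYPES = 4 hardware queues per channel with EFX_TXQ_TYPE_HIGHPRI = 2 as the priority bit (treat both values as assumptions):

    #include <stdio.h>

    #define EFX_TXQ_TYPES        4    /* assumption: hw queues per channel */
    #define EFX_TXQ_TYPE_HIGHPRI 2    /* assumption: priority flag bit */

    int main(void)
    {
            unsigned int n_tx_channels = 4;
            unsigned int queue = 6;    /* channel 1's HIGHPRI queue */
            unsigned int core_txq = queue / EFX_TXQ_TYPES +
                                    ((queue & EFX_TXQ_TYPE_HIGHPRI) ? n_tx_channels : 0);

            /* HIGHPRI queues sit after the n_tx_channels low-priority ones */
            printf("hw queue %u -> core txq %u\n", queue, core_txq);    /* 5 */
            return 0;
    }
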
688 struct efx_tx_queue *tx_queue; in efx_setup_tc() local
713 efx_for_each_possible_channel_tx_queue(tx_queue, in efx_setup_tc()
715 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) in efx_setup_tc()
717 if (!tx_queue->buffer) { in efx_setup_tc()
718 rc = efx_probe_tx_queue(tx_queue); in efx_setup_tc()
722 if (!tx_queue->initialised) in efx_setup_tc()
723 efx_init_tx_queue(tx_queue); in efx_setup_tc()
724 efx_init_tx_queue_core_txq(tx_queue); in efx_setup_tc()
748 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) in efx_xmit_done() argument
751 struct efx_nic *efx = tx_queue->efx; in efx_xmit_done()
755 EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask); in efx_xmit_done()
757 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); in efx_xmit_done()
758 tx_queue->pkts_compl += pkts_compl; in efx_xmit_done()
759 tx_queue->bytes_compl += bytes_compl; in efx_xmit_done()
762 ++tx_queue->merge_events; in efx_xmit_done()
769 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && in efx_xmit_done()
772 txq2 = efx_tx_queue_partner(tx_queue); in efx_xmit_done()
773 fill_level = max(tx_queue->insert_count - tx_queue->read_count, in efx_xmit_done()
776 netif_tx_wake_queue(tx_queue->core_txq); in efx_xmit_done()
780 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { in efx_xmit_done()
781 tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); in efx_xmit_done()
782 if (tx_queue->read_count == tx_queue->old_write_count) { in efx_xmit_done()
784 tx_queue->empty_read_count = in efx_xmit_done()
785 tx_queue->read_count | EFX_EMPTY_COUNT_VALID; in efx_xmit_done()
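
Lines 780-785 record the point at which the queue drained: empty_read_count stores the read_count at which the ring went empty, with a flag bit marking the snapshot valid, so the doorbell path can later test both with a single read. A sketch of the encoding, assuming the valid flag is the counter's top bit as in the driver's nic.h:

    #include <stdio.h>

    #define EFX_EMPTY_COUNT_VALID 0x80000000u    /* assumption: top bit = snapshot valid */

    int main(void)
    {
            unsigned int read_count = 1234;
            unsigned int empty_read_count = read_count | EFX_EMPTY_COUNT_VALID;

            /* one word carries both the "went empty" flag and the count */
            if (empty_read_count & EFX_EMPTY_COUNT_VALID)
                    printf("queue went empty at read_count %u\n",
                           empty_read_count & ~EFX_EMPTY_COUNT_VALID);
            return 0;
    }
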
790 static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue) in efx_tx_cb_page_count() argument
792 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER); in efx_tx_cb_page_count()
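
Line 792 sizes the cb_page array allocated in efx_probe_tx_queue(): each page holds PAGE_SIZE >> EFX_TX_CB_ORDER copy buffers, one per ring entry. A worked example, again assuming 4 KiB pages and EFX_TX_CB_ORDER = 7:

    #include <stdio.h>

    #define PAGE_SIZE       4096
    #define EFX_TX_CB_ORDER 7    /* assumption, as above */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int ptr_mask = 1023;    /* 1024-entry ring */
            unsigned int pages = DIV_ROUND_UP(ptr_mask + 1,
                                              PAGE_SIZE >> EFX_TX_CB_ORDER);

            /* 4096 >> 7 = 32 buffers per page, so 1024 / 32 = 32 pages */
            printf("%u copy-buffer pages for a %u-entry ring\n",
                   pages, ptr_mask + 1);
            return 0;
    }
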
795 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) in efx_probe_tx_queue() argument
797 struct efx_nic *efx = tx_queue->efx; in efx_probe_tx_queue()
804 tx_queue->ptr_mask = entries - 1; in efx_probe_tx_queue()
808 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); in efx_probe_tx_queue()
811 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), in efx_probe_tx_queue()
813 if (!tx_queue->buffer) in efx_probe_tx_queue()
816 tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue), in efx_probe_tx_queue()
817 sizeof(tx_queue->cb_page[0]), GFP_KERNEL); in efx_probe_tx_queue()
818 if (!tx_queue->cb_page) { in efx_probe_tx_queue()
824 rc = efx_nic_probe_tx(tx_queue); in efx_probe_tx_queue()
831 kfree(tx_queue->cb_page); in efx_probe_tx_queue()
832 tx_queue->cb_page = NULL; in efx_probe_tx_queue()
834 kfree(tx_queue->buffer); in efx_probe_tx_queue()
835 tx_queue->buffer = NULL; in efx_probe_tx_queue()
839 void efx_init_tx_queue(struct efx_tx_queue *tx_queue) in efx_init_tx_queue() argument
841 struct efx_nic *efx = tx_queue->efx; in efx_init_tx_queue()
844 "initialising TX queue %d\n", tx_queue->queue); in efx_init_tx_queue()
846 tx_queue->insert_count = 0; in efx_init_tx_queue()
847 tx_queue->write_count = 0; in efx_init_tx_queue()
848 tx_queue->packet_write_count = 0; in efx_init_tx_queue()
849 tx_queue->old_write_count = 0; in efx_init_tx_queue()
850 tx_queue->read_count = 0; in efx_init_tx_queue()
851 tx_queue->old_read_count = 0; in efx_init_tx_queue()
852 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; in efx_init_tx_queue()
853 tx_queue->xmit_more_available = false; in efx_init_tx_queue()
854 tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) && in efx_init_tx_queue()
855 tx_queue->channel == efx_ptp_channel(efx)); in efx_init_tx_queue()
856 tx_queue->completed_desc_ptr = tx_queue->ptr_mask; in efx_init_tx_queue()
857 tx_queue->completed_timestamp_major = 0; in efx_init_tx_queue()
858 tx_queue->completed_timestamp_minor = 0; in efx_init_tx_queue()
863 tx_queue->handle_tso = efx_enqueue_skb_tso; in efx_init_tx_queue()
866 efx_nic_init_tx(tx_queue); in efx_init_tx_queue()
868 tx_queue->initialised = true; in efx_init_tx_queue()
871 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) in efx_fini_tx_queue() argument
875 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, in efx_fini_tx_queue()
876 "shutting down TX queue %d\n", tx_queue->queue); in efx_fini_tx_queue()
878 if (!tx_queue->buffer) in efx_fini_tx_queue()
882 while (tx_queue->read_count != tx_queue->write_count) { in efx_fini_tx_queue()
884 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; in efx_fini_tx_queue()
885 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); in efx_fini_tx_queue()
887 ++tx_queue->read_count; in efx_fini_tx_queue()
889 tx_queue->xmit_more_available = false; in efx_fini_tx_queue()
890 netdev_tx_reset_queue(tx_queue->core_txq); in efx_fini_tx_queue()
893 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) in efx_remove_tx_queue() argument
897 if (!tx_queue->buffer) in efx_remove_tx_queue()
900 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, in efx_remove_tx_queue()
901 "destroying TX queue %d\n", tx_queue->queue); in efx_remove_tx_queue()
902 efx_nic_remove_tx(tx_queue); in efx_remove_tx_queue()
904 if (tx_queue->cb_page) { in efx_remove_tx_queue()
905 for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++) in efx_remove_tx_queue()
906 efx_nic_free_buffer(tx_queue->efx, in efx_remove_tx_queue()
907 &tx_queue->cb_page[i]); in efx_remove_tx_queue()
908 kfree(tx_queue->cb_page); in efx_remove_tx_queue()
909 tx_queue->cb_page = NULL; in efx_remove_tx_queue()
912 kfree(tx_queue->buffer); in efx_remove_tx_queue()
913 tx_queue->buffer = NULL; in efx_remove_tx_queue()