Lines Matching refs:tx_queue
36 static inline u8 *efx_tx_get_copy_buffer(struct efx_tx_queue *tx_queue, in efx_tx_get_copy_buffer() argument
39 unsigned int index = efx_tx_queue_get_insert_index(tx_queue); in efx_tx_get_copy_buffer()
41 &tx_queue->cb_page[index >> (PAGE_SHIFT - EFX_TX_CB_ORDER)]; in efx_tx_get_copy_buffer()
46 efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE, in efx_tx_get_copy_buffer()
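The copy-buffer lookup above maps a descriptor insert index onto the cb_page array: each DMA-coherent page is carved into PAGE_SIZE >> EFX_TX_CB_ORDER fixed-size copy buffers, so the page is picked by index >> (PAGE_SHIFT - EFX_TX_CB_ORDER). A minimal userspace sketch of that arithmetic, with PAGE_SHIFT = 12 and EFX_TX_CB_ORDER = 7 (128-byte buffers) assumed purely for illustration:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1u << PAGE_SHIFT)
#define EFX_TX_CB_ORDER 7                       /* assumed value */
#define EFX_TX_CB_SIZE  (1u << EFX_TX_CB_ORDER) /* 128-byte copy buffers */

int main(void)
{
	unsigned int index;

	/* with these constants, 32 copy buffers fit per page */
	for (index = 0; index < 70; index += 31) {
		unsigned int page = index >> (PAGE_SHIFT - EFX_TX_CB_ORDER);
		unsigned int slot = index & ((PAGE_SIZE / EFX_TX_CB_SIZE) - 1);

		printf("index %2u -> cb_page[%u], byte offset %u\n",
		       index, page, slot * EFX_TX_CB_SIZE);
	}
	return 0;
}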
54 u8 *efx_tx_get_copy_buffer_limited(struct efx_tx_queue *tx_queue, in efx_tx_get_copy_buffer_limited() argument
59 return efx_tx_get_copy_buffer(tx_queue, buffer); in efx_tx_get_copy_buffer_limited()
62 static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, in efx_dequeue_buffer() argument
68 struct device *dma_dev = &tx_queue->efx->pci_dev->dev; in efx_dequeue_buffer()
85 if (tx_queue->timestamping && in efx_dequeue_buffer()
86 (tx_queue->completed_timestamp_major || in efx_dequeue_buffer()
87 tx_queue->completed_timestamp_minor)) { in efx_dequeue_buffer()
91 efx_ptp_nic_to_kernel_time(tx_queue); in efx_dequeue_buffer()
94 tx_queue->completed_timestamp_major = 0; in efx_dequeue_buffer()
95 tx_queue->completed_timestamp_minor = 0; in efx_dequeue_buffer()
98 netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev, in efx_dequeue_buffer()
100 tx_queue->queue, tx_queue->read_count); in efx_dequeue_buffer()
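Besides unmapping and completing the buffer, efx_dequeue_buffer() hands a latched hardware timestamp to the skb, but only when the queue does timestamping and a completion timestamp (major/minor halves) is actually pending, and it clears the latch so the value is consumed exactly once. A toy model of that guard (field types and the printed format are illustrative assumptions):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct txq_ts {
	bool timestamping;
	uint32_t completed_timestamp_major; /* e.g. seconds */
	uint32_t completed_timestamp_minor; /* e.g. sub-second ticks */
};

static void consume_timestamp(struct txq_ts *q)
{
	if (q->timestamping &&
	    (q->completed_timestamp_major || q->completed_timestamp_minor)) {
		printf("deliver hwtstamp %u.%09u to the skb\n",
		       q->completed_timestamp_major,
		       q->completed_timestamp_minor);
		/* clear the latch: consume exactly once */
		q->completed_timestamp_major = 0;
		q->completed_timestamp_minor = 0;
	}
}

int main(void)
{
	struct txq_ts q = { true, 12, 345 };

	consume_timestamp(&q); /* delivers */
	consume_timestamp(&q); /* no-op: already consumed */
	return 0;
}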
167 static int efx_enqueue_skb_copy(struct efx_tx_queue *tx_queue, in efx_enqueue_skb_copy() argument
177 buffer = efx_tx_queue_get_insert_buffer(tx_queue); in efx_enqueue_skb_copy()
179 copy_buffer = efx_tx_get_copy_buffer(tx_queue, buffer); in efx_enqueue_skb_copy()
190 ++tx_queue->insert_count; in efx_enqueue_skb_copy()
288 static int efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, in efx_enqueue_skb_pio() argument
292 efx_tx_queue_get_insert_buffer(tx_queue); in efx_enqueue_skb_pio()
293 u8 __iomem *piobuf = tx_queue->piobuf; in efx_enqueue_skb_pio()
308 efx_skb_copy_bits_to_pio(tx_queue->efx, skb, in efx_enqueue_skb_pio()
310 efx_flush_copy_buffer(tx_queue->efx, piobuf, &copy_buf); in efx_enqueue_skb_pio()
318 __iowrite64_copy(tx_queue->piobuf, skb->data, in efx_enqueue_skb_pio()
331 tx_queue->piobuf_offset); in efx_enqueue_skb_pio()
332 ++tx_queue->insert_count; in efx_enqueue_skb_pio()
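The PIO path above writes the frame straight into the NIC's PIO aperture with __iowrite64_copy(), which transfers whole aligned 8-byte words, so the length is rounded up to a word multiple. A rough userspace model, with memcpy standing in for MMIO (the guarantee that skb data is readable to the padded length is the driver's, not shown in this listing):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint64_t piobuf[16]; /* stand-in for the MMIO PIO aperture */

/* model of __iowrite64_copy(): count is in 64-bit words */
static void iowrite64_copy_model(volatile uint64_t *dst, const void *src,
				 size_t count)
{
	memcpy((void *)dst, src, count * 8);
}

int main(void)
{
	/* frame padded to a word multiple so the rounded-up copy
	 * stays in bounds */
	char frame[16] = "a short frame";
	size_t len = 14;
	size_t words = (len + 7) / 8; /* DIV_ROUND_UP(len, 8) */

	iowrite64_copy_model(piobuf, frame, words);
	printf("pushed %zu bytes as %zu x 8-byte words\n", len, words);
	return 0;
}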
337 static struct efx_tx_buffer *efx_tx_map_chunk(struct efx_tx_queue *tx_queue, in efx_tx_map_chunk() argument
341 const struct efx_nic_type *nic_type = tx_queue->efx->type; in efx_tx_map_chunk()
347 buffer = efx_tx_queue_get_insert_buffer(tx_queue); in efx_tx_map_chunk()
348 dma_len = nic_type->tx_limit_len(tx_queue, dma_addr, len); in efx_tx_map_chunk()
355 ++tx_queue->insert_count; in efx_tx_map_chunk()
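efx_tx_map_chunk() splits one DMA mapping into however many descriptors the NIC's per-descriptor length limit requires, bumping insert_count once per descriptor. A sketch of that loop, with a hypothetical 16 KiB limit standing in for nic_type->tx_limit_len():

#include <stdio.h>
#include <stdint.h>

#define TX_DESC_LEN_MAX 16384u /* hypothetical per-descriptor limit */

static unsigned int tx_limit_len(uint64_t dma_addr, unsigned int len)
{
	(void)dma_addr; /* real NICs may also split on address boundaries */
	return len < TX_DESC_LEN_MAX ? len : TX_DESC_LEN_MAX;
}

int main(void)
{
	uint64_t dma_addr = 0x10000000;
	unsigned int len = 40000, insert_count = 0;

	do {
		unsigned int dma_len = tx_limit_len(dma_addr, len);

		printf("desc %u: addr 0x%llx len %u\n", insert_count,
		       (unsigned long long)dma_addr, dma_len);
		dma_addr += dma_len;
		len -= dma_len;
		++insert_count;
	} while (len);
	return 0;
}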
363 static int efx_tx_map_data(struct efx_tx_queue *tx_queue, struct sk_buff *skb, in efx_tx_map_data() argument
366 struct efx_nic *efx = tx_queue->efx; in efx_tx_map_data()
394 tx_queue->tso_long_headers++; in efx_tx_map_data()
395 efx_tx_map_chunk(tx_queue, dma_addr, header_len); in efx_tx_map_data()
406 buffer = efx_tx_map_chunk(tx_queue, dma_addr, len); in efx_tx_map_data()
441 static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue, in efx_enqueue_unwind() argument
449 while (tx_queue->insert_count != insert_count) { in efx_enqueue_unwind()
450 --tx_queue->insert_count; in efx_enqueue_unwind()
451 buffer = __efx_tx_queue_get_insert_buffer(tx_queue); in efx_enqueue_unwind()
452 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); in efx_enqueue_unwind()
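If mapping fails partway through a packet, efx_enqueue_unwind() rolls insert_count back to its value on entry, dequeuing each descriptor written in between; the ring slot comes from masking the counter with ptr_mask. A runnable toy model of the rollback:

#include <stdio.h>

#define RING_SIZE 8 /* illustrative; real rings are larger */

struct buf { int in_use; };

int main(void)
{
	struct buf ring[RING_SIZE] = { { 0 } };
	unsigned int ptr_mask = RING_SIZE - 1;
	unsigned int insert_count = 5, old_insert_count = 2;
	unsigned int i;

	for (i = old_insert_count; i != insert_count; i++)
		ring[i & ptr_mask].in_use = 1; /* partially built packet */

	/* the unwind: walk insert_count back down, releasing each slot */
	while (insert_count != old_insert_count) {
		--insert_count;
		ring[insert_count & ptr_mask].in_use = 0;
		printf("released slot %u\n", insert_count & ptr_mask);
	}
	return 0;
}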
465 static int efx_tx_tso_fallback(struct efx_tx_queue *tx_queue, in efx_tx_tso_fallback() argument
483 efx_enqueue_skb(tx_queue, skb); in efx_tx_tso_fallback()
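When the NIC's TSO engine rejects an skb, efx_tx_tso_fallback() segments it in software and feeds each resulting packet back through efx_enqueue_skb(). A conceptual model of the segmentation only (the real work is done by skb_gso_segment(); the sizes here are made up):

#include <stdio.h>

int main(void)
{
	unsigned int payload = 10000, mss = 1448; /* hypothetical sizes */
	unsigned int off, seg = 0;

	for (off = 0; off < payload; off += mss) {
		unsigned int len = payload - off < mss ? payload - off : mss;

		/* each segment would re-enter efx_enqueue_skb() */
		printf("segment %u: %u bytes\n", seg++, len);
	}
	return 0;
}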
506 netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) in efx_enqueue_skb() argument
508 unsigned int old_insert_count = tx_queue->insert_count; in efx_enqueue_skb()
525 EFX_WARN_ON_ONCE_PARANOID(!tx_queue->handle_tso); in efx_enqueue_skb()
526 rc = tx_queue->handle_tso(tx_queue, skb, &data_mapped); in efx_enqueue_skb()
528 rc = efx_tx_tso_fallback(tx_queue, skb); in efx_enqueue_skb()
529 tx_queue->tso_fallbacks++; in efx_enqueue_skb()
537 efx_nic_may_tx_pio(tx_queue)) { in efx_enqueue_skb()
539 if (efx_enqueue_skb_pio(tx_queue, skb)) in efx_enqueue_skb()
541 tx_queue->pio_packets++; in efx_enqueue_skb()
546 if (efx_enqueue_skb_copy(tx_queue, skb)) in efx_enqueue_skb()
548 tx_queue->cb_packets++; in efx_enqueue_skb()
553 if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments))) in efx_enqueue_skb()
557 netdev_tx_sent_queue(tx_queue->core_txq, skb_len); in efx_enqueue_skb()
559 efx_tx_maybe_stop_queue(tx_queue); in efx_enqueue_skb()
562 if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { in efx_enqueue_skb()
563 struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); in efx_enqueue_skb()
572 efx_nic_push_buffers(tx_queue); in efx_enqueue_skb()
574 tx_queue->xmit_more_available = skb->xmit_more; in efx_enqueue_skb()
578 tx_queue->tso_bursts++; in efx_enqueue_skb()
579 tx_queue->tso_packets += segments; in efx_enqueue_skb()
580 tx_queue->tx_packets += segments; in efx_enqueue_skb()
582 tx_queue->tx_packets++; in efx_enqueue_skb()
589 efx_enqueue_unwind(tx_queue, old_insert_count); in efx_enqueue_skb()
597 struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); in efx_enqueue_skb()
602 efx_nic_push_buffers(tx_queue); in efx_enqueue_skb()
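The push at the end of efx_enqueue_skb() is deliberately deferred: descriptors are handed to hardware only when the stack signals the end of a burst (xmit_more clear) or the core queue has been stopped, so back-to-back packets share one doorbell write. A toy model of that batching decision (efx_nic_push_buffers() and the partner-queue check are simplified away):

#include <stdio.h>
#include <stdbool.h>

struct pkt { int id; bool more; }; /* 'more' models skb->xmit_more */

static void push_buffers(void) /* stands in for efx_nic_push_buffers() */
{
	printf("  doorbell\n");
}

int main(void)
{
	struct pkt batch[] = {
		{ 1, true }, { 2, true }, { 3, false }, /* end of burst */
		{ 4, false },                           /* lone packet */
	};
	bool queue_stopped = false;
	unsigned int i;

	for (i = 0; i < sizeof(batch) / sizeof(batch[0]); i++) {
		printf("enqueue pkt %d\n", batch[i].id);
		if (!batch[i].more || queue_stopped)
			push_buffers(); /* only here do descriptors hit HW */
	}
	return 0;
}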
613 static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, in efx_dequeue_buffers() argument
618 struct efx_nic *efx = tx_queue->efx; in efx_dequeue_buffers()
621 stop_index = (index + 1) & tx_queue->ptr_mask; in efx_dequeue_buffers()
622 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; in efx_dequeue_buffers()
625 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; in efx_dequeue_buffers()
631 tx_queue->queue, read_ptr); in efx_dequeue_buffers()
636 efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl); in efx_dequeue_buffers()
638 ++tx_queue->read_count; in efx_dequeue_buffers()
639 read_ptr = tx_queue->read_count & tx_queue->ptr_mask; in efx_dequeue_buffers()
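efx_dequeue_buffers() completes everything from the current read pointer up to and including the index reported by the completion event, with all arithmetic done modulo the ring size via ptr_mask. A runnable sketch of the walk, including wraparound:

#include <stdio.h>

#define RING_SIZE 8 /* illustrative */

int main(void)
{
	unsigned int ptr_mask = RING_SIZE - 1;
	unsigned int read_count = 6;       /* free-running counter */
	unsigned int index = 9 & ptr_mask; /* index from the TX event */
	unsigned int stop_index = (index + 1) & ptr_mask;
	unsigned int read_ptr = read_count & ptr_mask;

	while (read_ptr != stop_index) {
		printf("complete slot %u (read_count %u)\n",
		       read_ptr, read_count);
		++read_count;
		read_ptr = read_count & ptr_mask;
	}
	return 0;
}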
656 struct efx_tx_queue *tx_queue; in efx_hard_start_xmit() local
673 tx_queue = efx_get_tx_queue(efx, index, type); in efx_hard_start_xmit()
675 return efx_enqueue_skb(tx_queue, skb); in efx_hard_start_xmit()
678 void efx_init_tx_queue_core_txq(struct efx_tx_queue *tx_queue) in efx_init_tx_queue_core_txq() argument
680 struct efx_nic *efx = tx_queue->efx; in efx_init_tx_queue_core_txq()
683 tx_queue->core_txq = in efx_init_tx_queue_core_txq()
685 tx_queue->queue / EFX_TXQ_TYPES + in efx_init_tx_queue_core_txq()
686 ((tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI) ? in efx_init_tx_queue_core_txq()
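The core-txq binding above divides the driver's queue number by EFX_TXQ_TYPES (each channel owns one queue per type) and offsets high-priority queues into a second bank of core netdev queues. A sketch of that index calculation; EFX_TXQ_TYPES = 4, EFX_TXQ_TYPE_HIGHPRI = 2 and the channel count are assumptions for illustration, not taken from this listing:

#include <stdio.h>

#define EFX_TXQ_TYPES        4 /* assumed */
#define EFX_TXQ_TYPE_HIGHPRI 2 /* assumed flag bit */

int main(void)
{
	unsigned int n_tx_channels = 4; /* hypothetical adapter */
	unsigned int queue;

	for (queue = 0; queue < 2 * EFX_TXQ_TYPES; queue++) {
		unsigned int core = queue / EFX_TXQ_TYPES +
			((queue & EFX_TXQ_TYPE_HIGHPRI) ? n_tx_channels : 0);

		printf("hw queue %u -> core txq %u\n", queue, core);
	}
	return 0;
}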
696 struct efx_tx_queue *tx_queue; in efx_setup_tc() local
721 efx_for_each_possible_channel_tx_queue(tx_queue, in efx_setup_tc()
723 if (!(tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI)) in efx_setup_tc()
725 if (!tx_queue->buffer) { in efx_setup_tc()
726 rc = efx_probe_tx_queue(tx_queue); in efx_setup_tc()
730 if (!tx_queue->initialised) in efx_setup_tc()
731 efx_init_tx_queue(tx_queue); in efx_setup_tc()
732 efx_init_tx_queue_core_txq(tx_queue); in efx_setup_tc()
756 void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) in efx_xmit_done() argument
759 struct efx_nic *efx = tx_queue->efx; in efx_xmit_done()
763 EFX_WARN_ON_ONCE_PARANOID(index > tx_queue->ptr_mask); in efx_xmit_done()
765 efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl); in efx_xmit_done()
766 tx_queue->pkts_compl += pkts_compl; in efx_xmit_done()
767 tx_queue->bytes_compl += bytes_compl; in efx_xmit_done()
770 ++tx_queue->merge_events; in efx_xmit_done()
777 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) && in efx_xmit_done()
780 txq2 = efx_tx_queue_partner(tx_queue); in efx_xmit_done()
781 fill_level = max(tx_queue->insert_count - tx_queue->read_count, in efx_xmit_done()
784 netif_tx_wake_queue(tx_queue->core_txq); in efx_xmit_done()
788 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { in efx_xmit_done()
789 tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); in efx_xmit_done()
790 if (tx_queue->read_count == tx_queue->old_write_count) { in efx_xmit_done()
792 tx_queue->empty_read_count = in efx_xmit_done()
793 tx_queue->read_count | EFX_EMPTY_COUNT_VALID; in efx_xmit_done()
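The wake-up check in efx_xmit_done() computes the fill level of both partner queues (they share one core txq) and re-wakes the core queue only when the deeper of the two has drained below a threshold; the unsigned subtraction stays correct across counter wrap. Just after, the empty_read_count latch (read_count | EFX_EMPTY_COUNT_VALID) records the exact point at which the ring went empty. A toy model of the wake decision, with a made-up threshold:

#include <stdio.h>

struct q { unsigned int insert_count, read_count; };

int main(void)
{
	struct q a = { .insert_count = 900, .read_count = 880 };
	struct q b = { .insert_count = 420, .read_count = 400 };
	unsigned int wake_thresh = 128; /* hypothetical threshold */
	unsigned int fill_a = a.insert_count - a.read_count;
	unsigned int fill_b = b.insert_count - b.read_count;
	unsigned int fill_level = fill_a > fill_b ? fill_a : fill_b;

	if (fill_level <= wake_thresh)
		printf("wake core txq (fill %u)\n", fill_level);
	else
		printf("stay stopped (fill %u)\n", fill_level);
	return 0;
}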
798 static unsigned int efx_tx_cb_page_count(struct efx_tx_queue *tx_queue) in efx_tx_cb_page_count() argument
800 return DIV_ROUND_UP(tx_queue->ptr_mask + 1, PAGE_SIZE >> EFX_TX_CB_ORDER); in efx_tx_cb_page_count()
803 int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) in efx_probe_tx_queue() argument
805 struct efx_nic *efx = tx_queue->efx; in efx_probe_tx_queue()
812 tx_queue->ptr_mask = entries - 1; in efx_probe_tx_queue()
816 tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask); in efx_probe_tx_queue()
819 tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer), in efx_probe_tx_queue()
821 if (!tx_queue->buffer) in efx_probe_tx_queue()
824 tx_queue->cb_page = kcalloc(efx_tx_cb_page_count(tx_queue), in efx_probe_tx_queue()
825 sizeof(tx_queue->cb_page[0]), GFP_KERNEL); in efx_probe_tx_queue()
826 if (!tx_queue->cb_page) { in efx_probe_tx_queue()
832 rc = efx_nic_probe_tx(tx_queue); in efx_probe_tx_queue()
839 kfree(tx_queue->cb_page); in efx_probe_tx_queue()
840 tx_queue->cb_page = NULL; in efx_probe_tx_queue()
842 kfree(tx_queue->buffer); in efx_probe_tx_queue()
843 tx_queue->buffer = NULL; in efx_probe_tx_queue()
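efx_probe_tx_queue() sizes the ring as a power of two so that ptr_mask = entries - 1 works as a cheap modulo in every index calculation seen above. A sketch of the sizing, with the round-up modeled by hand and a made-up requested size (the real code uses the kernel's helpers):

#include <stdio.h>

/* round up to a power of two, as roundup_pow_of_two() does */
static unsigned int roundup_pow2(unsigned int v)
{
	unsigned int e = 1;

	while (e < v)
		e <<= 1;
	return e;
}

int main(void)
{
	unsigned int txq_entries = 1000; /* hypothetical requested size */
	unsigned int entries = roundup_pow2(txq_entries);
	unsigned int ptr_mask = entries - 1;

	/* a power-of-two ring lets "& ptr_mask" replace "% entries" */
	printf("entries %u, ptr_mask 0x%x, idx 1025 -> slot %u\n",
	       entries, ptr_mask, 1025u & ptr_mask);
	return 0;
}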
847 void efx_init_tx_queue(struct efx_tx_queue *tx_queue) in efx_init_tx_queue() argument
849 struct efx_nic *efx = tx_queue->efx; in efx_init_tx_queue()
852 "initialising TX queue %d\n", tx_queue->queue); in efx_init_tx_queue()
854 tx_queue->insert_count = 0; in efx_init_tx_queue()
855 tx_queue->write_count = 0; in efx_init_tx_queue()
856 tx_queue->packet_write_count = 0; in efx_init_tx_queue()
857 tx_queue->old_write_count = 0; in efx_init_tx_queue()
858 tx_queue->read_count = 0; in efx_init_tx_queue()
859 tx_queue->old_read_count = 0; in efx_init_tx_queue()
860 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID; in efx_init_tx_queue()
861 tx_queue->xmit_more_available = false; in efx_init_tx_queue()
862 tx_queue->timestamping = (efx_ptp_use_mac_tx_timestamps(efx) && in efx_init_tx_queue()
863 tx_queue->channel == efx_ptp_channel(efx)); in efx_init_tx_queue()
864 tx_queue->completed_desc_ptr = tx_queue->ptr_mask; in efx_init_tx_queue()
865 tx_queue->completed_timestamp_major = 0; in efx_init_tx_queue()
866 tx_queue->completed_timestamp_minor = 0; in efx_init_tx_queue()
871 tx_queue->handle_tso = efx_enqueue_skb_tso; in efx_init_tx_queue()
874 efx_nic_init_tx(tx_queue); in efx_init_tx_queue()
876 tx_queue->initialised = true; in efx_init_tx_queue()
879 void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) in efx_fini_tx_queue() argument
883 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, in efx_fini_tx_queue()
884 "shutting down TX queue %d\n", tx_queue->queue); in efx_fini_tx_queue()
886 if (!tx_queue->buffer) in efx_fini_tx_queue()
890 while (tx_queue->read_count != tx_queue->write_count) { in efx_fini_tx_queue()
892 buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask]; in efx_fini_tx_queue()
893 efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl); in efx_fini_tx_queue()
895 ++tx_queue->read_count; in efx_fini_tx_queue()
897 tx_queue->xmit_more_available = false; in efx_fini_tx_queue()
898 netdev_tx_reset_queue(tx_queue->core_txq); in efx_fini_tx_queue()
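efx_fini_tx_queue() drains whatever the hardware never completed by walking read_count forward to write_count and freeing each buffer, then resets the core txq's byte-queue-limit state with netdev_tx_reset_queue(). A toy model of the drain:

#include <stdio.h>

#define RING_SIZE 8 /* illustrative */

int main(void)
{
	unsigned int ptr_mask = RING_SIZE - 1;
	unsigned int read_count = 5, write_count = 9; /* 4 still queued */

	/* teardown: free everything the hardware never completed */
	while (read_count != write_count) {
		printf("free slot %u\n", read_count & ptr_mask);
		++read_count;
	}
	return 0;
}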
901 void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) in efx_remove_tx_queue() argument
905 if (!tx_queue->buffer) in efx_remove_tx_queue()
908 netif_dbg(tx_queue->efx, drv, tx_queue->efx->net_dev, in efx_remove_tx_queue()
909 "destroying TX queue %d\n", tx_queue->queue); in efx_remove_tx_queue()
910 efx_nic_remove_tx(tx_queue); in efx_remove_tx_queue()
912 if (tx_queue->cb_page) { in efx_remove_tx_queue()
913 for (i = 0; i < efx_tx_cb_page_count(tx_queue); i++) in efx_remove_tx_queue()
914 efx_nic_free_buffer(tx_queue->efx, in efx_remove_tx_queue()
915 &tx_queue->cb_page[i]); in efx_remove_tx_queue()
916 kfree(tx_queue->cb_page); in efx_remove_tx_queue()
917 tx_queue->cb_page = NULL; in efx_remove_tx_queue()
920 kfree(tx_queue->buffer); in efx_remove_tx_queue()
921 tx_queue->buffer = NULL; in efx_remove_tx_queue()