Lines matching refs: tx_ring

All matches below are from the Amazon ENA Ethernet driver (ena_netdev.c). Each entry shows the source line number, the matching code, and the enclosing function; a trailing "local" or "argument" marks the line where tx_ring is declared.

176 		txr = &adapter->tx_ring[i];  in ena_init_io_rings()
210 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_setup_tx_resources() local
214 if (tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
220 size = sizeof(struct ena_tx_buffer) * tx_ring->ring_size; in ena_setup_tx_resources()
223 tx_ring->tx_buffer_info = vzalloc_node(size, node); in ena_setup_tx_resources()
224 if (!tx_ring->tx_buffer_info) { in ena_setup_tx_resources()
225 tx_ring->tx_buffer_info = vzalloc(size); in ena_setup_tx_resources()
226 if (!tx_ring->tx_buffer_info) in ena_setup_tx_resources()
230 size = sizeof(u16) * tx_ring->ring_size; in ena_setup_tx_resources()
231 tx_ring->free_ids = vzalloc_node(size, node); in ena_setup_tx_resources()
232 if (!tx_ring->free_ids) { in ena_setup_tx_resources()
233 tx_ring->free_ids = vzalloc(size); in ena_setup_tx_resources()
234 if (!tx_ring->free_ids) in ena_setup_tx_resources()
238 size = tx_ring->tx_max_header_size; in ena_setup_tx_resources()
239 tx_ring->push_buf_intermediate_buf = vzalloc_node(size, node); in ena_setup_tx_resources()
240 if (!tx_ring->push_buf_intermediate_buf) { in ena_setup_tx_resources()
241 tx_ring->push_buf_intermediate_buf = vzalloc(size); in ena_setup_tx_resources()
242 if (!tx_ring->push_buf_intermediate_buf) in ena_setup_tx_resources()
247 for (i = 0; i < tx_ring->ring_size; i++) in ena_setup_tx_resources()
248 tx_ring->free_ids[i] = i; in ena_setup_tx_resources()
251 memset(&tx_ring->tx_stats, 0x0, sizeof(tx_ring->tx_stats)); in ena_setup_tx_resources()
253 tx_ring->next_to_use = 0; in ena_setup_tx_resources()
254 tx_ring->next_to_clean = 0; in ena_setup_tx_resources()
255 tx_ring->cpu = ena_irq->cpu; in ena_setup_tx_resources()
259 vfree(tx_ring->free_ids); in ena_setup_tx_resources()
260 tx_ring->free_ids = NULL; in ena_setup_tx_resources()
262 vfree(tx_ring->tx_buffer_info); in ena_setup_tx_resources()
263 tx_ring->tx_buffer_info = NULL; in ena_setup_tx_resources()
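
The ena_setup_tx_resources() matches above show the driver's recurring allocation pattern: each per-ring array (tx_buffer_info, free_ids, push_buf_intermediate_buf) is first requested from the ring's preferred NUMA node with vzalloc_node() and only falls back to a plain vzalloc() if that fails; lines 247-248 then seed free_ids with the identity mapping so every descriptor index starts out available. A minimal hedged sketch of the fallback (the helper name is illustrative, not a driver function):

    #include <linux/vmalloc.h>

    /* Prefer a NUMA-local, zeroed allocation; fall back to any node. */
    static void *alloc_ring_array(size_t size, int node)
    {
            void *p = vzalloc_node(size, node); /* NUMA-local attempt */

            if (!p)
                    p = vzalloc(size);          /* any-node fallback */
            return p;                           /* NULL: both failed */
    }

The error path at lines 259-263 unwinds in reverse order and NULLs each pointer so a later ena_free_tx_resources() cannot double-free.
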
276 struct ena_ring *tx_ring = &adapter->tx_ring[qid]; in ena_free_tx_resources() local
278 vfree(tx_ring->tx_buffer_info); in ena_free_tx_resources()
279 tx_ring->tx_buffer_info = NULL; in ena_free_tx_resources()
281 vfree(tx_ring->free_ids); in ena_free_tx_resources()
282 tx_ring->free_ids = NULL; in ena_free_tx_resources()
284 vfree(tx_ring->push_buf_intermediate_buf); in ena_free_tx_resources()
285 tx_ring->push_buf_intermediate_buf = NULL; in ena_free_tx_resources()
623 static void ena_unmap_tx_skb(struct ena_ring *tx_ring, in ena_unmap_tx_skb() argument
637 dma_unmap_single(tx_ring->dev, in ena_unmap_tx_skb()
647 dma_unmap_page(tx_ring->dev, dma_unmap_addr(ena_buf, paddr), in ena_unmap_tx_skb()
656 static void ena_free_tx_bufs(struct ena_ring *tx_ring) in ena_free_tx_bufs() argument
661 for (i = 0; i < tx_ring->ring_size; i++) { in ena_free_tx_bufs()
662 struct ena_tx_buffer *tx_info = &tx_ring->tx_buffer_info[i]; in ena_free_tx_bufs()
668 netdev_notice(tx_ring->netdev, in ena_free_tx_bufs()
670 tx_ring->qid, i); in ena_free_tx_bufs()
673 netdev_dbg(tx_ring->netdev, in ena_free_tx_bufs()
675 tx_ring->qid, i); in ena_free_tx_bufs()
678 ena_unmap_tx_skb(tx_ring, tx_info); in ena_free_tx_bufs()
682 netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev, in ena_free_tx_bufs()
683 tx_ring->qid)); in ena_free_tx_bufs()
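
ena_unmap_tx_skb() (lines 623-647) releases the two kinds of DMA mappings a queued skb can hold: the linear head, mapped with dma_map_single(), and the page fragments, mapped with skb_frag_dma_map(); ena_free_tx_bufs() applies it to every slot that still holds an skb at teardown, logging leftovers via netdev_notice()/netdev_dbg(). A hedged, simplified sketch of the unmap walk (struct tx_dma_buf and the head flag are illustrative stand-ins for the driver's ena_buf bookkeeping):

    #include <linux/dma-mapping.h>

    struct tx_dma_buf {                 /* hypothetical per-buffer record */
            dma_addr_t paddr;
            u32 len;
    };

    /* Entry 0 is the linear head (if mapped), the rest are page frags;
     * each must be released with the call matching how it was mapped.
     */
    static void unmap_tx_bufs(struct device *dev, struct tx_dma_buf *bufs,
                              int count, bool head_is_mapped)
    {
            int i = 0;

            if (head_is_mapped && count) {
                    dma_unmap_single(dev, bufs[0].paddr, bufs[0].len,
                                     DMA_TO_DEVICE);
                    i = 1;
            }
            for (; i < count; i++)
                    dma_unmap_page(dev, bufs[i].paddr, bufs[i].len,
                                   DMA_TO_DEVICE);
    }
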
688 struct ena_ring *tx_ring; in ena_free_all_tx_bufs() local
692 tx_ring = &adapter->tx_ring[i]; in ena_free_all_tx_bufs()
693 ena_free_tx_bufs(tx_ring); in ena_free_all_tx_bufs()
726 static int validate_tx_req_id(struct ena_ring *tx_ring, u16 req_id) in validate_tx_req_id() argument
730 if (likely(req_id < tx_ring->ring_size)) { in validate_tx_req_id()
731 tx_info = &tx_ring->tx_buffer_info[req_id]; in validate_tx_req_id()
737 netif_err(tx_ring->adapter, tx_done, tx_ring->netdev, in validate_tx_req_id()
740 netif_err(tx_ring->adapter, tx_done, tx_ring->netdev, in validate_tx_req_id()
743 u64_stats_update_begin(&tx_ring->syncp); in validate_tx_req_id()
744 tx_ring->tx_stats.bad_req_id++; in validate_tx_req_id()
745 u64_stats_update_end(&tx_ring->syncp); in validate_tx_req_id()
748 tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID; in validate_tx_req_id()
749 set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags); in validate_tx_req_id()
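
validate_tx_req_id() is the defensive half of the completion path: a req_id handed back by the device must index a live buffer, and anything else is treated as fatal device state rather than a recoverable error. A hedged sketch built from the matches above (the skb test is an assumption about struct ena_tx_buffer):

    /* Reject out-of-range or already-freed request ids and arm a reset. */
    static int validate_req_id_sketch(struct ena_ring *tx_ring, u16 req_id)
    {
            if (likely(req_id < tx_ring->ring_size) &&
                tx_ring->tx_buffer_info[req_id].skb)
                    return 0;

            u64_stats_update_begin(&tx_ring->syncp);
            tx_ring->tx_stats.bad_req_id++;
            u64_stats_update_end(&tx_ring->syncp);

            /* a bogus id means device/driver state diverged: reset */
            tx_ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
            set_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags);
            return -EFAULT;
    }
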
753 static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget) in ena_clean_tx_irq() argument
764 next_to_clean = tx_ring->next_to_clean; in ena_clean_tx_irq()
765 txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->qid); in ena_clean_tx_irq()
771 rc = ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, in ena_clean_tx_irq()
776 rc = validate_tx_req_id(tx_ring, req_id); in ena_clean_tx_irq()
780 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_clean_tx_irq()
789 ena_unmap_tx_skb(tx_ring, tx_info); in ena_clean_tx_irq()
791 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
792 "tx_poll: q %d skb %p completed\n", tx_ring->qid, in ena_clean_tx_irq()
800 tx_ring->free_ids[next_to_clean] = req_id; in ena_clean_tx_irq()
802 tx_ring->ring_size); in ena_clean_tx_irq()
805 tx_ring->next_to_clean = next_to_clean; in ena_clean_tx_irq()
806 ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done); in ena_clean_tx_irq()
807 ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq); in ena_clean_tx_irq()
811 netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev, in ena_clean_tx_irq()
813 tx_ring->qid, tx_pkts); in ena_clean_tx_irq()
820 above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_clean_tx_irq()
825 ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_clean_tx_irq()
828 test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags)) { in ena_clean_tx_irq()
830 u64_stats_update_begin(&tx_ring->syncp); in ena_clean_tx_irq()
831 tx_ring->tx_stats.queue_wakeup++; in ena_clean_tx_irq()
832 u64_stats_update_end(&tx_ring->syncp); in ena_clean_tx_irq()
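
ena_clean_tx_irq() drains completions up to a budget: each req_id popped from the completion queue is validated, its skb unmapped, and the id pushed back into free_ids[] at next_to_clean before the index wraps; the SQ is then acked and, per lines 820-832, a stopped queue is rewoken once enough space has returned. A hedged condensation (skb freeing and byte/packet accounting are elided, and the real loop acks completed descriptor counts rather than one per packet):

    /* Pop completed req_ids, recycle them, ack the submission queue. */
    static int clean_tx_sketch(struct ena_ring *tx_ring, u32 budget)
    {
            u16 next_to_clean = tx_ring->next_to_clean;
            u32 done = 0;
            u16 req_id;

            while (done < budget &&
                   !ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq,
                                               &req_id)) {
                    if (validate_tx_req_id(tx_ring, req_id))
                            break;

                    ena_unmap_tx_skb(tx_ring,
                                     &tx_ring->tx_buffer_info[req_id]);

                    /* ring sizes are powers of two, so the wrap
                     * (ENA_TX_RING_IDX_NEXT) is a mask, not a modulo */
                    tx_ring->free_ids[next_to_clean] = req_id;
                    next_to_clean = (next_to_clean + 1) &
                                    (tx_ring->ring_size - 1);
                    done++;
            }

            tx_ring->next_to_clean = next_to_clean;
            ena_com_comp_ack(tx_ring->ena_com_io_sq, done);
            return done;
    }
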
1187 static void ena_unmask_interrupt(struct ena_ring *tx_ring, in ena_unmask_interrupt() argument
1200 tx_ring->smoothed_interval, in ena_unmask_interrupt()
1210 static void ena_update_ring_numa_node(struct ena_ring *tx_ring, in ena_update_ring_numa_node() argument
1217 if (likely(tx_ring->cpu == cpu)) in ena_update_ring_numa_node()
1224 ena_com_update_numa_node(tx_ring->ena_com_io_cq, numa_node); in ena_update_ring_numa_node()
1228 tx_ring->cpu = cpu; in ena_update_ring_numa_node()
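
ena_update_ring_numa_node() keeps host-side locality aligned with the softirq: whenever the poll runs on a CPU other than the one recorded in the ring, the completion queues are retargeted at that CPU's NUMA node. A hedged sketch of the idea (updating the RX ring alongside TX is an assumption; only the TX call appears in the matches):

    #include <linux/smp.h>
    #include <linux/topology.h>

    /* Retarget the CQs when the poll has migrated to another CPU. */
    static void update_ring_numa_sketch(struct ena_ring *tx_ring,
                                        struct ena_ring *rx_ring)
    {
            int cpu = get_cpu();        /* pin: no migration meanwhile */
            int node = cpu_to_node(cpu);

            if (tx_ring->cpu != cpu && node != NUMA_NO_NODE) {
                    ena_com_update_numa_node(tx_ring->ena_com_io_cq, node);
                    ena_com_update_numa_node(rx_ring->ena_com_io_cq, node);
                    tx_ring->cpu = cpu;
                    rx_ring->cpu = cpu;
            }
            put_cpu();
    }
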
1239 struct ena_ring *tx_ring, *rx_ring; in ena_io_poll() local
1247 tx_ring = ena_napi->tx_ring; in ena_io_poll()
1250 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER; in ena_io_poll()
1252 if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || in ena_io_poll()
1253 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags)) { in ena_io_poll()
1258 tx_work_done = ena_clean_tx_irq(tx_ring, tx_budget); in ena_io_poll()
1264 if (unlikely(!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) || in ena_io_poll()
1265 test_bit(ENA_FLAG_TRIGGER_RESET, &tx_ring->adapter->flags))) { in ena_io_poll()
1282 ena_unmask_interrupt(tx_ring, rx_ring); in ena_io_poll()
1285 ena_update_ring_numa_node(tx_ring, rx_ring); in ena_io_poll()
1292 u64_stats_update_begin(&tx_ring->syncp); in ena_io_poll()
1293 tx_ring->tx_stats.napi_comp += napi_comp_call; in ena_io_poll()
1294 tx_ring->tx_stats.tx_poll++; in ena_io_poll()
1295 u64_stats_update_end(&tx_ring->syncp); in ena_io_poll()
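
The ena_io_poll() matches outline the NAPI contract: the TX budget is derived from the ring size (ring_size / ENA_TX_POLL_BUDGET_DIVIDER) rather than the NAPI budget, the poll bails out early when the device is down or a reset is pending, and once work falls below budget it completes NAPI, unmasks the IRQ, and refreshes NUMA affinity. A hedged, TX-only condensation (the real function also cleans RX and keys napi_complete_done() on the RX work count):

    #include <linux/netdevice.h>

    static int io_poll_sketch(struct napi_struct *napi, int budget)
    {
            struct ena_napi *ena_napi =
                    container_of(napi, struct ena_napi, napi);
            struct ena_ring *tx_ring = ena_napi->tx_ring;
            u32 tx_budget = tx_ring->ring_size / ENA_TX_POLL_BUDGET_DIVIDER;
            int tx_work;

            if (!test_bit(ENA_FLAG_DEV_UP, &tx_ring->adapter->flags) ||
                test_bit(ENA_FLAG_TRIGGER_RESET,
                         &tx_ring->adapter->flags)) {
                    napi_complete_done(napi, 0); /* down or resetting */
                    return 0;
            }

            tx_work = ena_clean_tx_irq(tx_ring, tx_budget);

            if (tx_work < tx_budget && napi_complete_done(napi, 0)) {
                    ena_unmask_interrupt(tx_ring, ena_napi->rx_ring);
                    ena_update_ring_numa_node(tx_ring, ena_napi->rx_ring);
                    return 0;
            }
            return budget;          /* more work: stay in polling mode */
    }
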
1321 ena_napi->tx_ring->first_interrupt = true; in ena_intr_msix_io()
1549 napi->tx_ring = &adapter->tx_ring[i]; in ena_init_napi()
1628 struct ena_ring *tx_ring; in ena_create_io_tx_queue() local
1635 tx_ring = &adapter->tx_ring[qid]; in ena_create_io_tx_queue()
1645 ctx.queue_size = tx_ring->ring_size; in ena_create_io_tx_queue()
1646 ctx.numa_node = cpu_to_node(tx_ring->cpu); in ena_create_io_tx_queue()
1657 &tx_ring->ena_com_io_sq, in ena_create_io_tx_queue()
1658 &tx_ring->ena_com_io_cq); in ena_create_io_tx_queue()
1667 ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node); in ena_create_io_tx_queue()
1768 adapter->tx_ring[i].ring_size = new_tx_size; in set_io_rings_size()
1832 cur_tx_ring_size = adapter->tx_ring[0].ring_size; in create_queues_with_size_backoff()
1906 ena_unmask_interrupt(&adapter->tx_ring[i], in ena_up()
2106 static int ena_check_and_linearize_skb(struct ena_ring *tx_ring, in ena_check_and_linearize_skb() argument
2114 if (num_frags < tx_ring->sgl_size) in ena_check_and_linearize_skb()
2117 if ((num_frags == tx_ring->sgl_size) && in ena_check_and_linearize_skb()
2118 (header_len < tx_ring->tx_max_header_size)) in ena_check_and_linearize_skb()
2121 u64_stats_update_begin(&tx_ring->syncp); in ena_check_and_linearize_skb()
2122 tx_ring->tx_stats.linearize++; in ena_check_and_linearize_skb()
2123 u64_stats_update_end(&tx_ring->syncp); in ena_check_and_linearize_skb()
2127 u64_stats_update_begin(&tx_ring->syncp); in ena_check_and_linearize_skb()
2128 tx_ring->tx_stats.linearize_failed++; in ena_check_and_linearize_skb()
2129 u64_stats_update_end(&tx_ring->syncp); in ena_check_and_linearize_skb()
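
ena_check_and_linearize_skb() decides whether an skb's fragment list fits the device's scatter-gather limit: fewer than sgl_size frags is sent as-is, exactly sgl_size is still fine when the head is short enough to ride in the pushed header, and anything larger is flattened with skb_linearize(), with both the attempt and any failure counted. A hedged sketch mirroring the matches above:

    #include <linux/skbuff.h>

    static int check_and_linearize_sketch(struct ena_ring *tx_ring,
                                          struct sk_buff *skb)
    {
            int num_frags = skb_shinfo(skb)->nr_frags;
            int rc;

            if (num_frags < tx_ring->sgl_size)
                    return 0;
            if (num_frags == tx_ring->sgl_size &&
                skb_headlen(skb) < tx_ring->tx_max_header_size)
                    return 0;       /* head fits in the pushed header */

            u64_stats_update_begin(&tx_ring->syncp);
            tx_ring->tx_stats.linearize++;
            u64_stats_update_end(&tx_ring->syncp);

            rc = skb_linearize(skb); /* copy every frag into the head */
            if (unlikely(rc)) {
                    u64_stats_update_begin(&tx_ring->syncp);
                    tx_ring->tx_stats.linearize_failed++;
                    u64_stats_update_end(&tx_ring->syncp);
            }
            return rc;
    }
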
2135 static int ena_tx_map_skb(struct ena_ring *tx_ring, in ena_tx_map_skb() argument
2141 struct ena_adapter *adapter = tx_ring->adapter; in ena_tx_map_skb()
2153 if (tx_ring->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) { in ena_tx_map_skb()
2164 push_len = min_t(u32, skb->len, tx_ring->tx_max_header_size); in ena_tx_map_skb()
2166 tx_ring->push_buf_intermediate_buf); in ena_tx_map_skb()
2169 u64_stats_update_begin(&tx_ring->syncp); in ena_tx_map_skb()
2170 tx_ring->tx_stats.llq_buffer_copy++; in ena_tx_map_skb()
2171 u64_stats_update_end(&tx_ring->syncp); in ena_tx_map_skb()
2178 tx_ring->tx_max_header_size); in ena_tx_map_skb()
2186 dma = dma_map_single(tx_ring->dev, skb->data + push_len, in ena_tx_map_skb()
2188 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in ena_tx_map_skb()
2213 dma = skb_frag_dma_map(tx_ring->dev, frag, delta, in ena_tx_map_skb()
2215 if (unlikely(dma_mapping_error(tx_ring->dev, dma))) in ena_tx_map_skb()
2228 u64_stats_update_begin(&tx_ring->syncp); in ena_tx_map_skb()
2229 tx_ring->tx_stats.dma_mapping_err++; in ena_tx_map_skb()
2230 u64_stats_update_end(&tx_ring->syncp); in ena_tx_map_skb()
2236 ena_unmap_tx_skb(tx_ring, tx_info); in ena_tx_map_skb()
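
In ena_tx_map_skb(), the LLQ ("device placement") branch copies up to tx_max_header_size bytes of the packet into push_buf_intermediate_buf so they can be written straight into device memory, counting the copy in llq_buffer_copy; only the remainder of the linear head is DMA-mapped, frags follow via skb_frag_dma_map(), and any mapping error bumps dma_mapping_err and unwinds through ena_unmap_tx_skb(). A hedged sketch of the head handling (the helper and its return contract are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    /* Copy the pushable prefix, DMA-map whatever head remains. */
    static int map_skb_head_llq(struct ena_ring *tx_ring,
                                struct sk_buff *skb,
                                void **push_hdr, dma_addr_t *dma)
    {
            u32 push_len = min_t(u32, skb->len,
                                 tx_ring->tx_max_header_size);

            memcpy(tx_ring->push_buf_intermediate_buf, skb->data, push_len);
            *push_hdr = tx_ring->push_buf_intermediate_buf;

            if (skb_headlen(skb) <= push_len)
                    return 0;               /* head was fully pushed */

            *dma = dma_map_single(tx_ring->dev, skb->data + push_len,
                                  skb_headlen(skb) - push_len,
                                  DMA_TO_DEVICE);
            if (unlikely(dma_mapping_error(tx_ring->dev, *dma))) {
                    u64_stats_update_begin(&tx_ring->syncp);
                    tx_ring->tx_stats.dma_mapping_err++;
                    u64_stats_update_end(&tx_ring->syncp);
                    return -EINVAL;
            }
            return 0;
    }
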
2247 struct ena_ring *tx_ring; in ena_start_xmit() local
2256 tx_ring = &adapter->tx_ring[qid]; in ena_start_xmit()
2259 rc = ena_check_and_linearize_skb(tx_ring, skb); in ena_start_xmit()
2265 next_to_use = tx_ring->next_to_use; in ena_start_xmit()
2266 req_id = tx_ring->free_ids[next_to_use]; in ena_start_xmit()
2267 tx_info = &tx_ring->tx_buffer_info[req_id]; in ena_start_xmit()
2272 rc = ena_tx_map_skb(tx_ring, tx_info, skb, &push_hdr, &header_len); in ena_start_xmit()
2286 if (unlikely(ena_com_is_doorbell_needed(tx_ring->ena_com_io_sq, &ena_tx_ctx))) { in ena_start_xmit()
2290 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); in ena_start_xmit()
2294 rc = ena_com_prepare_tx(tx_ring->ena_com_io_sq, &ena_tx_ctx, in ena_start_xmit()
2308 u64_stats_update_begin(&tx_ring->syncp); in ena_start_xmit()
2309 tx_ring->tx_stats.prepare_ctx_err++; in ena_start_xmit()
2310 u64_stats_update_end(&tx_ring->syncp); in ena_start_xmit()
2318 u64_stats_update_begin(&tx_ring->syncp); in ena_start_xmit()
2319 tx_ring->tx_stats.cnt++; in ena_start_xmit()
2320 tx_ring->tx_stats.bytes += skb->len; in ena_start_xmit()
2321 u64_stats_update_end(&tx_ring->syncp); in ena_start_xmit()
2327 tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, in ena_start_xmit()
2328 tx_ring->ring_size); in ena_start_xmit()
2334 if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_start_xmit()
2335 tx_ring->sgl_size + 2))) { in ena_start_xmit()
2340 u64_stats_update_begin(&tx_ring->syncp); in ena_start_xmit()
2341 tx_ring->tx_stats.queue_stop++; in ena_start_xmit()
2342 u64_stats_update_end(&tx_ring->syncp); in ena_start_xmit()
2354 if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq, in ena_start_xmit()
2357 u64_stats_update_begin(&tx_ring->syncp); in ena_start_xmit()
2358 tx_ring->tx_stats.queue_wakeup++; in ena_start_xmit()
2359 u64_stats_update_end(&tx_ring->syncp); in ena_start_xmit()
2367 ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); in ena_start_xmit()
2368 u64_stats_update_begin(&tx_ring->syncp); in ena_start_xmit()
2369 tx_ring->tx_stats.doorbells++; in ena_start_xmit()
2370 u64_stats_update_end(&tx_ring->syncp); in ena_start_xmit()
2376 ena_unmap_tx_skb(tx_ring, tx_info); in ena_start_xmit()
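
ena_start_xmit() ties the path together: the descriptor is prepared with ena_com_prepare_tx(), the doorbell is written early when ena_com_is_doorbell_needed() reports the LLQ bounce buffer is full, and after queuing the packet the driver stops the queue whenever fewer than sgl_size + 2 descriptors remain (worst case for the next packet: every frag plus header plus a spare). The wake side is hysteretic: after stopping, it re-checks space and wakes itself if a completion has raced in, so the queue never stalls waiting for an interrupt that already fired. A hedged sketch (ENA_TX_WAKEUP_THRESH is the driver's wake threshold, assumed here):

    #include <linux/netdevice.h>

    static void stop_or_wake_sketch(struct ena_ring *tx_ring,
                                    struct netdev_queue *txq)
    {
            if (likely(ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
                                                    tx_ring->sgl_size + 2)))
                    return;

            netif_tx_stop_queue(txq);   /* no room for a worst case */
            u64_stats_update_begin(&tx_ring->syncp);
            tx_ring->tx_stats.queue_stop++;
            u64_stats_update_end(&tx_ring->syncp);

            smp_mb();   /* stop must be visible before the re-check */

            /* a completion may have freed space since the first check */
            if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
                                             ENA_TX_WAKEUP_THRESH)) {
                    netif_tx_wake_queue(txq);
                    u64_stats_update_begin(&tx_ring->syncp);
                    tx_ring->tx_stats.queue_wakeup++;
                    u64_stats_update_end(&tx_ring->syncp);
            }
    }
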
2490 struct ena_ring *rx_ring, *tx_ring; in ena_get_stats64() local
2501 tx_ring = &adapter->tx_ring[i]; in ena_get_stats64()
2504 start = u64_stats_fetch_begin_irq(&tx_ring->syncp); in ena_get_stats64()
2505 packets = tx_ring->tx_stats.cnt; in ena_get_stats64()
2506 bytes = tx_ring->tx_stats.bytes; in ena_get_stats64()
2507 } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start)); in ena_get_stats64()
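
The ena_get_stats64() matches show the reader half of the u64_stats protocol; the writer half appears throughout the TX path above. Paired, the pattern looks like this (the point is that a 32-bit reader retries instead of locking, so it can never observe a torn 64-bit counter):

    #include <linux/u64_stats_sync.h>

    /* Writer (single writer per ring, e.g. the xmit path): */
    u64_stats_update_begin(&tx_ring->syncp);
    tx_ring->tx_stats.cnt++;
    tx_ring->tx_stats.bytes += skb->len;
    u64_stats_update_end(&tx_ring->syncp);

    /* Reader: retry until the sequence count is stable across the reads. */
    unsigned int start;
    u64 packets, bytes;

    do {
            start   = u64_stats_fetch_begin_irq(&tx_ring->syncp);
            packets = tx_ring->tx_stats.cnt;
            bytes   = tx_ring->tx_stats.bytes;
    } while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
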
2872 struct ena_ring *tx_ring) in check_missing_comp_in_tx_queue() argument
2879 for (i = 0; i < tx_ring->ring_size; i++) { in check_missing_comp_in_tx_queue()
2880 tx_buf = &tx_ring->tx_buffer_info[i]; in check_missing_comp_in_tx_queue()
2887 if (unlikely(!tx_ring->first_interrupt && time_is_before_jiffies(last_jiffies + in check_missing_comp_in_tx_queue()
2894 tx_ring->qid); in check_missing_comp_in_tx_queue()
2906 tx_ring->qid, i); in check_missing_comp_in_tx_queue()
2924 u64_stats_update_begin(&tx_ring->syncp); in check_missing_comp_in_tx_queue()
2925 tx_ring->tx_stats.missed_tx = missed_tx; in check_missing_comp_in_tx_queue()
2926 u64_stats_update_end(&tx_ring->syncp); in check_missing_comp_in_tx_queue()
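
check_missing_comp_in_tx_queue() is the watchdog: it scans every tx_buffer_info slot for skbs whose completion is overdue, distinguishing a ring that never took its first interrupt (a dead MSI-X vector) from individually lost completions, and publishes the count as the missed_tx stat. A hedged sketch of the per-slot scan (last_jiffies is assumed to be stamped at xmit time, as the matches suggest):

    #include <linux/jiffies.h>

    /* Count in-flight TX buffers whose completion is overdue. */
    static u32 count_missed_tx(struct ena_ring *tx_ring,
                               unsigned long timeout)
    {
            u32 missed = 0;
            int i;

            for (i = 0; i < tx_ring->ring_size; i++) {
                    struct ena_tx_buffer *buf =
                            &tx_ring->tx_buffer_info[i];

                    if (!buf->skb)
                            continue;       /* slot not in flight */
                    if (time_is_before_jiffies(buf->last_jiffies + timeout))
                            missed++;       /* completion overdue */
            }

            u64_stats_update_begin(&tx_ring->syncp);
            tx_ring->tx_stats.missed_tx = missed;
            u64_stats_update_end(&tx_ring->syncp);
            return missed;
    }
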
2933 struct ena_ring *tx_ring; in check_for_missing_completions() local
2952 tx_ring = &adapter->tx_ring[i]; in check_for_missing_completions()
2955 rc = check_missing_comp_in_tx_queue(adapter, tx_ring); in check_for_missing_completions()