1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
16 * i40e_fdir - Generate a Flow Director descriptor based on fdata
26 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_fdir()
31 i = tx_ring->next_to_use; in i40e_fdir()
35 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_fdir()
38 (fdata->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT); in i40e_fdir()
41 (fdata->flex_off << I40E_TXD_FLTR_QW0_FLEXOFF_SHIFT); in i40e_fdir()
44 (fdata->pctype << I40E_TXD_FLTR_QW0_PCTYPE_SHIFT); in i40e_fdir()
48 ((u32)(fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id) << in i40e_fdir()
60 (fdata->dest_ctl << I40E_TXD_FLTR_QW1_DEST_SHIFT); in i40e_fdir()
63 (fdata->fd_status << I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT); in i40e_fdir()
65 if (fdata->cnt_index) { in i40e_fdir()
68 ((u32)fdata->cnt_index << in i40e_fdir()
72 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); in i40e_fdir()
73 fdir_desc->rsvd = cpu_to_le32(0); in i40e_fdir()
74 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); in i40e_fdir()
75 fdir_desc->fd_id = cpu_to_le32(fdata->fd_id); in i40e_fdir()
80 * i40e_program_fdir_filter - Program a Flow Director filter
82 * @raw_packet: the pre-allocated packet buffer for FDir
102 return -ENOENT; in i40e_program_fdir_filter()
104 tx_ring = vsi->tx_rings[0]; in i40e_program_fdir_filter()
105 dev = tx_ring->dev; in i40e_program_fdir_filter()
108 for (i = I40E_FD_CLEAN_DELAY; I40E_DESC_UNUSED(tx_ring) < 2; i--) { in i40e_program_fdir_filter()
110 return -EAGAIN; in i40e_program_fdir_filter()
120 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
121 first = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
125 i = tx_ring->next_to_use; in i40e_program_fdir_filter()
127 tx_buf = &tx_ring->tx_bi[i]; in i40e_program_fdir_filter()
129 tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0; in i40e_program_fdir_filter()
137 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_program_fdir_filter()
140 tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB; in i40e_program_fdir_filter()
141 tx_buf->raw_buf = (void *)raw_packet; in i40e_program_fdir_filter()
143 tx_desc->cmd_type_offset_bsz = in i40e_program_fdir_filter()
152 first->next_to_watch = tx_desc; in i40e_program_fdir_filter()
154 writel(tx_ring->next_to_use, tx_ring->tail); in i40e_program_fdir_filter()
158 return -1; in i40e_program_fdir_filter()
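
/*
 * A minimal standalone sketch (not driver code) of the ring-space
 * arithmetic behind the I40E_DESC_UNUSED() check used above while
 * waiting for a free descriptor. It assumes the usual Intel-driver
 * definition of that macro (kept in i40e_txrx.h): with producer index
 * next_to_use and consumer index next_to_clean on a ring of `count`
 * slots, one slot is always kept unused so a full ring can be told
 * apart from an empty one.
 */
#include <stdio.h>

static unsigned int ring_desc_unused(unsigned int next_to_clean,
                                     unsigned int next_to_use,
                                     unsigned int count)
{
        return ((next_to_clean > next_to_use) ? 0 : count) +
               next_to_clean - next_to_use - 1;
}

int main(void)
{
        /* 512-entry ring, producer at 510, consumer at 2: 3 slots free */
        printf("%u\n", ring_desc_unused(2, 510, 512));
        return 0;
}
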
162 * i40e_create_dummy_packet - Constructs dummy packet for HW
173 bool is_vlan = !!data->vlan_tag; in i40e_create_dummy_packet()
186 ip.daddr = data->dst_ip; in i40e_create_dummy_packet()
187 ip.saddr = data->src_ip; in i40e_create_dummy_packet()
193 memcpy(&ipv6.saddr.in6_u.u6_addr32, data->src_ip6, in i40e_create_dummy_packet()
195 memcpy(&ipv6.daddr.in6_u.u6_addr32, data->dst_ip6, in i40e_create_dummy_packet()
200 vlan.h_vlan_TCI = data->vlan_tag; in i40e_create_dummy_packet()
202 eth.h_proto = data->vlan_etype; in i40e_create_dummy_packet()
226 * i40e_create_dummy_udp_packet - helper function to create UDP packet
242 udp->dest = data->dst_port; in i40e_create_dummy_udp_packet()
243 udp->source = data->src_port; in i40e_create_dummy_udp_packet()
247 * i40e_create_dummy_tcp_packet - helper function to create TCP packet
268 tcp->dest = data->dst_port; in i40e_create_dummy_tcp_packet()
269 tcp->source = data->src_port; in i40e_create_dummy_tcp_packet()
273 * i40e_create_dummy_sctp_packet - helper function to create SCTP packet
291 sctp->dest = data->dst_port; in i40e_create_dummy_sctp_packet()
292 sctp->source = data->src_port; in i40e_create_dummy_sctp_packet()
296 * i40e_prepare_fdir_filter - Prepare and program fdir filter
314 if (fd_data->flex_filter) { in i40e_prepare_fdir_filter()
316 __be16 pattern = fd_data->flex_word; in i40e_prepare_fdir_filter()
317 u16 off = fd_data->flex_offset; in i40e_prepare_fdir_filter()
322 if (!!fd_data->vlan_tag) in i40e_prepare_fdir_filter()
328 fd_data->pctype = pctype; in i40e_prepare_fdir_filter()
331 dev_info(&pf->pdev->dev, in i40e_prepare_fdir_filter()
333 fd_data->pctype, fd_data->fd_id, ret); in i40e_prepare_fdir_filter()
335 return -EOPNOTSUPP; in i40e_prepare_fdir_filter()
336 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { in i40e_prepare_fdir_filter()
338 dev_info(&pf->pdev->dev, in i40e_prepare_fdir_filter()
340 fd_data->pctype, fd_data->fd_id); in i40e_prepare_fdir_filter()
342 dev_info(&pf->pdev->dev, in i40e_prepare_fdir_filter()
344 fd_data->pctype, fd_data->fd_id); in i40e_prepare_fdir_filter()
351  * i40e_change_filter_num - Update the IPv4/IPv6 filter counters in i40e_change_filter_num()
369 (*ipv4_filter_num)--; in i40e_change_filter_num()
371 (*ipv6_filter_num)--; in i40e_change_filter_num()
378 * i40e_add_del_fdir_udp - Add/Remove UDP filters
391 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_udp()
397 return -ENOMEM; in i40e_add_del_fdir_udp()
417 i40e_change_filter_num(ipv4, add, &pf->fd_udp4_filter_cnt, in i40e_add_del_fdir_udp()
418 &pf->fd_udp6_filter_cnt); in i40e_add_del_fdir_udp()
426 * i40e_add_del_fdir_tcp - Add/Remove TCPv4 filters
439 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_tcp()
445 return -ENOMEM; in i40e_add_del_fdir_tcp()
464 i40e_change_filter_num(ipv4, add, &pf->fd_tcp4_filter_cnt, in i40e_add_del_fdir_tcp()
465 &pf->fd_tcp6_filter_cnt); in i40e_add_del_fdir_tcp()
468 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && in i40e_add_del_fdir_tcp()
469 I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_add_del_fdir_tcp()
470 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); in i40e_add_del_fdir_tcp()
471 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_add_del_fdir_tcp()
479 * i40e_add_del_fdir_sctp - Add/Remove SCTPv4 Flow Director filters for
493 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_sctp()
499 return -ENOMEM; in i40e_add_del_fdir_sctp()
519 i40e_change_filter_num(ipv4, add, &pf->fd_sctp4_filter_cnt, in i40e_add_del_fdir_sctp()
520 &pf->fd_sctp6_filter_cnt); in i40e_add_del_fdir_sctp()
528 * i40e_add_del_fdir_ip - Add/Remove IPv4 Flow Director filters for
542 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir_ip()
561 return -ENOMEM; in i40e_add_del_fdir_ip()
576 i40e_change_filter_num(ipv4, add, &pf->fd_ip4_filter_cnt, in i40e_add_del_fdir_ip()
577 &pf->fd_ip6_filter_cnt); in i40e_add_del_fdir_ip()
586 * i40e_add_del_fdir - Build raw packets to add/del fdir filter
596 struct i40e_pf *pf = vsi->back; in i40e_add_del_fdir()
599 switch (input->flow_type & ~FLOW_EXT) { in i40e_add_del_fdir()
619 switch (input->ipl4_proto) { in i40e_add_del_fdir()
634 dev_info(&pf->pdev->dev, "Unsupported IPv4 protocol 0x%02x\n", in i40e_add_del_fdir()
635 input->ipl4_proto); in i40e_add_del_fdir()
636 return -EINVAL; in i40e_add_del_fdir()
640 switch (input->ipl4_proto) { in i40e_add_del_fdir()
655 dev_info(&pf->pdev->dev, "Unsupported IPv6 protocol 0x%02x\n", in i40e_add_del_fdir()
656 input->ipl4_proto); in i40e_add_del_fdir()
657 return -EINVAL; in i40e_add_del_fdir()
661 dev_info(&pf->pdev->dev, "Unsupported flow type 0x%02x\n", in i40e_add_del_fdir()
662 input->flow_type); in i40e_add_del_fdir()
663 return -EINVAL; in i40e_add_del_fdir()
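
/*
 * Hedged userspace illustration of how a rule reaches the dispatcher
 * above: an ETHTOOL_SRXCLSRLINS request (the same path "ethtool -N"
 * uses) is handed to the driver's set_rxnfc handler, which ends up in
 * i40e_add_del_fdir(). The interface name, addresses, ports, queue and
 * rule location below are placeholders; i40e generally wants each
 * matched field either fully masked (all ones) or left at zero.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
        struct ethtool_rxnfc nfc;
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0)
                return 1;

        memset(&nfc, 0, sizeof(nfc));
        nfc.cmd = ETHTOOL_SRXCLSRLINS;                  /* insert an Rx classification rule */
        nfc.fs.flow_type = TCP_V4_FLOW;
        nfc.fs.h_u.tcp_ip4_spec.ip4src = inet_addr("192.0.2.1");
        nfc.fs.h_u.tcp_ip4_spec.ip4dst = inet_addr("192.0.2.10");
        nfc.fs.h_u.tcp_ip4_spec.psrc = htons(12345);
        nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
        nfc.fs.m_u.tcp_ip4_spec.ip4src = 0xffffffff;    /* match the full field */
        nfc.fs.m_u.tcp_ip4_spec.ip4dst = 0xffffffff;
        nfc.fs.m_u.tcp_ip4_spec.psrc = 0xffff;
        nfc.fs.m_u.tcp_ip4_spec.pdst = 0xffff;
        nfc.fs.ring_cookie = 4;                         /* steer matches to Rx queue 4 */
        nfc.fs.location = 0;                            /* rule index */

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* placeholder interface */
        ifr.ifr_data = (void *)&nfc;

        if (ioctl(fd, SIOCETHTOOL, &ifr))
                perror("ETHTOOL_SRXCLSRLINS");
        close(fd);
        return 0;
}
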
676 * i40e_fd_handle_status - check the Programming Status for FD
688 struct i40e_pf *pf = rx_ring->vsi->back; in i40e_fd_handle_status()
689 struct pci_dev *pdev = pf->pdev; in i40e_fd_handle_status()
699 pf->fd_inv = le32_to_cpu(qw0->hi_dword.fd_id); in i40e_fd_handle_status()
700 if (qw0->hi_dword.fd_id != 0 || in i40e_fd_handle_status()
701 (I40E_DEBUG_FD & pf->hw.debug_mask)) in i40e_fd_handle_status()
702 dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n", in i40e_fd_handle_status()
703 pf->fd_inv); in i40e_fd_handle_status()
706 * If so, auto disable ATR and set a state for in i40e_fd_handle_status()
711 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_fd_handle_status()
714 pf->fd_add_err++; in i40e_fd_handle_status()
716 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); in i40e_fd_handle_status()
718 if (qw0->hi_dword.fd_id == 0 && in i40e_fd_handle_status()
719 test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) { in i40e_fd_handle_status()
722 * disable ATR and queue a flush right after SB in i40e_fd_handle_status()
723 * support is re-enabled. That shouldn't cause an in i40e_fd_handle_status()
726 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_fd_handle_status()
727 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); in i40e_fd_handle_status()
732 fcnt_avail = pf->fdir_pf_filter_count; in i40e_fd_handle_status()
734 * if we are very close to full, it makes sense to disable in i40e_fd_handle_status()
735 * FD ATR/SB and then re-enable it when there is room. in i40e_fd_handle_status()
737 if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) { in i40e_fd_handle_status()
738 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && in i40e_fd_handle_status()
740 pf->state)) in i40e_fd_handle_status()
741 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fd_handle_status()
742 dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n"); in i40e_fd_handle_status()
745 if (I40E_DEBUG_FD & pf->hw.debug_mask) in i40e_fd_handle_status()
746 dev_info(&pdev->dev, "ntuple filter fd_id = %d, could not be removed\n", in i40e_fd_handle_status()
747 qw0->hi_dword.fd_id); in i40e_fd_handle_status()
752 * i40e_unmap_and_free_tx_resource - Release a Tx buffer
759 if (tx_buffer->skb) { in i40e_unmap_and_free_tx_resource()
760 if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB) in i40e_unmap_and_free_tx_resource()
761 kfree(tx_buffer->raw_buf); in i40e_unmap_and_free_tx_resource()
763 xdp_return_frame(tx_buffer->xdpf); in i40e_unmap_and_free_tx_resource()
765 dev_kfree_skb_any(tx_buffer->skb); in i40e_unmap_and_free_tx_resource()
767 dma_unmap_single(ring->dev, in i40e_unmap_and_free_tx_resource()
772 dma_unmap_page(ring->dev, in i40e_unmap_and_free_tx_resource()
778 tx_buffer->next_to_watch = NULL; in i40e_unmap_and_free_tx_resource()
779 tx_buffer->skb = NULL; in i40e_unmap_and_free_tx_resource()
785  * i40e_clean_tx_ring - Free all Tx buffers on the ring
793 if (ring_is_xdp(tx_ring) && tx_ring->xsk_pool) { in i40e_clean_tx_ring()
797 if (!tx_ring->tx_bi) in i40e_clean_tx_ring()
801 for (i = 0; i < tx_ring->count; i++) in i40e_clean_tx_ring()
803 &tx_ring->tx_bi[i]); in i40e_clean_tx_ring()
806 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_clean_tx_ring()
807 memset(tx_ring->tx_bi, 0, bi_size); in i40e_clean_tx_ring()
810 memset(tx_ring->desc, 0, tx_ring->size); in i40e_clean_tx_ring()
812 tx_ring->next_to_use = 0; in i40e_clean_tx_ring()
813 tx_ring->next_to_clean = 0; in i40e_clean_tx_ring()
815 if (!tx_ring->netdev) in i40e_clean_tx_ring()
823 * i40e_free_tx_resources - Free Tx resources per queue
831 kfree(tx_ring->tx_bi); in i40e_free_tx_resources()
832 tx_ring->tx_bi = NULL; in i40e_free_tx_resources()
834 if (tx_ring->desc) { in i40e_free_tx_resources()
835 dma_free_coherent(tx_ring->dev, tx_ring->size, in i40e_free_tx_resources()
836 tx_ring->desc, tx_ring->dma); in i40e_free_tx_resources()
837 tx_ring->desc = NULL; in i40e_free_tx_resources()
842 * i40e_get_tx_pending - how many tx descriptors not processed
855 tail = readl(ring->tail); in i40e_get_tx_pending()
857 head = ring->next_to_clean; in i40e_get_tx_pending()
858 tail = ring->next_to_use; in i40e_get_tx_pending()
863 tail - head : (tail + ring->count - head); in i40e_get_tx_pending()
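
/*
 * Standalone illustration (not driver code) of the pending-descriptor
 * arithmetic on the last line of i40e_get_tx_pending() above: the
 * result is the distance from head to tail on a circular ring of
 * `count` entries, with the wrap handled explicitly.
 */
#include <assert.h>

static unsigned int tx_pending(unsigned int head, unsigned int tail,
                               unsigned int count)
{
        return head <= tail ? tail - head : tail + count - head;
}

int main(void)
{
        assert(tx_pending(10, 14, 512) == 4);   /* no wrap */
        assert(tx_pending(500, 4, 512) == 16);  /* tail wrapped past the end */
        return 0;
}
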
869  * i40e_detect_recover_hung - detect and recover hung queues in i40e_detect_recover_hung()
885 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_detect_recover_hung()
888 netdev = vsi->netdev; in i40e_detect_recover_hung()
895 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_detect_recover_hung()
896 tx_ring = vsi->tx_rings[i]; in i40e_detect_recover_hung()
897 if (tx_ring && tx_ring->desc) { in i40e_detect_recover_hung()
905 packets = tx_ring->stats.packets & INT_MAX; in i40e_detect_recover_hung()
906 if (tx_ring->tx_stats.prev_pkt_ctr == packets) { in i40e_detect_recover_hung()
907 i40e_force_wb(vsi, tx_ring->q_vector); in i40e_detect_recover_hung()
915 tx_ring->tx_stats.prev_pkt_ctr = in i40e_detect_recover_hung()
916 i40e_get_tx_pending(tx_ring, true) ? packets : -1; in i40e_detect_recover_hung()
922 * i40e_clean_tx_irq - Reclaim resources after transmit completes
934 int i = tx_ring->next_to_clean; in i40e_clean_tx_irq()
939 unsigned int budget = vsi->work_limit; in i40e_clean_tx_irq()
941 tx_buf = &tx_ring->tx_bi[i]; in i40e_clean_tx_irq()
943 i -= tx_ring->count; in i40e_clean_tx_irq()
948 struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch; in i40e_clean_tx_irq()
963 tx_buf->next_to_watch = NULL; in i40e_clean_tx_irq()
966 total_bytes += tx_buf->bytecount; in i40e_clean_tx_irq()
967 total_packets += tx_buf->gso_segs; in i40e_clean_tx_irq()
971 xdp_return_frame(tx_buf->xdpf); in i40e_clean_tx_irq()
973 napi_consume_skb(tx_buf->skb, napi_budget); in i40e_clean_tx_irq()
976 dma_unmap_single(tx_ring->dev, in i40e_clean_tx_irq()
982 tx_buf->skb = NULL; in i40e_clean_tx_irq()
994 i -= tx_ring->count; in i40e_clean_tx_irq()
995 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
1001 dma_unmap_page(tx_ring->dev, in i40e_clean_tx_irq()
1014 i -= tx_ring->count; in i40e_clean_tx_irq()
1015 tx_buf = tx_ring->tx_bi; in i40e_clean_tx_irq()
1022 budget--; in i40e_clean_tx_irq()
1025 i += tx_ring->count; in i40e_clean_tx_irq()
1026 tx_ring->next_to_clean = i; in i40e_clean_tx_irq()
1038 if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && in i40e_clean_tx_irq()
1044 if (__netif_subqueue_stopped(tx_ring->netdev, in i40e_clean_tx_irq()
1045 tx_ring->queue_index) && in i40e_clean_tx_irq()
1046 !test_bit(__I40E_VSI_DOWN, vsi->state)) { in i40e_clean_tx_irq()
1047 netif_wake_subqueue(tx_ring->netdev, in i40e_clean_tx_irq()
1048 tx_ring->queue_index); in i40e_clean_tx_irq()
1049 ++tx_ring->tx_stats.restart_queue; in i40e_clean_tx_irq()
1058 * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
1066 u16 flags = q_vector->tx.ring[0].flags; in i40e_enable_wb_on_itr()
1072 if (q_vector->arm_wb_state) in i40e_enable_wb_on_itr()
1075 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_enable_wb_on_itr()
1079 wr32(&vsi->back->hw, in i40e_enable_wb_on_itr()
1080 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), in i40e_enable_wb_on_itr()
1086 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); in i40e_enable_wb_on_itr()
1088 q_vector->arm_wb_state = true; in i40e_enable_wb_on_itr()
1092 * i40e_force_wb - Issue SW Interrupt so HW does a wb
1099 if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) { in i40e_force_wb()
1106 wr32(&vsi->back->hw, in i40e_force_wb()
1107 I40E_PFINT_DYN_CTLN(q_vector->reg_idx), val); in i40e_force_wb()
1115 wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val); in i40e_force_wb()
1122 return &q_vector->rx == rc; in i40e_container_is_rx()
1129 switch (q_vector->vsi->back->hw.phy.link_info.link_speed) { in i40e_itr_divisor()
1151 * i40e_update_itr - update the dynamic ITR value based on statistics
1172 if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting)) in i40e_update_itr()
1182 /* If we didn't update within up to 1 - 2 jiffies we can assume in i40e_update_itr()
1187 if (time_after(next_update, rc->next_update)) in i40e_update_itr()
1196 if (q_vector->itr_countdown) { in i40e_update_itr()
1197 itr = rc->target_itr; in i40e_update_itr()
1201 packets = rc->total_packets; in i40e_update_itr()
1202 bytes = rc->total_bytes; in i40e_update_itr()
1211 (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) { in i40e_update_itr()
1221 if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS && in i40e_update_itr()
1222 (q_vector->rx.target_itr & I40E_ITR_MASK) == in i40e_update_itr()
1229 rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY; in i40e_update_itr()
1241 itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC; in i40e_update_itr()
1250 itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr); in i40e_update_itr()
1347 rc->target_itr = itr; in i40e_update_itr()
1350 rc->next_update = next_update + 1; in i40e_update_itr()
1352 rc->total_bytes = 0; in i40e_update_itr()
1353 rc->total_packets = 0; in i40e_update_itr()
1358 return &rx_ring->rx_bi[idx]; in i40e_rx_bi()
1362 * i40e_reuse_rx_page - page flip buffer and store it back on the ring
1372 u16 nta = rx_ring->next_to_alloc; in i40e_reuse_rx_page()
1378 rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; in i40e_reuse_rx_page()
1381 new_buff->dma = old_buff->dma; in i40e_reuse_rx_page()
1382 new_buff->page = old_buff->page; in i40e_reuse_rx_page()
1383 new_buff->page_offset = old_buff->page_offset; in i40e_reuse_rx_page()
1384 new_buff->pagecnt_bias = old_buff->pagecnt_bias; in i40e_reuse_rx_page()
1387 old_buff->page = NULL; in i40e_reuse_rx_page()
1391 * i40e_clean_programming_status - clean the programming status descriptor
1415 * i40e_setup_tx_descriptors - Allocate the Tx descriptors
1422 struct device *dev = tx_ring->dev; in i40e_setup_tx_descriptors()
1426 return -ENOMEM; in i40e_setup_tx_descriptors()
1429 WARN_ON(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1430 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; in i40e_setup_tx_descriptors()
1431 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); in i40e_setup_tx_descriptors()
1432 if (!tx_ring->tx_bi) in i40e_setup_tx_descriptors()
1435 u64_stats_init(&tx_ring->syncp); in i40e_setup_tx_descriptors()
1438 tx_ring->size = tx_ring->count * sizeof(struct i40e_tx_desc); in i40e_setup_tx_descriptors()
1442 tx_ring->size += sizeof(u32); in i40e_setup_tx_descriptors()
1443 tx_ring->size = ALIGN(tx_ring->size, 4096); in i40e_setup_tx_descriptors()
1444 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size, in i40e_setup_tx_descriptors()
1445 &tx_ring->dma, GFP_KERNEL); in i40e_setup_tx_descriptors()
1446 if (!tx_ring->desc) { in i40e_setup_tx_descriptors()
1448 tx_ring->size); in i40e_setup_tx_descriptors()
1452 tx_ring->next_to_use = 0; in i40e_setup_tx_descriptors()
1453 tx_ring->next_to_clean = 0; in i40e_setup_tx_descriptors()
1454 tx_ring->tx_stats.prev_pkt_ctr = -1; in i40e_setup_tx_descriptors()
1458 kfree(tx_ring->tx_bi); in i40e_setup_tx_descriptors()
1459 tx_ring->tx_bi = NULL; in i40e_setup_tx_descriptors()
1460 return -ENOMEM; in i40e_setup_tx_descriptors()
1465 memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); in i40e_clear_rx_bi()
1469 * i40e_clean_rx_ring - Free Rx buffers
1477 if (!rx_ring->rx_bi) in i40e_clean_rx_ring()
1480 if (rx_ring->xsk_pool) { in i40e_clean_rx_ring()
1486 for (i = 0; i < rx_ring->count; i++) { in i40e_clean_rx_ring()
1489 if (!rx_bi->page) in i40e_clean_rx_ring()
1495 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_clean_rx_ring()
1496 rx_bi->dma, in i40e_clean_rx_ring()
1497 rx_bi->page_offset, in i40e_clean_rx_ring()
1498 rx_ring->rx_buf_len, in i40e_clean_rx_ring()
1502 dma_unmap_page_attrs(rx_ring->dev, rx_bi->dma, in i40e_clean_rx_ring()
1507 __page_frag_cache_drain(rx_bi->page, rx_bi->pagecnt_bias); in i40e_clean_rx_ring()
1509 rx_bi->page = NULL; in i40e_clean_rx_ring()
1510 rx_bi->page_offset = 0; in i40e_clean_rx_ring()
1514 if (rx_ring->xsk_pool) in i40e_clean_rx_ring()
1520 memset(rx_ring->desc, 0, rx_ring->size); in i40e_clean_rx_ring()
1522 rx_ring->next_to_alloc = 0; in i40e_clean_rx_ring()
1523 rx_ring->next_to_clean = 0; in i40e_clean_rx_ring()
1524 rx_ring->next_to_process = 0; in i40e_clean_rx_ring()
1525 rx_ring->next_to_use = 0; in i40e_clean_rx_ring()
1529 * i40e_free_rx_resources - Free Rx resources
1537 if (rx_ring->vsi->type == I40E_VSI_MAIN) in i40e_free_rx_resources()
1538 xdp_rxq_info_unreg(&rx_ring->xdp_rxq); in i40e_free_rx_resources()
1539 rx_ring->xdp_prog = NULL; in i40e_free_rx_resources()
1540 kfree(rx_ring->rx_bi); in i40e_free_rx_resources()
1541 rx_ring->rx_bi = NULL; in i40e_free_rx_resources()
1543 if (rx_ring->desc) { in i40e_free_rx_resources()
1544 dma_free_coherent(rx_ring->dev, rx_ring->size, in i40e_free_rx_resources()
1545 rx_ring->desc, rx_ring->dma); in i40e_free_rx_resources()
1546 rx_ring->desc = NULL; in i40e_free_rx_resources()
1551 * i40e_setup_rx_descriptors - Allocate Rx descriptors
1558 struct device *dev = rx_ring->dev; in i40e_setup_rx_descriptors()
1561 u64_stats_init(&rx_ring->syncp); in i40e_setup_rx_descriptors()
1564 rx_ring->size = rx_ring->count * sizeof(union i40e_rx_desc); in i40e_setup_rx_descriptors()
1565 rx_ring->size = ALIGN(rx_ring->size, 4096); in i40e_setup_rx_descriptors()
1566 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size, in i40e_setup_rx_descriptors()
1567 &rx_ring->dma, GFP_KERNEL); in i40e_setup_rx_descriptors()
1569 if (!rx_ring->desc) { in i40e_setup_rx_descriptors()
1571 rx_ring->size); in i40e_setup_rx_descriptors()
1572 return -ENOMEM; in i40e_setup_rx_descriptors()
1575 rx_ring->next_to_alloc = 0; in i40e_setup_rx_descriptors()
1576 rx_ring->next_to_clean = 0; in i40e_setup_rx_descriptors()
1577 rx_ring->next_to_process = 0; in i40e_setup_rx_descriptors()
1578 rx_ring->next_to_use = 0; in i40e_setup_rx_descriptors()
1580 /* XDP RX-queue info only needed for RX rings exposed to XDP */ in i40e_setup_rx_descriptors()
1581 if (rx_ring->vsi->type == I40E_VSI_MAIN) { in i40e_setup_rx_descriptors()
1582 err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev, in i40e_setup_rx_descriptors()
1583 rx_ring->queue_index, rx_ring->q_vector->napi.napi_id); in i40e_setup_rx_descriptors()
1588 rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; in i40e_setup_rx_descriptors()
1590 rx_ring->rx_bi = in i40e_setup_rx_descriptors()
1591 kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL); in i40e_setup_rx_descriptors()
1592 if (!rx_ring->rx_bi) in i40e_setup_rx_descriptors()
1593 return -ENOMEM; in i40e_setup_rx_descriptors()
1599 * i40e_release_rx_desc - Store the new tail and head values
1605 rx_ring->next_to_use = val; in i40e_release_rx_desc()
1608 rx_ring->next_to_alloc = val; in i40e_release_rx_desc()
1612 * applicable for weak-ordered memory model archs, in i40e_release_rx_desc()
1613 * such as IA-64). in i40e_release_rx_desc()
1616 writel(val, rx_ring->tail); in i40e_release_rx_desc()
1625 truesize = rx_ring->rx_offset ? in i40e_rx_frame_truesize()
1626 SKB_DATA_ALIGN(size + rx_ring->rx_offset) + in i40e_rx_frame_truesize()
1634 * i40e_alloc_mapped_page - recycle or make a new page
1644 struct page *page = bi->page; in i40e_alloc_mapped_page()
1649 rx_ring->rx_stats.page_reuse_count++; in i40e_alloc_mapped_page()
1656 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_mapped_page()
1660 rx_ring->rx_stats.page_alloc_count++; in i40e_alloc_mapped_page()
1663 dma = dma_map_page_attrs(rx_ring->dev, page, 0, in i40e_alloc_mapped_page()
1671 if (dma_mapping_error(rx_ring->dev, dma)) { in i40e_alloc_mapped_page()
1673 rx_ring->rx_stats.alloc_page_failed++; in i40e_alloc_mapped_page()
1677 bi->dma = dma; in i40e_alloc_mapped_page()
1678 bi->page = page; in i40e_alloc_mapped_page()
1679 bi->page_offset = rx_ring->rx_offset; in i40e_alloc_mapped_page()
1680 page_ref_add(page, USHRT_MAX - 1); in i40e_alloc_mapped_page()
1681 bi->pagecnt_bias = USHRT_MAX; in i40e_alloc_mapped_page()
1687 * i40e_alloc_rx_buffers - Replace used receive buffers
1695 u16 ntu = rx_ring->next_to_use; in i40e_alloc_rx_buffers()
1700 if (!rx_ring->netdev || !cleaned_count) in i40e_alloc_rx_buffers()
1711 dma_sync_single_range_for_device(rx_ring->dev, bi->dma, in i40e_alloc_rx_buffers()
1712 bi->page_offset, in i40e_alloc_rx_buffers()
1713 rx_ring->rx_buf_len, in i40e_alloc_rx_buffers()
1717 * because each write-back erases this info. in i40e_alloc_rx_buffers()
1719 rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); in i40e_alloc_rx_buffers()
1724 if (unlikely(ntu == rx_ring->count)) { in i40e_alloc_rx_buffers()
1731 rx_desc->wb.qword1.status_error_len = 0; in i40e_alloc_rx_buffers()
1733 cleaned_count--; in i40e_alloc_rx_buffers()
1736 if (rx_ring->next_to_use != ntu) in i40e_alloc_rx_buffers()
1742 if (rx_ring->next_to_use != ntu) in i40e_alloc_rx_buffers()
1752 * i40e_rx_checksum - Indicate in skb if hw indicated a good cksum
1767 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); in i40e_rx_checksum()
1775 skb->ip_summed = CHECKSUM_NONE; in i40e_rx_checksum()
1780 if (!(vsi->netdev->features & NETIF_F_RXCSUM)) in i40e_rx_checksum()
1804 /* don't increment checksum err here, non-fatal err */ in i40e_rx_checksum()
1823 skb->csum_level = 1; in i40e_rx_checksum()
1830 skb->ip_summed = CHECKSUM_UNNECESSARY; in i40e_rx_checksum()
1839 vsi->back->hw_csum_rx_error++; in i40e_rx_checksum()
1843 * i40e_ptype_to_htype - get a hash type
1866 * i40e_rx_hash - set the hash value in the skb
1882 if (!(ring->netdev->features & NETIF_F_RXHASH)) in i40e_rx_hash()
1885 if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { in i40e_rx_hash()
1886 hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); in i40e_rx_hash()
1892 * i40e_process_skb_fields - Populate skb header fields from Rx descriptor
1894 * @rx_desc: pointer to the EOP Rx descriptor
1904 u64 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); in i40e_process_skb_fields()
1914 i40e_ptp_rx_hwtstamp(rx_ring->vsi->back, skb, tsyn); in i40e_process_skb_fields()
1918 i40e_rx_checksum(rx_ring->vsi, skb, rx_desc); in i40e_process_skb_fields()
1920 skb_record_rx_queue(skb, rx_ring->queue_index); in i40e_process_skb_fields()
1923 __le16 vlan_tag = rx_desc->wb.qword0.lo_dword.l2tag1; in i40e_process_skb_fields()
1929 /* modifies the skb - consumes the enet header */ in i40e_process_skb_fields()
1930 skb->protocol = eth_type_trans(skb, rx_ring->netdev); in i40e_process_skb_fields()
1934 * i40e_cleanup_headers - Correct empty headers
1937 * @rx_desc: pointer to the EOP Rx descriptor
1948 /* ERR_MASK will only have valid bits if EOP set, and in i40e_cleanup_headers()
1967 * i40e_can_reuse_rx_page - Determine if page can be reused for another Rx
1982 unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; in i40e_can_reuse_rx_page()
1983 struct page *page = rx_buffer->page; in i40e_can_reuse_rx_page()
1987 rx_stats->page_waive_count++; in i40e_can_reuse_rx_page()
1993 if (unlikely((rx_buffer->page_count - pagecnt_bias) > 1)) { in i40e_can_reuse_rx_page()
1994 rx_stats->page_busy_count++; in i40e_can_reuse_rx_page()
1999 (SKB_WITH_OVERHEAD(PAGE_SIZE) - I40E_RXBUFFER_2048) in i40e_can_reuse_rx_page()
2000 if (rx_buffer->page_offset > I40E_LAST_OFFSET) { in i40e_can_reuse_rx_page()
2001 rx_stats->page_busy_count++; in i40e_can_reuse_rx_page()
2011 page_ref_add(page, USHRT_MAX - 1); in i40e_can_reuse_rx_page()
2012 rx_buffer->pagecnt_bias = USHRT_MAX; in i40e_can_reuse_rx_page()
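
/*
 * Simplified single-threaded model (not driver code) of the
 * pagecnt_bias scheme set up in i40e_alloc_mapped_page() and checked
 * in i40e_can_reuse_rx_page() above: the driver front-loads a large
 * page reference count once (page_ref_add(page, USHRT_MAX - 1)) and
 * then pays references out of its local bias, so the Rx hot path
 * avoids per-packet atomic refcount updates. The struct and helpers
 * below are illustrative stand-ins for struct page and the driver
 * fields.
 */
#include <stdio.h>

struct model_rx_buffer {
        unsigned int page_refcount;     /* stands in for page_ref_count(page) */
        unsigned int page_count;        /* snapshot taken when the buffer is fetched */
        unsigned int pagecnt_bias;      /* references still prepaid by the driver */
};

static void model_alloc(struct model_rx_buffer *b)
{
        b->page_refcount = 1 + (0xffff - 1);    /* page_ref_add(page, USHRT_MAX - 1) */
        b->pagecnt_bias = 0xffff;               /* USHRT_MAX */
}

/* i40e_get_rx_buffer(): snapshot the refcount, hand one reference out */
static void model_get(struct model_rx_buffer *b)
{
        b->page_count = b->page_refcount;
        b->pagecnt_bias--;
}

/* mirrors the (page_count - pagecnt_bias) > 1 "page busy" test above */
static int model_can_reuse(const struct model_rx_buffer *b)
{
        return (b->page_count - b->pagecnt_bias) <= 1;
}

int main(void)
{
        struct model_rx_buffer b;

        model_alloc(&b);

        model_get(&b);          /* packet 1 consumes one half of the page */
        printf("after packet 1: reuse=%d\n", model_can_reuse(&b));      /* 1 */

        model_get(&b);          /* packet 2, while packet 1's skb still holds the page */
        printf("skb 1 still out: reuse=%d\n", model_can_reuse(&b));     /* 0: driver would drop the page */

        return 0;
}
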
2019  * i40e_rx_buffer_flip - adjust rx_buffer to point to an unused region in i40e_rx_buffer_flip()
2027 rx_buffer->page_offset ^= truesize; in i40e_rx_buffer_flip()
2029 rx_buffer->page_offset += truesize; in i40e_rx_buffer_flip()
2034 * i40e_get_rx_buffer - Fetch Rx buffer and synchronize data for use
2046 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_process); in i40e_get_rx_buffer()
2047 rx_buffer->page_count = in i40e_get_rx_buffer()
2049 page_count(rx_buffer->page); in i40e_get_rx_buffer()
2053 prefetch_page_address(rx_buffer->page); in i40e_get_rx_buffer()
2056 dma_sync_single_range_for_cpu(rx_ring->dev, in i40e_get_rx_buffer()
2057 rx_buffer->dma, in i40e_get_rx_buffer()
2058 rx_buffer->page_offset, in i40e_get_rx_buffer()
2063 rx_buffer->pagecnt_bias--; in i40e_get_rx_buffer()
2069 * i40e_put_rx_buffer - Clean up used buffer and either recycle or free
2079 if (i40e_can_reuse_rx_page(rx_buffer, &rx_ring->rx_stats)) { in i40e_put_rx_buffer()
2084 dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, in i40e_put_rx_buffer()
2087 __page_frag_cache_drain(rx_buffer->page, in i40e_put_rx_buffer()
2088 rx_buffer->pagecnt_bias); in i40e_put_rx_buffer()
2090 rx_buffer->page = NULL; in i40e_put_rx_buffer()
2095  * i40e_process_rx_buffs - Process buffers after the XDP program has run or on error in i40e_process_rx_buffs()
2103 u32 next = rx_ring->next_to_clean; in i40e_process_rx_buffs()
2106 xdp->flags = 0; in i40e_process_rx_buffs()
2110 if (++next == rx_ring->count) in i40e_process_rx_buffs()
2113 if (!rx_buffer->page) in i40e_process_rx_buffs()
2117 rx_buffer->pagecnt_bias++; in i40e_process_rx_buffs()
2119 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); in i40e_process_rx_buffs()
2121 /* EOP buffer will be put in i40e_clean_rx_irq() */ in i40e_process_rx_buffs()
2122 if (next == rx_ring->next_to_process) in i40e_process_rx_buffs()
2130 * i40e_construct_skb - Allocate skb and populate it
2143 unsigned int size = xdp->data_end - xdp->data; in i40e_construct_skb()
2149 net_prefetch(xdp->data); in i40e_construct_skb()
2151 /* Note, we get here by enabling legacy-rx via: in i40e_construct_skb()
2153 * ethtool --set-priv-flags <dev> legacy-rx on in i40e_construct_skb()
2156 * opposed to having legacy-rx off, where we process XDP in i40e_construct_skb()
2161 * xdp->data_meta will always point to xdp->data, since in i40e_construct_skb()
2163 * change in future for legacy-rx mode on, then lets also in i40e_construct_skb()
2164 * add xdp->data_meta handling here. in i40e_construct_skb()
2168 skb = __napi_alloc_skb(&rx_ring->q_vector->napi, in i40e_construct_skb()
2177 headlen = eth_get_headlen(skb->dev, xdp->data, in i40e_construct_skb()
2181 memcpy(__skb_put(skb, headlen), xdp->data, in i40e_construct_skb()
2184 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); in i40e_construct_skb()
2186 size -= headlen; in i40e_construct_skb()
2192 skb_add_rx_frag(skb, 0, rx_buffer->page, in i40e_construct_skb()
2193 rx_buffer->page_offset + headlen, in i40e_construct_skb()
2194 size, xdp->frame_sz); in i40e_construct_skb()
2196 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); in i40e_construct_skb()
2199 rx_buffer->pagecnt_bias++; in i40e_construct_skb()
2206 memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0], in i40e_construct_skb()
2209 xdp_update_skb_shared_info(skb, skinfo->nr_frags + nr_frags, in i40e_construct_skb()
2210 sinfo->xdp_frags_size, in i40e_construct_skb()
2211 nr_frags * xdp->frame_sz, in i40e_construct_skb()
2215 if (++rx_ring->next_to_clean == rx_ring->count) in i40e_construct_skb()
2216 rx_ring->next_to_clean = 0; in i40e_construct_skb()
2225 * i40e_build_skb - Build skb around an existing buffer
2237 unsigned int metasize = xdp->data - xdp->data_meta; in i40e_build_skb()
2240 /* Prefetch first cache line of first page. If xdp->data_meta in i40e_build_skb()
2241 * is unused, this points exactly as xdp->data, otherwise we in i40e_build_skb()
2245 net_prefetch(xdp->data_meta); in i40e_build_skb()
2248 skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz); in i40e_build_skb()
2253 skb_reserve(skb, xdp->data - xdp->data_hard_start); in i40e_build_skb()
2254 __skb_put(skb, xdp->data_end - xdp->data); in i40e_build_skb()
2263 sinfo->xdp_frags_size, in i40e_build_skb()
2264 nr_frags * xdp->frame_sz, in i40e_build_skb()
2271 rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean); in i40e_build_skb()
2273 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); in i40e_build_skb()
2280 * i40e_is_non_eop - process handling of non-EOP buffers
2284 * If the buffer is an EOP buffer, this function exits returning false,
2285  * otherwise it returns true, indicating that this is in fact a non-EOP buffer.
2295 rx_ring->rx_stats.non_eop_descs++; in i40e_is_non_eop()
2314 * i40e_run_xdp - run an XDP program
2328 prefetchw(xdp->data_hard_start); /* xdp_frame write */ in i40e_run_xdp()
2335 xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_run_xdp()
2341 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in i40e_run_xdp()
2347 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in i40e_run_xdp()
2351 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in i40e_run_xdp()
2362 * i40e_xdp_ring_update_tail - Updates the XDP Tx ring tail register
2373 writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail); in i40e_xdp_ring_update_tail()
2377 * i40e_update_rx_stats - Update Rx ring statistics
2388 u64_stats_update_begin(&rx_ring->syncp); in i40e_update_rx_stats()
2389 rx_ring->stats.packets += total_rx_packets; in i40e_update_rx_stats()
2390 rx_ring->stats.bytes += total_rx_bytes; in i40e_update_rx_stats()
2391 u64_stats_update_end(&rx_ring->syncp); in i40e_update_rx_stats()
2392 rx_ring->q_vector->rx.total_packets += total_rx_packets; in i40e_update_rx_stats()
2393 rx_ring->q_vector->rx.total_bytes += total_rx_bytes; in i40e_update_rx_stats()
2397 * i40e_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
2412 rx_ring->vsi->xdp_rings[rx_ring->queue_index]; in i40e_finalize_xdp_rx()
2424 u32 ntp = rx_ring->next_to_process + 1; in i40e_inc_ntp()
2426 ntp = (ntp < rx_ring->count) ? ntp : 0; in i40e_inc_ntp()
2427 rx_ring->next_to_process = ntp; in i40e_inc_ntp()
2444 sinfo->nr_frags = 0; in i40e_add_xdp_frag()
2445 sinfo->xdp_frags_size = 0; in i40e_add_xdp_frag()
2447 } else if (unlikely(sinfo->nr_frags >= MAX_SKB_FRAGS)) { in i40e_add_xdp_frag()
2449 return -ENOMEM; in i40e_add_xdp_frag()
2452 __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buffer->page, in i40e_add_xdp_frag()
2453 rx_buffer->page_offset, size); in i40e_add_xdp_frag()
2455 sinfo->xdp_frags_size += size; in i40e_add_xdp_frag()
2457 if (page_is_pfmemalloc(rx_buffer->page)) in i40e_add_xdp_frag()
2459 *nr_frags = sinfo->nr_frags; in i40e_add_xdp_frag()
2465 * i40e_consume_xdp_buff - Consume all the buffers of the packet and update ntc
2468 * @rx_buffer: rx_buffer of eop desc
2476 rx_ring->next_to_clean = rx_ring->next_to_process; in i40e_consume_xdp_buff()
2477 xdp->data = NULL; in i40e_consume_xdp_buff()
2481 * i40e_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
2498 u16 clean_threshold = rx_ring->count / 2; in i40e_clean_rx_irq()
2499 unsigned int offset = rx_ring->rx_offset; in i40e_clean_rx_irq()
2500 struct xdp_buff *xdp = &rx_ring->xdp; in i40e_clean_rx_irq()
2506 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in i40e_clean_rx_irq()
2509 u16 ntp = rx_ring->next_to_process; in i40e_clean_rx_irq()
2530 * hardware wrote DD then the length will be non-zero in i40e_clean_rx_irq()
2532 qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); in i40e_clean_rx_irq()
2542 rx_desc->raw.qword[0], in i40e_clean_rx_irq()
2550 if (rx_ring->next_to_clean == ntp) { in i40e_clean_rx_irq()
2551 rx_ring->next_to_clean = in i40e_clean_rx_irq()
2552 rx_ring->next_to_process; in i40e_clean_rx_irq()
2570 if (!xdp->data) { in i40e_clean_rx_irq()
2573 hard_start = page_address(rx_buffer->page) + in i40e_clean_rx_irq()
2574 rx_buffer->page_offset - offset; in i40e_clean_rx_irq()
2578 xdp->frame_sz = i40e_rx_frame_truesize(rx_ring, size); in i40e_clean_rx_irq()
2582 /* Overflowing packet: Drop all frags on EOP */ in i40e_clean_rx_irq()
2599 i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz); in i40e_clean_rx_irq()
2601 rx_buffer->pagecnt_bias++; in i40e_clean_rx_irq()
2612 rx_ring->rx_stats.alloc_buff_failed++; in i40e_clean_rx_irq()
2621 total_rx_bytes += skb->len; in i40e_clean_rx_irq()
2627 napi_gro_receive(&rx_ring->q_vector->napi, skb); in i40e_clean_rx_irq()
2635 rx_ring->next_to_clean = rx_ring->next_to_process; in i40e_clean_rx_irq()
2637 xdp->data = NULL; in i40e_clean_rx_irq()
2656 * auto-cleared". The auto-clearing happens when the interrupt is in i40e_buildreg_itr()
2662 * to hold pending events for us until the interrupt is re-enabled in i40e_buildreg_itr()
2666 * only need to shift by the interval shift - 1 instead of the in i40e_buildreg_itr()
2673 (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1)); in i40e_buildreg_itr()
2691 * i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
2699 struct i40e_hw *hw = &vsi->back->hw; in i40e_update_enable_itr()
2702 /* If we don't have MSIX, then we only need to re-enable icr0 */ in i40e_update_enable_itr()
2703 if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) { in i40e_update_enable_itr()
2704 i40e_irq_dynamic_enable_icr0(vsi->back); in i40e_update_enable_itr()
2709 i40e_update_itr(q_vector, &q_vector->tx); in i40e_update_enable_itr()
2710 i40e_update_itr(q_vector, &q_vector->rx); in i40e_update_enable_itr()
2714 * pseudo-lazy update with the following criteria. in i40e_update_enable_itr()
2720 if (q_vector->rx.target_itr < q_vector->rx.current_itr) { in i40e_update_enable_itr()
2723 q_vector->rx.target_itr); in i40e_update_enable_itr()
2724 q_vector->rx.current_itr = q_vector->rx.target_itr; in i40e_update_enable_itr()
2725 q_vector->itr_countdown = ITR_COUNTDOWN_START; in i40e_update_enable_itr()
2726 } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) || in i40e_update_enable_itr()
2727 ((q_vector->rx.target_itr - q_vector->rx.current_itr) < in i40e_update_enable_itr()
2728 (q_vector->tx.target_itr - q_vector->tx.current_itr))) { in i40e_update_enable_itr()
2733 q_vector->tx.target_itr); in i40e_update_enable_itr()
2734 q_vector->tx.current_itr = q_vector->tx.target_itr; in i40e_update_enable_itr()
2735 q_vector->itr_countdown = ITR_COUNTDOWN_START; in i40e_update_enable_itr()
2736 } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) { in i40e_update_enable_itr()
2739 q_vector->rx.target_itr); in i40e_update_enable_itr()
2740 q_vector->rx.current_itr = q_vector->rx.target_itr; in i40e_update_enable_itr()
2741 q_vector->itr_countdown = ITR_COUNTDOWN_START; in i40e_update_enable_itr()
2745 if (q_vector->itr_countdown) in i40e_update_enable_itr()
2746 q_vector->itr_countdown--; in i40e_update_enable_itr()
2749 if (!test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_update_enable_itr()
2750 wr32(hw, INTREG(q_vector->reg_idx), intval); in i40e_update_enable_itr()
2754 * i40e_napi_poll - NAPI polling Rx/Tx cleanup routine
2766 struct i40e_vsi *vsi = q_vector->vsi; in i40e_napi_poll()
2777 if (test_bit(__I40E_VSI_DOWN, vsi->state)) { in i40e_napi_poll()
2785 i40e_for_each_ring(ring, q_vector->tx) { in i40e_napi_poll()
2786 bool wd = ring->xsk_pool ? in i40e_napi_poll()
2794 arm_wb |= ring->arm_wb; in i40e_napi_poll()
2795 ring->arm_wb = false; in i40e_napi_poll()
2803 if (unlikely(q_vector->num_ringpairs > 1)) in i40e_napi_poll()
2808 budget_per_ring = max_t(int, budget / q_vector->num_ringpairs, 1); in i40e_napi_poll()
2813 i40e_for_each_ring(ring, q_vector->rx) { in i40e_napi_poll()
2814 int cleaned = ring->xsk_pool ? in i40e_napi_poll()
2839 if (!cpumask_test_cpu(cpu_id, &q_vector->affinity_mask)) { in i40e_napi_poll()
2846 /* Return budget-1 so that polling stops */ in i40e_napi_poll()
2847 return budget - 1; in i40e_napi_poll()
2851 q_vector->tx.ring[0].tx_stats.tx_force_wb++; in i40e_napi_poll()
2857 if (q_vector->tx.ring[0].flags & I40E_TXR_FLAGS_WB_ON_ITR) in i40e_napi_poll()
2858 q_vector->arm_wb_state = false; in i40e_napi_poll()
2860 /* Exit the polling mode, but don't re-enable interrupts if stack might in i40e_napi_poll()
2861 * poll us due to busy-polling in i40e_napi_poll()
2866 return min(work_done, budget - 1); in i40e_napi_poll()
2870 * i40e_atr - Add a Flow Director ATR filter
2879 struct i40e_pf *pf = tx_ring->vsi->back; in i40e_atr()
2892 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) in i40e_atr()
2895 if (test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_atr()
2899 if (!tx_ring->atr_sample_rate) in i40e_atr()
2916 l4_proto = hdr.ipv4->protocol; in i40e_atr()
2919 unsigned int inner_hlen = hdr.network - skb->data; in i40e_atr()
2926 hlen = h_offset - inner_hlen; in i40e_atr()
2935 if (th->syn && test_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state)) in i40e_atr()
2937 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) { in i40e_atr()
2941 if (th->fin || th->rst) in i40e_atr()
2945 tx_ring->atr_count++; in i40e_atr()
2948 if (!th->fin && in i40e_atr()
2949 !th->syn && in i40e_atr()
2950 !th->rst && in i40e_atr()
2951 (tx_ring->atr_count < tx_ring->atr_sample_rate)) in i40e_atr()
2954 tx_ring->atr_count = 0; in i40e_atr()
2957 i = tx_ring->next_to_use; in i40e_atr()
2961 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_atr()
2963 flex_ptype = (tx_ring->queue_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) & in i40e_atr()
2971 flex_ptype |= tx_ring->vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT; in i40e_atr()
2975 dtype_cmd |= (th->fin || th->rst) ? in i40e_atr()
2990 ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) << in i40e_atr()
2995 ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) << in i40e_atr()
2999 if (pf->flags & I40E_FLAG_HW_ATR_EVICT_ENABLED) in i40e_atr()
3002 fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype); in i40e_atr()
3003 fdir_desc->rsvd = cpu_to_le32(0); in i40e_atr()
3004 fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd); in i40e_atr()
3005 fdir_desc->fd_id = cpu_to_le32(0); in i40e_atr()
3009 * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
3024 __be16 protocol = skb->protocol; in i40e_tx_prepare_vlan_flags()
3028 !(tx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in i40e_tx_prepare_vlan_flags()
3036 skb->protocol = vlan_get_protocol(skb); in i40e_tx_prepare_vlan_flags()
3050 return -EINVAL; in i40e_tx_prepare_vlan_flags()
3052 protocol = vhdr->h_vlan_encapsulated_proto; in i40e_tx_prepare_vlan_flags()
3053 tx_flags |= ntohs(vhdr->h_vlan_TCI) << I40E_TX_FLAGS_VLAN_SHIFT; in i40e_tx_prepare_vlan_flags()
3057 if (!(tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED)) in i40e_tx_prepare_vlan_flags()
3062 (skb->priority != TC_PRIO_CONTROL)) { in i40e_tx_prepare_vlan_flags()
3064 tx_flags |= (skb->priority & 0x7) << in i40e_tx_prepare_vlan_flags()
3074 vhdr->h_vlan_TCI = htons(tx_flags >> in i40e_tx_prepare_vlan_flags()
3087 * i40e_tso - set up the tso context descriptor
3097 struct sk_buff *skb = first->skb; in i40e_tso()
3114 if (skb->ip_summed != CHECKSUM_PARTIAL) in i40e_tso()
3133 if (ip.v4->version == 4) { in i40e_tso()
3134 ip.v4->tot_len = 0; in i40e_tso()
3135 ip.v4->check = 0; in i40e_tso()
3137 first->tx_flags |= I40E_TX_FLAGS_TSO; in i40e_tso()
3139 ip.v6->payload_len = 0; in i40e_tso()
3140 first->tx_flags |= I40E_TX_FLAGS_TSO; in i40e_tso()
3143 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | in i40e_tso()
3149 if (!(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in i40e_tso()
3150 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) { in i40e_tso()
3151 l4.udp->len = 0; in i40e_tso()
3154 l4_offset = l4.hdr - skb->data; in i40e_tso()
3157 paylen = skb->len - l4_offset; in i40e_tso()
3158 csum_replace_by_diff(&l4.udp->check, in i40e_tso()
3167 if (ip.v4->version == 4) { in i40e_tso()
3168 ip.v4->tot_len = 0; in i40e_tso()
3169 ip.v4->check = 0; in i40e_tso()
3171 ip.v6->payload_len = 0; in i40e_tso()
3176 l4_offset = l4.hdr - skb->data; in i40e_tso()
3179 paylen = skb->len - l4_offset; in i40e_tso()
3181 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in i40e_tso()
3182 csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen)); in i40e_tso()
3186 csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen)); in i40e_tso()
3188 *hdr_len = (l4.tcp->doff * 4) + l4_offset; in i40e_tso()
3192 gso_size = skb_shinfo(skb)->gso_size; in i40e_tso()
3195 first->gso_segs = skb_shinfo(skb)->gso_segs; in i40e_tso()
3196 first->bytecount += (first->gso_segs - 1) * *hdr_len; in i40e_tso()
3200 cd_tso_len = skb->len - *hdr_len; in i40e_tso()
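
/*
 * Worked example (standalone, not driver code) of the TSO accounting
 * in i40e_tso() above: the NIC replicates the headers on every
 * segment, so the byte count used for BQL/statistics grows by one
 * extra header per additional segment, and cd_tso_len carries the
 * payload length only. The frame sizes below are made up; in the
 * driver, gso_segs comes precomputed from skb_shinfo(skb).
 */
#include <stdio.h>

int main(void)
{
        unsigned int skb_len  = 7066;   /* 66 bytes of headers + 7000 bytes of payload */
        unsigned int hdr_len  = 66;     /* MAC + IP + TCP header length */
        unsigned int gso_size = 1400;   /* MSS */

        unsigned int payload    = skb_len - hdr_len;
        unsigned int gso_segs   = (payload + gso_size - 1) / gso_size;  /* 5 */
        unsigned int bytecount  = skb_len + (gso_segs - 1) * hdr_len;   /* 7330 */
        unsigned int cd_tso_len = skb_len - hdr_len;                    /* 7000 */

        printf("segs=%u bytecount=%u cd_tso_len=%u\n",
               gso_segs, bytecount, cd_tso_len);
        return 0;
}
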
3209 * i40e_tsyn - set up the tsyn context descriptor
3222 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))) in i40e_tsyn()
3232 pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_tsyn()
3233 if (!(pf->flags & I40E_FLAG_PTP)) in i40e_tsyn()
3236 if (pf->ptp_tx && in i40e_tsyn()
3237 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, pf->state)) { in i40e_tsyn()
3238 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in i40e_tsyn()
3239 pf->ptp_tx_start = jiffies; in i40e_tsyn()
3240 pf->ptp_tx_skb = skb_get(skb); in i40e_tsyn()
3242 pf->tx_hwtstamp_skipped++; in i40e_tsyn()
3253 * i40e_tx_enable_csum - Enable Tx checksum offloads
3282 if (skb->ip_summed != CHECKSUM_PARTIAL) in i40e_tx_enable_csum()
3298 if (ip.v4->version == 4) in i40e_tx_enable_csum()
3304 offset = ((ip.hdr - skb->data) / 2) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT; in i40e_tx_enable_csum()
3306 if (skb->encapsulation) { in i40e_tx_enable_csum()
3314 l4_proto = ip.v4->protocol; in i40e_tx_enable_csum()
3321 l4_proto = ip.v6->nexthdr; in i40e_tx_enable_csum()
3322 ret = ipv6_skip_exthdr(skb, exthdr - skb->data, in i40e_tx_enable_csum()
3325 return -1; in i40e_tx_enable_csum()
3345 return -1; in i40e_tx_enable_csum()
3352 tunnel |= ((l4.hdr - ip.hdr) / 4) << in i40e_tx_enable_csum()
3359 tunnel |= ((ip.hdr - l4.hdr) / 2) << in i40e_tx_enable_csum()
3364 !(skb_shinfo(skb)->gso_type & SKB_GSO_PARTIAL) && in i40e_tx_enable_csum()
3365 (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM)) in i40e_tx_enable_csum()
3377 if (ip.v4->version == 4) in i40e_tx_enable_csum()
3379 if (ip.v6->version == 6) in i40e_tx_enable_csum()
3385 l4_proto = ip.v4->protocol; in i40e_tx_enable_csum()
3396 l4_proto = ip.v6->nexthdr; in i40e_tx_enable_csum()
3398 ipv6_skip_exthdr(skb, exthdr - skb->data, in i40e_tx_enable_csum()
3403 offset |= ((l4.hdr - ip.hdr) / 4) << I40E_TX_DESC_LENGTH_IPLEN_SHIFT; in i40e_tx_enable_csum()
3410 offset |= l4.tcp->doff << I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT; in i40e_tx_enable_csum()
3426 return -1; in i40e_tx_enable_csum()
3438 * i40e_create_tx_ctx - Build the Tx context descriptor
3441 * @cd_tunneling: Quad Word 0 - bits 0-31
3442 * @cd_l2tag2: Quad Word 0 - bits 32-63
3449 int i = tx_ring->next_to_use; in i40e_create_tx_ctx()
3459 tx_ring->next_to_use = (i < tx_ring->count) ? i : 0; in i40e_create_tx_ctx()
3462 context_desc->tunneling_params = cpu_to_le32(cd_tunneling); in i40e_create_tx_ctx()
3463 context_desc->l2tag2 = cpu_to_le16(cd_l2tag2); in i40e_create_tx_ctx()
3464 context_desc->rsvd = cpu_to_le16(0); in i40e_create_tx_ctx()
3465 context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); in i40e_create_tx_ctx()
3469 * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
3473 * Returns -EBUSY if a stop is needed, else 0
3477 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3481 ++tx_ring->tx_stats.tx_stopped; in __i40e_maybe_stop_tx()
3485 return -EBUSY; in __i40e_maybe_stop_tx()
3487 /* A reprieve! - use start_queue because it doesn't call schedule */ in __i40e_maybe_stop_tx()
3488 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); in __i40e_maybe_stop_tx()
3489 ++tx_ring->tx_stats.restart_queue; in __i40e_maybe_stop_tx()
3494 * __i40e_chk_linearize - Check if there are more than 8 buffers per packet
3512 nr_frags = skb_shinfo(skb)->nr_frags; in __i40e_chk_linearize()
3513 if (nr_frags < (I40E_MAX_BUFFER_TXD - 1)) in __i40e_chk_linearize()
3519 nr_frags -= I40E_MAX_BUFFER_TXD - 2; in __i40e_chk_linearize()
3520 frag = &skb_shinfo(skb)->frags[0]; in __i40e_chk_linearize()
3528 sum = 1 - skb_shinfo(skb)->gso_size; in __i40e_chk_linearize()
3540 for (stale = &skb_shinfo(skb)->frags[0];; stale++) { in __i40e_chk_linearize()
3552 int align_pad = -(skb_frag_off(stale)) & in __i40e_chk_linearize()
3553 (I40E_MAX_READ_REQ_SIZE - 1); in __i40e_chk_linearize()
3555 sum -= align_pad; in __i40e_chk_linearize()
3556 stale_size -= align_pad; in __i40e_chk_linearize()
3559 sum -= I40E_MAX_DATA_PER_TXD_ALIGNED; in __i40e_chk_linearize()
3560 stale_size -= I40E_MAX_DATA_PER_TXD_ALIGNED; in __i40e_chk_linearize()
3568 if (!nr_frags--) in __i40e_chk_linearize()
3571 sum -= stale_size; in __i40e_chk_linearize()
3578 * i40e_tx_map - Build the Tx descriptor
3587 * Returns 0 on success, -1 on failure to DMA
3593 unsigned int data_len = skb->data_len; in i40e_tx_map()
3598 u16 i = tx_ring->next_to_use; in i40e_tx_map()
3609 first->tx_flags = tx_flags; in i40e_tx_map()
3611 dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); in i40e_tx_map()
3616 for (frag = &skb_shinfo(skb)->frags[0];; frag++) { in i40e_tx_map()
3619 if (dma_mapping_error(tx_ring->dev, dma)) in i40e_tx_map()
3627 max_data += -dma & (I40E_MAX_READ_REQ_SIZE - 1); in i40e_tx_map()
3628 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_tx_map()
3631 tx_desc->cmd_type_offset_bsz = in i40e_tx_map()
3639 if (i == tx_ring->count) { in i40e_tx_map()
3645 size -= max_data; in i40e_tx_map()
3648 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_tx_map()
3654 tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, in i40e_tx_map()
3661 if (i == tx_ring->count) { in i40e_tx_map()
3667 data_len -= size; in i40e_tx_map()
3669 dma = skb_frag_dma_map(tx_ring->dev, frag, 0, size, in i40e_tx_map()
3672 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3675 netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount); in i40e_tx_map()
3678 if (i == tx_ring->count) in i40e_tx_map()
3681 tx_ring->next_to_use = i; in i40e_tx_map()
3685 /* write last descriptor with EOP bit */ in i40e_tx_map()
3689 * below. This is safe since we don't re-use desc_count afterwards. in i40e_tx_map()
3691 desc_count |= ++tx_ring->packet_stride; in i40e_tx_map()
3696 tx_ring->packet_stride = 0; in i40e_tx_map()
3699 tx_desc->cmd_type_offset_bsz = in i40e_tx_map()
3713 first->next_to_watch = tx_desc; in i40e_tx_map()
3717 writel(i, tx_ring->tail); in i40e_tx_map()
3723 dev_info(tx_ring->dev, "TX DMA map failed\n"); in i40e_tx_map()
3727 tx_bi = &tx_ring->tx_bi[i]; in i40e_tx_map()
3732 i = tx_ring->count; in i40e_tx_map()
3733 i--; in i40e_tx_map()
3736 tx_ring->next_to_use = i; in i40e_tx_map()
3738 return -1; in i40e_tx_map()
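
/*
 * Standalone sketch (not driver code) of how i40e_tx_map() above
 * slices one large buffer across several descriptors: each descriptor
 * may carry at most max_data bytes, and the first slice is stretched
 * so that the following slices start on a read-request-size boundary.
 * The limits below assume the usual values (16383-byte descriptor
 * limit, 4 KiB read request size); the authoritative constants live
 * in i40e_txrx.h.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_DATA_PER_TXD        (16 * 1024 - 1)
#define MAX_READ_REQ_SIZE       4096
#define MAX_DATA_PER_TXD_ALIGNED (MAX_DATA_PER_TXD & ~(MAX_READ_REQ_SIZE - 1))

int main(void)
{
        uint64_t dma = 0x100000100ULL;  /* example DMA address of the buffer */
        uint32_t size = 40000;          /* example buffer length */
        uint32_t max_data = MAX_DATA_PER_TXD_ALIGNED;

        /* align the end of the first slice to the read request size */
        max_data += (uint32_t)(-dma & (MAX_READ_REQ_SIZE - 1));

        while (size > MAX_DATA_PER_TXD) {
                printf("desc: addr=0x%llx len=%u\n",
                       (unsigned long long)dma, max_data);
                dma += max_data;
                size -= max_data;
                max_data = MAX_DATA_PER_TXD_ALIGNED;
        }
        printf("desc: addr=0x%llx len=%u\n", (unsigned long long)dma, size);
        return 0;
}
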
3748 if (skb->sk && skb->sk->sk_hash) in i40e_swdcb_skb_tx_hash()
3749 hash = skb->sk->sk_hash; in i40e_swdcb_skb_tx_hash()
3751 hash = (__force u16)skb->protocol ^ skb->hash; in i40e_swdcb_skb_tx_hash()
3763 struct i40e_vsi *vsi = np->vsi; in i40e_lan_select_queue()
3772 if (vsi->tc_config.numtc == 1 || in i40e_lan_select_queue()
3773 i40e_is_tc_mqprio_enabled(vsi->back)) in i40e_lan_select_queue()
3776 prio = skb->priority; in i40e_lan_select_queue()
3777 hw = &vsi->back->hw; in i40e_lan_select_queue()
3778 tclass = hw->local_dcbx_config.etscfg.prioritytable[prio]; in i40e_lan_select_queue()
3780 if (unlikely(!(vsi->tc_config.enabled_tc & BIT(tclass)))) in i40e_lan_select_queue()
3784 qcount = vsi->tc_config.tc_info[tclass].qcount; in i40e_lan_select_queue()
3787 qoffset = vsi->tc_config.tc_info[tclass].qoffset; in i40e_lan_select_queue()
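
/*
 * Standalone sketch (not driver code) of the software-DCB queue pick
 * shown above: the skb priority selects a traffic class through the
 * DCB priority table, and a flow hash is then scaled into that
 * class's queue range [qoffset, qoffset + qcount). The table values
 * are made up, and the scaling helper only mirrors the common
 * reciprocal-scale idiom; the driver's own hashing in
 * i40e_swdcb_skb_tx_hash() (partially shown above) differs in detail.
 */
#include <stdio.h>
#include <stdint.h>

static unsigned int scale_hash(uint32_t hash, uint16_t qcount)
{
        /* maps a 32-bit hash onto [0, qcount) without a modulo */
        return (unsigned int)(((uint64_t)hash * qcount) >> 32);
}

int main(void)
{
        /* hypothetical DCB config: priorities 0-3 -> TC0, 4-7 -> TC1 */
        uint8_t prioritytable[8] = { 0, 0, 0, 0, 1, 1, 1, 1 };
        uint16_t qoffset[2] = { 0, 8 };         /* TC0: queues 0-7, TC1: queues 8-11 */
        uint16_t qcount[2]  = { 8, 4 };

        unsigned int skb_priority = 5;
        uint32_t flow_hash = 0x9e3779b9;        /* stand-in for sk_hash */

        unsigned int tclass = prioritytable[skb_priority];
        unsigned int queue = qoffset[tclass] + scale_hash(flow_hash, qcount[tclass]);

        printf("priority %u -> TC%u -> queue %u\n", skb_priority, tclass, queue);
        return 0;
}
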
3792 * i40e_xmit_xdp_ring - transmits an XDP buffer to an XDP Tx ring
3800 u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; in i40e_xmit_xdp_ring()
3801 u16 i = 0, index = xdp_ring->next_to_use; in i40e_xmit_xdp_ring()
3802 struct i40e_tx_buffer *tx_head = &xdp_ring->tx_bi[index]; in i40e_xmit_xdp_ring()
3805 void *data = xdpf->data; in i40e_xmit_xdp_ring()
3806 u32 size = xdpf->len; in i40e_xmit_xdp_ring()
3809 xdp_ring->tx_stats.tx_busy++; in i40e_xmit_xdp_ring()
3813 tx_head->bytecount = xdp_get_frame_len(xdpf); in i40e_xmit_xdp_ring()
3814 tx_head->gso_segs = 1; in i40e_xmit_xdp_ring()
3815 tx_head->xdpf = xdpf; in i40e_xmit_xdp_ring()
3820 dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE); in i40e_xmit_xdp_ring()
3821 if (dma_mapping_error(xdp_ring->dev, dma)) in i40e_xmit_xdp_ring()
3828 tx_desc->buffer_addr = cpu_to_le64(dma); in i40e_xmit_xdp_ring()
3829 tx_desc->cmd_type_offset_bsz = in i40e_xmit_xdp_ring()
3832 if (++index == xdp_ring->count) in i40e_xmit_xdp_ring()
3838 tx_bi = &xdp_ring->tx_bi[index]; in i40e_xmit_xdp_ring()
3841 data = skb_frag_address(&sinfo->frags[i]); in i40e_xmit_xdp_ring()
3842 size = skb_frag_size(&sinfo->frags[i]); in i40e_xmit_xdp_ring()
3846 tx_desc->cmd_type_offset_bsz |= in i40e_xmit_xdp_ring()
3854 xdp_ring->xdp_tx_active++; in i40e_xmit_xdp_ring()
3856 tx_head->next_to_watch = tx_desc; in i40e_xmit_xdp_ring()
3857 xdp_ring->next_to_use = index; in i40e_xmit_xdp_ring()
3863 tx_bi = &xdp_ring->tx_bi[index]; in i40e_xmit_xdp_ring()
3865 dma_unmap_page(xdp_ring->dev, in i40e_xmit_xdp_ring()
3874 index += xdp_ring->count; in i40e_xmit_xdp_ring()
3875 index--; in i40e_xmit_xdp_ring()
3882 * i40e_xmit_frame_ring - Sends buffer on Tx ring
3902 prefetch(skb->data); in i40e_xmit_frame_ring()
3912 count = i40e_txd_use_count(skb->len); in i40e_xmit_frame_ring()
3913 tx_ring->tx_stats.tx_linearize++; in i40e_xmit_frame_ring()
3923 tx_ring->tx_stats.tx_busy++; in i40e_xmit_frame_ring()
3928 first = &tx_ring->tx_bi[tx_ring->next_to_use]; in i40e_xmit_frame_ring()
3929 first->skb = skb; in i40e_xmit_frame_ring()
3930 first->bytecount = skb->len; in i40e_xmit_frame_ring()
3931 first->gso_segs = 1; in i40e_xmit_frame_ring()
3974 i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring); in i40e_xmit_frame_ring()
3975 dev_kfree_skb_any(first->skb); in i40e_xmit_frame_ring()
3976 first->skb = NULL; in i40e_xmit_frame_ring()
3979 struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev); in i40e_xmit_frame_ring()
3981 dev_kfree_skb_any(pf->ptp_tx_skb); in i40e_xmit_frame_ring()
3982 pf->ptp_tx_skb = NULL; in i40e_xmit_frame_ring()
3983 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state); in i40e_xmit_frame_ring()
3990 * i40e_lan_xmit_frame - Selects the correct VSI and Tx queue to send buffer
3999 struct i40e_vsi *vsi = np->vsi; in i40e_lan_xmit_frame()
4000 struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; in i40e_lan_xmit_frame()
4012 * i40e_xdp_xmit - Implements ndo_xdp_xmit
4021 * For error cases, a negative errno code is returned and no-frames
4029 struct i40e_vsi *vsi = np->vsi; in i40e_xdp_xmit()
4030 struct i40e_pf *pf = vsi->back; in i40e_xdp_xmit()
4035 if (test_bit(__I40E_VSI_DOWN, vsi->state)) in i40e_xdp_xmit()
4036 return -ENETDOWN; in i40e_xdp_xmit()
4038 if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs || in i40e_xdp_xmit()
4039 test_bit(__I40E_CONFIG_BUSY, pf->state)) in i40e_xdp_xmit()
4040 return -ENXIO; in i40e_xdp_xmit()
4043 return -EINVAL; in i40e_xdp_xmit()
4045 xdp_ring = vsi->xdp_rings[queue_index]; in i40e_xdp_xmit()