Lines Matching +full:data +full:- +full:mapping
(Each entry below shows the source line number, the matched line, and the enclosing function in the qede driver's fast-path code.)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
30 dma_addr_t mapping; in qede_alloc_rx_buffer() local
31 struct page *data; in qede_alloc_rx_buffer() local
33 /* In case lazy-allocation is allowed, postpone allocation until the in qede_alloc_rx_buffer()
37 if (allow_lazy && likely(rxq->filled_buffers > 12)) { in qede_alloc_rx_buffer()
38 rxq->filled_buffers--; in qede_alloc_rx_buffer()
42 data = alloc_pages(GFP_ATOMIC, 0); in qede_alloc_rx_buffer()
43 if (unlikely(!data)) in qede_alloc_rx_buffer()
44 return -ENOMEM; in qede_alloc_rx_buffer()
47 * for multiple RX buffer segment size mapping. in qede_alloc_rx_buffer()
49 mapping = dma_map_page(rxq->dev, data, 0, in qede_alloc_rx_buffer()
50 PAGE_SIZE, rxq->data_direction); in qede_alloc_rx_buffer()
51 if (unlikely(dma_mapping_error(rxq->dev, mapping))) { in qede_alloc_rx_buffer()
52 __free_page(data); in qede_alloc_rx_buffer()
53 return -ENOMEM; in qede_alloc_rx_buffer()
56 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; in qede_alloc_rx_buffer()
57 sw_rx_data->page_offset = 0; in qede_alloc_rx_buffer()
58 sw_rx_data->data = data; in qede_alloc_rx_buffer()
59 sw_rx_data->mapping = mapping; in qede_alloc_rx_buffer()
62 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); in qede_alloc_rx_buffer()
64 rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); in qede_alloc_rx_buffer()
65 rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) + in qede_alloc_rx_buffer()
66 rxq->rx_headroom); in qede_alloc_rx_buffer()
68 rxq->sw_rx_prod++; in qede_alloc_rx_buffer()
69 rxq->filled_buffers++; in qede_alloc_rx_buffer()
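
The fragments above are the RX buffer refill path: a whole page is allocated and DMA-mapped once so it can later be sliced into several rx_buf_seg_size segments, and allocation is skipped while enough filled buffers remain (the lazy-allocation comment). A minimal sketch of that allocate-then-map-with-error-check pattern, reusing the field names shown above (struct qede_rx_queue comes from the driver's headers; the BD-posting step is omitted):

static int example_alloc_rx_page(struct qede_rx_queue *rxq)
{
	struct page *data;
	dma_addr_t mapping;

	/* One order-0 page; it will be carved into rx_buf_seg_size slices */
	data = alloc_pages(GFP_ATOMIC, 0);
	if (unlikely(!data))
		return -ENOMEM;

	/* Map the whole page once so every slice shares the same mapping */
	mapping = dma_map_page(rxq->dev, data, 0, PAGE_SIZE,
			       rxq->data_direction);
	if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
		__free_page(data);
		return -ENOMEM;
	}

	/* ... record data/mapping in the sw_rx_ring and post an RX BD ... */
	return 0;
}
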
74 /* Unmap the data and free skb */
77 u16 idx = txq->sw_tx_cons; in qede_free_tx_pkt()
78 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_tx_pkt()
83 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; in qede_free_tx_pkt()
88 "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n", in qede_free_tx_pkt()
89 idx, txq->sw_tx_cons, txq->sw_tx_prod); in qede_free_tx_pkt()
90 return -1; in qede_free_tx_pkt()
93 *len = skb->len; in qede_free_tx_pkt()
95 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
99 nbds = first_bd->data.nbds; in qede_free_tx_pkt()
103 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
107 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), in qede_free_tx_pkt()
110 /* Unmap the data of the skb frags */ in qede_free_tx_pkt()
111 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { in qede_free_tx_pkt()
113 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
114 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), in qede_free_tx_pkt()
119 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
123 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_tx_pkt()
124 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_tx_pkt()
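
qede_free_tx_pkt() above walks the completed BDs: the linear part of the skb was mapped with dma_map_single(), each frag with skb_frag_dma_map(), so completion releases them with dma_unmap_single() and dma_unmap_page() respectively before freeing the skb. A rough sketch of that unmap order; the per-frag DMA addresses are passed in as a plain array purely for illustration (the driver reads them back from the BDs via BD_UNMAP_ADDR()):

static void example_unmap_tx_skb(struct device *dev, struct sk_buff *skb,
				 dma_addr_t head_mapping,
				 const dma_addr_t *frag_mapping)
{
	int i;

	/* Linear data: mapped with dma_map_single() at xmit time */
	dma_unmap_single(dev, head_mapping, skb_headlen(skb), DMA_TO_DEVICE);

	/* Frags: mapped with skb_frag_dma_map(), i.e. page mappings */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		dma_unmap_page(dev, frag_mapping[i],
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       DMA_TO_DEVICE);

	dev_kfree_skb_any(skb);
}
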
129 /* Unmap the data and free skb when mapping failed during start_xmit */
134 u16 idx = txq->sw_tx_prod; in qede_free_failed_tx_pkt()
135 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_failed_tx_pkt()
140 qed_chain_set_prod(&txq->tx_pbl, in qede_free_failed_tx_pkt()
141 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); in qede_free_failed_tx_pkt()
143 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
147 qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
149 nbd--; in qede_free_failed_tx_pkt()
152 dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), in qede_free_failed_tx_pkt()
155 /* Unmap the data of the skb frags */ in qede_free_failed_tx_pkt()
158 qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
159 if (tx_data_bd->nbytes) in qede_free_failed_tx_pkt()
160 dma_unmap_page(txq->dev, in qede_free_failed_tx_pkt()
166 qed_chain_set_prod(&txq->tx_pbl, in qede_free_failed_tx_pkt()
167 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); in qede_free_failed_tx_pkt()
171 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_failed_tx_pkt()
172 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_failed_tx_pkt()
180 if (skb->ip_summed != CHECKSUM_PARTIAL) in qede_xmit_type()
185 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) in qede_xmit_type()
188 if (skb->encapsulation) { in qede_xmit_type()
191 unsigned short gso_type = skb_shinfo(skb)->gso_type; in qede_xmit_type()
217 bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & in qede_set_params_for_ipv6_ext()
225 l4_proto = ipv6_hdr(skb)->nexthdr; in qede_set_params_for_ipv6_ext()
227 l4_proto = ip_hdr(skb)->protocol; in qede_set_params_for_ipv6_ext()
233 third_bd->data.bitfields |= in qede_set_params_for_ipv6_ext()
238 second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); in qede_set_params_for_ipv6_ext()
239 second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); in qede_set_params_for_ipv6_ext()
245 dma_addr_t mapping; in map_frag_to_bd() local
247 /* Map skb non-linear frag data for DMA */ in map_frag_to_bd()
248 mapping = skb_frag_dma_map(txq->dev, frag, 0, in map_frag_to_bd()
250 if (unlikely(dma_mapping_error(txq->dev, mapping))) in map_frag_to_bd()
251 return -ENOMEM; in map_frag_to_bd()
253 /* Setup the data pointer of the frag data */ in map_frag_to_bd()
254 BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); in map_frag_to_bd()
263 inner_tcp_hdrlen(skb) - skb->data); in qede_get_skb_hlen()
266 tcp_hdrlen(skb) - skb->data); in qede_get_skb_hlen()
273 int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; in qede_pkt_req_lin()
282 allowed_frags--; in qede_pkt_req_lin()
285 return (skb_shinfo(skb)->nr_frags > allowed_frags); in qede_pkt_req_lin()
291 /* wmb makes sure that the BDs data is updated before updating the in qede_update_tx_producer()
292 * producer, otherwise FW may read old data from the BDs. in qede_update_tx_producer()
296 writel(txq->tx_db.raw, txq->doorbell_addr); in qede_update_tx_producer()
299 * CPU may write to the same doorbell address and data may be lost in qede_update_tx_producer()
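
The two comments above explain why the doorbell write is fenced on both sides: the first wmb() makes the BD writes visible before the producer update, and a barrier after the MMIO write keeps a doorbell written by another CPU from being lost through the write-combined BAR. A sketch of that pattern, using the txq fields exactly as they appear above:

static void example_ring_tx_doorbell(struct qede_tx_queue *txq)
{
	/* BDs must be visible in host memory before the producer update,
	 * otherwise the FW may read stale descriptor data.
	 */
	wmb();

	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* Flush the write-combined doorbell write; another CPU may write
	 * the same doorbell address next and the update could be lost.
	 */
	wmb();
}
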
312 if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >= in qede_xdp_xmit()
313 txq->num_tx_buffers)) { in qede_xdp_xmit()
314 txq->stopped_cnt++; in qede_xdp_xmit()
315 return -ENOMEM; in qede_xdp_xmit()
318 bd = qed_chain_produce(&txq->tx_pbl); in qede_xdp_xmit()
319 bd->data.nbds = 1; in qede_xdp_xmit()
320 bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT); in qede_xdp_xmit()
325 bd->data.bitfields = cpu_to_le16(val); in qede_xdp_xmit()
330 xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod; in qede_xdp_xmit()
331 xdp->mapping = dma; in qede_xdp_xmit()
332 xdp->page = page; in qede_xdp_xmit()
333 xdp->xdpf = xdpf; in qede_xdp_xmit()
335 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; in qede_xdp_xmit()
344 struct device *dmadev = &edev->pdev->dev; in qede_xdp_transmit()
347 dma_addr_t mapping; in qede_xdp_transmit() local
352 return -EINVAL; in qede_xdp_transmit()
355 return -ENETDOWN; in qede_xdp_transmit()
357 i = smp_processor_id() % edev->total_xdp_queues; in qede_xdp_transmit()
358 xdp_tx = edev->fp_array[i].xdp_tx; in qede_xdp_transmit()
360 spin_lock(&xdp_tx->xdp_tx_lock); in qede_xdp_transmit()
365 mapping = dma_map_single(dmadev, xdpf->data, xdpf->len, in qede_xdp_transmit()
367 if (unlikely(dma_mapping_error(dmadev, mapping))) in qede_xdp_transmit()
370 if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len, in qede_xdp_transmit()
377 xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl); in qede_xdp_transmit()
379 xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); in qede_xdp_transmit()
383 spin_unlock(&xdp_tx->xdp_tx_lock); in qede_xdp_transmit()
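
qede_xdp_transmit() above implements .ndo_xdp_xmit: it picks an XDP TX queue by CPU, serializes on that queue's lock, DMA-maps each frame, enqueues it, and rings the doorbell once after the loop. A simplified sketch of the per-frame mapping loop under an externally supplied lock; the enqueue and doorbell steps are driver-specific and left as comments:

static int example_xdp_xmit_frames(struct device *dmadev, spinlock_t *lock,
				   struct xdp_frame **frames, int n_frames)
{
	int i, nxmit = 0;

	spin_lock(lock);

	for (i = 0; i < n_frames; i++) {
		struct xdp_frame *xdpf = frames[i];
		dma_addr_t mapping;

		mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
					 DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dmadev, mapping)))
			break;

		/* ... post (mapping, xdpf) on the XDP TX ring; on failure,
		 * unmap and stop ...
		 */
		nxmit++;
	}

	/* ... update the ring producer and write the doorbell once ... */

	spin_unlock(lock);

	return nxmit;
}
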
394 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_txq_has_work()
395 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) in qede_txq_has_work()
398 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); in qede_txq_has_work()
403 struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp; in qede_xdp_tx_int()
404 struct device *dev = &edev->pdev->dev; in qede_xdp_tx_int()
408 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_xdp_tx_int()
411 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { in qede_xdp_tx_int()
412 xdp_info = xdp_arr + txq->sw_tx_cons; in qede_xdp_tx_int()
413 xdpf = xdp_info->xdpf; in qede_xdp_tx_int()
416 dma_unmap_single(dev, xdp_info->mapping, xdpf->len, in qede_xdp_tx_int()
420 xdp_info->xdpf = NULL; in qede_xdp_tx_int()
422 dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE, in qede_xdp_tx_int()
424 __free_page(xdp_info->page); in qede_xdp_tx_int()
427 qed_chain_consume(&txq->tx_pbl); in qede_xdp_tx_int()
428 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; in qede_xdp_tx_int()
429 txq->xmit_pkts++; in qede_xdp_tx_int()
440 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); in qede_tx_int()
442 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_tx_int()
445 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { in qede_tx_int()
452 qed_chain_get_cons_idx(&txq->tx_pbl)); in qede_tx_int()
458 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; in qede_tx_int()
459 txq->xmit_pkts++; in qede_tx_int()
481 * stops the queue->sees fresh tx_bd_cons->releases the queue-> in qede_tx_int()
482 * sends some packets consuming the whole queue again-> in qede_tx_int()
489 (edev->state == QEDE_STATE_OPEN) && in qede_tx_int()
490 (qed_chain_get_elem_left(&txq->tx_pbl) in qede_tx_int()
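
The race spelled out in the comment above (stop the queue, see a fresh consumer index, wake the queue, have it refilled completely, stop again) is why the wake is done under the TX queue lock with the stopped/space conditions re-checked. A sketch of that recheck-under-lock pattern; the free-BD count is passed in rather than read from the chain:

static void example_maybe_wake_txq(struct netdev_queue *netdev_txq,
				   int bds_left)
{
	if (likely(!netif_tx_queue_stopped(netdev_txq)))
		return;

	__netif_tx_lock(netdev_txq, smp_processor_id());

	/* Re-check under the lock so a concurrent xmit path cannot refill
	 * the ring between the test and the wake.
	 */
	if (netif_tx_queue_stopped(netdev_txq) &&
	    bds_left >= (MAX_SKB_FRAGS + 1))
		netif_tx_wake_queue(netdev_txq);

	__netif_tx_unlock(netdev_txq);
}
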
510 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); in qede_has_rx_work()
511 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_has_rx_work()
518 qed_chain_consume(&rxq->rx_bd_ring); in qede_rx_bd_ring_consume()
519 rxq->sw_rx_cons++; in qede_rx_bd_ring_consume()
528 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); in qede_reuse_page()
532 curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; in qede_reuse_page()
535 new_mapping = curr_prod->mapping + curr_prod->page_offset; in qede_reuse_page()
537 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); in qede_reuse_page()
538 rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) + in qede_reuse_page()
539 rxq->rx_headroom); in qede_reuse_page()
541 rxq->sw_rx_prod++; in qede_reuse_page()
542 curr_cons->data = NULL; in qede_reuse_page()
552 for (; count > 0; count--) { in qede_recycle_rx_bd_ring()
553 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; in qede_recycle_rx_bd_ring()
563 curr_cons->page_offset += rxq->rx_buf_seg_size; in qede_realloc_rx_buffer()
565 if (curr_cons->page_offset == PAGE_SIZE) { in qede_realloc_rx_buffer()
570 curr_cons->page_offset -= rxq->rx_buf_seg_size; in qede_realloc_rx_buffer()
572 return -ENOMEM; in qede_realloc_rx_buffer()
575 dma_unmap_page(rxq->dev, curr_cons->mapping, in qede_realloc_rx_buffer()
576 PAGE_SIZE, rxq->data_direction); in qede_realloc_rx_buffer()
582 page_ref_inc(curr_cons->data); in qede_realloc_rx_buffer()
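
These fragments trace the page-segment reuse logic: the consumer's page_offset advances by one rx_buf_seg_size slice, and only when the whole page has been handed out is it unmapped and replaced; otherwise the same page is re-posted with an extra reference so the stack's eventual put_page() cannot free it while the driver still owns slices. Roughly how the matched lines fit together; the calls to qede_alloc_rx_buffer() and qede_reuse_page() are inferred from the surrounding fragments:

static int example_realloc_rx_buffer(struct qede_rx_queue *rxq,
				     struct sw_rx_data *curr_cons)
{
	curr_cons->page_offset += rxq->rx_buf_seg_size;

	if (curr_cons->page_offset == PAGE_SIZE) {
		/* Page exhausted: try to refill; on failure keep reusing the
		 * current buffer so the ring never loses an entry.
		 */
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			curr_cons->page_offset -= rxq->rx_buf_seg_size;
			return -ENOMEM;
		}

		dma_unmap_page(rxq->dev, curr_cons->mapping,
			       PAGE_SIZE, rxq->data_direction);
	} else {
		/* More slices left: take a page reference and re-post the
		 * same mapping at the new offset.
		 */
		page_ref_inc(curr_cons->data);
		qede_reuse_page(rxq, curr_cons);
	}

	return 0;
}
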
591 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); in qede_update_rx_prod()
592 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); in qede_update_rx_prod()
599 /* Make sure that the BD and SGE data is updated before updating the in qede_update_rx_prod()
605 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), in qede_update_rx_prod()
630 skb->ip_summed = CHECKSUM_UNNECESSARY; in qede_set_skb_csum()
633 skb->csum_level = 1; in qede_set_skb_csum()
634 skb->encapsulation = 1; in qede_set_skb_csum()
646 napi_gro_receive(&fp->napi, skb); in qede_skb_receive()
653 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); in qede_set_gro_params()
657 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in qede_set_gro_params()
659 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in qede_set_gro_params()
661 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - in qede_set_gro_params()
662 cqe->header_len; in qede_set_gro_params()
669 struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & in qede_fill_frag_skb()
671 struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; in qede_fill_frag_skb()
672 struct sk_buff *skb = tpa_info->skb; in qede_fill_frag_skb()
674 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) in qede_fill_frag_skb()
678 skb_fill_page_desc(skb, tpa_info->frag_id++, in qede_fill_frag_skb()
679 current_bd->data, in qede_fill_frag_skb()
680 current_bd->page_offset + rxq->rx_headroom, in qede_fill_frag_skb()
687 page_ref_inc(current_bd->data); in qede_fill_frag_skb()
693 skb->data_len += len_on_bd; in qede_fill_frag_skb()
694 skb->truesize += rxq->rx_buf_seg_size; in qede_fill_frag_skb()
695 skb->len += len_on_bd; in qede_fill_frag_skb()
700 tpa_info->state = QEDE_AGG_STATE_ERROR; in qede_fill_frag_skb()
703 return -ENOMEM; in qede_fill_frag_skb()
747 buf = page_address(bd->data) + bd->page_offset; in qede_build_skb()
748 skb = build_skb(buf, rxq->rx_buf_seg_size); in qede_build_skb()
765 bd->page_offset += rxq->rx_buf_seg_size; in qede_tpa_rx_build_skb()
767 if (bd->page_offset == PAGE_SIZE) { in qede_tpa_rx_build_skb()
771 bd->page_offset -= rxq->rx_buf_seg_size; in qede_tpa_rx_build_skb()
772 page_ref_inc(bd->data); in qede_tpa_rx_build_skb()
777 page_ref_inc(bd->data); in qede_tpa_rx_build_skb()
795 * data and benefit in reusing the page segment instead of in qede_rx_build_skb()
796 * un-mapping it. in qede_rx_build_skb()
798 if ((len + pad <= edev->rx_copybreak)) { in qede_rx_build_skb()
799 unsigned int offset = bd->page_offset + pad; in qede_rx_build_skb()
801 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); in qede_rx_build_skb()
806 skb_put_data(skb, page_address(bd->data) + offset, len); in qede_rx_build_skb()
818 page_ref_inc(bd->data); in qede_rx_build_skb()
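
The copybreak comment above captures the trade-off: for frames up to rx_copybreak it is cheaper to allocate a small skb and memcpy the payload, then recycle the still-mapped RX page, than to unmap the page and build the skb around it. A small illustrative helper for that copy path (the caller is assumed to re-post the page and to have done any needed DMA sync beforehand):

static struct sk_buff *example_copybreak_skb(struct net_device *ndev,
					     struct page *page,
					     unsigned int offset,
					     unsigned int len)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb(ndev, len);
	if (unlikely(!skb))
		return NULL;

	/* Copy the small frame out of the RX page; the page itself stays
	 * mapped and goes back onto the RX ring.
	 */
	skb_put_data(skb, page_address(page) + offset, len);

	return skb;
}
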
833 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_start()
837 sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; in qede_tpa_start()
838 pad = cqe->placement_offset + rxq->rx_headroom; in qede_tpa_start()
840 tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons, in qede_tpa_start()
841 le16_to_cpu(cqe->len_on_first_bd), in qede_tpa_start()
843 tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset; in qede_tpa_start()
844 tpa_info->buffer.mapping = sw_rx_data_cons->mapping; in qede_tpa_start()
846 if (unlikely(!tpa_info->skb)) { in qede_tpa_start()
850 * this might be used by FW still, it will be re-used in qede_tpa_start()
853 tpa_info->tpa_start_fail = true; in qede_tpa_start()
855 tpa_info->state = QEDE_AGG_STATE_ERROR; in qede_tpa_start()
859 tpa_info->frag_id = 0; in qede_tpa_start()
860 tpa_info->state = QEDE_AGG_STATE_START; in qede_tpa_start()
862 if ((le16_to_cpu(cqe->pars_flags.flags) >> in qede_tpa_start()
865 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); in qede_tpa_start()
867 tpa_info->vlan_tag = 0; in qede_tpa_start()
869 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); in qede_tpa_start()
872 qede_set_gro_params(edev, tpa_info->skb, cqe); in qede_tpa_start()
875 if (likely(cqe->bw_ext_bd_len_list[0])) in qede_tpa_start()
876 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_start()
877 le16_to_cpu(cqe->bw_ext_bd_len_list[0])); in qede_tpa_start()
879 if (unlikely(cqe->bw_ext_bd_len_list[1])) { in qede_tpa_start()
881 …"Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n"); in qede_tpa_start()
882 tpa_info->state = QEDE_AGG_STATE_ERROR; in qede_tpa_start()
895 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), in qede_gro_ip_csum()
896 iph->saddr, iph->daddr, 0); in qede_gro_ip_csum()
909 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), in qede_gro_ipv6_csum()
910 &iph->saddr, &iph->daddr, 0); in qede_gro_ipv6_csum()
925 if (unlikely(!skb->data_len)) { in qede_gro_receive()
926 skb_shinfo(skb)->gso_type = 0; in qede_gro_receive()
927 skb_shinfo(skb)->gso_size = 0; in qede_gro_receive()
932 if (skb_shinfo(skb)->gso_size) { in qede_gro_receive()
935 switch (skb->protocol) { in qede_gro_receive()
945 ntohs(skb->protocol)); in qede_gro_receive()
951 skb_record_rx_queue(skb, fp->rxq->rxq_id); in qede_gro_receive()
952 qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag); in qede_gro_receive()
961 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_cont()
962 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_cont()
963 le16_to_cpu(cqe->len_list[i])); in qede_tpa_cont()
967 "Strange - TPA cont with more than a single len_list entry\n"); in qede_tpa_cont()
974 struct qede_rx_queue *rxq = fp->rxq; in qede_tpa_end()
979 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_end()
980 skb = tpa_info->skb; in qede_tpa_end()
982 if (tpa_info->buffer.page_offset == PAGE_SIZE) in qede_tpa_end()
983 dma_unmap_page(rxq->dev, tpa_info->buffer.mapping, in qede_tpa_end()
984 PAGE_SIZE, rxq->data_direction); in qede_tpa_end()
986 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_end()
987 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_end()
988 le16_to_cpu(cqe->len_list[i])); in qede_tpa_end()
991 "Strange - TPA emd with more than a single len_list entry\n"); in qede_tpa_end()
993 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) in qede_tpa_end()
997 if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) in qede_tpa_end()
999 "Strange - TPA had %02x BDs, but SKB has only %d frags\n", in qede_tpa_end()
1000 cqe->num_of_bds, tpa_info->frag_id); in qede_tpa_end()
1001 if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) in qede_tpa_end()
1003 "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", in qede_tpa_end()
1004 le16_to_cpu(cqe->total_packet_len), skb->len); in qede_tpa_end()
1007 skb->protocol = eth_type_trans(skb, edev->ndev); in qede_tpa_end()
1008 skb->ip_summed = CHECKSUM_UNNECESSARY; in qede_tpa_end()
1010 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count in qede_tpa_end()
1011 * to skb_shinfo(skb)->gso_segs in qede_tpa_end()
1013 NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); in qede_tpa_end()
1015 qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag); in qede_tpa_end()
1017 tpa_info->state = QEDE_AGG_STATE_NONE; in qede_tpa_end()
1021 tpa_info->state = QEDE_AGG_STATE_NONE; in qede_tpa_end()
1023 if (tpa_info->tpa_start_fail) { in qede_tpa_end()
1024 qede_reuse_page(rxq, &tpa_info->buffer); in qede_tpa_end()
1025 tpa_info->tpa_start_fail = false; in qede_tpa_end()
1028 dev_kfree_skb_any(tpa_info->skb); in qede_tpa_end()
1029 tpa_info->skb = NULL; in qede_tpa_end()
1065 u8 tun_pars_flg = cqe->tunnel_pars_flags.flags; in qede_pkt_is_ip_fragmented()
1088 xdp_init_buff(&xdp, rxq->rx_buf_seg_size, &rxq->xdp_rxq); in qede_rx_xdp()
1089 xdp_prepare_buff(&xdp, page_address(bd->data), *data_offset, in qede_rx_xdp()
1095 *data_offset = xdp.data - xdp.data_hard_start; in qede_rx_xdp()
1096 *len = xdp.data_end - xdp.data; in qede_rx_xdp()
1102 rxq->xdp_no_pass++; in qede_rx_xdp()
1110 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1117 if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping, in qede_rx_xdp()
1118 *data_offset, *len, bd->data, in qede_rx_xdp()
1120 dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, in qede_rx_xdp()
1121 rxq->data_direction); in qede_rx_xdp()
1122 __free_page(bd->data); in qede_rx_xdp()
1124 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1126 dma_sync_single_for_device(rxq->dev, in qede_rx_xdp()
1127 bd->mapping + *data_offset, in qede_rx_xdp()
1128 *len, rxq->data_direction); in qede_rx_xdp()
1129 fp->xdp_xmit |= QEDE_XDP_TX; in qede_rx_xdp()
1140 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1144 dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE, in qede_rx_xdp()
1145 rxq->data_direction); in qede_rx_xdp()
1147 if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog))) in qede_rx_xdp()
1150 fp->xdp_xmit |= QEDE_XDP_REDIRECT; in qede_rx_xdp()
1158 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1161 qede_recycle_rx_bd_ring(rxq, cqe->bd_num); in qede_rx_xdp()
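
The qede_rx_xdp() fragments show the usual verdict handling: XDP_PASS re-reads the (possibly adjusted) data pointers, XDP_TX keeps the page mapped and hands it to the driver's own XDP TX ring, XDP_REDIRECT unmaps it and calls xdp_do_redirect(), and anything else is traced and dropped with the buffer recycled. A skeleton of that switch; the TX/redirect/recycle bodies are driver-specific and only sketched as comments:

static u32 example_run_xdp(struct bpf_prog *prog, struct xdp_buff *xdp,
			   struct net_device *ndev)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
		/* Caller recomputes offset/length from xdp->data and
		 * xdp->data_end before building the skb.
		 */
		break;
	case XDP_TX:
		/* ... enqueue the still-mapped page on the XDP TX ring ... */
		break;
	case XDP_REDIRECT:
		/* ... unmap the page, then xdp_do_redirect(ndev, xdp, prog) ... */
		break;
	case XDP_ABORTED:
	default:
		trace_xdp_exception(ndev, prog, act);
		fallthrough;
	case XDP_DROP:
		/* ... recycle the RX buffer back onto the ring ... */
		break;
	}

	return act;
}
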
1173 u16 pkt_len = le16_to_cpu(cqe->pkt_len); in qede_rx_build_jumbo()
1178 pkt_len -= first_bd_len; in qede_rx_build_jumbo()
1181 for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { in qede_rx_build_jumbo()
1182 u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : in qede_rx_build_jumbo()
1187 "Still got %d BDs for mapping jumbo, but length became 0\n", in qede_rx_build_jumbo()
1199 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; in qede_rx_build_jumbo()
1200 bd = &rxq->sw_rx_ring[bd_cons_idx]; in qede_rx_build_jumbo()
1203 dma_unmap_page(rxq->dev, bd->mapping, in qede_rx_build_jumbo()
1206 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, bd->data, in qede_rx_build_jumbo()
1207 rxq->rx_headroom, cur_size, PAGE_SIZE); in qede_rx_build_jumbo()
1209 pkt_len -= cur_size; in qede_rx_build_jumbo()
1229 qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); in qede_rx_process_tpa_cqe()
1232 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); in qede_rx_process_tpa_cqe()
1235 return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); in qede_rx_process_tpa_cqe()
1245 struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); in qede_rx_process_cqe()
1256 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); in qede_rx_process_cqe()
1257 cqe_type = cqe->fast_path_regular.type; in qede_rx_process_cqe()
1264 edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe); in qede_rx_process_cqe()
1272 /* Get the data from the SW ring; Consume it only after it's evident in qede_rx_process_cqe()
1275 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; in qede_rx_process_cqe()
1276 bd = &rxq->sw_rx_ring[bd_cons_idx]; in qede_rx_process_cqe()
1278 fp_cqe = &cqe->fast_path_regular; in qede_rx_process_cqe()
1279 len = le16_to_cpu(fp_cqe->len_on_first_bd); in qede_rx_process_cqe()
1280 pad = fp_cqe->placement_offset + rxq->rx_headroom; in qede_rx_process_cqe()
1289 flags = cqe->fast_path_regular.pars_flags.flags; in qede_rx_process_cqe()
1295 rxq->rx_ip_frags++; in qede_rx_process_cqe()
1297 rxq->rx_hw_errors++; in qede_rx_process_cqe()
1305 rxq->rx_alloc_errors++; in qede_rx_process_cqe()
1306 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); in qede_rx_process_cqe()
1313 if (fp_cqe->bd_num > 1) { in qede_rx_process_cqe()
1324 /* The SKB contains all the data. Now prepare meta-magic */ in qede_rx_process_cqe()
1325 skb->protocol = eth_type_trans(skb, edev->ndev); in qede_rx_process_cqe()
1326 qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash); in qede_rx_process_cqe()
1328 skb_record_rx_queue(skb, rxq->rxq_id); in qede_rx_process_cqe()
1331 /* SKB is prepared - pass it to stack */ in qede_rx_process_cqe()
1332 qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); in qede_rx_process_cqe()
1339 struct qede_rx_queue *rxq = fp->rxq; in qede_rx_int()
1340 struct qede_dev *edev = fp->edev; in qede_rx_int()
1344 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); in qede_rx_int()
1345 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_rx_int()
1348 * / BD in the while-loop before reading hw_comp_cons. If the CQE is in qede_rx_int()
1357 qed_chain_recycle_consumed(&rxq->rx_comp_ring); in qede_rx_int()
1358 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_rx_int()
1362 rxq->rcv_pkts += rcv_pkts; in qede_rx_int()
1365 while (rxq->num_rx_buffers - rxq->filled_buffers) in qede_rx_int()
1377 qed_sb_update_sb_idx(fp->sb_info); in qede_poll_is_more_work()
1391 if (likely(fp->type & QEDE_FASTPATH_RX)) in qede_poll_is_more_work()
1392 if (qede_has_rx_work(fp->rxq)) in qede_poll_is_more_work()
1395 if (fp->type & QEDE_FASTPATH_XDP) in qede_poll_is_more_work()
1396 if (qede_txq_has_work(fp->xdp_tx)) in qede_poll_is_more_work()
1399 if (likely(fp->type & QEDE_FASTPATH_TX)) { in qede_poll_is_more_work()
1402 for_each_cos_in_txq(fp->edev, cos) { in qede_poll_is_more_work()
1403 if (qede_txq_has_work(&fp->txq[cos])) in qede_poll_is_more_work()
1418 struct qede_dev *edev = fp->edev; in qede_poll()
1422 fp->xdp_xmit = 0; in qede_poll()
1424 if (likely(fp->type & QEDE_FASTPATH_TX)) { in qede_poll()
1427 for_each_cos_in_txq(fp->edev, cos) { in qede_poll()
1428 if (qede_txq_has_work(&fp->txq[cos])) in qede_poll()
1429 qede_tx_int(edev, &fp->txq[cos]); in qede_poll()
1433 if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx)) in qede_poll()
1434 qede_xdp_tx_int(edev, fp->xdp_tx); in qede_poll()
1436 rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && in qede_poll()
1437 qede_has_rx_work(fp->rxq)) ? in qede_poll()
1445 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); in qede_poll()
1451 if (fp->xdp_xmit & QEDE_XDP_TX) { in qede_poll()
1452 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); in qede_poll()
1454 fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); in qede_poll()
1455 qede_update_tx_producer(fp->xdp_tx); in qede_poll()
1458 if (fp->xdp_xmit & QEDE_XDP_REDIRECT) in qede_poll()
1468 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); in qede_msix_fp_int()
1470 napi_schedule_irqoff(&fp->napi); in qede_msix_fp_int()
1486 dma_addr_t mapping; in qede_start_xmit() local
1493 /* Get tx-queue context and netdev index */ in qede_start_xmit()
1495 WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc); in qede_start_xmit()
1499 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); in qede_start_xmit()
1506 txq->tx_mem_alloc_err++; in qede_start_xmit()
1515 idx = txq->sw_tx_prod; in qede_start_xmit()
1516 txq->sw_tx_ring.skbs[idx].skb = skb; in qede_start_xmit()
1518 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1520 first_bd->data.bd_flags.bitfields = in qede_start_xmit()
1523 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) in qede_start_xmit()
1526 /* Map skb linear data for DMA and set in the first BD */ in qede_start_xmit()
1527 mapping = dma_map_single(txq->dev, skb->data, in qede_start_xmit()
1529 if (unlikely(dma_mapping_error(txq->dev, mapping))) { in qede_start_xmit()
1530 DP_NOTICE(edev, "SKB mapping failed\n"); in qede_start_xmit()
1536 BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); in qede_start_xmit()
1543 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1548 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1552 /* We need to fill in additional data in second_bd... */ in qede_start_xmit()
1557 first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); in qede_start_xmit()
1558 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1564 /* We don't re-calculate IP checksum as it is already done by in qede_start_xmit()
1567 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1571 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1577 /* Legacy FW had flipped behavior in regard to this bit - in qede_start_xmit()
1581 if (unlikely(txq->is_legacy)) in qede_start_xmit()
1593 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1595 third_bd->data.lso_mss = in qede_start_xmit()
1596 cpu_to_le16(skb_shinfo(skb)->gso_size); in qede_start_xmit()
1599 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1605 first_bd->data.bd_flags.bitfields |= 1 << tmp; in qede_start_xmit()
1609 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1614 /* @@@TBD - if will not be removed need to check */ in qede_start_xmit()
1615 third_bd->data.bitfields |= in qede_start_xmit()
1619 * data on same BD. If we need to split, use the second bd... in qede_start_xmit()
1624 first_bd->nbytes, first_bd->addr.hi, in qede_start_xmit()
1625 first_bd->addr.lo); in qede_start_xmit()
1627 mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), in qede_start_xmit()
1628 le32_to_cpu(first_bd->addr.lo)) + in qede_start_xmit()
1631 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping, in qede_start_xmit()
1632 le16_to_cpu(first_bd->nbytes) - in qede_start_xmit()
1636 * individual mapping in qede_start_xmit()
1638 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; in qede_start_xmit()
1640 first_bd->nbytes = cpu_to_le16(hlen); in qede_start_xmit()
1646 val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << in qede_start_xmit()
1650 first_bd->data.bitfields = cpu_to_le16(val); in qede_start_xmit()
1654 while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { in qede_start_xmit()
1656 &skb_shinfo(skb)->frags[frag_idx], in qede_start_xmit()
1673 for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) { in qede_start_xmit()
1675 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1680 &skb_shinfo(skb)->frags[frag_idx], in qede_start_xmit()
1690 first_bd->data.nbds = nbd; in qede_start_xmit()
1692 netdev_tx_sent_queue(netdev_txq, skb->len); in qede_start_xmit()
1696 /* Advance packet producer only before sending the packet since mapping in qede_start_xmit()
1699 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; in qede_start_xmit()
1702 txq->tx_db.data.bd_prod = in qede_start_xmit()
1703 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); in qede_start_xmit()
1708 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) in qede_start_xmit()
1714 txq->stopped_cnt++; in qede_start_xmit()
1719 * fp->bd_tx_cons in qede_start_xmit()
1723 if ((qed_chain_get_elem_left(&txq->tx_pbl) >= in qede_start_xmit()
1725 (edev->state == QEDE_STATE_OPEN)) { in qede_start_xmit()
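
Several fragments above deal with the LSO corner case where the first BD would carry both the headers and some payload from the same linear mapping: the payload portion is moved to an extra data BD at mapping + hlen, the first BD is trimmed to the header length, and the sw ring entry is flagged QEDE_TSO_SPLIT_BD so completion knows the two BDs share one mapping. A sketch of just that split, using the BD helpers as they appear above:

static void example_split_lso_header(struct eth_tx_1st_bd *first_bd,
				     struct eth_tx_bd *tx_data_bd,
				     dma_addr_t mapping, u16 hlen)
{
	u16 nbytes = le16_to_cpu(first_bd->nbytes);

	/* Second BD points into the same DMA mapping, past the headers */
	BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping + hlen, nbytes - hlen);

	/* First BD now covers only the protocol headers */
	first_bd->nbytes = cpu_to_le16(hlen);
}
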
1741 total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; in qede_select_queue()
1754 if (skb->encapsulation) { in qede_features_check()
1759 l4_proto = ip_hdr(skb)->protocol; in qede_features_check()
1762 l4_proto = ipv6_hdr(skb)->nexthdr; in qede_features_check()
1777 vxln_port = edev->vxlan_dst_port; in qede_features_check()
1778 gnv_port = edev->geneve_dst_port; in qede_features_check()
1780 if ((skb_inner_mac_header(skb) - in qede_features_check()
1782 (ntohs(udp_hdr(skb)->dest) != vxln_port && in qede_features_check()
1783 ntohs(udp_hdr(skb)->dest) != gnv_port)) in qede_features_check()