Lines matching +full:data +full:-mapping in the QLogic qede NIC driver fast path (drivers/net/ethernet/qlogic/qede/qede_fp.c). Each entry shows the original source line number, the matched line, and the enclosing function.

2  * Copyright (c) 2015-2017  QLogic Corporation
55 dma_addr_t mapping; in qede_alloc_rx_buffer() local
56 struct page *data; in qede_alloc_rx_buffer() local
58 /* In case lazy-allocation is allowed, postpone allocation until the in qede_alloc_rx_buffer()
62 if (allow_lazy && likely(rxq->filled_buffers > 12)) { in qede_alloc_rx_buffer()
63 rxq->filled_buffers--; in qede_alloc_rx_buffer()
67 data = alloc_pages(GFP_ATOMIC, 0); in qede_alloc_rx_buffer()
68 if (unlikely(!data)) in qede_alloc_rx_buffer()
69 return -ENOMEM; in qede_alloc_rx_buffer()
72 * for multiple RX buffer segment size mapping. in qede_alloc_rx_buffer()
74 mapping = dma_map_page(rxq->dev, data, 0, in qede_alloc_rx_buffer()
75 PAGE_SIZE, rxq->data_direction); in qede_alloc_rx_buffer()
76 if (unlikely(dma_mapping_error(rxq->dev, mapping))) { in qede_alloc_rx_buffer()
77 __free_page(data); in qede_alloc_rx_buffer()
78 return -ENOMEM; in qede_alloc_rx_buffer()
81 sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; in qede_alloc_rx_buffer()
82 sw_rx_data->page_offset = 0; in qede_alloc_rx_buffer()
83 sw_rx_data->data = data; in qede_alloc_rx_buffer()
84 sw_rx_data->mapping = mapping; in qede_alloc_rx_buffer()
87 rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring); in qede_alloc_rx_buffer()
89 rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping)); in qede_alloc_rx_buffer()
90 rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) + in qede_alloc_rx_buffer()
91 rxq->rx_headroom); in qede_alloc_rx_buffer()
93 rxq->sw_rx_prod++; in qede_alloc_rx_buffer()
94 rxq->filled_buffers++; in qede_alloc_rx_buffer()
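
The lines above are qede_alloc_rx_buffer(): postpone allocation while enough buffers are already filled, otherwise allocate a page, DMA-map the whole page (so it can back several RX buffer segments), and publish it on the BD ring. A minimal sketch of just the allocate-and-map step, using only the fields visible in the listing; the helper name and signature are hypothetical, not the driver's:

static int rx_buf_alloc_and_map(struct qede_rx_queue *rxq,
				struct page **page_p, dma_addr_t *mapping_p)
{
	struct page *data;
	dma_addr_t mapping;

	data = alloc_pages(GFP_ATOMIC, 0);
	if (unlikely(!data))
		return -ENOMEM;

	/* Map the whole page; it may serve multiple RX buffer segments */
	mapping = dma_map_page(rxq->dev, data, 0, PAGE_SIZE,
			       rxq->data_direction);
	if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
		__free_page(data);
		return -ENOMEM;
	}

	*page_p = data;
	*mapping_p = mapping;
	return 0;
}
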
99 /* Unmap the data and free skb */
102 u16 idx = txq->sw_tx_cons; in qede_free_tx_pkt()
103 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_tx_pkt()
108 bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD; in qede_free_tx_pkt()
113 "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n", in qede_free_tx_pkt()
114 idx, txq->sw_tx_cons, txq->sw_tx_prod); in qede_free_tx_pkt()
115 return -1; in qede_free_tx_pkt()
118 *len = skb->len; in qede_free_tx_pkt()
120 first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
124 nbds = first_bd->data.nbds; in qede_free_tx_pkt()
128 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
132 dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd), in qede_free_tx_pkt()
135 /* Unmap the data of the skb frags */ in qede_free_tx_pkt()
136 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) { in qede_free_tx_pkt()
138 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
139 dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd), in qede_free_tx_pkt()
144 qed_chain_consume(&txq->tx_pbl); in qede_free_tx_pkt()
148 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_tx_pkt()
149 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_tx_pkt()
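
qede_free_tx_pkt() above is the TX completion path: consume the first BD, dma_unmap_single() the linear part, then walk the fragment BDs and dma_unmap_page() each one before clearing the software ring entry. A hedged sketch of the per-fragment unmap loop; BD_UNMAP_LEN() and the eth_tx_bd type are assumed from the driver's BD helpers rather than taken from the matched lines:

	/* Unmap one data BD per skb fragment (mirrors the loop above) */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}
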
154 /* Unmap the data and free skb when mapping failed during start_xmit */
159 u16 idx = txq->sw_tx_prod; in qede_free_failed_tx_pkt()
160 struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb; in qede_free_failed_tx_pkt()
165 qed_chain_set_prod(&txq->tx_pbl, in qede_free_failed_tx_pkt()
166 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); in qede_free_failed_tx_pkt()
168 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
172 qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
174 nbd--; in qede_free_failed_tx_pkt()
177 dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd), in qede_free_failed_tx_pkt()
180 /* Unmap the data of the skb frags */ in qede_free_failed_tx_pkt()
183 qed_chain_produce(&txq->tx_pbl); in qede_free_failed_tx_pkt()
184 if (tx_data_bd->nbytes) in qede_free_failed_tx_pkt()
185 dma_unmap_page(txq->dev, in qede_free_failed_tx_pkt()
191 qed_chain_set_prod(&txq->tx_pbl, in qede_free_failed_tx_pkt()
192 le16_to_cpu(txq->tx_db.data.bd_prod), first_bd); in qede_free_failed_tx_pkt()
196 txq->sw_tx_ring.skbs[idx].skb = NULL; in qede_free_failed_tx_pkt()
197 txq->sw_tx_ring.skbs[idx].flags = 0; in qede_free_failed_tx_pkt()
205 if (skb->ip_summed != CHECKSUM_PARTIAL) in qede_xmit_type()
210 (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6)) in qede_xmit_type()
213 if (skb->encapsulation) { in qede_xmit_type()
216 unsigned short gso_type = skb_shinfo(skb)->gso_type; in qede_xmit_type()
242 bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) & in qede_set_params_for_ipv6_ext()
250 l4_proto = ipv6_hdr(skb)->nexthdr; in qede_set_params_for_ipv6_ext()
252 l4_proto = ip_hdr(skb)->protocol; in qede_set_params_for_ipv6_ext()
258 third_bd->data.bitfields |= in qede_set_params_for_ipv6_ext()
263 second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1); in qede_set_params_for_ipv6_ext()
264 second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2); in qede_set_params_for_ipv6_ext()
270 dma_addr_t mapping; in map_frag_to_bd() local
272 /* Map skb non-linear frag data for DMA */ in map_frag_to_bd()
273 mapping = skb_frag_dma_map(txq->dev, frag, 0, in map_frag_to_bd()
275 if (unlikely(dma_mapping_error(txq->dev, mapping))) in map_frag_to_bd()
276 return -ENOMEM; in map_frag_to_bd()
278 /* Setup the data pointer of the frag data */ in map_frag_to_bd()
279 BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag)); in map_frag_to_bd()
288 inner_tcp_hdrlen(skb) - skb->data); in qede_get_skb_hlen()
291 tcp_hdrlen(skb) - skb->data); in qede_get_skb_hlen()
298 int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1; in qede_pkt_req_lin()
307 allowed_frags--; in qede_pkt_req_lin()
310 return (skb_shinfo(skb)->nr_frags > allowed_frags); in qede_pkt_req_lin()
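
qede_pkt_req_lin() above reports whether the skb carries more page fragments than the allowed BD budget (ETH_TX_MAX_BDS_PER_NON_LSO_PACKET minus the first BD, with one more BD reserved in the LSO case). The transmit path is then expected to linearize such packets; a hedged sketch of that caller-side handling, where only txq->tx_mem_alloc_err comes from the listing and the rest is an assumption about the start_xmit flow:

	if (qede_pkt_req_lin(skb, xmit_type)) {
		if (skb_linearize(skb)) {
			/* Could not flatten the skb into one linear buffer */
			txq->tx_mem_alloc_err++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
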
316 /* wmb makes sure that the BDs data is updated before updating the in qede_update_tx_producer()
317 * producer, otherwise FW may read old data from the BDs. in qede_update_tx_producer()
321 writel(txq->tx_db.raw, txq->doorbell_addr); in qede_update_tx_producer()
324 * CPU may write to the same doorbell address and data may be lost in qede_update_tx_producer()
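
The comment lines above state the ordering contract in qede_update_tx_producer(): all BD writes must be visible before the producer/doorbell write, otherwise the firmware may fetch stale descriptors, and the doorbell write itself must not be reordered against a later write to the same address. A minimal sketch of that barrier-then-doorbell sequence; only the writel() line is taken from the listing, the barrier choice follows the comment and may differ from the driver:

	/* Ensure BD contents are visible before the producer update */
	wmb();

	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* Keep doorbell writes from different CPUs/paths ordered */
	wmb();
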
333 struct qede_tx_queue *txq = fp->xdp_tx; in qede_xdp_xmit()
335 u16 idx = txq->sw_tx_prod; in qede_xdp_xmit()
338 if (!qed_chain_get_elem_left(&txq->tx_pbl)) { in qede_xdp_xmit()
339 txq->stopped_cnt++; in qede_xdp_xmit()
340 return -ENOMEM; in qede_xdp_xmit()
343 first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl); in qede_xdp_xmit()
346 first_bd->data.bd_flags.bitfields = in qede_xdp_xmit()
352 first_bd->data.bitfields |= cpu_to_le16(val); in qede_xdp_xmit()
353 first_bd->data.nbds = 1; in qede_xdp_xmit()
356 BD_SET_UNMAP_ADDR_LEN(first_bd, metadata->mapping + padding, length); in qede_xdp_xmit()
361 dma_sync_single_for_device(&edev->pdev->dev, in qede_xdp_xmit()
362 metadata->mapping + padding, in qede_xdp_xmit()
365 txq->sw_tx_ring.xdp[idx].page = metadata->data; in qede_xdp_xmit()
366 txq->sw_tx_ring.xdp[idx].mapping = metadata->mapping; in qede_xdp_xmit()
367 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; in qede_xdp_xmit()
370 fp->xdp_xmit = 1; in qede_xdp_xmit()
381 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_txq_has_work()
382 if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1) in qede_txq_has_work()
385 return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl); in qede_txq_has_work()
392 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_xdp_tx_int()
395 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { in qede_xdp_tx_int()
396 qed_chain_consume(&txq->tx_pbl); in qede_xdp_tx_int()
397 idx = txq->sw_tx_cons; in qede_xdp_tx_int()
399 dma_unmap_page(&edev->pdev->dev, in qede_xdp_tx_int()
400 txq->sw_tx_ring.xdp[idx].mapping, in qede_xdp_tx_int()
402 __free_page(txq->sw_tx_ring.xdp[idx].page); in qede_xdp_tx_int()
404 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; in qede_xdp_tx_int()
405 txq->xmit_pkts++; in qede_xdp_tx_int()
416 netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id); in qede_tx_int()
418 hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr); in qede_tx_int()
421 while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) { in qede_tx_int()
428 qed_chain_get_cons_idx(&txq->tx_pbl)); in qede_tx_int()
434 txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers; in qede_tx_int()
435 txq->xmit_pkts++; in qede_tx_int()
457 * stops the queue->sees fresh tx_bd_cons->releases the queue-> in qede_tx_int()
458 * sends some packets consuming the whole queue again-> in qede_tx_int()
465 (edev->state == QEDE_STATE_OPEN) && in qede_tx_int()
466 (qed_chain_get_elem_left(&txq->tx_pbl) in qede_tx_int()
486 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); in qede_has_rx_work()
487 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_has_rx_work()
494 qed_chain_consume(&rxq->rx_bd_ring); in qede_rx_bd_ring_consume()
495 rxq->sw_rx_cons++; in qede_rx_bd_ring_consume()
504 struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring); in qede_reuse_page()
508 curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX]; in qede_reuse_page()
511 new_mapping = curr_prod->mapping + curr_prod->page_offset; in qede_reuse_page()
513 rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping)); in qede_reuse_page()
514 rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) + in qede_reuse_page()
515 rxq->rx_headroom); in qede_reuse_page()
517 rxq->sw_rx_prod++; in qede_reuse_page()
518 curr_cons->data = NULL; in qede_reuse_page()
528 for (; count > 0; count--) { in qede_recycle_rx_bd_ring()
529 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; in qede_recycle_rx_bd_ring()
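
qede_reuse_page() above re-posts an already-mapped page segment straight back onto the RX BD ring (same DMA mapping, offset preserved), and qede_recycle_rx_bd_ring() repeats that for `count` consumed buffers after an error. A hedged sketch of the reuse step; copying the software ring entry (the assignment below) is an assumption needed to bridge the gap between the matched lines:

	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	*curr_prod = *curr_cons;		/* assumption: hand the page over */

	new_mapping = curr_prod->mapping + curr_prod->page_offset;
	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
					  rxq->rx_headroom);

	rxq->sw_rx_prod++;
	curr_cons->data = NULL;			/* ownership moved back to the ring */
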
539 curr_cons->page_offset += rxq->rx_buf_seg_size; in qede_realloc_rx_buffer()
541 if (curr_cons->page_offset == PAGE_SIZE) { in qede_realloc_rx_buffer()
546 curr_cons->page_offset -= rxq->rx_buf_seg_size; in qede_realloc_rx_buffer()
548 return -ENOMEM; in qede_realloc_rx_buffer()
551 dma_unmap_page(rxq->dev, curr_cons->mapping, in qede_realloc_rx_buffer()
552 PAGE_SIZE, rxq->data_direction); in qede_realloc_rx_buffer()
558 page_ref_inc(curr_cons->data); in qede_realloc_rx_buffer()
567 u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring); in qede_update_rx_prod()
568 u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring); in qede_update_rx_prod()
575 /* Make sure that the BD and SGE data is updated before updating the in qede_update_rx_prod()
581 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), in qede_update_rx_prod()
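
qede_update_rx_prod() above reads the BD and CQE producer indices and writes them into device internal RAM; the comment requires a barrier so the BD and SGE updates land before the producers do. A hedged sketch; the eth_rx_prod_data structure and its field names are assumptions from the qed headers, not shown in the listing:

	struct eth_rx_prod_data rx_prods = {0};

	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

	/* BD/SGE writes must be visible before the producer update */
	wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);
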
606 skb->ip_summed = CHECKSUM_UNNECESSARY; in qede_set_skb_csum()
609 skb->csum_level = 1; in qede_set_skb_csum()
610 skb->encapsulation = 1; in qede_set_skb_csum()
622 napi_gro_receive(&fp->napi, skb); in qede_skb_receive()
629 u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags); in qede_set_gro_params()
633 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in qede_set_gro_params()
635 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in qede_set_gro_params()
637 skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) - in qede_set_gro_params()
638 cqe->header_len; in qede_set_gro_params()
645 struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons & in qede_fill_frag_skb()
647 struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index]; in qede_fill_frag_skb()
648 struct sk_buff *skb = tpa_info->skb; in qede_fill_frag_skb()
650 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) in qede_fill_frag_skb()
654 skb_fill_page_desc(skb, tpa_info->frag_id++, in qede_fill_frag_skb()
655 current_bd->data, in qede_fill_frag_skb()
656 current_bd->page_offset + rxq->rx_headroom, in qede_fill_frag_skb()
663 page_ref_inc(current_bd->data); in qede_fill_frag_skb()
669 skb->data_len += len_on_bd; in qede_fill_frag_skb()
670 skb->truesize += rxq->rx_buf_seg_size; in qede_fill_frag_skb()
671 skb->len += len_on_bd; in qede_fill_frag_skb()
676 tpa_info->state = QEDE_AGG_STATE_ERROR; in qede_fill_frag_skb()
679 return -ENOMEM; in qede_fill_frag_skb()
723 buf = page_address(bd->data) + bd->page_offset; in qede_build_skb()
724 skb = build_skb(buf, rxq->rx_buf_seg_size); in qede_build_skb()
741 bd->page_offset += rxq->rx_buf_seg_size; in qede_tpa_rx_build_skb()
743 if (bd->page_offset == PAGE_SIZE) { in qede_tpa_rx_build_skb()
747 bd->page_offset -= rxq->rx_buf_seg_size; in qede_tpa_rx_build_skb()
748 page_ref_inc(bd->data); in qede_tpa_rx_build_skb()
753 page_ref_inc(bd->data); in qede_tpa_rx_build_skb()
771 * data and benefit in reusing the page segment instead of in qede_rx_build_skb()
772 * un-mapping it. in qede_rx_build_skb()
774 if ((len + pad <= edev->rx_copybreak)) { in qede_rx_build_skb()
775 unsigned int offset = bd->page_offset + pad; in qede_rx_build_skb()
777 skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE); in qede_rx_build_skb()
782 skb_put_data(skb, page_address(bd->data) + offset, len); in qede_rx_build_skb()
794 page_ref_inc(bd->data); in qede_rx_build_skb()
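
The copybreak branch above avoids unmapping the page for small frames: the payload is copied into a freshly allocated skb and the mapped page segment stays on the ring for reuse. A condensed sketch of that branch; the skb_reserve() of the padding is an assumption, the remaining calls appear in the listing:

	if (len + pad <= edev->rx_copybreak) {
		unsigned int offset = bd->page_offset + pad;
		struct sk_buff *skb;

		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
		if (unlikely(!skb))
			return NULL;

		skb_reserve(skb, pad);		/* assumption */
		skb_put_data(skb, page_address(bd->data) + offset, len);
		/* page segment is reused instead of being unmapped */
		return skb;
	}
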
809 struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_start()
813 sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX]; in qede_tpa_start()
814 pad = cqe->placement_offset + rxq->rx_headroom; in qede_tpa_start()
816 tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons, in qede_tpa_start()
817 le16_to_cpu(cqe->len_on_first_bd), in qede_tpa_start()
819 tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset; in qede_tpa_start()
820 tpa_info->buffer.mapping = sw_rx_data_cons->mapping; in qede_tpa_start()
822 if (unlikely(!tpa_info->skb)) { in qede_tpa_start()
826 * this might be used by FW still, it will be re-used in qede_tpa_start()
829 tpa_info->tpa_start_fail = true; in qede_tpa_start()
831 tpa_info->state = QEDE_AGG_STATE_ERROR; in qede_tpa_start()
835 tpa_info->frag_id = 0; in qede_tpa_start()
836 tpa_info->state = QEDE_AGG_STATE_START; in qede_tpa_start()
838 if ((le16_to_cpu(cqe->pars_flags.flags) >> in qede_tpa_start()
841 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag); in qede_tpa_start()
843 tpa_info->vlan_tag = 0; in qede_tpa_start()
845 qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash); in qede_tpa_start()
848 qede_set_gro_params(edev, tpa_info->skb, cqe); in qede_tpa_start()
851 if (likely(cqe->ext_bd_len_list[0])) in qede_tpa_start()
852 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_start()
853 le16_to_cpu(cqe->ext_bd_len_list[0])); in qede_tpa_start()
855 if (unlikely(cqe->ext_bd_len_list[1])) { in qede_tpa_start()
857 … "Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n"); in qede_tpa_start()
858 tpa_info->state = QEDE_AGG_STATE_ERROR; in qede_tpa_start()
871 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb), in qede_gro_ip_csum()
872 iph->saddr, iph->daddr, 0); in qede_gro_ip_csum()
885 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb), in qede_gro_ipv6_csum()
886 &iph->saddr, &iph->daddr, 0); in qede_gro_ipv6_csum()
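
qede_gro_ip_csum() and qede_gro_ipv6_csum() above seed th->check with the pseudo-header checksum of the coalesced packet so GRO completion can finish it. A hedged sketch of the IPv4 variant; the transport-header setup and the tcp_gro_complete() call are assumptions about the surrounding (unmatched) lines, the checksum expression is the one shown:

static void gro_ip_csum_sketch(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	tcp_gro_complete(skb);
}
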
901 if (unlikely(!skb->data_len)) { in qede_gro_receive()
902 skb_shinfo(skb)->gso_type = 0; in qede_gro_receive()
903 skb_shinfo(skb)->gso_size = 0; in qede_gro_receive()
908 if (skb_shinfo(skb)->gso_size) { in qede_gro_receive()
911 switch (skb->protocol) { in qede_gro_receive()
921 ntohs(skb->protocol)); in qede_gro_receive()
927 skb_record_rx_queue(skb, fp->rxq->rxq_id); in qede_gro_receive()
928 qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag); in qede_gro_receive()
937 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_cont()
938 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_cont()
939 le16_to_cpu(cqe->len_list[i])); in qede_tpa_cont()
943 "Strange - TPA cont with more than a single len_list entry\n"); in qede_tpa_cont()
950 struct qede_rx_queue *rxq = fp->rxq; in qede_tpa_end()
955 tpa_info = &rxq->tpa_info[cqe->tpa_agg_index]; in qede_tpa_end()
956 skb = tpa_info->skb; in qede_tpa_end()
958 if (tpa_info->buffer.page_offset == PAGE_SIZE) in qede_tpa_end()
959 dma_unmap_page(rxq->dev, tpa_info->buffer.mapping, in qede_tpa_end()
960 PAGE_SIZE, rxq->data_direction); in qede_tpa_end()
962 for (i = 0; cqe->len_list[i]; i++) in qede_tpa_end()
963 qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index, in qede_tpa_end()
964 le16_to_cpu(cqe->len_list[i])); in qede_tpa_end()
967 "Strange - TPA end with more than a single len_list entry\n"); in qede_tpa_end()
969 if (unlikely(tpa_info->state != QEDE_AGG_STATE_START)) in qede_tpa_end()
973 if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1)) in qede_tpa_end()
975 "Strange - TPA had %02x BDs, but SKB has only %d frags\n", in qede_tpa_end()
976 cqe->num_of_bds, tpa_info->frag_id); in qede_tpa_end()
977 if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len))) in qede_tpa_end()
979 "Strange - total packet len [cqe] is %4x but SKB has len %04x\n", in qede_tpa_end()
980 le16_to_cpu(cqe->total_packet_len), skb->len); in qede_tpa_end()
983 skb->protocol = eth_type_trans(skb, edev->ndev); in qede_tpa_end()
984 skb->ip_summed = CHECKSUM_UNNECESSARY; in qede_tpa_end()
986 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count in qede_tpa_end()
987 * to skb_shinfo(skb)->gso_segs in qede_tpa_end()
989 NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs); in qede_tpa_end()
991 qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag); in qede_tpa_end()
993 tpa_info->state = QEDE_AGG_STATE_NONE; in qede_tpa_end()
997 tpa_info->state = QEDE_AGG_STATE_NONE; in qede_tpa_end()
999 if (tpa_info->tpa_start_fail) { in qede_tpa_end()
1000 qede_reuse_page(rxq, &tpa_info->buffer); in qede_tpa_end()
1001 tpa_info->tpa_start_fail = false; in qede_tpa_end()
1004 dev_kfree_skb_any(tpa_info->skb); in qede_tpa_end()
1005 tpa_info->skb = NULL; in qede_tpa_end()
1041 u8 tun_pars_flg = cqe->tunnel_pars_flags.flags; in qede_pkt_is_ip_fragmented()
1064 xdp.data_hard_start = page_address(bd->data); in qede_rx_xdp()
1065 xdp.data = xdp.data_hard_start + *data_offset; in qede_rx_xdp()
1067 xdp.data_end = xdp.data + *len; in qede_rx_xdp()
1068 xdp.rxq = &rxq->xdp_rxq; in qede_rx_xdp()
1079 *data_offset = xdp.data - xdp.data_hard_start; in qede_rx_xdp()
1080 *len = xdp.data_end - xdp.data; in qede_rx_xdp()
1086 rxq->xdp_no_pass++; in qede_rx_xdp()
1093 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1101 dma_unmap_page(rxq->dev, bd->mapping, in qede_rx_xdp()
1103 __free_page(bd->data); in qede_rx_xdp()
1104 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1115 trace_xdp_exception(edev->ndev, prog, act); in qede_rx_xdp()
1118 qede_recycle_rx_bd_ring(rxq, cqe->bd_num); in qede_rx_xdp()
1130 u16 pkt_len = le16_to_cpu(cqe->pkt_len); in qede_rx_build_jumbo()
1135 pkt_len -= first_bd_len; in qede_rx_build_jumbo()
1138 for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) { in qede_rx_build_jumbo()
1139 u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size : in qede_rx_build_jumbo()
1144 "Still got %d BDs for mapping jumbo, but length became 0\n", in qede_rx_build_jumbo()
1156 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; in qede_rx_build_jumbo()
1157 bd = &rxq->sw_rx_ring[bd_cons_idx]; in qede_rx_build_jumbo()
1160 dma_unmap_page(rxq->dev, bd->mapping, in qede_rx_build_jumbo()
1163 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++, in qede_rx_build_jumbo()
1164 bd->data, rxq->rx_headroom, cur_size); in qede_rx_build_jumbo()
1166 skb->truesize += PAGE_SIZE; in qede_rx_build_jumbo()
1167 skb->data_len += cur_size; in qede_rx_build_jumbo()
1168 skb->len += cur_size; in qede_rx_build_jumbo()
1169 pkt_len -= cur_size; in qede_rx_build_jumbo()
1189 qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start); in qede_rx_process_tpa_cqe()
1192 qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont); in qede_rx_process_tpa_cqe()
1195 return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end); in qede_rx_process_tpa_cqe()
1205 struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog); in qede_rx_process_cqe()
1216 cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring); in qede_rx_process_cqe()
1217 cqe_type = cqe->fast_path_regular.type; in qede_rx_process_cqe()
1224 edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe); in qede_rx_process_cqe()
1232 /* Get the data from the SW ring; Consume it only after it's evident in qede_rx_process_cqe()
1235 bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX; in qede_rx_process_cqe()
1236 bd = &rxq->sw_rx_ring[bd_cons_idx]; in qede_rx_process_cqe()
1238 fp_cqe = &cqe->fast_path_regular; in qede_rx_process_cqe()
1239 len = le16_to_cpu(fp_cqe->len_on_first_bd); in qede_rx_process_cqe()
1240 pad = fp_cqe->placement_offset + rxq->rx_headroom; in qede_rx_process_cqe()
1249 flags = cqe->fast_path_regular.pars_flags.flags; in qede_rx_process_cqe()
1255 rxq->rx_ip_frags++; in qede_rx_process_cqe()
1257 rxq->rx_hw_errors++; in qede_rx_process_cqe()
1265 rxq->rx_alloc_errors++; in qede_rx_process_cqe()
1266 qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num); in qede_rx_process_cqe()
1273 if (fp_cqe->bd_num > 1) { in qede_rx_process_cqe()
1284 /* The SKB contains all the data. Now prepare meta-magic */ in qede_rx_process_cqe()
1285 skb->protocol = eth_type_trans(skb, edev->ndev); in qede_rx_process_cqe()
1286 qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash); in qede_rx_process_cqe()
1288 skb_record_rx_queue(skb, rxq->rxq_id); in qede_rx_process_cqe()
1291 /* SKB is prepared - pass it to stack */ in qede_rx_process_cqe()
1292 qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag)); in qede_rx_process_cqe()
1299 struct qede_rx_queue *rxq = fp->rxq; in qede_rx_int()
1300 struct qede_dev *edev = fp->edev; in qede_rx_int()
1304 hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr); in qede_rx_int()
1305 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_rx_int()
1308 * / BD in the while-loop before reading hw_comp_cons. If the CQE is in qede_rx_int()
1317 qed_chain_recycle_consumed(&rxq->rx_comp_ring); in qede_rx_int()
1318 sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring); in qede_rx_int()
1322 rxq->rcv_pkts += rcv_pkts; in qede_rx_int()
1325 while (rxq->num_rx_buffers - rxq->filled_buffers) in qede_rx_int()
1337 qed_sb_update_sb_idx(fp->sb_info); in qede_poll_is_more_work()
1351 if (likely(fp->type & QEDE_FASTPATH_RX)) in qede_poll_is_more_work()
1352 if (qede_has_rx_work(fp->rxq)) in qede_poll_is_more_work()
1355 if (fp->type & QEDE_FASTPATH_XDP) in qede_poll_is_more_work()
1356 if (qede_txq_has_work(fp->xdp_tx)) in qede_poll_is_more_work()
1359 if (likely(fp->type & QEDE_FASTPATH_TX)) { in qede_poll_is_more_work()
1362 for_each_cos_in_txq(fp->edev, cos) { in qede_poll_is_more_work()
1363 if (qede_txq_has_work(&fp->txq[cos])) in qede_poll_is_more_work()
1378 struct qede_dev *edev = fp->edev; in qede_poll()
1381 if (likely(fp->type & QEDE_FASTPATH_TX)) { in qede_poll()
1384 for_each_cos_in_txq(fp->edev, cos) { in qede_poll()
1385 if (qede_txq_has_work(&fp->txq[cos])) in qede_poll()
1386 qede_tx_int(edev, &fp->txq[cos]); in qede_poll()
1390 if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx)) in qede_poll()
1391 qede_xdp_tx_int(edev, fp->xdp_tx); in qede_poll()
1393 rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) && in qede_poll()
1394 qede_has_rx_work(fp->rxq)) ? in qede_poll()
1401 qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1); in qede_poll()
1407 if (fp->xdp_xmit) { in qede_poll()
1408 u16 xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl); in qede_poll()
1410 fp->xdp_xmit = 0; in qede_poll()
1411 fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod); in qede_poll()
1412 qede_update_tx_producer(fp->xdp_tx); in qede_poll()
1422 qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/); in qede_msix_fp_int()
1424 napi_schedule_irqoff(&fp->napi); in qede_msix_fp_int()
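
qede_msix_fp_int() above is the standard per-queue NAPI interrupt pattern: mask/ack the status block interrupt and schedule NAPI from hard-IRQ context; the poll routine later re-enables it via qed_sb_ack(..., IGU_INT_ENABLE, 1) as shown in qede_poll() above. A minimal sketch with the conventional handler signature (the name is hypothetical):

static irqreturn_t fp_msix_int_sketch(int irq, void *fp_cookie)
{
	struct qede_fastpath *fp = fp_cookie;

	qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /* do not update */);
	napi_schedule_irqoff(&fp->napi);

	return IRQ_HANDLED;
}
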
1440 dma_addr_t mapping; in qede_start_xmit() local
1447 /* Get tx-queue context and netdev index */ in qede_start_xmit()
1449 WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc); in qede_start_xmit()
1453 WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1)); in qede_start_xmit()
1460 txq->tx_mem_alloc_err++; in qede_start_xmit()
1469 idx = txq->sw_tx_prod; in qede_start_xmit()
1470 txq->sw_tx_ring.skbs[idx].skb = skb; in qede_start_xmit()
1472 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1474 first_bd->data.bd_flags.bitfields = in qede_start_xmit()
1477 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) in qede_start_xmit()
1480 /* Map skb linear data for DMA and set in the first BD */ in qede_start_xmit()
1481 mapping = dma_map_single(txq->dev, skb->data, in qede_start_xmit()
1483 if (unlikely(dma_mapping_error(txq->dev, mapping))) { in qede_start_xmit()
1484 DP_NOTICE(edev, "SKB mapping failed\n"); in qede_start_xmit()
1490 BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb)); in qede_start_xmit()
1497 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1502 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1506 /* We need to fill in additional data in second_bd... */ in qede_start_xmit()
1511 first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb)); in qede_start_xmit()
1512 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1518 /* We don't re-calculate IP checksum as it is already done by in qede_start_xmit()
1521 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1525 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1531 /* Legacy FW had flipped behavior in regard to this bit - in qede_start_xmit()
1535 if (unlikely(txq->is_legacy)) in qede_start_xmit()
1547 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1549 third_bd->data.lso_mss = in qede_start_xmit()
1550 cpu_to_le16(skb_shinfo(skb)->gso_size); in qede_start_xmit()
1553 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1559 first_bd->data.bd_flags.bitfields |= 1 << tmp; in qede_start_xmit()
1563 first_bd->data.bd_flags.bitfields |= in qede_start_xmit()
1568 /* @@@TBD - if will not be removed need to check */ in qede_start_xmit()
1569 third_bd->data.bitfields |= in qede_start_xmit()
1573 * data on same BD. If we need to split, use the second bd... in qede_start_xmit()
1578 first_bd->nbytes, first_bd->addr.hi, in qede_start_xmit()
1579 first_bd->addr.lo); in qede_start_xmit()
1581 mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi), in qede_start_xmit()
1582 le32_to_cpu(first_bd->addr.lo)) + in qede_start_xmit()
1585 BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping, in qede_start_xmit()
1586 le16_to_cpu(first_bd->nbytes) - in qede_start_xmit()
1590 * individual mapping in qede_start_xmit()
1592 txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD; in qede_start_xmit()
1594 first_bd->nbytes = cpu_to_le16(hlen); in qede_start_xmit()
1600 val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) << in qede_start_xmit()
1604 first_bd->data.bitfields = cpu_to_le16(val); in qede_start_xmit()
1608 while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) { in qede_start_xmit()
1610 &skb_shinfo(skb)->frags[frag_idx], in qede_start_xmit()
1627 for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) { in qede_start_xmit()
1629 qed_chain_produce(&txq->tx_pbl); in qede_start_xmit()
1634 &skb_shinfo(skb)->frags[frag_idx], in qede_start_xmit()
1644 first_bd->data.nbds = nbd; in qede_start_xmit()
1646 netdev_tx_sent_queue(netdev_txq, skb->len); in qede_start_xmit()
1650 /* Advance packet producer only before sending the packet since mapping in qede_start_xmit()
1653 txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers; in qede_start_xmit()
1656 txq->tx_db.data.bd_prod = in qede_start_xmit()
1657 cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl)); in qede_start_xmit()
1662 if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) in qede_start_xmit()
1668 txq->stopped_cnt++; in qede_start_xmit()
1673 * fp->bd_tx_cons in qede_start_xmit()
1677 if ((qed_chain_get_elem_left(&txq->tx_pbl) >= in qede_start_xmit()
1679 (edev->state == QEDE_STATE_OPEN)) { in qede_start_xmit()
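
The tail of qede_start_xmit() above implements the usual stop/wake flow control against qede_tx_int(): stop the netdev TX queue when the PBL can no longer hold a worst-case packet, then re-check after a barrier so a completion that raced with the stop can wake the queue again. A hedged sketch of that pattern; the threshold reuses the MAX_SKB_FRAGS + 1 bound from the WARN_ON above, and the barrier placement is an assumption:

	if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl) <
		     (MAX_SKB_FRAGS + 1))) {
		netif_tx_stop_queue(netdev_txq);
		txq->stopped_cnt++;

		/* Paired with the barrier in qede_tx_int(); re-check so a
		 * racing completion can restart the queue.
		 */
		smp_mb();
		if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
		     (MAX_SKB_FRAGS + 1)) &&
		    (edev->state == QEDE_STATE_OPEN))
			netif_tx_wake_queue(netdev_txq);
	}
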
1695 total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc; in qede_select_queue()
1708 if (skb->encapsulation) { in qede_features_check()
1713 l4_proto = ip_hdr(skb)->protocol; in qede_features_check()
1716 l4_proto = ipv6_hdr(skb)->nexthdr; in qede_features_check()
1731 vxln_port = edev->vxlan_dst_port; in qede_features_check()
1732 gnv_port = edev->geneve_dst_port; in qede_features_check()
1734 if ((skb_inner_mac_header(skb) - in qede_features_check()
1736 (ntohs(udp_hdr(skb)->dest) != vxln_port && in qede_features_check()
1737 ntohs(udp_hdr(skb)->dest) != gnv_port)) in qede_features_check()