Lines Matching +full:rx +full:- +full:eq
4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
40 #include <linux/dma-mapping.h>
62 * Rx buffer size. We use largish buffers if possible but settle for single
68 # define FL_PG_ORDER (16 - PAGE_SHIFT)
76 * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
91 * Max number of Rx buffers we replenish at a time. Again keep this modest,
97 * Period of the Rx queue check timer. This timer is infrequent as it has
115 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
118 * Suspension threshold for non-Ethernet Tx queues. We require enough room
134 struct rx_sw_desc { /* SW state per Rx descriptor */
140 * Rx buffer sizes for "useskbs" Free List buffers (one ingress packet per skb
141 * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
151 struct sge *s = &adapter->sge; in fl_mtu_bufsize()
153 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align); in fl_mtu_bufsize()
165 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
166 * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
193 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS; in get_buf_addr()
198 return !(d->dma_addr & RX_UNMAPPED_BUF); in is_buf_mapped()
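The two helpers above rely on pointer tagging: Free List DMA addresses are at least 32-byte aligned, so the bottom five bits of dma_addr are reused for per-buffer state (a size index and an "unmapped" flag) and masked back off on access. A minimal standalone sketch of the idea, with illustrative mask values that are assumptions rather than the driver's actual definitions:

#include <stdbool.h>
#include <stdint.h>

#define RX_BUF_FLAGS	0x1fULL	/* assumed: all five low state bits */
#define RX_BUF_SIZE	0x0fULL	/* assumed: buffer-size index */
#define RX_UNMAPPED_BUF	0x10ULL	/* assumed: buffer is not DMA-mapped */

static inline uint64_t tag_rx_buf(uint64_t dma_addr, unsigned int size_idx,
				  bool unmapped)
{
	/* dma_addr must be at least 32-byte aligned for this to be lossless */
	return dma_addr | (size_idx & RX_BUF_SIZE) |
	       (unmapped ? RX_UNMAPPED_BUF : 0);
}

static inline uint64_t rx_buf_addr(uint64_t tagged)
{
	return tagged & ~RX_BUF_FLAGS;		/* mirrors get_buf_addr() */
}

static inline bool rx_buf_is_mapped(uint64_t tagged)
{
	return !(tagged & RX_UNMAPPED_BUF);	/* mirrors is_buf_mapped() */
}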
202 * txq_avail - return the number of available slots in a Tx queue
210 return q->size - 1 - q->in_use; in txq_avail()
214 * fl_cap - return the capacity of a free-buffer list
217 * Returns the capacity of a free-buffer list. The capacity is less than
223 return fl->size - 8; /* 1 descriptor = 8 buffers */ in fl_cap()
227 * fl_starving - return whether a Free List is starving.
238 const struct sge *s = &adapter->sge; in fl_starving()
240 return fl->avail - fl->pend_cred <= s->fl_starve_thres; in fl_starving()
249 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); in cxgb4_map_skb()
254 end = &si->frags[si->nr_frags]; in cxgb4_map_skb()
256 for (fp = si->frags; fp < end; fp++) { in cxgb4_map_skb()
265 while (fp-- > si->frags) in cxgb4_map_skb()
266 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); in cxgb4_map_skb()
268 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); in cxgb4_map_skb()
270 return -ENOMEM; in cxgb4_map_skb()
283 end = &si->frags[si->nr_frags]; in unmap_skb()
284 for (fp = si->frags; fp < end; fp++) in unmap_skb()
290 * deferred_unmap_destructor - unmap a packet when it is freed
299 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); in deferred_unmap_destructor()
304 * free_tx_desc - reclaims Tx descriptors and their buffers
316 unsigned int cidx = q->cidx; in free_tx_desc()
319 d = &q->sdesc[cidx]; in free_tx_desc()
320 while (n--) { in free_tx_desc()
321 if (d->skb) { /* an SGL is present */ in free_tx_desc()
322 if (unmap && d->addr[0]) { in free_tx_desc()
323 unmap_skb(adap->pdev_dev, d->skb, d->addr); in free_tx_desc()
324 memset(d->addr, 0, sizeof(d->addr)); in free_tx_desc()
326 dev_consume_skb_any(d->skb); in free_tx_desc()
327 d->skb = NULL; in free_tx_desc()
330 if (++cidx == q->size) { in free_tx_desc()
332 d = q->sdesc; in free_tx_desc()
335 q->cidx = cidx; in free_tx_desc()
343 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); in reclaimable()
344 hw_cidx -= q->cidx; in reclaimable()
345 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; in reclaimable()
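reclaimable() is plain circular-ring arithmetic: the hardware consumer index is read from the queue's status page and the signed distance back to the software cidx is folded into the range [0, q->size). A worked example with made-up index values:

/* Hypothetical numbers: a ring of 1024 descriptors, software cidx = 1000,
 * hardware cidx read from the status page has wrapped around to 8.
 */
static inline int reclaimable_example(void)
{
	int size = 1024, sw_cidx = 1000, hw_cidx = 8;
	int delta = hw_cidx - sw_cidx;			/* -992 */

	return delta < 0 ? delta + size : delta;	/* 32 descriptors */
}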
349 * reclaim_completed_tx - reclaims completed TX Descriptors
352 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
356 * and frees the associated buffers if possible. If @maxreclaim == -1, then
375 q->in_use -= reclaim; in reclaim_completed_tx()
382 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
394 (void)reclaim_completed_tx(adap, q, -1, unmap); in cxgb4_reclaim_completed_tx()
401 struct sge *s = &adapter->sge; in get_buf_size()
402 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE; in get_buf_size()
411 buf_size = PAGE_SIZE << s->fl_pg_order; in get_buf_size()
430 * free_rx_bufs - free the Rx buffers on an SGE free list
435 * Release the next @n buffers on an SGE free-buffer Rx queue. The
440 while (n--) { in free_rx_bufs()
441 struct rx_sw_desc *d = &q->sdesc[q->cidx]; in free_rx_bufs()
444 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), in free_rx_bufs()
447 put_page(d->page); in free_rx_bufs()
448 d->page = NULL; in free_rx_bufs()
449 if (++q->cidx == q->size) in free_rx_bufs()
450 q->cidx = 0; in free_rx_bufs()
451 q->avail--; in free_rx_bufs()
456 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
460 * Unmap the current buffer on an SGE free-buffer Rx queue. The
468 struct rx_sw_desc *d = &q->sdesc[q->cidx]; in unmap_rx_buf()
471 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), in unmap_rx_buf()
473 d->page = NULL; in unmap_rx_buf()
474 if (++q->cidx == q->size) in unmap_rx_buf()
475 q->cidx = 0; in unmap_rx_buf()
476 q->avail--; in unmap_rx_buf()
481 if (q->pend_cred >= 8) { in ring_fl_db()
482 u32 val = adap->params.arch.sge_fl_db; in ring_fl_db()
484 if (is_t4(adap->params.chip)) in ring_fl_db()
485 val |= PIDX_V(q->pend_cred / 8); in ring_fl_db()
487 val |= PIDX_T5_V(q->pend_cred / 8); in ring_fl_db()
498 if (unlikely(q->bar2_addr == NULL)) { in ring_fl_db()
500 val | QID_V(q->cntxt_id)); in ring_fl_db()
502 writel(val | QID_V(q->bar2_qid), in ring_fl_db()
503 q->bar2_addr + SGE_UDB_KDOORBELL); in ring_fl_db()
510 q->pend_cred &= 7; in ring_fl_db()
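ring_fl_db() hands Free List credits to the hardware in units of 8 buffers, matching the fl_cap() comment above (1 descriptor = 8 buffers): only whole descriptors are posted through PIDX and the remainder stays pending for the next doorbell. A small worked example with an assumed credit count:

/* Assumed state: 27 newly refilled buffers have accumulated in pend_cred. */
static inline unsigned int fl_db_units_example(void)
{
	unsigned int pend_cred = 27;
	unsigned int pidx_units = 0;

	if (pend_cred >= 8) {
		pidx_units = pend_cred / 8;	/* 3 descriptors (24 buffers) posted */
		pend_cred &= 7;			/* 3 buffers remain pending */
	}
	return pidx_units;
}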
517 sd->page = pg; in set_rx_sw_desc()
518 sd->dma_addr = mapping; /* includes size low bits */ in set_rx_sw_desc()
522 * refill_fl - refill an SGE Rx buffer ring
528 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
538 struct sge *s = &adap->sge; in refill_fl()
541 unsigned int cred = q->avail; in refill_fl()
542 __be64 *d = &q->desc[q->pidx]; in refill_fl()
543 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; in refill_fl()
547 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl)) in refill_fl()
552 node = dev_to_node(adap->pdev_dev); in refill_fl()
554 if (s->fl_pg_order == 0) in refill_fl()
561 pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order); in refill_fl()
563 q->large_alloc_failed++; in refill_fl()
567 mapping = dma_map_page(adap->pdev_dev, pg, 0, in refill_fl()
568 PAGE_SIZE << s->fl_pg_order, in refill_fl()
570 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { in refill_fl()
571 __free_pages(pg, s->fl_pg_order); in refill_fl()
572 q->mapping_err++; in refill_fl()
581 q->avail++; in refill_fl()
582 if (++q->pidx == q->size) { in refill_fl()
583 q->pidx = 0; in refill_fl()
584 sd = q->sdesc; in refill_fl()
585 d = q->desc; in refill_fl()
587 n--; in refill_fl()
591 while (n--) { in refill_fl()
594 q->alloc_failed++; in refill_fl()
598 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, in refill_fl()
600 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { in refill_fl()
602 q->mapping_err++; in refill_fl()
610 q->avail++; in refill_fl()
611 if (++q->pidx == q->size) { in refill_fl()
612 q->pidx = 0; in refill_fl()
613 sd = q->sdesc; in refill_fl()
614 d = q->desc; in refill_fl()
618 out: cred = q->avail - cred; in refill_fl()
619 q->pend_cred += cred; in refill_fl()
624 q->low++; in refill_fl()
625 set_bit(q->cntxt_id - adap->sge.egr_start, in refill_fl()
626 adap->sge.starving_fl); in refill_fl()
634 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), in __refill_fl()
639 * alloc_ring - allocate resources for an SGE descriptor ring
681 * sgl_len - calculates the size of an SGL of the given capacity
689 /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA in sgl_len()
690 * addresses. The DSGL Work Request starts off with a 32-bit DSGL in sgl_len()
693 * Address[i+1] } (this ensures that all addresses are on 64-bit in sgl_len()
700 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 in sgl_len()
701 * flits for every pair of the remaining N) +1 if (n-1) is odd; and in sgl_len()
702 * finally the "+((n-1)&1)" adds the one remaining flit needed if in sgl_len()
703 * (n-1) is odd ... in sgl_len()
705 n--; in sgl_len()
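Putting the comment above together, an SGL with n addresses needs 2 + 3*(n-1)/2 + ((n-1)&1) flits (one flit = 8 bytes). The sketch below reconstructs that arithmetic from the comment — an assumption about the full function body, which is not shown here — together with a few worked values:

static inline unsigned int sgl_len_sketch(unsigned int n)
{
	/* 2 flits cover the ULPTX header, Length0 and Address0; each pair of
	 * remaining entries takes 3 flits (Length[i]:Length[i+1], Address[i],
	 * Address[i+1]); an odd leftover entry costs one extra flit.
	 */
	n--;
	return (3 * n) / 2 + (n & 1) + 2;
}

/* Worked examples:  n = 1 -> 2 flits,  n = 2 -> 4,  n = 3 -> 5,  n = 4 -> 7 */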
710 * flits_to_desc - returns the num of Tx descriptors for the given flits
723 * is_eth_imm - can an Ethernet packet be sent as immediate data?
734 if (skb->encapsulation && skb_shinfo(skb)->gso_size && in is_eth_imm()
738 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in is_eth_imm()
741 hdrlen = skb_shinfo(skb)->gso_size ? in is_eth_imm()
745 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) in is_eth_imm()
751 * calc_tx_flits - calculate the number of flits for a packet Tx WR
770 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); in calc_tx_flits()
780 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); in calc_tx_flits()
781 if (skb_shinfo(skb)->gso_size) { in calc_tx_flits()
782 if (skb->encapsulation && chip_ver > CHELSIO_T5) { in calc_tx_flits()
785 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in calc_tx_flits()
788 pkt_hdrlen = eth_get_headlen(skb->dev, skb->data, in calc_tx_flits()
807 * calc_tx_descs - calculate the number of Tx descriptors for a packet
821 * cxgb4_write_sgl - populate a scatter/gather list for a packet
826 * @start: start offset into skb main-body data to include in the SGL
832 * main body except for the first @start bytes. @sgl must be 16-byte
844 unsigned int nfrags = si->nr_frags; in cxgb4_write_sgl()
847 len = skb_headlen(skb) - start; in cxgb4_write_sgl()
849 sgl->len0 = htonl(len); in cxgb4_write_sgl()
850 sgl->addr0 = cpu_to_be64(addr[0] + start); in cxgb4_write_sgl()
853 sgl->len0 = htonl(skb_frag_size(&si->frags[0])); in cxgb4_write_sgl()
854 sgl->addr0 = cpu_to_be64(addr[1]); in cxgb4_write_sgl()
857 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | in cxgb4_write_sgl()
859 if (likely(--nfrags == 0)) in cxgb4_write_sgl()
866 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; in cxgb4_write_sgl()
868 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { in cxgb4_write_sgl()
869 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); in cxgb4_write_sgl()
870 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); in cxgb4_write_sgl()
871 to->addr[0] = cpu_to_be64(addr[i]); in cxgb4_write_sgl()
872 to->addr[1] = cpu_to_be64(addr[++i]); in cxgb4_write_sgl()
875 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); in cxgb4_write_sgl()
876 to->len[1] = cpu_to_be32(0); in cxgb4_write_sgl()
877 to->addr[0] = cpu_to_be64(addr[i + 1]); in cxgb4_write_sgl()
879 if (unlikely((u8 *)end > (u8 *)q->stat)) { in cxgb4_write_sgl()
880 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; in cxgb4_write_sgl()
883 memcpy(sgl->sge, buf, part0); in cxgb4_write_sgl()
884 part1 = (u8 *)end - (u8 *)q->stat; in cxgb4_write_sgl()
885 memcpy(q->desc, (u8 *)buf + part0, part1); in cxgb4_write_sgl()
886 end = (void *)q->desc + part1; in cxgb4_write_sgl()
888 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ in cxgb4_write_sgl()
893 /* cxgb4_write_partial_sgl - populate SGL for partial packet
920 frag_size = min(len, skb_linear_data_len - start); in cxgb4_write_partial_sgl()
921 sgl->len0 = htonl(frag_size); in cxgb4_write_partial_sgl()
922 sgl->addr0 = cpu_to_be64(addr[0] + start); in cxgb4_write_partial_sgl()
923 len -= frag_size; in cxgb4_write_partial_sgl()
926 start -= skb_linear_data_len; in cxgb4_write_partial_sgl()
927 frag = &si->frags[frag_idx]; in cxgb4_write_partial_sgl()
931 start -= frag_size; in cxgb4_write_partial_sgl()
933 frag = &si->frags[frag_idx]; in cxgb4_write_partial_sgl()
937 frag_size = min(len, skb_frag_size(frag) - start); in cxgb4_write_partial_sgl()
938 sgl->len0 = cpu_to_be32(frag_size); in cxgb4_write_partial_sgl()
939 sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start); in cxgb4_write_partial_sgl()
940 len -= frag_size; in cxgb4_write_partial_sgl()
955 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; in cxgb4_write_partial_sgl()
962 frag_size = min(len, skb_frag_size(&si->frags[frag_idx])); in cxgb4_write_partial_sgl()
963 to->len[i & 1] = cpu_to_be32(frag_size); in cxgb4_write_partial_sgl()
964 to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]); in cxgb4_write_partial_sgl()
970 len -= frag_size; in cxgb4_write_partial_sgl()
977 to->len[1] = cpu_to_be32(0); in cxgb4_write_partial_sgl()
982 if (unlikely((u8 *)end > (u8 *)q->stat)) { in cxgb4_write_partial_sgl()
983 u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; in cxgb4_write_partial_sgl()
986 memcpy(sgl->sge, buf, part0); in cxgb4_write_partial_sgl()
987 part1 = (u8 *)end - (u8 *)q->stat; in cxgb4_write_partial_sgl()
988 memcpy(q->desc, (u8 *)buf + part0, part1); in cxgb4_write_partial_sgl()
989 end = (void *)q->desc + part1; in cxgb4_write_partial_sgl()
992 /* 0-pad to multiple of 16 */ in cxgb4_write_partial_sgl()
996 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | in cxgb4_write_partial_sgl()
1013 count--; in cxgb_pio_copy()
1018 * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
1035 if (unlikely(q->bar2_addr == NULL)) { in cxgb4_ring_tx_db()
1042 spin_lock_irqsave(&q->db_lock, flags); in cxgb4_ring_tx_db()
1043 if (!q->db_disabled) in cxgb4_ring_tx_db()
1045 QID_V(q->cntxt_id) | val); in cxgb4_ring_tx_db()
1047 q->db_pidx_inc += n; in cxgb4_ring_tx_db()
1048 q->db_pidx = q->pidx; in cxgb4_ring_tx_db()
1049 spin_unlock_irqrestore(&q->db_lock, flags); in cxgb4_ring_tx_db()
1065 if (n == 1 && q->bar2_qid == 0) { in cxgb4_ring_tx_db()
1066 int index = (q->pidx in cxgb4_ring_tx_db()
1067 ? (q->pidx - 1) in cxgb4_ring_tx_db()
1068 : (q->size - 1)); in cxgb4_ring_tx_db()
1069 u64 *wr = (u64 *)&q->desc[index]; in cxgb4_ring_tx_db()
1072 (q->bar2_addr + SGE_UDB_WCDOORBELL), in cxgb4_ring_tx_db()
1075 writel(val | QID_V(q->bar2_qid), in cxgb4_ring_tx_db()
1076 q->bar2_addr + SGE_UDB_KDOORBELL); in cxgb4_ring_tx_db()
1095 * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
1108 int left = (void *)q->stat - pos; in cxgb4_inline_tx_skb()
1111 if (likely(skb->len <= left)) { in cxgb4_inline_tx_skb()
1112 if (likely(!skb->data_len)) in cxgb4_inline_tx_skb()
1113 skb_copy_from_linear_data(skb, pos, skb->len); in cxgb4_inline_tx_skb()
1115 skb_copy_bits(skb, 0, pos, skb->len); in cxgb4_inline_tx_skb()
1116 pos += skb->len; in cxgb4_inline_tx_skb()
1119 skb_copy_bits(skb, left, q->desc, skb->len - left); in cxgb4_inline_tx_skb()
1120 pos = (void *)q->desc + (skb->len - left); in cxgb4_inline_tx_skb()
1123 /* 0-pad to multiple of 16 */ in cxgb4_inline_tx_skb()
1135 int left = (void *)q->stat - pos; in inline_tx_skb_header()
1138 memcpy(pos, skb->data, length); in inline_tx_skb_header()
1141 memcpy(pos, skb->data, left); in inline_tx_skb_header()
1142 memcpy(q->desc, skb->data + left, length - left); in inline_tx_skb_header()
1143 pos = (void *)q->desc + (length - left); in inline_tx_skb_header()
1145 /* 0-pad to multiple of 16 */ in inline_tx_skb_header()
1164 if (skb->encapsulation && in hwcsum()
1169 ver = inner_ip_hdr(skb)->version; in hwcsum()
1170 proto = (ver == 4) ? inner_ip_hdr(skb)->protocol : in hwcsum()
1171 inner_ipv6_hdr(skb)->nexthdr; in hwcsum()
1173 ver = ip_hdr(skb)->version; in hwcsum()
1174 proto = (ver == 4) ? ip_hdr(skb)->protocol : in hwcsum()
1175 ipv6_hdr(skb)->nexthdr; in hwcsum()
1211 eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN; in hwcsum()
1214 eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; in hwcsum()
1228 TXPKT_CSUM_LOC_V(start + skb->csum_offset); in hwcsum()
1234 netif_tx_stop_queue(q->txq); in eth_txq_stop()
1235 q->q.stops++; in eth_txq_stop()
1240 q->in_use += n; in txq_advance()
1241 q->pidx += n; in txq_advance()
1242 if (q->pidx >= q->size) in txq_advance()
1243 q->pidx -= q->size; in txq_advance()
1251 const struct cxgb_fcoe *fcoe = &pi->fcoe; in cxgb_fcoe_offload()
1253 if (!(fcoe->flags & CXGB_FCOE_ENABLED)) in cxgb_fcoe_offload()
1256 if (skb->protocol != htons(ETH_P_FCOE)) in cxgb_fcoe_offload()
1260 skb->mac_len = sizeof(struct ethhdr); in cxgb_fcoe_offload()
1262 skb_set_network_header(skb, skb->mac_len); in cxgb_fcoe_offload()
1263 skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); in cxgb_fcoe_offload()
1266 return -ENOTSUPP; in cxgb_fcoe_offload()
1285 struct port_info *pi = netdev_priv(skb->dev); in cxgb_encap_offload_supported()
1286 struct adapter *adapter = pi->adapter; in cxgb_encap_offload_supported()
1288 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || in cxgb_encap_offload_supported()
1289 skb->inner_protocol != htons(ETH_P_TEB)) in cxgb_encap_offload_supported()
1294 l4_hdr = ip_hdr(skb)->protocol; in cxgb_encap_offload_supported()
1297 l4_hdr = ipv6_hdr(skb)->nexthdr; in cxgb_encap_offload_supported()
1305 if (adapter->vxlan_port == udp_hdr(skb)->dest) in cxgb_encap_offload_supported()
1307 else if (adapter->geneve_port == udp_hdr(skb)->dest) in cxgb_encap_offload_supported()
1324 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; in t6_fill_tnl_lso()
1326 bool v6 = (ip_hdr(skb)->version == 6); in t6_fill_tnl_lso()
1337 tnl_lso->op_to_IpIdSplitOut = htonl(val); in t6_fill_tnl_lso()
1339 tnl_lso->IpIdOffsetOut = 0; in t6_fill_tnl_lso()
1342 val = skb_inner_mac_header(skb) - skb_mac_header(skb); in t6_fill_tnl_lso()
1343 in_eth_xtra_len = skb_inner_network_header(skb) - in t6_fill_tnl_lso()
1344 skb_inner_mac_header(skb) - ETH_HLEN; in t6_fill_tnl_lso()
1349 tnl_lso->UdpLenSetOut_to_TnlHdrLen = in t6_fill_tnl_lso()
1354 tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0; in t6_fill_tnl_lso()
1358 tnl_lso->UdpLenSetOut_to_TnlHdrLen |= in t6_fill_tnl_lso()
1362 tnl_lso->r1 = 0; in t6_fill_tnl_lso()
1365 CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) | in t6_fill_tnl_lso()
1368 tnl_lso->Flow_to_TcpHdrLen = htonl(val); in t6_fill_tnl_lso()
1370 tnl_lso->IpIdOffset = htons(0); in t6_fill_tnl_lso()
1372 tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size)); in t6_fill_tnl_lso()
1373 tnl_lso->TCPSeqOffset = htonl(0); in t6_fill_tnl_lso()
1374 tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len)); in t6_fill_tnl_lso()
1380 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; in write_tso_wr()
1386 if (ssi->gso_type & SKB_GSO_TCPV6) in write_tso_wr()
1389 lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | in write_tso_wr()
1394 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); in write_tso_wr()
1395 lso->ipid_ofst = htons(0); in write_tso_wr()
1396 lso->mss = htons(ssi->gso_size); in write_tso_wr()
1397 lso->seqno_offset = htonl(0); in write_tso_wr()
1398 if (is_t4(adap->params.chip)) in write_tso_wr()
1399 lso->len = htonl(skb->len); in write_tso_wr()
1401 lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); in write_tso_wr()
1407 * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
1409 * @eq: the Ethernet TX Queue
1410 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
1416 * in regular timer-based Ethernet TX Queue maintenance.
1418 int t4_sge_eth_txq_egress_update(struct adapter *adap, struct sge_eth_txq *eq, in t4_sge_eth_txq_egress_update() argument
1422 struct sge_txq *q = &eq->q; in t4_sge_eth_txq_egress_update()
1425 if (!q->in_use || !__netif_tx_trylock(eq->txq)) in t4_sge_eth_txq_egress_update()
1429 reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true); in t4_sge_eth_txq_egress_update()
1431 hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); in t4_sge_eth_txq_egress_update()
1432 hw_in_use = q->pidx - hw_cidx; in t4_sge_eth_txq_egress_update()
1434 hw_in_use += q->size; in t4_sge_eth_txq_egress_update()
1441 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) { in t4_sge_eth_txq_egress_update()
1442 netif_tx_wake_queue(eq->txq); in t4_sge_eth_txq_egress_update()
1443 eq->q.restarts++; in t4_sge_eth_txq_egress_update()
1446 __netif_tx_unlock(eq->txq); in t4_sge_eth_txq_egress_update()
1460 if (unlikely(skb->len < min_pkt_len)) in cxgb4_validate_skb()
1461 return -EINVAL; in cxgb4_validate_skb()
1464 max_pkt_len = ETH_HLEN + dev->mtu; in cxgb4_validate_skb()
1469 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) in cxgb4_validate_skb()
1470 return -EINVAL; in cxgb4_validate_skb()
1478 wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG; in write_eo_udp_wr()
1479 wr->u.udpseg.ethlen = skb_network_offset(skb); in write_eo_udp_wr()
1480 wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); in write_eo_udp_wr()
1481 wr->u.udpseg.udplen = sizeof(struct udphdr); in write_eo_udp_wr()
1482 wr->u.udpseg.rtplen = 0; in write_eo_udp_wr()
1483 wr->u.udpseg.r4 = 0; in write_eo_udp_wr()
1484 if (skb_shinfo(skb)->gso_size) in write_eo_udp_wr()
1485 wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); in write_eo_udp_wr()
1487 wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len); in write_eo_udp_wr()
1488 wr->u.udpseg.schedpktsize = wr->u.udpseg.mss; in write_eo_udp_wr()
1489 wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len); in write_eo_udp_wr()
1495 * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
1525 adap = pi->adapter; in cxgb4_eth_xmit()
1528 if (xfrm_offload(skb) && !ssi->gso_size) in cxgb4_eth_xmit()
1529 return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev); in cxgb4_eth_xmit()
1534 (skb->len - skb_tcp_all_headers(skb))) in cxgb4_eth_xmit()
1535 return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev); in cxgb4_eth_xmit()
1540 if (!(adap->ptp_tx_skb)) { in cxgb4_eth_xmit()
1541 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in cxgb4_eth_xmit()
1542 adap->ptp_tx_skb = skb_get(skb); in cxgb4_eth_xmit()
1546 q = &adap->sge.ptptxq; in cxgb4_eth_xmit()
1548 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in cxgb4_eth_xmit()
1552 reclaim_completed_tx(adap, &q->q, -1, true); in cxgb4_eth_xmit()
1557 if (unlikely(ret == -EOPNOTSUPP)) in cxgb4_eth_xmit()
1561 chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in cxgb4_eth_xmit()
1564 credits = txq_avail(&q->q) - ndesc; in cxgb4_eth_xmit()
1568 dev_err(adap->pdev_dev, in cxgb4_eth_xmit()
1570 dev->name, qidx); in cxgb4_eth_xmit()
1577 if (skb->encapsulation && chip_ver > CHELSIO_T5) in cxgb4_eth_xmit()
1580 last_desc = q->q.pidx + ndesc - 1; in cxgb4_eth_xmit()
1581 if (last_desc >= q->q.size) in cxgb4_eth_xmit()
1582 last_desc -= q->q.size; in cxgb4_eth_xmit()
1583 sgl_sdesc = &q->q.sdesc[last_desc]; in cxgb4_eth_xmit()
1586 unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) { in cxgb4_eth_xmit()
1587 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); in cxgb4_eth_xmit()
1588 q->mapping_err++; in cxgb4_eth_xmit()
1607 wr = (void *)&q->q.desc[q->q.pidx]; in cxgb4_eth_xmit()
1608 eowr = (void *)&q->q.desc[q->q.pidx]; in cxgb4_eth_xmit()
1609 wr->equiq_to_len16 = htonl(wr_mid); in cxgb4_eth_xmit()
1610 wr->r3 = cpu_to_be64(0); in cxgb4_eth_xmit()
1611 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) in cxgb4_eth_xmit()
1616 len = immediate ? skb->len : 0; in cxgb4_eth_xmit()
1618 if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) { in cxgb4_eth_xmit()
1627 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | in cxgb4_eth_xmit()
1637 if (iph->version == 4) { in cxgb4_eth_xmit()
1638 iph->check = 0; in cxgb4_eth_xmit()
1639 iph->tot_len = 0; in cxgb4_eth_xmit()
1640 iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl); in cxgb4_eth_xmit()
1642 if (skb->ip_summed == CHECKSUM_PARTIAL) in cxgb4_eth_xmit()
1643 cntrl = hwcsum(adap->params.chip, skb); in cxgb4_eth_xmit()
1646 cntrl = hwcsum(adap->params.chip, skb); in cxgb4_eth_xmit()
1649 q->tso++; in cxgb4_eth_xmit()
1650 q->tx_cso += ssi->gso_segs; in cxgb4_eth_xmit()
1651 } else if (ssi->gso_size) { in cxgb4_eth_xmit()
1655 hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb)); in cxgb4_eth_xmit()
1657 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | in cxgb4_eth_xmit()
1660 cntrl = hwcsum(adap->params.chip, skb); in cxgb4_eth_xmit()
1663 sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start, in cxgb4_eth_xmit()
1666 left = (u8 *)end - (u8 *)q->q.stat; in cxgb4_eth_xmit()
1667 end = (void *)q->q.desc + left; in cxgb4_eth_xmit()
1670 q->uso++; in cxgb4_eth_xmit()
1671 q->tx_cso += ssi->gso_segs; in cxgb4_eth_xmit()
1677 wr->op_immdlen = htonl(FW_WR_OP_V(op) | in cxgb4_eth_xmit()
1681 if (skb->ip_summed == CHECKSUM_PARTIAL) { in cxgb4_eth_xmit()
1682 cntrl = hwcsum(adap->params.chip, skb) | in cxgb4_eth_xmit()
1684 q->tx_cso++; in cxgb4_eth_xmit()
1688 if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { in cxgb4_eth_xmit()
1693 left = (u8 *)end - (u8 *)q->q.stat; in cxgb4_eth_xmit()
1694 end = (void *)q->q.desc + left; in cxgb4_eth_xmit()
1695 sgl = (void *)q->q.desc; in cxgb4_eth_xmit()
1699 q->vlan_ins++; in cxgb4_eth_xmit()
1702 if (skb->protocol == htons(ETH_P_FCOE)) in cxgb4_eth_xmit()
1704 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT)); in cxgb4_eth_xmit()
1708 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | in cxgb4_eth_xmit()
1709 TXPKT_PF_V(adap->pf); in cxgb4_eth_xmit()
1713 if (is_t4(adap->params.chip)) in cxgb4_eth_xmit()
1714 ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio); in cxgb4_eth_xmit()
1716 ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio); in cxgb4_eth_xmit()
1718 cpl->ctrl0 = htonl(ctrl0); in cxgb4_eth_xmit()
1719 cpl->pack = htons(0); in cxgb4_eth_xmit()
1720 cpl->len = htons(skb->len); in cxgb4_eth_xmit()
1721 cpl->ctrl1 = cpu_to_be64(cntrl); in cxgb4_eth_xmit()
1724 cxgb4_inline_tx_skb(skb, &q->q, sgl); in cxgb4_eth_xmit()
1727 cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off, in cxgb4_eth_xmit()
1728 sgl_sdesc->addr); in cxgb4_eth_xmit()
1730 sgl_sdesc->skb = skb; in cxgb4_eth_xmit()
1733 txq_advance(&q->q, ndesc); in cxgb4_eth_xmit()
1735 cxgb4_ring_tx_db(adap, &q->q, ndesc); in cxgb4_eth_xmit()
1749 * 64-bit PCI DMA addresses.
1761 * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
1779 * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
1794 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), in t4vf_calc_tx_flits()
1805 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); in t4vf_calc_tx_flits()
1806 if (skb_shinfo(skb)->gso_size) in t4vf_calc_tx_flits()
1817 * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
1845 BUILD_BUG_ON(sizeof(wr->firmware) != in cxgb4_vf_eth_xmit()
1846 (sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) + in cxgb4_vf_eth_xmit()
1847 sizeof(wr->ethtype) + sizeof(wr->vlantci))); in cxgb4_vf_eth_xmit()
1848 fw_hdr_copy_len = sizeof(wr->firmware); in cxgb4_vf_eth_xmit()
1855 adapter = pi->adapter; in cxgb4_vf_eth_xmit()
1857 WARN_ON(qidx >= pi->nqsets); in cxgb4_vf_eth_xmit()
1858 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in cxgb4_vf_eth_xmit()
1863 reclaim_completed_tx(adapter, &txq->q, -1, true); in cxgb4_vf_eth_xmit()
1871 credits = txq_avail(&txq->q) - ndesc; in cxgb4_vf_eth_xmit()
1880 dev_err(adapter->pdev_dev, in cxgb4_vf_eth_xmit()
1882 dev->name, qidx); in cxgb4_vf_eth_xmit()
1886 last_desc = txq->q.pidx + ndesc - 1; in cxgb4_vf_eth_xmit()
1887 if (last_desc >= txq->q.size) in cxgb4_vf_eth_xmit()
1888 last_desc -= txq->q.size; in cxgb4_vf_eth_xmit()
1889 sgl_sdesc = &txq->q.sdesc[last_desc]; in cxgb4_vf_eth_xmit()
1892 unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, in cxgb4_vf_eth_xmit()
1893 sgl_sdesc->addr) < 0)) { in cxgb4_vf_eth_xmit()
1895 * be in-lined directly into the Work Request) and the mapping in cxgb4_vf_eth_xmit()
1898 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); in cxgb4_vf_eth_xmit()
1899 txq->mapping_err++; in cxgb4_vf_eth_xmit()
1903 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); in cxgb4_vf_eth_xmit()
1925 wr = (void *)&txq->q.desc[txq->q.pidx]; in cxgb4_vf_eth_xmit()
1926 wr->equiq_to_len16 = cpu_to_be32(wr_mid); in cxgb4_vf_eth_xmit()
1927 wr->r3[0] = cpu_to_be32(0); in cxgb4_vf_eth_xmit()
1928 wr->r3[1] = cpu_to_be32(0); in cxgb4_vf_eth_xmit()
1929 skb_copy_from_linear_data(skb, &wr->firmware, fw_hdr_copy_len); in cxgb4_vf_eth_xmit()
1937 if (ssi->gso_size) { in cxgb4_vf_eth_xmit()
1939 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; in cxgb4_vf_eth_xmit()
1941 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; in cxgb4_vf_eth_xmit()
1943 wr->op_immdlen = in cxgb4_vf_eth_xmit()
1948 lso->lso_ctrl = in cxgb4_vf_eth_xmit()
1955 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); in cxgb4_vf_eth_xmit()
1956 lso->ipid_ofst = cpu_to_be16(0); in cxgb4_vf_eth_xmit()
1957 lso->mss = cpu_to_be16(ssi->gso_size); in cxgb4_vf_eth_xmit()
1958 lso->seqno_offset = cpu_to_be32(0); in cxgb4_vf_eth_xmit()
1959 if (is_t4(adapter->params.chip)) in cxgb4_vf_eth_xmit()
1960 lso->len = cpu_to_be32(skb->len); in cxgb4_vf_eth_xmit()
1962 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); in cxgb4_vf_eth_xmit()
1977 txq->tso++; in cxgb4_vf_eth_xmit()
1978 txq->tx_cso += ssi->gso_segs; in cxgb4_vf_eth_xmit()
1983 ? skb->len + sizeof(*cpl) in cxgb4_vf_eth_xmit()
1985 wr->op_immdlen = in cxgb4_vf_eth_xmit()
1993 if (skb->ip_summed == CHECKSUM_PARTIAL) { in cxgb4_vf_eth_xmit()
1994 cntrl = hwcsum(adapter->params.chip, skb) | in cxgb4_vf_eth_xmit()
1996 txq->tx_cso++; in cxgb4_vf_eth_xmit()
2006 txq->vlan_ins++; in cxgb4_vf_eth_xmit()
2011 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | in cxgb4_vf_eth_xmit()
2012 TXPKT_INTF_V(pi->port_id) | in cxgb4_vf_eth_xmit()
2014 cpl->pack = cpu_to_be16(0); in cxgb4_vf_eth_xmit()
2015 cpl->len = cpu_to_be16(skb->len); in cxgb4_vf_eth_xmit()
2016 cpl->ctrl1 = cpu_to_be64(cntrl); in cxgb4_vf_eth_xmit()
2018 /* Fill in the body of the TX Packet CPL message with either in-lined in cxgb4_vf_eth_xmit()
2022 /* In-line the packet's data and free the skb since we don't in cxgb4_vf_eth_xmit()
2025 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); in cxgb4_vf_eth_xmit()
2065 struct sge_txq *tq = &txq->q; in cxgb4_vf_eth_xmit()
2073 if (unlikely((void *)sgl == (void *)tq->stat)) { in cxgb4_vf_eth_xmit()
2074 sgl = (void *)tq->desc; in cxgb4_vf_eth_xmit()
2075 end = (void *)((void *)tq->desc + in cxgb4_vf_eth_xmit()
2076 ((void *)end - (void *)tq->stat)); in cxgb4_vf_eth_xmit()
2079 cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr); in cxgb4_vf_eth_xmit()
2081 sgl_sdesc->skb = skb; in cxgb4_vf_eth_xmit()
2087 txq_advance(&txq->q, ndesc); in cxgb4_vf_eth_xmit()
2089 cxgb4_ring_tx_db(adapter, &txq->q, ndesc); in cxgb4_vf_eth_xmit()
2101 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
2110 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); in reclaim_completed_tx_imm()
2111 int reclaim = hw_cidx - q->cidx; in reclaim_completed_tx_imm()
2114 reclaim += q->size; in reclaim_completed_tx_imm()
2116 q->in_use -= reclaim; in reclaim_completed_tx_imm()
2117 q->cidx = hw_cidx; in reclaim_completed_tx_imm()
2125 val -= max; in eosw_txq_advance_index()
2135 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
2136 while (ndesc--) { in cxgb4_eosw_txq_free_desc()
2137 if (d->skb) { in cxgb4_eosw_txq_free_desc()
2138 if (d->addr[0]) { in cxgb4_eosw_txq_free_desc()
2139 unmap_skb(adap->pdev_dev, d->skb, d->addr); in cxgb4_eosw_txq_free_desc()
2140 memset(d->addr, 0, sizeof(d->addr)); in cxgb4_eosw_txq_free_desc()
2142 dev_consume_skb_any(d->skb); in cxgb4_eosw_txq_free_desc()
2143 d->skb = NULL; in cxgb4_eosw_txq_free_desc()
2145 eosw_txq_advance_index(&eosw_txq->last_cidx, 1, in cxgb4_eosw_txq_free_desc()
2146 eosw_txq->ndesc); in cxgb4_eosw_txq_free_desc()
2147 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
2153 eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc); in eosw_txq_advance()
2154 eosw_txq->inuse += n; in eosw_txq_advance()
2160 if (eosw_txq->inuse == eosw_txq->ndesc) in eosw_txq_enqueue()
2161 return -ENOMEM; in eosw_txq_enqueue()
2163 eosw_txq->desc[eosw_txq->pidx].skb = skb; in eosw_txq_enqueue()
2169 return eosw_txq->desc[eosw_txq->last_pidx].skb; in eosw_txq_peek()
2179 if (skb_shinfo(skb)->gso_size && in ethofld_calc_tx_flits()
2180 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) in ethofld_calc_tx_flits()
2188 if (skb_shinfo(skb)->nr_frags > 0) { in ethofld_calc_tx_flits()
2189 if (skb_headlen(skb) - hdr_len) in ethofld_calc_tx_flits()
2190 nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1); in ethofld_calc_tx_flits()
2192 nsgl = sgl_len(skb_shinfo(skb)->nr_frags); in ethofld_calc_tx_flits()
2193 } else if (skb->len - hdr_len) { in ethofld_calc_tx_flits()
2210 ver = ip_hdr(skb)->version; in write_eo_wr()
2211 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol; in write_eo_wr()
2215 if (skb_shinfo(skb)->gso_size && in write_eo_wr()
2216 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) in write_eo_wr()
2220 if (!eosw_txq->ncompl || in write_eo_wr()
2221 (eosw_txq->last_compl + wrlen16) >= in write_eo_wr()
2222 (adap->params.ofldq_wr_cred / 2)) { in write_eo_wr()
2224 eosw_txq->ncompl++; in write_eo_wr()
2225 eosw_txq->last_compl = 0; in write_eo_wr()
2228 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | in write_eo_wr()
2231 wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) | in write_eo_wr()
2232 FW_WR_FLOWID_V(eosw_txq->hwtid)); in write_eo_wr()
2233 wr->r3 = 0; in write_eo_wr()
2237 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; in write_eo_wr()
2238 wr->u.tcpseg.ethlen = skb_network_offset(skb); in write_eo_wr()
2239 wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); in write_eo_wr()
2240 wr->u.tcpseg.tcplen = tcp_hdrlen(skb); in write_eo_wr()
2241 wr->u.tcpseg.tsclk_tsoff = 0; in write_eo_wr()
2242 wr->u.tcpseg.r4 = 0; in write_eo_wr()
2243 wr->u.tcpseg.r5 = 0; in write_eo_wr()
2244 wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len); in write_eo_wr()
2246 if (ssi->gso_size) { in write_eo_wr()
2249 wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size); in write_eo_wr()
2252 wr->u.tcpseg.mss = cpu_to_be16(0xffff); in write_eo_wr()
2257 eosw_txq->cred -= wrlen16; in write_eo_wr()
2258 eosw_txq->last_compl += wrlen16; in write_eo_wr()
2279 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid]; in ethofld_hard_xmit()
2280 spin_lock(&eohw_txq->lock); in ethofld_hard_xmit()
2281 reclaim_completed_tx_imm(&eohw_txq->q); in ethofld_hard_xmit()
2283 d = &eosw_txq->desc[eosw_txq->last_pidx]; in ethofld_hard_xmit()
2284 skb = d->skb; in ethofld_hard_xmit()
2287 wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx]; in ethofld_hard_xmit()
2288 if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE && in ethofld_hard_xmit()
2289 eosw_txq->last_pidx == eosw_txq->flowc_idx)) { in ethofld_hard_xmit()
2290 hdr_len = skb->len; in ethofld_hard_xmit()
2293 if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND) in ethofld_hard_xmit()
2299 hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb)); in ethofld_hard_xmit()
2300 data_len = skb->len - hdr_len; in ethofld_hard_xmit()
2307 left = txq_avail(&eohw_txq->q) - ndesc; in ethofld_hard_xmit()
2316 if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) { in ethofld_hard_xmit()
2317 ret = -ENOMEM; in ethofld_hard_xmit()
2323 eosw_txq->state = next_state; in ethofld_hard_xmit()
2324 eosw_txq->cred -= wrlen16; in ethofld_hard_xmit()
2325 eosw_txq->ncompl++; in ethofld_hard_xmit()
2326 eosw_txq->last_compl = 0; in ethofld_hard_xmit()
2331 cntrl = hwcsum(adap->params.chip, skb); in ethofld_hard_xmit()
2335 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | in ethofld_hard_xmit()
2336 TXPKT_INTF_V(pi->tx_chan) | in ethofld_hard_xmit()
2337 TXPKT_PF_V(adap->pf)); in ethofld_hard_xmit()
2338 cpl->pack = 0; in ethofld_hard_xmit()
2339 cpl->len = cpu_to_be16(skb->len); in ethofld_hard_xmit()
2340 cpl->ctrl1 = cpu_to_be64(cntrl); in ethofld_hard_xmit()
2345 sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start, in ethofld_hard_xmit()
2348 ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr); in ethofld_hard_xmit()
2350 memset(d->addr, 0, sizeof(d->addr)); in ethofld_hard_xmit()
2351 eohw_txq->mapping_err++; in ethofld_hard_xmit()
2357 left = (u8 *)end - (u8 *)eohw_txq->q.stat; in ethofld_hard_xmit()
2358 end = (void *)eohw_txq->q.desc + left; in ethofld_hard_xmit()
2361 if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) { in ethofld_hard_xmit()
2366 left = (u8 *)end - (u8 *)eohw_txq->q.stat; in ethofld_hard_xmit()
2368 end = (void *)eohw_txq->q.desc + left; in ethofld_hard_xmit()
2369 sgl = (void *)eohw_txq->q.desc; in ethofld_hard_xmit()
2372 cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len, in ethofld_hard_xmit()
2373 d->addr); in ethofld_hard_xmit()
2376 if (skb_shinfo(skb)->gso_size) { in ethofld_hard_xmit()
2377 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) in ethofld_hard_xmit()
2378 eohw_txq->uso++; in ethofld_hard_xmit()
2380 eohw_txq->tso++; in ethofld_hard_xmit()
2381 eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs; in ethofld_hard_xmit()
2382 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in ethofld_hard_xmit()
2383 eohw_txq->tx_cso++; in ethofld_hard_xmit()
2387 eohw_txq->vlan_ins++; in ethofld_hard_xmit()
2389 txq_advance(&eohw_txq->q, ndesc); in ethofld_hard_xmit()
2390 cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc); in ethofld_hard_xmit()
2391 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc); in ethofld_hard_xmit()
2394 spin_unlock(&eohw_txq->lock); in ethofld_hard_xmit()
2403 switch (eosw_txq->state) { in ethofld_xmit()
2407 pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in ethofld_xmit()
2409 pktcount += eosw_txq->ndesc; in ethofld_xmit()
2418 while (pktcount--) { in ethofld_xmit()
2421 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, in ethofld_xmit()
2422 eosw_txq->ndesc); in ethofld_xmit()
2446 tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; in cxgb4_ethofld_xmit()
2447 qid = skb_get_queue_mapping(skb) - pi->nqsets; in cxgb4_ethofld_xmit()
2448 eosw_txq = &tc_port_mqprio->eosw_txq[qid]; in cxgb4_ethofld_xmit()
2449 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2450 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_xmit()
2465 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2469 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2480 if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) in t4_start_xmit()
2483 if (unlikely(qid >= pi->nqsets)) in t4_start_xmit()
2490 spin_lock(&adap->ptp_lock); in t4_start_xmit()
2492 spin_unlock(&adap->ptp_lock); in t4_start_xmit()
2501 int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in eosw_txq_flush_pending_skbs()
2502 int pidx = eosw_txq->pidx; in eosw_txq_flush_pending_skbs()
2509 pktcount += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2511 while (pktcount--) { in eosw_txq_flush_pending_skbs()
2512 pidx--; in eosw_txq_flush_pending_skbs()
2514 pidx += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2516 skb = eosw_txq->desc[pidx].skb; in eosw_txq_flush_pending_skbs()
2519 eosw_txq->desc[pidx].skb = NULL; in eosw_txq_flush_pending_skbs()
2520 eosw_txq->inuse--; in eosw_txq_flush_pending_skbs()
2524 eosw_txq->pidx = eosw_txq->last_pidx + 1; in eosw_txq_flush_pending_skbs()
2528 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
2553 entry = cxgb4_lookup_eotid(&adap->tids, eotid); in cxgb4_ethofld_send_flowc()
2555 return -ENOMEM; in cxgb4_ethofld_send_flowc()
2557 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_send_flowc()
2559 return -ENOMEM; in cxgb4_ethofld_send_flowc()
2561 if (!(adap->flags & CXGB4_FW_OK)) { in cxgb4_ethofld_send_flowc()
2563 complete(&eosw_txq->completion); in cxgb4_ethofld_send_flowc()
2564 return -EIO; in cxgb4_ethofld_send_flowc()
2569 return -ENOMEM; in cxgb4_ethofld_send_flowc()
2571 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2573 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED) in cxgb4_ethofld_send_flowc()
2578 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_send_flowc()
2587 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid]; in cxgb4_ethofld_send_flowc()
2588 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) | in cxgb4_ethofld_send_flowc()
2589 FW_WR_FLOWID_V(eosw_txq->hwtid)); in cxgb4_ethofld_send_flowc()
2590 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | in cxgb4_ethofld_send_flowc()
2593 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; in cxgb4_ethofld_send_flowc()
2594 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf)); in cxgb4_ethofld_send_flowc()
2595 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; in cxgb4_ethofld_send_flowc()
2596 flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan); in cxgb4_ethofld_send_flowc()
2597 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; in cxgb4_ethofld_send_flowc()
2598 flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan); in cxgb4_ethofld_send_flowc()
2599 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; in cxgb4_ethofld_send_flowc()
2600 flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id); in cxgb4_ethofld_send_flowc()
2601 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; in cxgb4_ethofld_send_flowc()
2602 flowc->mnemval[4].val = cpu_to_be32(tc); in cxgb4_ethofld_send_flowc()
2603 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE; in cxgb4_ethofld_send_flowc()
2604 flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ? in cxgb4_ethofld_send_flowc()
2618 eosw_txq->state = next_state; in cxgb4_ethofld_send_flowc()
2619 eosw_txq->flowc_idx = eosw_txq->pidx; in cxgb4_ethofld_send_flowc()
2623 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2628 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2633 * is_imm - check whether a packet can be sent as immediate data
2640 return skb->len <= MAX_CTRL_WR_LEN; in is_imm()
2644 * ctrlq_check_stop - check if a control queue is full and should stop
2655 reclaim_completed_tx_imm(&q->q); in ctrlq_check_stop()
2656 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { in ctrlq_check_stop()
2657 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); in ctrlq_check_stop()
2658 q->q.stops++; in ctrlq_check_stop()
2659 q->full = 1; in ctrlq_check_stop()
2668 struct adapter *adap = pi->adapter; in cxgb4_selftest_lb_pkt()
2683 lb = &pi->ethtool_lb; in cxgb4_selftest_lb_pkt()
2684 lb->loopback = 1; in cxgb4_selftest_lb_pkt()
2686 q = &adap->sge.ethtxq[pi->first_qset]; in cxgb4_selftest_lb_pkt()
2687 __netif_tx_lock(q->txq, smp_processor_id()); in cxgb4_selftest_lb_pkt()
2689 reclaim_completed_tx(adap, &q->q, -1, true); in cxgb4_selftest_lb_pkt()
2690 credits = txq_avail(&q->q) - ndesc; in cxgb4_selftest_lb_pkt()
2692 __netif_tx_unlock(q->txq); in cxgb4_selftest_lb_pkt()
2693 return -ENOMEM; in cxgb4_selftest_lb_pkt()
2696 wr = (void *)&q->q.desc[q->q.pidx]; in cxgb4_selftest_lb_pkt()
2699 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | in cxgb4_selftest_lb_pkt()
2702 wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2))); in cxgb4_selftest_lb_pkt()
2703 wr->r3 = cpu_to_be64(0); in cxgb4_selftest_lb_pkt()
2708 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) | in cxgb4_selftest_lb_pkt()
2709 TXPKT_INTF_V(pi->tx_chan + 4); in cxgb4_selftest_lb_pkt()
2711 cpl->ctrl0 = htonl(ctrl0); in cxgb4_selftest_lb_pkt()
2712 cpl->pack = htons(0); in cxgb4_selftest_lb_pkt()
2713 cpl->len = htons(pkt_len); in cxgb4_selftest_lb_pkt()
2714 cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F); in cxgb4_selftest_lb_pkt()
2718 ether_addr_copy(&sgl[i], netdev->dev_addr); in cxgb4_selftest_lb_pkt()
2724 init_completion(&lb->completion); in cxgb4_selftest_lb_pkt()
2725 txq_advance(&q->q, ndesc); in cxgb4_selftest_lb_pkt()
2726 cxgb4_ring_tx_db(adap, &q->q, ndesc); in cxgb4_selftest_lb_pkt()
2727 __netif_tx_unlock(q->txq); in cxgb4_selftest_lb_pkt()
2730 ret = wait_for_completion_timeout(&lb->completion, 10 * HZ); in cxgb4_selftest_lb_pkt()
2732 ret = -ETIMEDOUT; in cxgb4_selftest_lb_pkt()
2734 ret = lb->result; in cxgb4_selftest_lb_pkt()
2736 lb->loopback = 0; in cxgb4_selftest_lb_pkt()
2742 * ctrl_xmit - send a packet through an SGE control Tx queue
2760 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); in ctrl_xmit()
2761 spin_lock(&q->sendq.lock); in ctrl_xmit()
2763 if (unlikely(q->full)) { in ctrl_xmit()
2764 skb->priority = ndesc; /* save for restart */ in ctrl_xmit()
2765 __skb_queue_tail(&q->sendq, skb); in ctrl_xmit()
2766 spin_unlock(&q->sendq.lock); in ctrl_xmit()
2770 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; in ctrl_xmit()
2771 cxgb4_inline_tx_skb(skb, &q->q, wr); in ctrl_xmit()
2773 txq_advance(&q->q, ndesc); in ctrl_xmit()
2774 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) in ctrl_xmit()
2777 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); in ctrl_xmit()
2778 spin_unlock(&q->sendq.lock); in ctrl_xmit()
2785 * restart_ctrlq - restart a suspended control queue
2796 spin_lock(&q->sendq.lock); in restart_ctrlq()
2797 reclaim_completed_tx_imm(&q->q); in restart_ctrlq()
2798 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ in restart_ctrlq()
2800 while ((skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
2802 unsigned int ndesc = skb->priority; /* previously saved */ in restart_ctrlq()
2806 * wait times. q->full is still set so new skbs will be queued. in restart_ctrlq()
2808 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; in restart_ctrlq()
2809 txq_advance(&q->q, ndesc); in restart_ctrlq()
2810 spin_unlock(&q->sendq.lock); in restart_ctrlq()
2812 cxgb4_inline_tx_skb(skb, &q->q, wr); in restart_ctrlq()
2815 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { in restart_ctrlq()
2816 unsigned long old = q->q.stops; in restart_ctrlq()
2819 if (q->q.stops != old) { /* suspended anew */ in restart_ctrlq()
2820 spin_lock(&q->sendq.lock); in restart_ctrlq()
2825 cxgb4_ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
2828 spin_lock(&q->sendq.lock); in restart_ctrlq()
2830 q->full = 0; in restart_ctrlq()
2833 cxgb4_ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
2834 spin_unlock(&q->sendq.lock); in restart_ctrlq()
2838 * t4_mgmt_tx - send a management message
2849 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); in t4_mgmt_tx()
2855 * is_ofld_imm - check whether a packet can be sent as immediate data
2860 * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to an 8-bit field.
2866 struct work_request_hdr *req = (struct work_request_hdr *)skb->data; in is_ofld_imm()
2867 unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); in is_ofld_imm()
2870 return skb->len <= MAX_IMM_ULPTX_WR_LEN; in is_ofld_imm()
2872 return skb->len <= SGE_MAX_WR_LEN; in is_ofld_imm()
2874 return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN; in is_ofld_imm()
2878 * calc_tx_flits_ofld - calculate # of flits for an offload packet
2890 return DIV_ROUND_UP(skb->len, 8); in calc_tx_flits_ofld()
2893 cnt = skb_shinfo(skb)->nr_frags; in calc_tx_flits_ofld()
2900 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
2909 q->mapping_err++; in txq_stop_maperr()
2910 q->q.stops++; in txq_stop_maperr()
2911 set_bit(q->q.cntxt_id - q->adap->sge.egr_start, in txq_stop_maperr()
2912 q->adap->sge.txq_maperr); in txq_stop_maperr()
2916 * ofldtxq_stop - stop an offload Tx queue that has become full
2925 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); in ofldtxq_stop()
2926 q->q.stops++; in ofldtxq_stop()
2927 q->full = 1; in ofldtxq_stop()
2931 * service_ofldq - service/restart a suspended offload queue
2949 __must_hold(&q->sendq.lock) in service_ofldq()
2965 if (q->service_ofldq_running) in service_ofldq()
2967 q->service_ofldq_running = true; in service_ofldq()
2969 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { in service_ofldq()
2977 spin_unlock(&q->sendq.lock); in service_ofldq()
2979 cxgb4_reclaim_completed_tx(q->adap, &q->q, false); in service_ofldq()
2981 flits = skb->priority; /* previously saved */ in service_ofldq()
2983 credits = txq_avail(&q->q) - ndesc; in service_ofldq()
2986 ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data); in service_ofldq()
2988 pos = (u64 *)&q->q.desc[q->q.pidx]; in service_ofldq()
2990 cxgb4_inline_tx_skb(skb, &q->q, pos); in service_ofldq()
2991 else if (cxgb4_map_skb(q->adap->pdev_dev, skb, in service_ofldq()
2992 (dma_addr_t *)skb->head)) { in service_ofldq()
2994 spin_lock(&q->sendq.lock); in service_ofldq()
3000 * So we need to deal with wrap-around here. in service_ofldq()
3004 txq = &q->q; in service_ofldq()
3005 pos = (void *)inline_tx_skb_header(skb, &q->q, in service_ofldq()
3009 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
3010 end = (void *)txq->desc + left; in service_ofldq()
3017 if (pos == (u64 *)txq->stat) { in service_ofldq()
3018 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
3019 end = (void *)txq->desc + left; in service_ofldq()
3020 pos = (void *)txq->desc; in service_ofldq()
3023 cxgb4_write_sgl(skb, &q->q, (void *)pos, in service_ofldq()
3025 (dma_addr_t *)skb->head); in service_ofldq()
3027 skb->dev = q->adap->port[0]; in service_ofldq()
3028 skb->destructor = deferred_unmap_destructor; in service_ofldq()
3030 last_desc = q->q.pidx + ndesc - 1; in service_ofldq()
3031 if (last_desc >= q->q.size) in service_ofldq()
3032 last_desc -= q->q.size; in service_ofldq()
3033 q->q.sdesc[last_desc].skb = skb; in service_ofldq()
3036 txq_advance(&q->q, ndesc); in service_ofldq()
3039 cxgb4_ring_tx_db(q->adap, &q->q, written); in service_ofldq()
3048 spin_lock(&q->sendq.lock); in service_ofldq()
3049 __skb_unlink(skb, &q->sendq); in service_ofldq()
3054 cxgb4_ring_tx_db(q->adap, &q->q, written); in service_ofldq()
3059 q->service_ofldq_running = false; in service_ofldq()
3063 * ofld_xmit - send a packet through an offload queue
3071 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ in ofld_xmit()
3072 spin_lock(&q->sendq.lock); in ofld_xmit()
3082 __skb_queue_tail(&q->sendq, skb); in ofld_xmit()
3083 if (q->sendq.qlen == 1) in ofld_xmit()
3086 spin_unlock(&q->sendq.lock); in ofld_xmit()
3091 * restart_ofldq - restart a suspended offload queue
3100 spin_lock(&q->sendq.lock); in restart_ofldq()
3101 q->full = 0; /* the queue actually is completely empty now */ in restart_ofldq()
3103 spin_unlock(&q->sendq.lock); in restart_ofldq()
3107 * skb_txq - return the Tx queue an offload packet should use
3111 * 1-15 in the packet's queue_mapping.
3115 return skb->queue_mapping >> 1; in skb_txq()
3119 * is_ctrl_pkt - return whether an offload packet is a control packet
3127 return skb->queue_mapping & 1; in is_ctrl_pkt()
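skb_txq() and is_ctrl_pkt() decode a convention in which bit 0 of skb->queue_mapping flags a control packet and bits 1-15 carry the queue index. The matching encoder would look like the sketch below (an illustration of the convention, not necessarily the driver's own helper):

#include <linux/skbuff.h>

/* Encode the selection the way skb_txq()/is_ctrl_pkt() decode it:
 * bit 0 = control-queue flag, bits 1-15 = offload queue index.
 */
static inline void set_ofld_txq_sketch(struct sk_buff *skb,
				       unsigned int queue, bool is_ctrl)
{
	skb->queue_mapping = (queue << 1) | (is_ctrl ? 1 : 0);
}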
3139 if (adap->tids.nsftids) in uld_send()
3141 return ctrl_xmit(&adap->sge.ctrlq[idx], skb); in uld_send()
3144 txq_info = adap->sge.uld_txq_info[tx_uld_type]; in uld_send()
3151 txq = &txq_info->uldtxq[idx]; in uld_send()
3156 * t4_ofld_send - send an offload packet
3162 * should be sent as regular or control, bits 1-15 select the queue.
3175 * cxgb4_ofld_send - send an offload packet
3192 int left = (void *)q->stat - pos; in inline_tx_header()
3200 memcpy(q->desc, src + left, length - left); in inline_tx_header()
3201 pos = (void *)q->desc + (length - left); in inline_tx_header()
3203 /* 0-pad to multiple of 16 */ in inline_tx_header()
3213 * ofld_xmit_direct - copy a WR into offload queue
3227 /* Use the lower limit as the cut-off */ in ofld_xmit_direct()
3237 if (!spin_trylock(&q->sendq.lock)) in ofld_xmit_direct()
3240 if (q->full || !skb_queue_empty(&q->sendq) || in ofld_xmit_direct()
3241 q->service_ofldq_running) { in ofld_xmit_direct()
3242 spin_unlock(&q->sendq.lock); in ofld_xmit_direct()
3246 credits = txq_avail(&q->q) - ndesc; in ofld_xmit_direct()
3247 pos = (u64 *)&q->q.desc[q->q.pidx]; in ofld_xmit_direct()
3249 /* ofldtxq_stop modifies WR header in-situ */ in ofld_xmit_direct()
3250 inline_tx_header(src, &q->q, pos, len); in ofld_xmit_direct()
3253 txq_advance(&q->q, ndesc); in ofld_xmit_direct()
3254 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); in ofld_xmit_direct()
3256 spin_unlock(&q->sendq.lock); in ofld_xmit_direct()
3271 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in cxgb4_immdata_send()
3277 txq = &txq_info->uldtxq[idx]; in cxgb4_immdata_send()
3286 * t4_crypto_send - send crypto packet
3292 * should be sent as regular or control, bits 1-15 select the queue.
3305 * cxgb4_crypto_send - send crypto packet
3324 __skb_fill_page_desc(skb, 0, gl->frags[0].page, in copy_frags()
3325 gl->frags[0].offset + offset, in copy_frags()
3326 gl->frags[0].size - offset); in copy_frags()
3327 skb_shinfo(skb)->nr_frags = gl->nfrags; in copy_frags()
3328 for (i = 1; i < gl->nfrags; i++) in copy_frags()
3329 __skb_fill_page_desc(skb, i, gl->frags[i].page, in copy_frags()
3330 gl->frags[i].offset, in copy_frags()
3331 gl->frags[i].size); in copy_frags()
3334 get_page(gl->frags[gl->nfrags - 1].page); in copy_frags()
3338 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
3352 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer in cxgb4_pktgl_to_skb()
3356 if (gl->tot_len <= RX_COPY_THRES) { in cxgb4_pktgl_to_skb()
3357 skb = dev_alloc_skb(gl->tot_len); in cxgb4_pktgl_to_skb()
3360 __skb_put(skb, gl->tot_len); in cxgb4_pktgl_to_skb()
3361 skb_copy_to_linear_data(skb, gl->va, gl->tot_len); in cxgb4_pktgl_to_skb()
3367 skb_copy_to_linear_data(skb, gl->va, pull_len); in cxgb4_pktgl_to_skb()
3370 skb->len = gl->tot_len; in cxgb4_pktgl_to_skb()
3371 skb->data_len = skb->len - pull_len; in cxgb4_pktgl_to_skb()
3372 skb->truesize += skb->data_len; in cxgb4_pktgl_to_skb()
3379 * t4_pktgl_free - free a packet gather list
3390 for (p = gl->frags, n = gl->nfrags - 1; n--; p++) in t4_pktgl_free()
3391 put_page(p->page); in t4_pktgl_free()
3409 if (is_t4(adap->params.chip)) in handle_trace_pkt()
3415 skb->protocol = htons(0xffff); in handle_trace_pkt()
3416 skb->dev = adap->port[0]; in handle_trace_pkt()
3422 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
3427 * Every ingress queue entry has a 60-bit timestamp; convert that timestamp
3435 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2); in cxgb4_sgetim_to_hwtstamp()
3437 ns = div_u64(tmp, adap->params.vpd.cclk); in cxgb4_sgetim_to_hwtstamp()
3440 hwtstamps->hwtstamp = ns_to_ktime(ns); in cxgb4_sgetim_to_hwtstamp()
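cxgb4_sgetim_to_hwtstamp() scales the 60-bit SGE timestamp (core-clock ticks) to nanoseconds with rounding: ns = (ticks * 1,000,000 + cclk/2) / cclk, which assumes adap->params.vpd.cclk holds the core clock in kHz. A worked example with an assumed clock rate:

/* Assumption for illustration: a 500,000 kHz (500 MHz) core clock,
 * i.e. one SGE timestamp tick every 2 ns.
 */
static inline unsigned long long sgetim_to_ns_example(unsigned long long ticks)
{
	unsigned long long cclk_khz = 500000;

	return (ticks * 1000 * 1000 + cclk_khz / 2) / cclk_khz;	/* == 2 * ticks */
}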
3446 struct adapter *adapter = rxq->rspq.adap; in do_gro()
3447 struct sge *s = &adapter->sge; in do_gro()
3452 skb = napi_get_frags(&rxq->rspq.napi); in do_gro()
3455 rxq->stats.rx_drops++; in do_gro()
3459 copy_frags(skb, gl, s->pktshift); in do_gro()
3461 skb->csum_level = 1; in do_gro()
3462 skb->len = gl->tot_len - s->pktshift; in do_gro()
3463 skb->data_len = skb->len; in do_gro()
3464 skb->truesize += skb->data_len; in do_gro()
3465 skb->ip_summed = CHECKSUM_UNNECESSARY; in do_gro()
3466 skb_record_rx_queue(skb, rxq->rspq.idx); in do_gro()
3467 pi = netdev_priv(skb->dev); in do_gro()
3468 if (pi->rxtstamp) in do_gro()
3470 gl->sgetstamp); in do_gro()
3471 if (rxq->rspq.netdev->features & NETIF_F_RXHASH) in do_gro()
3472 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, in do_gro()
3475 if (unlikely(pkt->vlan_ex)) { in do_gro()
3476 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); in do_gro()
3477 rxq->stats.vlan_ex++; in do_gro()
3479 ret = napi_gro_frags(&rxq->rspq.napi); in do_gro()
3481 rxq->stats.lro_pkts++; in do_gro()
3483 rxq->stats.lro_merged++; in do_gro()
3484 rxq->stats.pkts++; in do_gro()
3485 rxq->stats.rx_cso++; in do_gro()
3495 * t4_systim_to_hwstamp - read hardware time stamp
3510 cpl = (struct cpl_rx_mps_pkt *)skb->data; in t4_systim_to_hwstamp()
3511 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) & in t4_systim_to_hwstamp()
3515 data = skb->data + sizeof(*cpl); in t4_systim_to_hwstamp()
3517 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN; in t4_systim_to_hwstamp()
3518 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short)) in t4_systim_to_hwstamp()
3523 hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data)); in t4_systim_to_hwstamp()
3529 * t4_rx_hststamp - Recv PTP Event Message
3543 !is_t4(adapter->params.chip))) { in t4_rx_hststamp()
3547 rxq->stats.rx_drops++; in t4_rx_hststamp()
3555 * t4_tx_hststamp - Loopback PTP Transmit Event Message
3567 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) { in t4_tx_hststamp()
3576 * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
3577 * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
3583 * Associated Ethernet RX Response Queue with CPL_SGE_EGR_UPDATE
3584 * messages. This adds a small load to PCIe Link RX bandwidth and,
3594 u8 opcode = ((const struct rss_header *)rsp)->opcode; in t4_tx_completion_handler()
3595 struct port_info *pi = netdev_priv(rspq->netdev); in t4_tx_completion_handler()
3596 struct adapter *adapter = rspq->adap; in t4_tx_completion_handler()
3597 struct sge *s = &adapter->sge; in t4_tx_completion_handler()
3606 ((const struct cpl_fw4_msg *)rsp)->type == in t4_tx_completion_handler()
3609 opcode = ((const struct rss_header *)rsp)->opcode; in t4_tx_completion_handler()
3614 pr_info("%s: unexpected FW4/CPL %#x on Rx queue\n", in t4_tx_completion_handler()
3619 txq = &s->ethtxq[pi->first_qset + rspq->idx]; in t4_tx_completion_handler()
3632 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) { in t4_tx_completion_handler()
3636 WRITE_ONCE(txq->q.stat->cidx, egr->cidx); in t4_tx_completion_handler()
3639 t4_sge_eth_txq_egress_update(adapter, txq, -1); in t4_tx_completion_handler()
3644 struct adapter *adap = pi->adapter; in cxgb4_validate_lb_pkt()
3646 struct sge *s = &adap->sge; in cxgb4_validate_lb_pkt()
3651 netdev = adap->port[pi->port_id]; in cxgb4_validate_lb_pkt()
3652 lb = &pi->ethtool_lb; in cxgb4_validate_lb_pkt()
3653 data = si->va + s->pktshift; in cxgb4_validate_lb_pkt()
3656 if (!ether_addr_equal(data + i, netdev->dev_addr)) in cxgb4_validate_lb_pkt()
3657 return -1; in cxgb4_validate_lb_pkt()
3661 lb->result = -EIO; in cxgb4_validate_lb_pkt()
3663 complete(&lb->completion); in cxgb4_validate_lb_pkt()
3668 * t4_ethrx_handler - process an ingress ethernet packet
3682 struct adapter *adapter = q->adap; in t4_ethrx_handler()
3683 struct sge *s = &q->adap->sge; in t4_ethrx_handler()
3684 int cpl_trace_pkt = is_t4(q->adap->params.chip) ? in t4_ethrx_handler()
3690 pi = netdev_priv(q->netdev); in t4_ethrx_handler()
3701 return handle_trace_pkt(q->adap, si); in t4_ethrx_handler()
3705 if (q->adap->params.tp.rx_pkt_encap) { in t4_ethrx_handler()
3706 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); in t4_ethrx_handler()
3707 tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec)); in t4_ethrx_handler()
3709 err_vec = be16_to_cpu(pkt->err_vec); in t4_ethrx_handler()
3712 csum_ok = pkt->csum_calc && !err_vec && in t4_ethrx_handler()
3713 (q->netdev->features & NETIF_F_RXCSUM); in t4_ethrx_handler()
3716 rxq->stats.bad_rx_pkts++; in t4_ethrx_handler()
3718 if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) { in t4_ethrx_handler()
3724 if (((pkt->l2info & htonl(RXF_TCP_F)) || in t4_ethrx_handler()
3726 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { in t4_ethrx_handler()
3734 rxq->stats.rx_drops++; in t4_ethrx_handler()
3738 /* Handle PTP Event Rx packet */ in t4_ethrx_handler()
3739 if (unlikely(pi->ptp_enable)) { in t4_ethrx_handler()
3745 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */ in t4_ethrx_handler()
3748 if (unlikely(pi->ptp_enable && !ret && in t4_ethrx_handler()
3749 (pkt->l2info & htonl(RXF_UDP_F)) && in t4_ethrx_handler()
3751 if (!t4_tx_hststamp(adapter, skb, q->netdev)) in t4_ethrx_handler()
3755 skb->protocol = eth_type_trans(skb, q->netdev); in t4_ethrx_handler()
3756 skb_record_rx_queue(skb, q->idx); in t4_ethrx_handler()
3757 if (skb->dev->features & NETIF_F_RXHASH) in t4_ethrx_handler()
3758 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, in t4_ethrx_handler()
3761 rxq->stats.pkts++; in t4_ethrx_handler()
3763 if (pi->rxtstamp) in t4_ethrx_handler()
3764 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb), in t4_ethrx_handler()
3765 si->sgetstamp); in t4_ethrx_handler()
3766 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { in t4_ethrx_handler()
3767 if (!pkt->ip_frag) { in t4_ethrx_handler()
3768 skb->ip_summed = CHECKSUM_UNNECESSARY; in t4_ethrx_handler()
3769 rxq->stats.rx_cso++; in t4_ethrx_handler()
3770 } else if (pkt->l2info & htonl(RXF_IP_F)) { in t4_ethrx_handler()
3771 __sum16 c = (__force __sum16)pkt->csum; in t4_ethrx_handler()
3772 skb->csum = csum_unfold(c); in t4_ethrx_handler()
3775 skb->ip_summed = CHECKSUM_UNNECESSARY; in t4_ethrx_handler()
3776 skb->csum_level = 1; in t4_ethrx_handler()
3778 skb->ip_summed = CHECKSUM_COMPLETE; in t4_ethrx_handler()
3780 rxq->stats.rx_cso++; in t4_ethrx_handler()
3788 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { in t4_ethrx_handler()
3789 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && in t4_ethrx_handler()
3790 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { in t4_ethrx_handler()
3791 if (q->adap->params.tp.rx_pkt_encap) in t4_ethrx_handler()
3797 skb->ip_summed = CHECKSUM_UNNECESSARY; in t4_ethrx_handler()
3805 if (unlikely(pkt->vlan_ex)) { in t4_ethrx_handler()
3806 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); in t4_ethrx_handler()
3807 rxq->stats.vlan_ex++; in t4_ethrx_handler()
3809 skb_mark_napi_id(skb, &q->napi); in t4_ethrx_handler()
3815 * restore_rx_bufs - put back a packet's Rx buffers
3820 * Puts the Rx buffers associated with @si back on an FL. The buffers in restore_rx_bufs()
3834 while (frags--) { in restore_rx_bufs()
3835 if (q->cidx == 0) in restore_rx_bufs()
3836 q->cidx = q->size - 1; in restore_rx_bufs()
3838 q->cidx--; in restore_rx_bufs()
3839 d = &q->sdesc[q->cidx]; in restore_rx_bufs()
3840 d->page = si->frags[frags].page; in restore_rx_bufs()
3841 d->dma_addr |= RX_UNMAPPED_BUF; in restore_rx_bufs()
3842 q->avail++; in restore_rx_bufs()
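/* Editor's illustrative aside: restore_rx_bufs() above walks the Free List
 * consumer index *backwards* one slot per returned fragment, wrapping from 0
 * to size - 1.  A minimal sketch of that rewind on a plain array-backed ring
 * (names are hypothetical):
 */
static unsigned int demo_ring_rewind(unsigned int cidx, unsigned int size)
{
	return (cidx == 0) ? size - 1 : cidx - 1;
}

/* e.g. demo_ring_rewind(0, 64) == 63, demo_ring_rewind(5, 64) == 4 */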
3847 * is_new_response - check if a response is newly written
3857 return (r->type_gen >> RSPD_GEN_S) == q->gen; in is_new_response()
3861 * rspq_next - advance to the next entry in a response queue
3868 q->cur_desc = (void *)q->cur_desc + q->iqe_len; in rspq_next()
3869 if (unlikely(++q->cidx == q->size)) { in rspq_next()
3870 q->cidx = 0; in rspq_next()
3871 q->gen ^= 1; in rspq_next()
3872 q->cur_desc = q->desc; in rspq_next()
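/* Editor's illustrative aside: a minimal sketch (hypothetical types, not the
 * driver's) of the generation-bit scheme used by is_new_response() and
 * rspq_next() above.  The consumer owns a 1-bit "gen" that flips on every
 * wrap of the ring, and hardware writes the current gen into each entry it
 * produces; an entry is "new" only when its gen matches the consumer's, so
 * stale entries left over from the previous lap are ignored.  The gen bit is
 * placed in bit 7 here purely as an example.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_rsp  { uint8_t type_gen; };		/* gen assumed in bit 7 */
struct demo_rspq {
	struct demo_rsp *desc;
	unsigned int cidx, size;
	uint8_t gen;
};

static bool demo_is_new(const struct demo_rspq *q, const struct demo_rsp *r)
{
	return (r->type_gen >> 7) == q->gen;
}

static void demo_next(struct demo_rspq *q)
{
	if (++q->cidx == q->size) {
		q->cidx = 0;
		q->gen ^= 1;	/* wrapped: entries must now carry the new gen */
	}
}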
3877 * process_responses - process responses from an SGE response queue
3895 struct adapter *adapter = q->adap; in process_responses()
3896 struct sge *s = &adapter->sge; in process_responses()
3899 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); in process_responses()
3901 if (q->flush_handler) in process_responses()
3902 q->flush_handler(q); in process_responses()
3907 rsp_type = RSPD_TYPE_G(rc->type_gen); in process_responses()
3912 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; in process_responses()
3915 if (likely(q->offset > 0)) { in process_responses()
3916 free_rx_bufs(q->adap, &rxq->fl, 1); in process_responses()
3917 q->offset = 0; in process_responses()
3925 rsd = &rxq->fl.sdesc[rxq->fl.cidx]; in process_responses()
3927 fp->page = rsd->page; in process_responses()
3928 fp->offset = q->offset; in process_responses()
3929 fp->size = min(bufsz, len); in process_responses()
3930 len -= fp->size; in process_responses()
3933 unmap_rx_buf(q->adap, &rxq->fl); in process_responses()
3937 be64_to_cpu(rc->last_flit)); in process_responses()
3942 dma_sync_single_for_cpu(q->adap->pdev_dev, in process_responses()
3944 fp->size, DMA_FROM_DEVICE); in process_responses()
3951 ret = q->handler(q, q->cur_desc, &si); in process_responses()
3953 q->offset += ALIGN(fp->size, s->fl_align); in process_responses()
3955 restore_rx_bufs(&si, &rxq->fl, frags); in process_responses()
3957 ret = q->handler(q, q->cur_desc, NULL); in process_responses()
3959 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); in process_responses()
3964 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX); in process_responses()
3969 budget_left--; in process_responses()
3972 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16) in process_responses()
3973 __refill_fl(q->adap, &rxq->fl); in process_responses()
3974 return budget - budget_left; in process_responses()
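/* Editor's illustrative aside: process_responses() above is a classic
 * budgeted poll loop -- consume ring entries while work remains and budget is
 * left, then report how much of the budget was actually used so the caller
 * (ultimately the NAPI core) can decide whether to keep polling.  A
 * stripped-down sketch with hypothetical types:
 */
struct demo_poller {
	int (*has_work)(void *ctx);	/* nonzero if an entry is ready */
	void (*consume)(void *ctx);	/* handles and advances past one entry */
	void *ctx;
};

static int demo_poll(struct demo_poller *p, int budget)
{
	int budget_left = budget;

	while (budget_left && p->has_work(p->ctx)) {
		p->consume(p->ctx);
		budget_left--;
	}
	return budget - budget_left;	/* work actually done */
}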
3978 * napi_rx_handler - the NAPI handler for Rx processing
3985 * is not a concern at all with MSI-X as non-data interrupts then have
4000 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); in napi_rx_handler()
4002 if (q->adaptive_rx) { in napi_rx_handler()
4007 timer_index = timer_index - 1; in napi_rx_handler()
4009 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); in napi_rx_handler()
4010 q->next_intr_params = in napi_rx_handler()
4013 params = q->next_intr_params; in napi_rx_handler()
4015 params = q->next_intr_params; in napi_rx_handler()
4016 q->next_intr_params = q->intr_params; in napi_rx_handler()
4026 if (unlikely(q->bar2_addr == NULL)) { in napi_rx_handler()
4027 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), in napi_rx_handler()
4028 val | INGRESSQID_V((u32)q->cntxt_id)); in napi_rx_handler()
4030 writel(val | INGRESSQID_V(q->bar2_qid), in napi_rx_handler()
4031 q->bar2_addr + SGE_UDB_GTS); in napi_rx_handler()
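/* Editor's illustrative aside: the adaptive-Rx branch of napi_rx_handler()
 * above nudges the interrupt hold-off timer index toward a longer hold-off
 * when polls are busy and a shorter one when they are not (the exact busy
 * threshold is elided in the listing above), clamped to the available timer
 * registers.  A standalone sketch with hypothetical names and an assumed
 * timer count:
 */
#include <stdio.h>

#define DEMO_NTIMERS	6	/* assumed number of SGE hold-off timers */

static int demo_next_timer_idx(int cur_idx, int pkts_polled, int busy_thresh)
{
	int idx = (pkts_polled > busy_thresh) ? cur_idx + 1 : cur_idx - 1;

	if (idx < 0)
		idx = 0;
	if (idx > DEMO_NTIMERS - 1)
		idx = DEMO_NTIMERS - 1;
	return idx;
}

int main(void)
{
	printf("%d\n", demo_next_timer_idx(2, 64, 8));	/* busy poll -> 3 */
	printf("%d\n", demo_next_timer_idx(0, 1, 8));	/* idle poll -> stays 0 */
	return 0;
}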
4043 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_restart()
4044 pktcount = eosw_txq->cidx - eosw_txq->last_cidx; in cxgb4_ethofld_restart()
4046 pktcount += eosw_txq->ndesc; in cxgb4_ethofld_restart()
4049 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev), in cxgb4_ethofld_restart()
4051 eosw_txq->inuse -= pktcount; in cxgb4_ethofld_restart()
4057 ethofld_xmit(eosw_txq->netdev, eosw_txq); in cxgb4_ethofld_restart()
4058 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_restart()
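/* Editor's illustrative aside: the completed-packet count computed in
 * cxgb4_ethofld_restart() above is simply the distance from last_cidx to
 * cidx on a ring of ndesc entries, wrapping when cidx has already lapped the
 * end.  A minimal sketch of that computation (names are hypothetical):
 */
static unsigned int demo_ring_distance(int cidx, int last_cidx, int ndesc)
{
	int dist = cidx - last_cidx;

	if (dist < 0)
		dist += ndesc;
	return dist;
}

/* e.g. demo_ring_distance(3, 60, 64) == 7 */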
4061 /* cxgb4_ethofld_rx_handler - process ETHOFLD Tx completions in cxgb4_ethofld_rx_handler()
4072 u8 opcode = ((const struct rss_header *)rsp)->opcode; in cxgb4_ethofld_rx_handler()
4087 eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) - in cxgb4_ethofld_rx_handler()
4088 q->adap->tids.eotid_base; in cxgb4_ethofld_rx_handler()
4089 entry = cxgb4_lookup_eotid(&q->adap->tids, eotid); in cxgb4_ethofld_rx_handler()
4093 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_rx_handler()
4097 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4098 credits = cpl->credits; in cxgb4_ethofld_rx_handler()
4100 skb = eosw_txq->desc[eosw_txq->cidx].skb; in cxgb4_ethofld_rx_handler()
4104 if (unlikely((eosw_txq->state == in cxgb4_ethofld_rx_handler()
4106 eosw_txq->state == in cxgb4_ethofld_rx_handler()
4108 eosw_txq->cidx == eosw_txq->flowc_idx)) { in cxgb4_ethofld_rx_handler()
4109 flits = DIV_ROUND_UP(skb->len, 8); in cxgb4_ethofld_rx_handler()
4110 if (eosw_txq->state == in cxgb4_ethofld_rx_handler()
4112 eosw_txq->state = CXGB4_EO_STATE_ACTIVE; in cxgb4_ethofld_rx_handler()
4114 eosw_txq->state = CXGB4_EO_STATE_CLOSED; in cxgb4_ethofld_rx_handler()
4115 complete(&eosw_txq->completion); in cxgb4_ethofld_rx_handler()
4117 hdr_len = eth_get_headlen(eosw_txq->netdev, in cxgb4_ethofld_rx_handler()
4118 skb->data, in cxgb4_ethofld_rx_handler()
4120 flits = ethofld_calc_tx_flits(q->adap, skb, in cxgb4_ethofld_rx_handler()
4123 eosw_txq_advance_index(&eosw_txq->cidx, 1, in cxgb4_ethofld_rx_handler()
4124 eosw_txq->ndesc); in cxgb4_ethofld_rx_handler()
4126 credits -= wrlen16; in cxgb4_ethofld_rx_handler()
4129 eosw_txq->cred += cpl->credits; in cxgb4_ethofld_rx_handler()
4130 eosw_txq->ncompl--; in cxgb4_ethofld_rx_handler()
4132 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4137 tasklet_schedule(&eosw_txq->qresume_tsk); in cxgb4_ethofld_rx_handler()
4145 * The MSI-X interrupt handler for an SGE response queue.
4151 napi_schedule(&q->napi); in t4_sge_intr_msix()
4163 struct sge_rspq *q = &adap->sge.intrq; in process_intrq()
4166 spin_lock(&adap->sge.intrq_lock); in process_intrq()
4168 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); in process_intrq()
4173 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) { in process_intrq()
4174 unsigned int qid = ntohl(rc->pldbuflen_qid); in process_intrq()
4176 qid -= adap->sge.ingr_start; in process_intrq()
4177 napi_schedule(&adap->sge.ingr_map[qid]->napi); in process_intrq()
4183 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); in process_intrq()
4188 if (unlikely(q->bar2_addr == NULL)) { in process_intrq()
4190 val | INGRESSQID_V(q->cntxt_id)); in process_intrq()
4192 writel(val | INGRESSQID_V(q->bar2_qid), in process_intrq()
4193 q->bar2_addr + SGE_UDB_GTS); in process_intrq()
4196 spin_unlock(&adap->sge.intrq_lock); in process_intrq()
4208 if (adap->flags & CXGB4_MASTER_PF) in t4_intr_msi()
4224 if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) | in t4_intr_intx()
4231 * t4_intr_handler - select the top-level interrupt handler
4234 * Selects the top-level interrupt handler based on the type of interrupts
4235 * (MSI-X, MSI, or INTx).
4239 if (adap->flags & CXGB4_USING_MSIX) in t4_intr_handler()
4241 if (adap->flags & CXGB4_USING_MSI) in t4_intr_handler()
4251 struct sge *s = &adap->sge; in sge_rx_timer_cb()
4253 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) in sge_rx_timer_cb()
4254 for (m = s->starving_fl[i]; m; m &= m - 1) { in sge_rx_timer_cb()
4257 struct sge_fl *fl = s->egr_map[id]; in sge_rx_timer_cb()
4259 clear_bit(id, s->starving_fl); in sge_rx_timer_cb()
4264 if (napi_reschedule(&rxq->rspq.napi)) in sge_rx_timer_cb()
4265 fl->starving++; in sge_rx_timer_cb()
4267 set_bit(id, s->starving_fl); in sge_rx_timer_cb()
4270 /* The remainder of the SGE RX Timer Callback routine is dedicated to in sge_rx_timer_cb()
4274 if (!(adap->flags & CXGB4_MASTER_PF)) in sge_rx_timer_cb()
4277 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); in sge_rx_timer_cb()
4280 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); in sge_rx_timer_cb()
4286 struct sge *s = &adap->sge; in sge_tx_timer_cb()
4290 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) in sge_tx_timer_cb()
4291 for (m = s->txq_maperr[i]; m; m &= m - 1) { in sge_tx_timer_cb()
4293 struct sge_uld_txq *txq = s->egr_map[id]; in sge_tx_timer_cb()
4295 clear_bit(id, s->txq_maperr); in sge_tx_timer_cb()
4296 tasklet_schedule(&txq->qresume_tsk); in sge_tx_timer_cb()
4299 if (!is_t4(adap->params.chip)) { in sge_tx_timer_cb()
4300 struct sge_eth_txq *q = &s->ptptxq; in sge_tx_timer_cb()
4303 spin_lock(&adap->ptp_lock); in sge_tx_timer_cb()
4304 avail = reclaimable(&q->q); in sge_tx_timer_cb()
4307 free_tx_desc(adap, &q->q, avail, false); in sge_tx_timer_cb()
4308 q->q.in_use -= avail; in sge_tx_timer_cb()
4310 spin_unlock(&adap->ptp_lock); in sge_tx_timer_cb()
4314 i = s->ethtxq_rover; in sge_tx_timer_cb()
4316 budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i], in sge_tx_timer_cb()
4321 if (++i >= s->ethqsets) in sge_tx_timer_cb()
4323 } while (i != s->ethtxq_rover); in sge_tx_timer_cb()
4324 s->ethtxq_rover = i; in sge_tx_timer_cb()
4338 mod_timer(&s->tx_timer, jiffies + period); in sge_tx_timer_cb()
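/* Editor's illustrative aside: the Tx timer above services the Ethernet Tx
 * queues round-robin, starting from the queue remembered in ethtxq_rover,
 * spending a fixed budget of reclaim work per tick and resuming from where
 * it stopped on the next tick.  A compact sketch of that rover pattern with
 * hypothetical names; demo_service() stands in for the per-queue reclaim and
 * returns how much budget it consumed.
 */
struct demo_txq { int pending; };

static int demo_service(struct demo_txq *q, int budget)
{
	int done = (q->pending < budget) ? q->pending : budget;

	q->pending -= done;
	return done;
}

static unsigned int demo_tx_timer(struct demo_txq *qs, unsigned int nq,
				  unsigned int rover, int budget)
{
	unsigned int i = rover;

	do {
		budget -= demo_service(&qs[i], budget);
		if (!budget)
			break;
		if (++i >= nq)
			i = 0;
	} while (i != rover);

	return i;	/* new rover: resume here on the next tick */
}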
4342 * bar2_address - return the BAR2 address for an SGE Queue's Registers
4367 return adapter->bar2 + bar2_qoffset; in bar2_address()
4370 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
4371 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
4380 struct sge *s = &adap->sge; in t4_sge_alloc_rxq()
4382 int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING); in t4_sge_alloc_rxq()
4385 iq->size = roundup(iq->size, 16); in t4_sge_alloc_rxq()
4387 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, in t4_sge_alloc_rxq()
4388 &iq->phys_addr, NULL, 0, in t4_sge_alloc_rxq()
4389 dev_to_node(adap->pdev_dev)); in t4_sge_alloc_rxq()
4390 if (!iq->desc) in t4_sge_alloc_rxq()
4391 return -ENOMEM; in t4_sge_alloc_rxq()
4396 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0)); in t4_sge_alloc_rxq()
4400 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | in t4_sge_alloc_rxq()
4404 -intr_idx - 1)); in t4_sge_alloc_rxq()
4405 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | in t4_sge_alloc_rxq()
4407 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | in t4_sge_alloc_rxq()
4408 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); in t4_sge_alloc_rxq()
4409 c.iqsize = htons(iq->size); in t4_sge_alloc_rxq()
4410 c.iqaddr = cpu_to_be64(iq->phys_addr); in t4_sge_alloc_rxq()
4418 CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_rxq()
4425 * (fl_starve_thres - 1). in t4_sge_alloc_rxq()
4427 if (fl->size < s->fl_starve_thres - 1 + 2 * 8) in t4_sge_alloc_rxq()
4428 fl->size = s->fl_starve_thres - 1 + 2 * 8; in t4_sge_alloc_rxq()
4429 fl->size = roundup(fl->size, 8); in t4_sge_alloc_rxq()
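/* Editor's illustrative aside: the sizing above guarantees the Free List can
 * never shrink to the SGE's starvation threshold -- it must hold at least
 * (fl_starve_thres - 1) buffers plus two extra hardware descriptors' worth
 * (8 buffers each), and the ring length must be a multiple of 8.  A
 * standalone sketch of that rule with hypothetical names:
 */
static unsigned int demo_fl_min_size(unsigned int requested,
				     unsigned int fl_starve_thres)
{
	unsigned int min_size = fl_starve_thres - 1 + 2 * 8;

	if (requested < min_size)
		requested = min_size;
	return (requested + 7) & ~7u;	/* round up to a multiple of 8 */
}

/* e.g. demo_fl_min_size(10, 5) == 24: 5 - 1 + 16 = 20, rounded up to 24 */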
4430 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), in t4_sge_alloc_rxq()
4431 sizeof(struct rx_sw_desc), &fl->addr, in t4_sge_alloc_rxq()
4432 &fl->sdesc, s->stat_len, in t4_sge_alloc_rxq()
4433 dev_to_node(adap->pdev_dev)); in t4_sge_alloc_rxq()
4434 if (!fl->desc) in t4_sge_alloc_rxq()
4437 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_rxq()
4451 * Free List pointers are provided, so we use a 128-byte Fetch in t4_sge_alloc_rxq()
4453 * the smaller 64-byte value there). in t4_sge_alloc_rxq()
4463 c.fl0addr = cpu_to_be64(fl->addr); in t4_sge_alloc_rxq()
4466 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_rxq()
4470 netif_napi_add(dev, &iq->napi, napi_rx_handler); in t4_sge_alloc_rxq()
4471 iq->cur_desc = iq->desc; in t4_sge_alloc_rxq()
4472 iq->cidx = 0; in t4_sge_alloc_rxq()
4473 iq->gen = 1; in t4_sge_alloc_rxq()
4474 iq->next_intr_params = iq->intr_params; in t4_sge_alloc_rxq()
4475 iq->cntxt_id = ntohs(c.iqid); in t4_sge_alloc_rxq()
4476 iq->abs_id = ntohs(c.physiqid); in t4_sge_alloc_rxq()
4477 iq->bar2_addr = bar2_address(adap, in t4_sge_alloc_rxq()
4478 iq->cntxt_id, in t4_sge_alloc_rxq()
4480 &iq->bar2_qid); in t4_sge_alloc_rxq()
4481 iq->size--; /* subtract status entry */ in t4_sge_alloc_rxq()
4482 iq->netdev = dev; in t4_sge_alloc_rxq()
4483 iq->handler = hnd; in t4_sge_alloc_rxq()
4484 iq->flush_handler = flush_hnd; in t4_sge_alloc_rxq()
4486 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr)); in t4_sge_alloc_rxq()
4487 skb_queue_head_init(&iq->lro_mgr.lroq); in t4_sge_alloc_rxq()
4489 /* set offset to -1 to distinguish ingress queues without FL */ in t4_sge_alloc_rxq()
4490 iq->offset = fl ? 0 : -1; in t4_sge_alloc_rxq()
4492 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; in t4_sge_alloc_rxq()
4495 fl->cntxt_id = ntohs(c.fl0id); in t4_sge_alloc_rxq()
4496 fl->avail = fl->pend_cred = 0; in t4_sge_alloc_rxq()
4497 fl->pidx = fl->cidx = 0; in t4_sge_alloc_rxq()
4498 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; in t4_sge_alloc_rxq()
4499 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; in t4_sge_alloc_rxq()
4504 fl->bar2_addr = bar2_address(adap, in t4_sge_alloc_rxq()
4505 fl->cntxt_id, in t4_sge_alloc_rxq()
4507 &fl->bar2_qid); in t4_sge_alloc_rxq()
4512 * of the new RX Ethernet Queue. This should really be handled by in t4_sge_alloc_rxq()
4519 if (!is_t4(adap->params.chip) && cong >= 0) { in t4_sge_alloc_rxq()
4522 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; in t4_sge_alloc_rxq()
4526 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id)); in t4_sge_alloc_rxq()
4538 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in t4_sge_alloc_rxq()
4541 dev_warn(adap->pdev_dev, "Failed to set Congestion" in t4_sge_alloc_rxq()
4543 iq->cntxt_id, -ret); in t4_sge_alloc_rxq()
4549 ret = -ENOMEM; in t4_sge_alloc_rxq()
4551 if (iq->desc) { in t4_sge_alloc_rxq()
4552 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, in t4_sge_alloc_rxq()
4553 iq->desc, iq->phys_addr); in t4_sge_alloc_rxq()
4554 iq->desc = NULL; in t4_sge_alloc_rxq()
4556 if (fl && fl->desc) { in t4_sge_alloc_rxq()
4557 kfree(fl->sdesc); in t4_sge_alloc_rxq()
4558 fl->sdesc = NULL; in t4_sge_alloc_rxq()
4559 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), in t4_sge_alloc_rxq()
4560 fl->desc, fl->addr); in t4_sge_alloc_rxq()
4561 fl->desc = NULL; in t4_sge_alloc_rxq()
4568 q->cntxt_id = id; in init_txq()
4569 q->bar2_addr = bar2_address(adap, in init_txq()
4570 q->cntxt_id, in init_txq()
4572 &q->bar2_qid); in init_txq()
4573 q->in_use = 0; in init_txq()
4574 q->cidx = q->pidx = 0; in init_txq()
4575 q->stops = q->restarts = 0; in init_txq()
4576 q->stat = (void *)&q->desc[q->size]; in init_txq()
4577 spin_lock_init(&q->db_lock); in init_txq()
4578 adap->sge.egr_map[id - adap->sge.egr_start] = q; in init_txq()
4582 * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
4594 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_eth_txq()
4596 struct sge *s = &adap->sge; in t4_sge_alloc_eth_txq()
4601 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_eth_txq()
4603 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_eth_txq()
4605 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, in t4_sge_alloc_eth_txq()
4607 if (!txq->q.desc) in t4_sge_alloc_eth_txq()
4608 return -ENOMEM; in t4_sge_alloc_eth_txq()
4613 FW_EQ_ETH_CMD_PFN_V(adap->pf) | in t4_sge_alloc_eth_txq()
4627 FW_EQ_ETH_CMD_VIID_V(pi->viid)); in t4_sge_alloc_eth_txq()
4633 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | in t4_sge_alloc_eth_txq()
4646 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4650 * ethtool -C tx-usecs {Timer Val} command. Note that the SGE in t4_sge_alloc_eth_txq()
4657 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix)); in t4_sge_alloc_eth_txq()
4659 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_eth_txq()
4661 kfree(txq->q.sdesc); in t4_sge_alloc_eth_txq()
4662 txq->q.sdesc = NULL; in t4_sge_alloc_eth_txq()
4663 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_eth_txq()
4665 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4666 txq->q.desc = NULL; in t4_sge_alloc_eth_txq()
4670 txq->q.q_type = CXGB4_TXQ_ETH; in t4_sge_alloc_eth_txq()
4671 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_eth_txq()
4672 txq->txq = netdevq; in t4_sge_alloc_eth_txq()
4673 txq->tso = 0; in t4_sge_alloc_eth_txq()
4674 txq->uso = 0; in t4_sge_alloc_eth_txq()
4675 txq->tx_cso = 0; in t4_sge_alloc_eth_txq()
4676 txq->vlan_ins = 0; in t4_sge_alloc_eth_txq()
4677 txq->mapping_err = 0; in t4_sge_alloc_eth_txq()
4678 txq->dbqt = dbqt; in t4_sge_alloc_eth_txq()
4687 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_ctrl_txq()
4689 struct sge *s = &adap->sge; in t4_sge_alloc_ctrl_txq()
4694 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ctrl_txq()
4696 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, in t4_sge_alloc_ctrl_txq()
4697 sizeof(struct tx_desc), 0, &txq->q.phys_addr, in t4_sge_alloc_ctrl_txq()
4698 NULL, 0, dev_to_node(adap->pdev_dev)); in t4_sge_alloc_ctrl_txq()
4699 if (!txq->q.desc) in t4_sge_alloc_ctrl_txq()
4700 return -ENOMEM; in t4_sge_alloc_ctrl_txq()
4704 FW_EQ_CTRL_CMD_PFN_V(adap->pf) | in t4_sge_alloc_ctrl_txq()
4712 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | in t4_sge_alloc_ctrl_txq()
4721 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4723 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_ctrl_txq()
4725 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_ctrl_txq()
4727 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4728 txq->q.desc = NULL; in t4_sge_alloc_ctrl_txq()
4732 txq->q.q_type = CXGB4_TXQ_CTRL; in t4_sge_alloc_ctrl_txq()
4733 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); in t4_sge_alloc_ctrl_txq()
4734 txq->adap = adap; in t4_sge_alloc_ctrl_txq()
4735 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_ctrl_txq()
4736 tasklet_setup(&txq->qresume_tsk, restart_ctrlq); in t4_sge_alloc_ctrl_txq()
4737 txq->full = 0; in t4_sge_alloc_ctrl_txq()
4750 return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, ¶m, &val); in t4_sge_mod_ctrl_txq()
4756 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_ofld_txq()
4758 struct sge *s = &adap->sge; in t4_sge_alloc_ofld_txq()
4764 nentries = q->size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ofld_txq()
4765 q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc), in t4_sge_alloc_ofld_txq()
4766 sizeof(struct tx_sw_desc), &q->phys_addr, in t4_sge_alloc_ofld_txq()
4767 &q->sdesc, s->stat_len, NUMA_NO_NODE); in t4_sge_alloc_ofld_txq()
4768 if (!q->desc) in t4_sge_alloc_ofld_txq()
4769 return -ENOMEM; in t4_sge_alloc_ofld_txq()
4779 FW_EQ_OFLD_CMD_PFN_V(adap->pf) | in t4_sge_alloc_ofld_txq()
4785 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | in t4_sge_alloc_ofld_txq()
4792 c.eqaddr = cpu_to_be64(q->phys_addr); in t4_sge_alloc_ofld_txq()
4794 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_ofld_txq()
4796 kfree(q->sdesc); in t4_sge_alloc_ofld_txq()
4797 q->sdesc = NULL; in t4_sge_alloc_ofld_txq()
4798 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_ofld_txq()
4800 q->desc, q->phys_addr); in t4_sge_alloc_ofld_txq()
4801 q->desc = NULL; in t4_sge_alloc_ofld_txq()
4819 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid); in t4_sge_alloc_uld_txq()
4823 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_uld_txq()
4824 txq->adap = adap; in t4_sge_alloc_uld_txq()
4825 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_uld_txq()
4826 tasklet_setup(&txq->qresume_tsk, restart_ofldq); in t4_sge_alloc_uld_txq()
4827 txq->full = 0; in t4_sge_alloc_uld_txq()
4828 txq->mapping_err = 0; in t4_sge_alloc_uld_txq()
4837 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid); in t4_sge_alloc_ethofld_txq()
4841 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_ethofld_txq()
4842 spin_lock_init(&txq->lock); in t4_sge_alloc_ethofld_txq()
4843 txq->adap = adap; in t4_sge_alloc_ethofld_txq()
4844 txq->tso = 0; in t4_sge_alloc_ethofld_txq()
4845 txq->uso = 0; in t4_sge_alloc_ethofld_txq()
4846 txq->tx_cso = 0; in t4_sge_alloc_ethofld_txq()
4847 txq->vlan_ins = 0; in t4_sge_alloc_ethofld_txq()
4848 txq->mapping_err = 0; in t4_sge_alloc_ethofld_txq()
4854 struct sge *s = &adap->sge; in free_txq()
4856 dma_free_coherent(adap->pdev_dev, in free_txq()
4857 q->size * sizeof(struct tx_desc) + s->stat_len, in free_txq()
4858 q->desc, q->phys_addr); in free_txq()
4859 q->cntxt_id = 0; in free_txq()
4860 q->sdesc = NULL; in free_txq()
4861 q->desc = NULL; in free_txq()
4867 struct sge *s = &adap->sge; in free_rspq_fl()
4868 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; in free_rspq_fl()
4870 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; in free_rspq_fl()
4871 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, in free_rspq_fl()
4872 rq->cntxt_id, fl_id, 0xffff); in free_rspq_fl()
4873 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, in free_rspq_fl()
4874 rq->desc, rq->phys_addr); in free_rspq_fl()
4875 netif_napi_del(&rq->napi); in free_rspq_fl()
4876 rq->netdev = NULL; in free_rspq_fl()
4877 rq->cntxt_id = rq->abs_id = 0; in free_rspq_fl()
4878 rq->desc = NULL; in free_rspq_fl()
4881 free_rx_bufs(adap, fl, fl->avail); in free_rspq_fl()
4882 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, in free_rspq_fl()
4883 fl->desc, fl->addr); in free_rspq_fl()
4884 kfree(fl->sdesc); in free_rspq_fl()
4885 fl->sdesc = NULL; in free_rspq_fl()
4886 fl->cntxt_id = 0; in free_rspq_fl()
4887 fl->desc = NULL; in free_rspq_fl()
4892 * t4_free_ofld_rxqs - free a block of consecutive Rx queues
4897 * Release the resources of a consecutive block of offload Rx queues.
4901 for ( ; n; n--, q++) in t4_free_ofld_rxqs()
4902 if (q->rspq.desc) in t4_free_ofld_rxqs()
4903 free_rspq_fl(adap, &q->rspq, in t4_free_ofld_rxqs()
4904 q->fl.size ? &q->fl : NULL); in t4_free_ofld_rxqs()
4909 if (txq->q.desc) { in t4_sge_free_ethofld_txq()
4910 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, in t4_sge_free_ethofld_txq()
4911 txq->q.cntxt_id); in t4_sge_free_ethofld_txq()
4912 free_tx_desc(adap, &txq->q, txq->q.in_use, false); in t4_sge_free_ethofld_txq()
4913 kfree(txq->q.sdesc); in t4_sge_free_ethofld_txq()
4914 free_txq(adap, &txq->q); in t4_sge_free_ethofld_txq()
4919 * t4_free_sge_resources - free SGE resources
4927 struct sge_eth_rxq *eq; in t4_free_sge_resources() local
4930 /* stop all Rx queues in order to start them draining */ in t4_free_sge_resources()
4931 for (i = 0; i < adap->sge.ethqsets; i++) { in t4_free_sge_resources()
4932 eq = &adap->sge.ethrxq[i]; in t4_free_sge_resources()
4933 if (eq->rspq.desc) in t4_free_sge_resources()
4934 t4_iq_stop(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4936 eq->rspq.cntxt_id, in t4_free_sge_resources()
4937 eq->fl.size ? eq->fl.cntxt_id : 0xffff, in t4_free_sge_resources()
4941 /* clean up Ethernet Tx/Rx queues */ in t4_free_sge_resources()
4942 for (i = 0; i < adap->sge.ethqsets; i++) { in t4_free_sge_resources()
4943 eq = &adap->sge.ethrxq[i]; in t4_free_sge_resources()
4944 if (eq->rspq.desc) in t4_free_sge_resources()
4945 free_rspq_fl(adap, &eq->rspq, in t4_free_sge_resources()
4946 eq->fl.size ? &eq->fl : NULL); in t4_free_sge_resources()
4947 if (eq->msix) { in t4_free_sge_resources()
4948 cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx); in t4_free_sge_resources()
4949 eq->msix = NULL; in t4_free_sge_resources()
4952 etq = &adap->sge.ethtxq[i]; in t4_free_sge_resources()
4953 if (etq->q.desc) { in t4_free_sge_resources()
4954 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4955 etq->q.cntxt_id); in t4_free_sge_resources()
4956 __netif_tx_lock_bh(etq->txq); in t4_free_sge_resources()
4957 free_tx_desc(adap, &etq->q, etq->q.in_use, true); in t4_free_sge_resources()
4958 __netif_tx_unlock_bh(etq->txq); in t4_free_sge_resources()
4959 kfree(etq->q.sdesc); in t4_free_sge_resources()
4960 free_txq(adap, &etq->q); in t4_free_sge_resources()
4965 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { in t4_free_sge_resources()
4966 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; in t4_free_sge_resources()
4968 if (cq->q.desc) { in t4_free_sge_resources()
4969 tasklet_kill(&cq->qresume_tsk); in t4_free_sge_resources()
4970 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4971 cq->q.cntxt_id); in t4_free_sge_resources()
4972 __skb_queue_purge(&cq->sendq); in t4_free_sge_resources()
4973 free_txq(adap, &cq->q); in t4_free_sge_resources()
4977 if (adap->sge.fw_evtq.desc) { in t4_free_sge_resources()
4978 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); in t4_free_sge_resources()
4979 if (adap->sge.fwevtq_msix_idx >= 0) in t4_free_sge_resources()
4981 adap->sge.fwevtq_msix_idx); in t4_free_sge_resources()
4984 if (adap->sge.nd_msix_idx >= 0) in t4_free_sge_resources()
4985 cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx); in t4_free_sge_resources()
4987 if (adap->sge.intrq.desc) in t4_free_sge_resources()
4988 free_rspq_fl(adap, &adap->sge.intrq, NULL); in t4_free_sge_resources()
4990 if (!is_t4(adap->params.chip)) { in t4_free_sge_resources()
4991 etq = &adap->sge.ptptxq; in t4_free_sge_resources()
4992 if (etq->q.desc) { in t4_free_sge_resources()
4993 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4994 etq->q.cntxt_id); in t4_free_sge_resources()
4995 spin_lock_bh(&adap->ptp_lock); in t4_free_sge_resources()
4996 free_tx_desc(adap, &etq->q, etq->q.in_use, true); in t4_free_sge_resources()
4997 spin_unlock_bh(&adap->ptp_lock); in t4_free_sge_resources()
4998 kfree(etq->q.sdesc); in t4_free_sge_resources()
4999 free_txq(adap, &etq->q); in t4_free_sge_resources()
5004 memset(adap->sge.egr_map, 0, in t4_free_sge_resources()
5005 adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); in t4_free_sge_resources()
5010 adap->sge.ethtxq_rover = 0; in t4_sge_start()
5011 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); in t4_sge_start()
5012 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); in t4_sge_start()
5016 * t4_sge_stop - disable SGE operation
5026 struct sge *s = &adap->sge; in t4_sge_stop()
5028 if (s->rx_timer.function) in t4_sge_stop()
5029 del_timer_sync(&s->rx_timer); in t4_sge_stop()
5030 if (s->tx_timer.function) in t4_sge_stop()
5031 del_timer_sync(&s->tx_timer); in t4_sge_stop()
5036 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in t4_sge_stop()
5038 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop()
5040 for_each_ofldtxq(&adap->sge, i) { in t4_sge_stop()
5041 if (txq->q.desc) in t4_sge_stop()
5042 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
5050 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; in t4_sge_stop()
5052 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop()
5054 for_each_ofldtxq(&adap->sge, i) { in t4_sge_stop()
5055 if (txq->q.desc) in t4_sge_stop()
5056 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
5061 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { in t4_sge_stop()
5062 struct sge_ctrl_txq *cq = &s->ctrlq[i]; in t4_sge_stop()
5064 if (cq->q.desc) in t4_sge_stop()
5065 tasklet_kill(&cq->qresume_tsk); in t4_sge_stop()
5070 * t4_sge_init_soft - grab core SGE values needed by SGE code
5079 struct sge *s = &adap->sge; in t4_sge_init_soft()
5091 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); in t4_sge_init_soft()
5092 return -EINVAL; in t4_sge_init_soft()
5123 (fl_large_pg & (fl_large_pg - 1)) != 0) { in t4_sge_init_soft()
5124 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", in t4_sge_init_soft()
5126 return -EINVAL; in t4_sge_init_soft()
5129 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; in t4_sge_init_soft()
5133 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", in t4_sge_init_soft()
5135 return -EINVAL; in t4_sge_init_soft()
5139 * Retrieve our RX interrupt holdoff timer values and counter in t4_sge_init_soft()
5145 s->timer_val[0] = core_ticks_to_us(adap, in t4_sge_init_soft()
5147 s->timer_val[1] = core_ticks_to_us(adap, in t4_sge_init_soft()
5149 s->timer_val[2] = core_ticks_to_us(adap, in t4_sge_init_soft()
5151 s->timer_val[3] = core_ticks_to_us(adap, in t4_sge_init_soft()
5153 s->timer_val[4] = core_ticks_to_us(adap, in t4_sge_init_soft()
5155 s->timer_val[5] = core_ticks_to_us(adap, in t4_sge_init_soft()
5159 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); in t4_sge_init_soft()
5160 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); in t4_sge_init_soft()
5161 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); in t4_sge_init_soft()
5162 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); in t4_sge_init_soft()
5168 * t4_sge_init - initialize SGE
5171 * Perform low-level SGE code initialization needed every time after a
5176 struct sge *s = &adap->sge; in t4_sge_init()
5185 s->pktshift = PKTSHIFT_G(sge_control); in t4_sge_init()
5186 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; in t4_sge_init()
5188 s->fl_align = t4_fl_pkt_align(adap); in t4_sge_init()
5206 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { in t4_sge_init()
5217 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n", in t4_sge_init()
5218 CHELSIO_CHIP_VERSION(adap->params.chip)); in t4_sge_init()
5219 return -EINVAL; in t4_sge_init()
5221 s->fl_starve_thres = 2*egress_threshold + 1; in t4_sge_init()
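/* Editor's illustrative aside: the starvation threshold above is derived
 * from the SGE egress congestion-manager threshold read out of hardware
 * (different chip generations expose it in different register fields, not
 * shown in the listing above); in every case the driver keeps twice that
 * many buffers plus one in reserve.  A tiny standalone sketch with made-up
 * values and hypothetical names:
 */
#include <stdio.h>

enum demo_chip { DEMO_T4, DEMO_T5, DEMO_T6 };

static unsigned int demo_fl_starve_thres(enum demo_chip chip,
					 unsigned int egr_thresh,
					 unsigned int egr_thresh_packing)
{
	unsigned int t = (chip == DEMO_T4) ? egr_thresh : egr_thresh_packing;

	return 2 * t + 1;
}

int main(void)
{
	printf("%u\n", demo_fl_starve_thres(DEMO_T5, 4, 8));	/* prints 17 */
	return 0;
}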
5223 t4_idma_monitor_init(adap, &s->idma_monitor); in t4_sge_init()
5225 /* Set up timers used for recurring callbacks to process RX and TX in t4_sge_init()
5228 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0); in t4_sge_init()
5229 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0); in t4_sge_init()
5231 spin_lock_init(&s->intrq_lock); in t4_sge_init()