4 * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
40 #include <linux/dma-mapping.h>
68 # define FL_PG_ORDER (16 - PAGE_SHIFT)
115 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
118 * Suspension threshold for non-Ethernet Tx queues. We require enough room
141 * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
151 struct sge *s = &adapter->sge; in fl_mtu_bufsize()
153 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align); in fl_mtu_bufsize()
165 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
166 * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
193 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS; in get_buf_addr()
198 return !(d->dma_addr & RX_UNMAPPED_BUF); in is_buf_mapped()
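Because free-list buffers are at least 32-byte aligned, the driver can stash per-buffer state (a size index and a "not DMA mapped" flag) in the low bits of dma_addr, which get_buf_addr() and is_buf_mapped() above mask out again. A minimal standalone sketch of that packing scheme, using hypothetical masks (a 2-bit size index plus one flag bit) rather than the driver's actual RX_BUF_FLAGS layout:

#include <assert.h>
#include <stdint.h>

#define BUF_SIZE_MASK	0x3ULL		/* hypothetical: 2-bit size index */
#define BUF_UNMAPPED	0x4ULL		/* hypothetical: "not DMA mapped" flag */
#define BUF_FLAGS	(BUF_SIZE_MASK | BUF_UNMAPPED)

/* Pack a size index and mapping flag into an aligned DMA address. */
static uint64_t pack_buf_state(uint64_t dma_addr, unsigned int size_idx,
			       int unmapped)
{
	assert((dma_addr & BUF_FLAGS) == 0);	/* address must leave the low bits free */
	return dma_addr | (size_idx & BUF_SIZE_MASK) | (unmapped ? BUF_UNMAPPED : 0);
}

static uint64_t unpack_buf_addr(uint64_t v)	{ return v & ~BUF_FLAGS; }
static unsigned int unpack_size_idx(uint64_t v)	{ return (unsigned int)(v & BUF_SIZE_MASK); }
static int unpack_is_mapped(uint64_t v)		{ return !(v & BUF_UNMAPPED); }

int main(void)
{
	uint64_t v = pack_buf_state(0x1000, 2, 0);

	assert(unpack_buf_addr(v) == 0x1000);
	assert(unpack_size_idx(v) == 2 && unpack_is_mapped(v));
	return 0;
}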
202 * txq_avail - return the number of available slots in a Tx queue
210 return q->size - 1 - q->in_use; in txq_avail()
214 * fl_cap - return the capacity of a free-buffer list
217 * Returns the capacity of a free-buffer list. The capacity is less than
223 return fl->size - 8; /* 1 descriptor = 8 buffers */ in fl_cap()
227 * fl_starving - return whether a Free List is starving.
238 const struct sge *s = &adapter->sge; in fl_starving()
240 return fl->avail - fl->pend_cred <= s->fl_starve_thres; in fl_starving()
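fl_starving() above treats a free list as starving when the buffers actually credited to the hardware (avail minus the not-yet-rung pend_cred) fall to the starvation threshold. A standalone sketch of that check, with illustrative field names in place of the real struct sge_fl:

#include <assert.h>

struct fl_state {
	unsigned int avail;		/* buffers currently allocated to the list */
	unsigned int pend_cred;		/* new buffers not yet credited to the hardware */
};

/* Mirror of the fl_starving() test: only credited buffers count. */
static int fl_is_starving(const struct fl_state *fl, unsigned int starve_thres)
{
	return fl->avail - fl->pend_cred <= starve_thres;
}

int main(void)
{
	struct fl_state fl = { .avail = 40, .pend_cred = 32 };

	assert(fl_is_starving(&fl, 8));		/* only 8 buffers visible to the HW */
	fl.pend_cred = 0;
	assert(!fl_is_starving(&fl, 8));	/* all 40 are credited now */
	return 0;
}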
249 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); in cxgb4_map_skb()
254 end = &si->frags[si->nr_frags]; in cxgb4_map_skb()
256 for (fp = si->frags; fp < end; fp++) { in cxgb4_map_skb()
265 while (fp-- > si->frags) in cxgb4_map_skb()
266 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); in cxgb4_map_skb()
268 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); in cxgb4_map_skb()
270 return -ENOMEM; in cxgb4_map_skb()
283 end = &si->frags[si->nr_frags]; in unmap_skb()
284 for (fp = si->frags; fp < end; fp++) in unmap_skb()
290 * deferred_unmap_destructor - unmap a packet when it is freed
299 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); in deferred_unmap_destructor()
304 * free_tx_desc - reclaims Tx descriptors and their buffers
316 unsigned int cidx = q->cidx; in free_tx_desc()
319 d = &q->sdesc[cidx]; in free_tx_desc()
320 while (n--) { in free_tx_desc()
321 if (d->skb) { /* an SGL is present */ in free_tx_desc()
322 if (unmap && d->addr[0]) { in free_tx_desc()
323 unmap_skb(adap->pdev_dev, d->skb, d->addr); in free_tx_desc()
324 memset(d->addr, 0, sizeof(d->addr)); in free_tx_desc()
326 dev_consume_skb_any(d->skb); in free_tx_desc()
327 d->skb = NULL; in free_tx_desc()
330 if (++cidx == q->size) { in free_tx_desc()
332 d = q->sdesc; in free_tx_desc()
335 q->cidx = cidx; in free_tx_desc()
343 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); in reclaimable()
344 hw_cidx -= q->cidx; in reclaimable()
345 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; in reclaimable()
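reclaimable() reads the hardware consumer index from the queue's status page and subtracts the software cidx; because both indices wrap at q->size, a negative difference is corrected by adding the ring size. A small sketch of that modular distance on a plain ring, independent of the hardware status page:

#include <assert.h>

/* Entries the hardware has consumed beyond the software consumer index. */
static int ring_reclaimable(int hw_cidx, int sw_cidx, int ring_size)
{
	int d = hw_cidx - sw_cidx;

	return d < 0 ? d + ring_size : d;
}

int main(void)
{
	assert(ring_reclaimable(10, 4, 64) == 6);	/* no wrap */
	assert(ring_reclaimable(2, 60, 64) == 6);	/* hw index wrapped past the end */
	return 0;
}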
349 * reclaim_completed_tx - reclaims completed TX Descriptors
352 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
356 * and frees the associated buffers if possible. If @maxreclaim == -1, then in reclaim_completed_tx()
375 q->in_use -= reclaim; in reclaim_completed_tx()
382 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
394 (void)reclaim_completed_tx(adap, q, -1, unmap); in cxgb4_reclaim_completed_tx()
401 struct sge *s = &adapter->sge; in get_buf_size()
402 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE; in get_buf_size()
411 buf_size = PAGE_SIZE << s->fl_pg_order; in get_buf_size()
430 * free_rx_bufs - free the Rx buffers on an SGE free list
435 * Release the next @n buffers on an SGE free-buffer Rx queue. The
440 while (n--) { in free_rx_bufs()
441 struct rx_sw_desc *d = &q->sdesc[q->cidx]; in free_rx_bufs()
444 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), in free_rx_bufs()
447 put_page(d->page); in free_rx_bufs()
448 d->page = NULL; in free_rx_bufs()
449 if (++q->cidx == q->size) in free_rx_bufs()
450 q->cidx = 0; in free_rx_bufs()
451 q->avail--; in free_rx_bufs()
456 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
460 * Unmap the current buffer on an SGE free-buffer Rx queue. The
468 struct rx_sw_desc *d = &q->sdesc[q->cidx]; in unmap_rx_buf()
471 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), in unmap_rx_buf()
473 d->page = NULL; in unmap_rx_buf()
474 if (++q->cidx == q->size) in unmap_rx_buf()
475 q->cidx = 0; in unmap_rx_buf()
476 q->avail--; in unmap_rx_buf()
481 if (q->pend_cred >= 8) { in ring_fl_db()
482 u32 val = adap->params.arch.sge_fl_db; in ring_fl_db()
484 if (is_t4(adap->params.chip)) in ring_fl_db()
485 val |= PIDX_V(q->pend_cred / 8); in ring_fl_db()
487 val |= PIDX_T5_V(q->pend_cred / 8); in ring_fl_db()
498 if (unlikely(q->bar2_addr == NULL)) { in ring_fl_db()
500 val | QID_V(q->cntxt_id)); in ring_fl_db()
502 writel(val | QID_V(q->bar2_qid), in ring_fl_db()
503 q->bar2_addr + SGE_UDB_KDOORBELL); in ring_fl_db()
510 q->pend_cred &= 7; in ring_fl_db()
517 sd->page = pg; in set_rx_sw_desc()
518 sd->dma_addr = mapping; /* includes size low bits */ in set_rx_sw_desc()
522 * refill_fl - refill an SGE Rx buffer ring
528 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
538 struct sge *s = &adap->sge; in refill_fl()
541 unsigned int cred = q->avail; in refill_fl()
542 __be64 *d = &q->desc[q->pidx]; in refill_fl()
543 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; in refill_fl()
547 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl)) in refill_fl()
552 node = dev_to_node(adap->pdev_dev); in refill_fl()
554 if (s->fl_pg_order == 0) in refill_fl()
561 pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order); in refill_fl()
563 q->large_alloc_failed++; in refill_fl()
567 mapping = dma_map_page(adap->pdev_dev, pg, 0, in refill_fl()
568 PAGE_SIZE << s->fl_pg_order, in refill_fl()
570 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { in refill_fl()
571 __free_pages(pg, s->fl_pg_order); in refill_fl()
572 q->mapping_err++; in refill_fl()
581 q->avail++; in refill_fl()
582 if (++q->pidx == q->size) { in refill_fl()
583 q->pidx = 0; in refill_fl()
584 sd = q->sdesc; in refill_fl()
585 d = q->desc; in refill_fl()
587 n--; in refill_fl()
591 while (n--) { in refill_fl()
594 q->alloc_failed++; in refill_fl()
598 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, in refill_fl()
600 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { in refill_fl()
602 q->mapping_err++; in refill_fl()
610 q->avail++; in refill_fl()
611 if (++q->pidx == q->size) { in refill_fl()
612 q->pidx = 0; in refill_fl()
613 sd = q->sdesc; in refill_fl()
614 d = q->desc; in refill_fl()
618 out: cred = q->avail - cred; in refill_fl()
619 q->pend_cred += cred; in refill_fl()
624 q->low++; in refill_fl()
625 set_bit(q->cntxt_id - adap->sge.egr_start, in refill_fl()
626 adap->sge.starving_fl); in refill_fl()
634 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), in __refill_fl()
639 * alloc_ring - allocate resources for an SGE descriptor ring
681 * sgl_len - calculates the size of an SGL of the given capacity
689 /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA in sgl_len()
690 * addresses. The DSGL Work Request starts off with a 32-bit DSGL in sgl_len()
693 * Address[i+1] } (this ensures that all addresses are on 64-bit in sgl_len()
700 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 in sgl_len()
701 * flits for every pair of the remaining N) +1 if (n-1) is odd; and in sgl_len()
702 * finally the "+((n-1)&1)" adds the one remaining flit needed if in sgl_len()
703 * (n-1) is odd ... in sgl_len()
705 n--; in sgl_len()
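The comment above derives the flit count for a DSGL with n addresses: two flits hold the 32-bit command, Length0 and the 64-bit Address0, each remaining pair of entries needs three flits (two lengths plus two addresses), and an odd remainder costs one extra flit. A hedged standalone version of that computation, checked against a byte count of the layout the comment describes (names are illustrative, not the driver's):

#include <assert.h>

/* Flits (8-byte words) needed for a DSGL with n scatter/gather entries. */
static unsigned int dsgl_flits(unsigned int n)
{
	n--;					/* first length/address pair lives in the fixed part */
	return (3 * n) / 2 + (n & 1) + 2;	/* 2 flits = command + Length0 + Address0 */
}

/* Count the same layout directly: 4 bytes command, 4 bytes Length0, 8 bytes
 * Address0, then {Len[i], Len[i+1], Addr[i], Addr[i+1]} groups of 24 bytes,
 * with an odd tail padded so every address stays 64-bit aligned.
 */
static unsigned int dsgl_flits_by_layout(unsigned int n)
{
	unsigned int bytes = 4 + 4 + 8;
	unsigned int rest = n - 1;

	bytes += (rest / 2) * 24;		/* full pairs */
	if (rest & 1)
		bytes += 16;			/* Len[i] + pad + Addr[i] */
	return (bytes + 7) / 8;
}

int main(void)
{
	unsigned int n;

	for (n = 1; n <= 16; n++)
		assert(dsgl_flits(n) == dsgl_flits_by_layout(n));
	return 0;
}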
710 * flits_to_desc - returns the num of Tx descriptors for the given flits
723 * is_eth_imm - can an Ethernet packet be sent as immediate data?
734 if (skb->encapsulation && skb_shinfo(skb)->gso_size && in is_eth_imm()
738 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in is_eth_imm()
741 hdrlen = skb_shinfo(skb)->gso_size ? in is_eth_imm()
745 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) in is_eth_imm()
751 * calc_tx_flits - calculate the number of flits for a packet Tx WR
770 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); in calc_tx_flits()
778 * with an embedded TX Packet Write CPL message. in calc_tx_flits()
780 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); in calc_tx_flits()
781 if (skb_shinfo(skb)->gso_size) { in calc_tx_flits()
782 if (skb->encapsulation && chip_ver > CHELSIO_T5) { in calc_tx_flits()
785 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in calc_tx_flits()
788 pkt_hdrlen = eth_get_headlen(skb->dev, skb->data, in calc_tx_flits()
807 * calc_tx_descs - calculate the number of Tx descriptors for a packet
821 * cxgb4_write_sgl - populate a scatter/gather list for a packet
826 * @start: start offset into skb main-body data to include in the SGL
832 * main body except for the first @start bytes. @sgl must be 16-byte
844 unsigned int nfrags = si->nr_frags; in cxgb4_write_sgl()
847 len = skb_headlen(skb) - start; in cxgb4_write_sgl()
849 sgl->len0 = htonl(len); in cxgb4_write_sgl()
850 sgl->addr0 = cpu_to_be64(addr[0] + start); in cxgb4_write_sgl()
853 sgl->len0 = htonl(skb_frag_size(&si->frags[0])); in cxgb4_write_sgl()
854 sgl->addr0 = cpu_to_be64(addr[1]); in cxgb4_write_sgl()
857 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | in cxgb4_write_sgl()
859 if (likely(--nfrags == 0)) in cxgb4_write_sgl()
866 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; in cxgb4_write_sgl()
868 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { in cxgb4_write_sgl()
869 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); in cxgb4_write_sgl()
870 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); in cxgb4_write_sgl()
871 to->addr[0] = cpu_to_be64(addr[i]); in cxgb4_write_sgl()
872 to->addr[1] = cpu_to_be64(addr[++i]); in cxgb4_write_sgl()
875 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); in cxgb4_write_sgl()
876 to->len[1] = cpu_to_be32(0); in cxgb4_write_sgl()
877 to->addr[0] = cpu_to_be64(addr[i + 1]); in cxgb4_write_sgl()
879 if (unlikely((u8 *)end > (u8 *)q->stat)) { in cxgb4_write_sgl()
880 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; in cxgb4_write_sgl()
883 memcpy(sgl->sge, buf, part0); in cxgb4_write_sgl()
884 part1 = (u8 *)end - (u8 *)q->stat; in cxgb4_write_sgl()
885 memcpy(q->desc, (u8 *)buf + part0, part1); in cxgb4_write_sgl()
886 end = (void *)q->desc + part1; in cxgb4_write_sgl()
888 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ in cxgb4_write_sgl()
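When the SGL would run past the status page at the end of the descriptor ring, cxgb4_write_sgl() stages the tail in a stack buffer and then copies part0 up to q->stat and part1 from the start of q->desc. A simplified sketch of that split copy on a byte ring (no hardware structures, illustrative names only):

#include <assert.h>
#include <string.h>

/* Copy len bytes into a circular buffer of size ring_size starting at pos,
 * splitting the copy when it would run past the end -- the same part0/part1
 * scheme the SGL writer uses around the ring's status page.
 */
static void ring_copy(unsigned char *ring, unsigned int ring_size,
		      unsigned int pos, const void *src, unsigned int len)
{
	unsigned int part0 = ring_size - pos;

	if (len <= part0) {
		memcpy(ring + pos, src, len);
		return;
	}
	memcpy(ring + pos, src, part0);
	memcpy(ring, (const unsigned char *)src + part0, len - part0);
}

int main(void)
{
	unsigned char ring[16] = { 0 }, msg[6] = "abcdef";

	ring_copy(ring, sizeof(ring), 13, msg, sizeof(msg));
	assert(ring[13] == 'a' && ring[15] == 'c');
	assert(ring[0] == 'd' && ring[2] == 'f');	/* tail wrapped to the start */
	return 0;
}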
893 /* cxgb4_write_partial_sgl - populate SGL for partial packet
920 frag_size = min(len, skb_linear_data_len - start); in cxgb4_write_partial_sgl()
921 sgl->len0 = htonl(frag_size); in cxgb4_write_partial_sgl()
922 sgl->addr0 = cpu_to_be64(addr[0] + start); in cxgb4_write_partial_sgl()
923 len -= frag_size; in cxgb4_write_partial_sgl()
926 start -= skb_linear_data_len; in cxgb4_write_partial_sgl()
927 frag = &si->frags[frag_idx]; in cxgb4_write_partial_sgl()
931 start -= frag_size; in cxgb4_write_partial_sgl()
933 frag = &si->frags[frag_idx]; in cxgb4_write_partial_sgl()
937 frag_size = min(len, skb_frag_size(frag) - start); in cxgb4_write_partial_sgl()
938 sgl->len0 = cpu_to_be32(frag_size); in cxgb4_write_partial_sgl()
939 sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start); in cxgb4_write_partial_sgl()
940 len -= frag_size; in cxgb4_write_partial_sgl()
955 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; in cxgb4_write_partial_sgl()
962 frag_size = min(len, skb_frag_size(&si->frags[frag_idx])); in cxgb4_write_partial_sgl()
963 to->len[i & 1] = cpu_to_be32(frag_size); in cxgb4_write_partial_sgl()
964 to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]); in cxgb4_write_partial_sgl()
970 len -= frag_size; in cxgb4_write_partial_sgl()
977 to->len[1] = cpu_to_be32(0); in cxgb4_write_partial_sgl()
982 if (unlikely((u8 *)end > (u8 *)q->stat)) { in cxgb4_write_partial_sgl()
983 u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; in cxgb4_write_partial_sgl()
986 memcpy(sgl->sge, buf, part0); in cxgb4_write_partial_sgl()
987 part1 = (u8 *)end - (u8 *)q->stat; in cxgb4_write_partial_sgl()
988 memcpy(q->desc, (u8 *)buf + part0, part1); in cxgb4_write_partial_sgl()
989 end = (void *)q->desc + part1; in cxgb4_write_partial_sgl()
992 /* 0-pad to multiple of 16 */ in cxgb4_write_partial_sgl()
996 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | in cxgb4_write_partial_sgl()
1013 count--; in cxgb_pio_copy()
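cxgb_pio_copy() streams a Work Request into the BAR2 write-combining window 64 bits at a time. A minimal userspace analogue of that word-by-word copy loop, using ordinary memory instead of an MMIO mapping:

#include <assert.h>
#include <stdint.h>

/* Copy count 64-bit words from src to dst one word at a time, the way the
 * driver feeds a Work Request into the write-combining doorbell region.
 */
static void pio_copy64(volatile uint64_t *dst, const uint64_t *src,
		       unsigned int count)
{
	while (count) {
		*dst++ = *src++;
		count--;
	}
}

int main(void)
{
	uint64_t wr[4] = { 1, 2, 3, 4 }, out[4] = { 0 };

	pio_copy64(out, wr, 4);
	assert(out[0] == 1 && out[3] == 4);
	return 0;
}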
1018 * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
1035 if (unlikely(q->bar2_addr == NULL)) { in cxgb4_ring_tx_db()
1042 spin_lock_irqsave(&q->db_lock, flags); in cxgb4_ring_tx_db()
1043 if (!q->db_disabled) in cxgb4_ring_tx_db()
1045 QID_V(q->cntxt_id) | val); in cxgb4_ring_tx_db()
1047 q->db_pidx_inc += n; in cxgb4_ring_tx_db()
1048 q->db_pidx = q->pidx; in cxgb4_ring_tx_db()
1049 spin_unlock_irqrestore(&q->db_lock, flags); in cxgb4_ring_tx_db()
1065 if (n == 1 && q->bar2_qid == 0) { in cxgb4_ring_tx_db()
1066 int index = (q->pidx in cxgb4_ring_tx_db()
1067 ? (q->pidx - 1) in cxgb4_ring_tx_db()
1068 : (q->size - 1)); in cxgb4_ring_tx_db()
1069 u64 *wr = (u64 *)&q->desc[index]; in cxgb4_ring_tx_db()
1072 (q->bar2_addr + SGE_UDB_WCDOORBELL), in cxgb4_ring_tx_db()
1075 writel(val | QID_V(q->bar2_qid), in cxgb4_ring_tx_db()
1076 q->bar2_addr + SGE_UDB_KDOORBELL); in cxgb4_ring_tx_db()
1095 * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
1108 int left = (void *)q->stat - pos; in cxgb4_inline_tx_skb()
1111 if (likely(skb->len <= left)) { in cxgb4_inline_tx_skb()
1112 if (likely(!skb->data_len)) in cxgb4_inline_tx_skb()
1113 skb_copy_from_linear_data(skb, pos, skb->len); in cxgb4_inline_tx_skb()
1115 skb_copy_bits(skb, 0, pos, skb->len); in cxgb4_inline_tx_skb()
1116 pos += skb->len; in cxgb4_inline_tx_skb()
1119 skb_copy_bits(skb, left, q->desc, skb->len - left); in cxgb4_inline_tx_skb()
1120 pos = (void *)q->desc + (skb->len - left); in cxgb4_inline_tx_skb()
1123 /* 0-pad to multiple of 16 */ in cxgb4_inline_tx_skb()
1135 int left = (void *)q->stat - pos; in inline_tx_skb_header()
1138 memcpy(pos, skb->data, length); in inline_tx_skb_header()
1141 memcpy(pos, skb->data, left); in inline_tx_skb_header()
1142 memcpy(q->desc, skb->data + left, length - left); in inline_tx_skb_header()
1143 pos = (void *)q->desc + (length - left); in inline_tx_skb_header()
1145 /* 0-pad to multiple of 16 */ in inline_tx_skb_header()
1164 if (skb->encapsulation && in hwcsum()
1169 ver = inner_ip_hdr(skb)->version; in hwcsum()
1170 proto = (ver == 4) ? inner_ip_hdr(skb)->protocol : in hwcsum()
1171 inner_ipv6_hdr(skb)->nexthdr; in hwcsum()
1173 ver = ip_hdr(skb)->version; in hwcsum()
1174 proto = (ver == 4) ? ip_hdr(skb)->protocol : in hwcsum()
1175 ipv6_hdr(skb)->nexthdr; in hwcsum()
1192 * this doesn't work with extension headers in hwcsum()
1211 eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN; in hwcsum()
1214 eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; in hwcsum()
1228 TXPKT_CSUM_LOC_V(start + skb->csum_offset); in hwcsum()
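The tail of hwcsum() tells the hardware where checksumming starts and where the result must be written (start plus skb->csum_offset). A worked example of those two offsets for plain TCP over IPv4 with no VLAN tag, independent of the driver's register macros:

#include <assert.h>
#include <stddef.h>

/* For CHECKSUM_PARTIAL the stack hands the driver two offsets: the start of
 * the checksummed region and, relative to that start, where the checksum
 * field lives.
 */
int main(void)
{
	size_t eth_hlen = 14, ip_hlen = 20;
	size_t csum_start = eth_hlen + ip_hlen;	/* start of the TCP header */
	size_t csum_offset = 16;		/* checksum field inside the TCP header */

	assert(csum_start == 34);
	assert(csum_start + csum_offset == 50);	/* absolute location of the checksum */
	return 0;
}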
1234 netif_tx_stop_queue(q->txq); in eth_txq_stop()
1235 q->q.stops++; in eth_txq_stop()
1240 q->in_use += n; in txq_advance()
1241 q->pidx += n; in txq_advance()
1242 if (q->pidx >= q->size) in txq_advance()
1243 q->pidx -= q->size; in txq_advance()
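txq_advance() bumps the in-use count and the producer index, wrapping pidx back into [0, size). A standalone sketch of the same bookkeeping:

#include <assert.h>

struct ring_prod {
	unsigned int in_use, pidx, size;
};

static void ring_advance(struct ring_prod *q, unsigned int n)
{
	q->in_use += n;
	q->pidx += n;
	if (q->pidx >= q->size)
		q->pidx -= q->size;
}

int main(void)
{
	struct ring_prod q = { .in_use = 0, .pidx = 62, .size = 64 };

	ring_advance(&q, 3);
	assert(q.pidx == 1 && q.in_use == 3);	/* producer index wrapped */
	return 0;
}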
1251 const struct cxgb_fcoe *fcoe = &pi->fcoe; in cxgb_fcoe_offload()
1253 if (!(fcoe->flags & CXGB_FCOE_ENABLED)) in cxgb_fcoe_offload()
1256 if (skb->protocol != htons(ETH_P_FCOE)) in cxgb_fcoe_offload()
1260 skb->mac_len = sizeof(struct ethhdr); in cxgb_fcoe_offload()
1262 skb_set_network_header(skb, skb->mac_len); in cxgb_fcoe_offload()
1263 skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); in cxgb_fcoe_offload()
1266 return -ENOTSUPP; in cxgb_fcoe_offload()
1285 struct port_info *pi = netdev_priv(skb->dev); in cxgb_encap_offload_supported()
1286 struct adapter *adapter = pi->adapter; in cxgb_encap_offload_supported()
1288 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || in cxgb_encap_offload_supported()
1289 skb->inner_protocol != htons(ETH_P_TEB)) in cxgb_encap_offload_supported()
1294 l4_hdr = ip_hdr(skb)->protocol; in cxgb_encap_offload_supported()
1297 l4_hdr = ipv6_hdr(skb)->nexthdr; in cxgb_encap_offload_supported()
1305 if (adapter->vxlan_port == udp_hdr(skb)->dest) in cxgb_encap_offload_supported()
1307 else if (adapter->geneve_port == udp_hdr(skb)->dest) in cxgb_encap_offload_supported()
1324 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; in t6_fill_tnl_lso()
1326 bool v6 = (ip_hdr(skb)->version == 6); in t6_fill_tnl_lso()
1337 tnl_lso->op_to_IpIdSplitOut = htonl(val); in t6_fill_tnl_lso()
1339 tnl_lso->IpIdOffsetOut = 0; in t6_fill_tnl_lso()
1342 val = skb_inner_mac_header(skb) - skb_mac_header(skb); in t6_fill_tnl_lso()
1343 in_eth_xtra_len = skb_inner_network_header(skb) - in t6_fill_tnl_lso()
1344 skb_inner_mac_header(skb) - ETH_HLEN; in t6_fill_tnl_lso()
1349 tnl_lso->UdpLenSetOut_to_TnlHdrLen = in t6_fill_tnl_lso()
1354 tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0; in t6_fill_tnl_lso()
1358 tnl_lso->UdpLenSetOut_to_TnlHdrLen |= in t6_fill_tnl_lso()
1362 tnl_lso->r1 = 0; in t6_fill_tnl_lso()
1365 CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) | in t6_fill_tnl_lso()
1368 tnl_lso->Flow_to_TcpHdrLen = htonl(val); in t6_fill_tnl_lso()
1370 tnl_lso->IpIdOffset = htons(0); in t6_fill_tnl_lso()
1372 tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size)); in t6_fill_tnl_lso()
1373 tnl_lso->TCPSeqOffset = htonl(0); in t6_fill_tnl_lso()
1374 tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len)); in t6_fill_tnl_lso()
1380 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; in write_tso_wr()
1386 if (ssi->gso_type & SKB_GSO_TCPV6) in write_tso_wr()
1389 lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | in write_tso_wr()
1394 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); in write_tso_wr()
1395 lso->ipid_ofst = htons(0); in write_tso_wr()
1396 lso->mss = htons(ssi->gso_size); in write_tso_wr()
1397 lso->seqno_offset = htonl(0); in write_tso_wr()
1398 if (is_t4(adap->params.chip)) in write_tso_wr()
1399 lso->len = htonl(skb->len); in write_tso_wr()
1401 lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); in write_tso_wr()
1407 * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
1410 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
1416 * in regular timer-based Ethernet TX Queue maintenance.
1422 struct sge_txq *q = &eq->q; in t4_sge_eth_txq_egress_update()
1425 if (!q->in_use || !__netif_tx_trylock(eq->txq)) in t4_sge_eth_txq_egress_update()
1429 reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true); in t4_sge_eth_txq_egress_update()
1431 hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); in t4_sge_eth_txq_egress_update()
1432 hw_in_use = q->pidx - hw_cidx; in t4_sge_eth_txq_egress_update()
1434 hw_in_use += q->size; in t4_sge_eth_txq_egress_update()
1441 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) { in t4_sge_eth_txq_egress_update()
1442 netif_tx_wake_queue(eq->txq); in t4_sge_eth_txq_egress_update()
1443 eq->q.restarts++; in t4_sge_eth_txq_egress_update()
1446 __netif_tx_unlock(eq->txq); in t4_sge_eth_txq_egress_update()
1460 if (unlikely(skb->len < min_pkt_len)) in cxgb4_validate_skb()
1461 return -EINVAL; in cxgb4_validate_skb()
1464 max_pkt_len = ETH_HLEN + dev->mtu; in cxgb4_validate_skb()
1469 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) in cxgb4_validate_skb()
1470 return -EINVAL; in cxgb4_validate_skb()
1478 wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG; in write_eo_udp_wr()
1479 wr->u.udpseg.ethlen = skb_network_offset(skb); in write_eo_udp_wr()
1480 wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); in write_eo_udp_wr()
1481 wr->u.udpseg.udplen = sizeof(struct udphdr); in write_eo_udp_wr()
1482 wr->u.udpseg.rtplen = 0; in write_eo_udp_wr()
1483 wr->u.udpseg.r4 = 0; in write_eo_udp_wr()
1484 if (skb_shinfo(skb)->gso_size) in write_eo_udp_wr()
1485 wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); in write_eo_udp_wr()
1487 wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len); in write_eo_udp_wr()
1488 wr->u.udpseg.schedpktsize = wr->u.udpseg.mss; in write_eo_udp_wr()
1489 wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len); in write_eo_udp_wr()
1495 * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
1525 adap = pi->adapter; in cxgb4_eth_xmit()
1528 if (xfrm_offload(skb) && !ssi->gso_size) in cxgb4_eth_xmit()
1529 return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev); in cxgb4_eth_xmit()
1534 (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)))) in cxgb4_eth_xmit()
1535 return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev); in cxgb4_eth_xmit()
1540 if (!(adap->ptp_tx_skb)) { in cxgb4_eth_xmit()
1541 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in cxgb4_eth_xmit()
1542 adap->ptp_tx_skb = skb_get(skb); in cxgb4_eth_xmit()
1546 q = &adap->sge.ptptxq; in cxgb4_eth_xmit()
1548 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in cxgb4_eth_xmit()
1552 reclaim_completed_tx(adap, &q->q, -1, true); in cxgb4_eth_xmit()
1557 if (unlikely(ret == -EOPNOTSUPP)) in cxgb4_eth_xmit()
1561 chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in cxgb4_eth_xmit()
1564 credits = txq_avail(&q->q) - ndesc; in cxgb4_eth_xmit()
1568 dev_err(adap->pdev_dev, in cxgb4_eth_xmit()
1570 dev->name, qidx); in cxgb4_eth_xmit()
1577 if (skb->encapsulation && chip_ver > CHELSIO_T5) in cxgb4_eth_xmit()
1580 last_desc = q->q.pidx + ndesc - 1; in cxgb4_eth_xmit()
1581 if (last_desc >= q->q.size) in cxgb4_eth_xmit()
1582 last_desc -= q->q.size; in cxgb4_eth_xmit()
1583 sgl_sdesc = &q->q.sdesc[last_desc]; in cxgb4_eth_xmit()
1586 unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) { in cxgb4_eth_xmit()
1587 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); in cxgb4_eth_xmit()
1588 q->mapping_err++; in cxgb4_eth_xmit()
1607 wr = (void *)&q->q.desc[q->q.pidx]; in cxgb4_eth_xmit()
1608 eowr = (void *)&q->q.desc[q->q.pidx]; in cxgb4_eth_xmit()
1609 wr->equiq_to_len16 = htonl(wr_mid); in cxgb4_eth_xmit()
1610 wr->r3 = cpu_to_be64(0); in cxgb4_eth_xmit()
1611 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) in cxgb4_eth_xmit()
1616 len = immediate ? skb->len : 0; in cxgb4_eth_xmit()
1618 if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) { in cxgb4_eth_xmit()
1627 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | in cxgb4_eth_xmit()
1637 if (iph->version == 4) { in cxgb4_eth_xmit()
1638 iph->check = 0; in cxgb4_eth_xmit()
1639 iph->tot_len = 0; in cxgb4_eth_xmit()
1640 iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl); in cxgb4_eth_xmit()
1642 if (skb->ip_summed == CHECKSUM_PARTIAL) in cxgb4_eth_xmit()
1643 cntrl = hwcsum(adap->params.chip, skb); in cxgb4_eth_xmit()
1646 cntrl = hwcsum(adap->params.chip, skb); in cxgb4_eth_xmit()
1649 q->tso++; in cxgb4_eth_xmit()
1650 q->tx_cso += ssi->gso_segs; in cxgb4_eth_xmit()
1651 } else if (ssi->gso_size) { in cxgb4_eth_xmit()
1655 hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb)); in cxgb4_eth_xmit()
1657 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | in cxgb4_eth_xmit()
1660 cntrl = hwcsum(adap->params.chip, skb); in cxgb4_eth_xmit()
1663 sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start, in cxgb4_eth_xmit()
1666 left = (u8 *)end - (u8 *)q->q.stat; in cxgb4_eth_xmit()
1667 end = (void *)q->q.desc + left; in cxgb4_eth_xmit()
1670 q->uso++; in cxgb4_eth_xmit()
1671 q->tx_cso += ssi->gso_segs; in cxgb4_eth_xmit()
1677 wr->op_immdlen = htonl(FW_WR_OP_V(op) | in cxgb4_eth_xmit()
1681 if (skb->ip_summed == CHECKSUM_PARTIAL) { in cxgb4_eth_xmit()
1682 cntrl = hwcsum(adap->params.chip, skb) | in cxgb4_eth_xmit()
1684 q->tx_cso++; in cxgb4_eth_xmit()
1688 if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { in cxgb4_eth_xmit()
1693 left = (u8 *)end - (u8 *)q->q.stat; in cxgb4_eth_xmit()
1694 end = (void *)q->q.desc + left; in cxgb4_eth_xmit()
1695 sgl = (void *)q->q.desc; in cxgb4_eth_xmit()
1699 q->vlan_ins++; in cxgb4_eth_xmit()
1702 if (skb->protocol == htons(ETH_P_FCOE)) in cxgb4_eth_xmit()
1704 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT)); in cxgb4_eth_xmit()
1708 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | in cxgb4_eth_xmit()
1709 TXPKT_PF_V(adap->pf); in cxgb4_eth_xmit()
1713 if (is_t4(adap->params.chip)) in cxgb4_eth_xmit()
1714 ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio); in cxgb4_eth_xmit()
1716 ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio); in cxgb4_eth_xmit()
1718 cpl->ctrl0 = htonl(ctrl0); in cxgb4_eth_xmit()
1719 cpl->pack = htons(0); in cxgb4_eth_xmit()
1720 cpl->len = htons(skb->len); in cxgb4_eth_xmit()
1721 cpl->ctrl1 = cpu_to_be64(cntrl); in cxgb4_eth_xmit()
1724 cxgb4_inline_tx_skb(skb, &q->q, sgl); in cxgb4_eth_xmit()
1727 cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off, in cxgb4_eth_xmit()
1728 sgl_sdesc->addr); in cxgb4_eth_xmit()
1730 sgl_sdesc->skb = skb; in cxgb4_eth_xmit()
1733 txq_advance(&q->q, ndesc); in cxgb4_eth_xmit()
1735 cxgb4_ring_tx_db(adap, &q->q, ndesc); in cxgb4_eth_xmit()
1749 * 64-bit PCI DMA addresses.
1761 * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
1779 * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
1794 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), in t4vf_calc_tx_flits()
1803 * with an embedded TX Packet Write CPL message. in t4vf_calc_tx_flits()
1805 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); in t4vf_calc_tx_flits()
1806 if (skb_shinfo(skb)->gso_size) in t4vf_calc_tx_flits()
1817 * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
1845 fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) + in cxgb4_vf_eth_xmit()
1846 sizeof(wr->ethtype) + sizeof(wr->vlantci); in cxgb4_vf_eth_xmit()
1853 adapter = pi->adapter; in cxgb4_vf_eth_xmit()
1855 WARN_ON(qidx >= pi->nqsets); in cxgb4_vf_eth_xmit()
1856 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in cxgb4_vf_eth_xmit()
1861 reclaim_completed_tx(adapter, &txq->q, -1, true); in cxgb4_vf_eth_xmit()
1869 credits = txq_avail(&txq->q) - ndesc; in cxgb4_vf_eth_xmit()
1878 dev_err(adapter->pdev_dev, in cxgb4_vf_eth_xmit()
1880 dev->name, qidx); in cxgb4_vf_eth_xmit()
1884 last_desc = txq->q.pidx + ndesc - 1; in cxgb4_vf_eth_xmit()
1885 if (last_desc >= txq->q.size) in cxgb4_vf_eth_xmit()
1886 last_desc -= txq->q.size; in cxgb4_vf_eth_xmit()
1887 sgl_sdesc = &txq->q.sdesc[last_desc]; in cxgb4_vf_eth_xmit()
1890 unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, in cxgb4_vf_eth_xmit()
1891 sgl_sdesc->addr) < 0)) { in cxgb4_vf_eth_xmit()
1893 * be in-lined directly into the Work Request) and the mapping in cxgb4_vf_eth_xmit()
1896 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); in cxgb4_vf_eth_xmit()
1897 txq->mapping_err++; in cxgb4_vf_eth_xmit()
1901 chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip); in cxgb4_vf_eth_xmit()
1923 wr = (void *)&txq->q.desc[txq->q.pidx]; in cxgb4_vf_eth_xmit()
1924 wr->equiq_to_len16 = cpu_to_be32(wr_mid); in cxgb4_vf_eth_xmit()
1925 wr->r3[0] = cpu_to_be32(0); in cxgb4_vf_eth_xmit()
1926 wr->r3[1] = cpu_to_be32(0); in cxgb4_vf_eth_xmit()
1927 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); in cxgb4_vf_eth_xmit()
1935 if (ssi->gso_size) { in cxgb4_vf_eth_xmit()
1937 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; in cxgb4_vf_eth_xmit()
1939 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; in cxgb4_vf_eth_xmit()
1941 wr->op_immdlen = in cxgb4_vf_eth_xmit()
1946 lso->lso_ctrl = in cxgb4_vf_eth_xmit()
1953 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); in cxgb4_vf_eth_xmit()
1954 lso->ipid_ofst = cpu_to_be16(0); in cxgb4_vf_eth_xmit()
1955 lso->mss = cpu_to_be16(ssi->gso_size); in cxgb4_vf_eth_xmit()
1956 lso->seqno_offset = cpu_to_be32(0); in cxgb4_vf_eth_xmit()
1957 if (is_t4(adapter->params.chip)) in cxgb4_vf_eth_xmit()
1958 lso->len = cpu_to_be32(skb->len); in cxgb4_vf_eth_xmit()
1960 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); in cxgb4_vf_eth_xmit()
1975 txq->tso++; in cxgb4_vf_eth_xmit()
1976 txq->tx_cso += ssi->gso_segs; in cxgb4_vf_eth_xmit()
1981 ? skb->len + sizeof(*cpl) in cxgb4_vf_eth_xmit()
1983 wr->op_immdlen = in cxgb4_vf_eth_xmit()
1991 if (skb->ip_summed == CHECKSUM_PARTIAL) { in cxgb4_vf_eth_xmit()
1992 cntrl = hwcsum(adapter->params.chip, skb) | in cxgb4_vf_eth_xmit()
1994 txq->tx_cso++; in cxgb4_vf_eth_xmit()
2004 txq->vlan_ins++; in cxgb4_vf_eth_xmit()
2009 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | in cxgb4_vf_eth_xmit()
2010 TXPKT_INTF_V(pi->port_id) | in cxgb4_vf_eth_xmit()
2012 cpl->pack = cpu_to_be16(0); in cxgb4_vf_eth_xmit()
2013 cpl->len = cpu_to_be16(skb->len); in cxgb4_vf_eth_xmit()
2014 cpl->ctrl1 = cpu_to_be64(cntrl); in cxgb4_vf_eth_xmit()
2016 /* Fill in the body of the TX Packet CPL message with either in-lined in cxgb4_vf_eth_xmit()
2020 /* In-line the packet's data and free the skb since we don't in cxgb4_vf_eth_xmit()
2023 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); in cxgb4_vf_eth_xmit()
2063 struct sge_txq *tq = &txq->q; in cxgb4_vf_eth_xmit()
2071 if (unlikely((void *)sgl == (void *)tq->stat)) { in cxgb4_vf_eth_xmit()
2072 sgl = (void *)tq->desc; in cxgb4_vf_eth_xmit()
2073 end = (void *)((void *)tq->desc + in cxgb4_vf_eth_xmit()
2074 ((void *)end - (void *)tq->stat)); in cxgb4_vf_eth_xmit()
2077 cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr); in cxgb4_vf_eth_xmit()
2079 sgl_sdesc->skb = skb; in cxgb4_vf_eth_xmit()
2085 txq_advance(&txq->q, ndesc); in cxgb4_vf_eth_xmit()
2087 cxgb4_ring_tx_db(adapter, &txq->q, ndesc); in cxgb4_vf_eth_xmit()
2099 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
2108 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); in reclaim_completed_tx_imm()
2109 int reclaim = hw_cidx - q->cidx; in reclaim_completed_tx_imm()
2112 reclaim += q->size; in reclaim_completed_tx_imm()
2114 q->in_use -= reclaim; in reclaim_completed_tx_imm()
2115 q->cidx = hw_cidx; in reclaim_completed_tx_imm()
2123 val -= max; in eosw_txq_advance_index()
2133 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
2134 while (ndesc--) { in cxgb4_eosw_txq_free_desc()
2135 if (d->skb) { in cxgb4_eosw_txq_free_desc()
2136 if (d->addr[0]) { in cxgb4_eosw_txq_free_desc()
2137 unmap_skb(adap->pdev_dev, d->skb, d->addr); in cxgb4_eosw_txq_free_desc()
2138 memset(d->addr, 0, sizeof(d->addr)); in cxgb4_eosw_txq_free_desc()
2140 dev_consume_skb_any(d->skb); in cxgb4_eosw_txq_free_desc()
2141 d->skb = NULL; in cxgb4_eosw_txq_free_desc()
2143 eosw_txq_advance_index(&eosw_txq->last_cidx, 1, in cxgb4_eosw_txq_free_desc()
2144 eosw_txq->ndesc); in cxgb4_eosw_txq_free_desc()
2145 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
2151 eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc); in eosw_txq_advance()
2152 eosw_txq->inuse += n; in eosw_txq_advance()
2158 if (eosw_txq->inuse == eosw_txq->ndesc) in eosw_txq_enqueue()
2159 return -ENOMEM; in eosw_txq_enqueue()
2161 eosw_txq->desc[eosw_txq->pidx].skb = skb; in eosw_txq_enqueue()
2167 return eosw_txq->desc[eosw_txq->last_pidx].skb; in eosw_txq_peek()
2177 if (skb_shinfo(skb)->gso_size && in ethofld_calc_tx_flits()
2178 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) in ethofld_calc_tx_flits()
2186 if (skb_shinfo(skb)->nr_frags > 0) { in ethofld_calc_tx_flits()
2187 if (skb_headlen(skb) - hdr_len) in ethofld_calc_tx_flits()
2188 nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1); in ethofld_calc_tx_flits()
2190 nsgl = sgl_len(skb_shinfo(skb)->nr_frags); in ethofld_calc_tx_flits()
2191 } else if (skb->len - hdr_len) { in ethofld_calc_tx_flits()
2208 ver = ip_hdr(skb)->version; in write_eo_wr()
2209 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol; in write_eo_wr()
2213 if (skb_shinfo(skb)->gso_size && in write_eo_wr()
2214 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) in write_eo_wr()
2218 if (!eosw_txq->ncompl || in write_eo_wr()
2219 (eosw_txq->last_compl + wrlen16) >= in write_eo_wr()
2220 (adap->params.ofldq_wr_cred / 2)) { in write_eo_wr()
2222 eosw_txq->ncompl++; in write_eo_wr()
2223 eosw_txq->last_compl = 0; in write_eo_wr()
2226 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | in write_eo_wr()
2229 wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) | in write_eo_wr()
2230 FW_WR_FLOWID_V(eosw_txq->hwtid)); in write_eo_wr()
2231 wr->r3 = 0; in write_eo_wr()
2235 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; in write_eo_wr()
2236 wr->u.tcpseg.ethlen = skb_network_offset(skb); in write_eo_wr()
2237 wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); in write_eo_wr()
2238 wr->u.tcpseg.tcplen = tcp_hdrlen(skb); in write_eo_wr()
2239 wr->u.tcpseg.tsclk_tsoff = 0; in write_eo_wr()
2240 wr->u.tcpseg.r4 = 0; in write_eo_wr()
2241 wr->u.tcpseg.r5 = 0; in write_eo_wr()
2242 wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len); in write_eo_wr()
2244 if (ssi->gso_size) { in write_eo_wr()
2247 wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size); in write_eo_wr()
2250 wr->u.tcpseg.mss = cpu_to_be16(0xffff); in write_eo_wr()
2255 eosw_txq->cred -= wrlen16; in write_eo_wr()
2256 eosw_txq->last_compl += wrlen16; in write_eo_wr()
2277 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid]; in ethofld_hard_xmit()
2278 spin_lock(&eohw_txq->lock); in ethofld_hard_xmit()
2279 reclaim_completed_tx_imm(&eohw_txq->q); in ethofld_hard_xmit()
2281 d = &eosw_txq->desc[eosw_txq->last_pidx]; in ethofld_hard_xmit()
2282 skb = d->skb; in ethofld_hard_xmit()
2285 wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx]; in ethofld_hard_xmit()
2286 if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE && in ethofld_hard_xmit()
2287 eosw_txq->last_pidx == eosw_txq->flowc_idx)) { in ethofld_hard_xmit()
2288 hdr_len = skb->len; in ethofld_hard_xmit()
2291 if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND) in ethofld_hard_xmit()
2297 hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb)); in ethofld_hard_xmit()
2298 data_len = skb->len - hdr_len; in ethofld_hard_xmit()
2305 left = txq_avail(&eohw_txq->q) - ndesc; in ethofld_hard_xmit()
2314 if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) { in ethofld_hard_xmit()
2315 ret = -ENOMEM; in ethofld_hard_xmit()
2321 eosw_txq->state = next_state; in ethofld_hard_xmit()
2322 eosw_txq->cred -= wrlen16; in ethofld_hard_xmit()
2323 eosw_txq->ncompl++; in ethofld_hard_xmit()
2324 eosw_txq->last_compl = 0; in ethofld_hard_xmit()
2329 cntrl = hwcsum(adap->params.chip, skb); in ethofld_hard_xmit()
2333 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | in ethofld_hard_xmit()
2334 TXPKT_INTF_V(pi->tx_chan) | in ethofld_hard_xmit()
2335 TXPKT_PF_V(adap->pf)); in ethofld_hard_xmit()
2336 cpl->pack = 0; in ethofld_hard_xmit()
2337 cpl->len = cpu_to_be16(skb->len); in ethofld_hard_xmit()
2338 cpl->ctrl1 = cpu_to_be64(cntrl); in ethofld_hard_xmit()
2343 sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start, in ethofld_hard_xmit()
2346 ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr); in ethofld_hard_xmit()
2348 memset(d->addr, 0, sizeof(d->addr)); in ethofld_hard_xmit()
2349 eohw_txq->mapping_err++; in ethofld_hard_xmit()
2355 left = (u8 *)end - (u8 *)eohw_txq->q.stat; in ethofld_hard_xmit()
2356 end = (void *)eohw_txq->q.desc + left; in ethofld_hard_xmit()
2359 if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) { in ethofld_hard_xmit()
2364 left = (u8 *)end - (u8 *)eohw_txq->q.stat; in ethofld_hard_xmit()
2366 end = (void *)eohw_txq->q.desc + left; in ethofld_hard_xmit()
2367 sgl = (void *)eohw_txq->q.desc; in ethofld_hard_xmit()
2370 cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len, in ethofld_hard_xmit()
2371 d->addr); in ethofld_hard_xmit()
2374 if (skb_shinfo(skb)->gso_size) { in ethofld_hard_xmit()
2375 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) in ethofld_hard_xmit()
2376 eohw_txq->uso++; in ethofld_hard_xmit()
2378 eohw_txq->tso++; in ethofld_hard_xmit()
2379 eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs; in ethofld_hard_xmit()
2380 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in ethofld_hard_xmit()
2381 eohw_txq->tx_cso++; in ethofld_hard_xmit()
2385 eohw_txq->vlan_ins++; in ethofld_hard_xmit()
2387 txq_advance(&eohw_txq->q, ndesc); in ethofld_hard_xmit()
2388 cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc); in ethofld_hard_xmit()
2389 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc); in ethofld_hard_xmit()
2392 spin_unlock(&eohw_txq->lock); in ethofld_hard_xmit()
2401 switch (eosw_txq->state) { in ethofld_xmit()
2405 pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in ethofld_xmit()
2407 pktcount += eosw_txq->ndesc; in ethofld_xmit()
2416 while (pktcount--) { in ethofld_xmit()
2419 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, in ethofld_xmit()
2420 eosw_txq->ndesc); in ethofld_xmit()
2444 tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; in cxgb4_ethofld_xmit()
2445 qid = skb_get_queue_mapping(skb) - pi->nqsets; in cxgb4_ethofld_xmit()
2446 eosw_txq = &tc_port_mqprio->eosw_txq[qid]; in cxgb4_ethofld_xmit()
2447 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2448 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_xmit()
2463 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2467 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2478 if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) in t4_start_xmit()
2481 if (unlikely(qid >= pi->nqsets)) in t4_start_xmit()
2488 spin_lock(&adap->ptp_lock); in t4_start_xmit()
2490 spin_unlock(&adap->ptp_lock); in t4_start_xmit()
2499 int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in eosw_txq_flush_pending_skbs()
2500 int pidx = eosw_txq->pidx; in eosw_txq_flush_pending_skbs()
2507 pktcount += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2509 while (pktcount--) { in eosw_txq_flush_pending_skbs()
2510 pidx--; in eosw_txq_flush_pending_skbs()
2512 pidx += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2514 skb = eosw_txq->desc[pidx].skb; in eosw_txq_flush_pending_skbs()
2517 eosw_txq->desc[pidx].skb = NULL; in eosw_txq_flush_pending_skbs()
2518 eosw_txq->inuse--; in eosw_txq_flush_pending_skbs()
2522 eosw_txq->pidx = eosw_txq->last_pidx + 1; in eosw_txq_flush_pending_skbs()
2526 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
2551 entry = cxgb4_lookup_eotid(&adap->tids, eotid); in cxgb4_ethofld_send_flowc()
2553 return -ENOMEM; in cxgb4_ethofld_send_flowc()
2555 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_send_flowc()
2557 return -ENOMEM; in cxgb4_ethofld_send_flowc()
2559 if (!(adap->flags & CXGB4_FW_OK)) { in cxgb4_ethofld_send_flowc()
2561 complete(&eosw_txq->completion); in cxgb4_ethofld_send_flowc()
2562 return -EIO; in cxgb4_ethofld_send_flowc()
2567 return -ENOMEM; in cxgb4_ethofld_send_flowc()
2569 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2571 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED) in cxgb4_ethofld_send_flowc()
2576 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_send_flowc()
2585 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid]; in cxgb4_ethofld_send_flowc()
2586 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) | in cxgb4_ethofld_send_flowc()
2587 FW_WR_FLOWID_V(eosw_txq->hwtid)); in cxgb4_ethofld_send_flowc()
2588 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | in cxgb4_ethofld_send_flowc()
2591 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; in cxgb4_ethofld_send_flowc()
2592 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf)); in cxgb4_ethofld_send_flowc()
2593 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; in cxgb4_ethofld_send_flowc()
2594 flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan); in cxgb4_ethofld_send_flowc()
2595 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; in cxgb4_ethofld_send_flowc()
2596 flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan); in cxgb4_ethofld_send_flowc()
2597 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; in cxgb4_ethofld_send_flowc()
2598 flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id); in cxgb4_ethofld_send_flowc()
2599 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; in cxgb4_ethofld_send_flowc()
2600 flowc->mnemval[4].val = cpu_to_be32(tc); in cxgb4_ethofld_send_flowc()
2601 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE; in cxgb4_ethofld_send_flowc()
2602 flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ? in cxgb4_ethofld_send_flowc()
2616 eosw_txq->state = next_state; in cxgb4_ethofld_send_flowc()
2617 eosw_txq->flowc_idx = eosw_txq->pidx; in cxgb4_ethofld_send_flowc()
2621 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2626 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2631 * is_imm - check whether a packet can be sent as immediate data
2638 return skb->len <= MAX_CTRL_WR_LEN; in is_imm()
2642 * ctrlq_check_stop - check if a control queue is full and should stop
2653 reclaim_completed_tx_imm(&q->q); in ctrlq_check_stop()
2654 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { in ctrlq_check_stop()
2655 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); in ctrlq_check_stop()
2656 q->q.stops++; in ctrlq_check_stop()
2657 q->full = 1; in ctrlq_check_stop()
2666 struct adapter *adap = pi->adapter; in cxgb4_selftest_lb_pkt()
2681 lb = &pi->ethtool_lb; in cxgb4_selftest_lb_pkt()
2682 lb->loopback = 1; in cxgb4_selftest_lb_pkt()
2684 q = &adap->sge.ethtxq[pi->first_qset]; in cxgb4_selftest_lb_pkt()
2685 __netif_tx_lock(q->txq, smp_processor_id()); in cxgb4_selftest_lb_pkt()
2687 reclaim_completed_tx(adap, &q->q, -1, true); in cxgb4_selftest_lb_pkt()
2688 credits = txq_avail(&q->q) - ndesc; in cxgb4_selftest_lb_pkt()
2690 __netif_tx_unlock(q->txq); in cxgb4_selftest_lb_pkt()
2691 return -ENOMEM; in cxgb4_selftest_lb_pkt()
2694 wr = (void *)&q->q.desc[q->q.pidx]; in cxgb4_selftest_lb_pkt()
2697 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | in cxgb4_selftest_lb_pkt()
2700 wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2))); in cxgb4_selftest_lb_pkt()
2701 wr->r3 = cpu_to_be64(0); in cxgb4_selftest_lb_pkt()
2706 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) | in cxgb4_selftest_lb_pkt()
2707 TXPKT_INTF_V(pi->tx_chan + 4); in cxgb4_selftest_lb_pkt()
2709 cpl->ctrl0 = htonl(ctrl0); in cxgb4_selftest_lb_pkt()
2710 cpl->pack = htons(0); in cxgb4_selftest_lb_pkt()
2711 cpl->len = htons(pkt_len); in cxgb4_selftest_lb_pkt()
2712 cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F); in cxgb4_selftest_lb_pkt()
2716 ether_addr_copy(&sgl[i], netdev->dev_addr); in cxgb4_selftest_lb_pkt()
2722 init_completion(&lb->completion); in cxgb4_selftest_lb_pkt()
2723 txq_advance(&q->q, ndesc); in cxgb4_selftest_lb_pkt()
2724 cxgb4_ring_tx_db(adap, &q->q, ndesc); in cxgb4_selftest_lb_pkt()
2725 __netif_tx_unlock(q->txq); in cxgb4_selftest_lb_pkt()
2728 ret = wait_for_completion_timeout(&lb->completion, 10 * HZ); in cxgb4_selftest_lb_pkt()
2730 ret = -ETIMEDOUT; in cxgb4_selftest_lb_pkt()
2732 ret = lb->result; in cxgb4_selftest_lb_pkt()
2734 lb->loopback = 0; in cxgb4_selftest_lb_pkt()
2740 * ctrl_xmit - send a packet through an SGE control Tx queue
2758 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); in ctrl_xmit()
2759 spin_lock(&q->sendq.lock); in ctrl_xmit()
2761 if (unlikely(q->full)) { in ctrl_xmit()
2762 skb->priority = ndesc; /* save for restart */ in ctrl_xmit()
2763 __skb_queue_tail(&q->sendq, skb); in ctrl_xmit()
2764 spin_unlock(&q->sendq.lock); in ctrl_xmit()
2768 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; in ctrl_xmit()
2769 cxgb4_inline_tx_skb(skb, &q->q, wr); in ctrl_xmit()
2771 txq_advance(&q->q, ndesc); in ctrl_xmit()
2772 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) in ctrl_xmit()
2775 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); in ctrl_xmit()
2776 spin_unlock(&q->sendq.lock); in ctrl_xmit()
2783 * restart_ctrlq - restart a suspended control queue
2794 spin_lock(&q->sendq.lock); in restart_ctrlq()
2795 reclaim_completed_tx_imm(&q->q); in restart_ctrlq()
2796 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ in restart_ctrlq()
2798 while ((skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
2800 unsigned int ndesc = skb->priority; /* previously saved */ in restart_ctrlq()
2804 * wait times. q->full is still set so new skbs will be queued. in restart_ctrlq()
2806 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; in restart_ctrlq()
2807 txq_advance(&q->q, ndesc); in restart_ctrlq()
2808 spin_unlock(&q->sendq.lock); in restart_ctrlq()
2810 cxgb4_inline_tx_skb(skb, &q->q, wr); in restart_ctrlq()
2813 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { in restart_ctrlq()
2814 unsigned long old = q->q.stops; in restart_ctrlq()
2817 if (q->q.stops != old) { /* suspended anew */ in restart_ctrlq()
2818 spin_lock(&q->sendq.lock); in restart_ctrlq()
2823 cxgb4_ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
2826 spin_lock(&q->sendq.lock); in restart_ctrlq()
2828 q->full = 0; in restart_ctrlq()
2831 cxgb4_ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
2832 spin_unlock(&q->sendq.lock); in restart_ctrlq()
2836 * t4_mgmt_tx - send a management message
2847 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); in t4_mgmt_tx()
2853 * is_ofld_imm - check whether a packet can be sent as immediate data
2858 * FW_OFLD_TX_DATA_WR limits the payload to 255 bytes due to 8-bit field.
2864 struct work_request_hdr *req = (struct work_request_hdr *)skb->data; in is_ofld_imm()
2865 unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); in is_ofld_imm()
2868 return skb->len <= MAX_IMM_ULPTX_WR_LEN; in is_ofld_imm()
2870 return skb->len <= SGE_MAX_WR_LEN; in is_ofld_imm()
2872 return skb->len <= MAX_IMM_OFLD_TX_DATA_WR_LEN; in is_ofld_imm()
2876 * calc_tx_flits_ofld - calculate # of flits for an offload packet
2888 return DIV_ROUND_UP(skb->len, 8); in calc_tx_flits_ofld()
2891 cnt = skb_shinfo(skb)->nr_frags; in calc_tx_flits_ofld()
2898 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
2907 q->mapping_err++; in txq_stop_maperr()
2908 q->q.stops++; in txq_stop_maperr()
2909 set_bit(q->q.cntxt_id - q->adap->sge.egr_start, in txq_stop_maperr()
2910 q->adap->sge.txq_maperr); in txq_stop_maperr()
2914 * ofldtxq_stop - stop an offload Tx queue that has become full
2923 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); in ofldtxq_stop()
2924 q->q.stops++; in ofldtxq_stop()
2925 q->full = 1; in ofldtxq_stop()
2929 * service_ofldq - service/restart a suspended offload queue
2947 __must_hold(&q->sendq.lock) in service_ofldq()
2963 if (q->service_ofldq_running) in service_ofldq()
2965 q->service_ofldq_running = true; in service_ofldq()
2967 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { in service_ofldq()
2975 spin_unlock(&q->sendq.lock); in service_ofldq()
2977 cxgb4_reclaim_completed_tx(q->adap, &q->q, false); in service_ofldq()
2979 flits = skb->priority; /* previously saved */ in service_ofldq()
2981 credits = txq_avail(&q->q) - ndesc; in service_ofldq()
2984 ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data); in service_ofldq()
2986 pos = (u64 *)&q->q.desc[q->q.pidx]; in service_ofldq()
2988 cxgb4_inline_tx_skb(skb, &q->q, pos); in service_ofldq()
2989 else if (cxgb4_map_skb(q->adap->pdev_dev, skb, in service_ofldq()
2990 (dma_addr_t *)skb->head)) { in service_ofldq()
2992 spin_lock(&q->sendq.lock); in service_ofldq()
2998 * So we need to deal with wrap-around here. in service_ofldq()
3002 txq = &q->q; in service_ofldq()
3003 pos = (void *)inline_tx_skb_header(skb, &q->q, in service_ofldq()
3007 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
3008 end = (void *)txq->desc + left; in service_ofldq()
3015 if (pos == (u64 *)txq->stat) { in service_ofldq()
3016 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
3017 end = (void *)txq->desc + left; in service_ofldq()
3018 pos = (void *)txq->desc; in service_ofldq()
3021 cxgb4_write_sgl(skb, &q->q, (void *)pos, in service_ofldq()
3023 (dma_addr_t *)skb->head); in service_ofldq()
3025 skb->dev = q->adap->port[0]; in service_ofldq()
3026 skb->destructor = deferred_unmap_destructor; in service_ofldq()
3028 last_desc = q->q.pidx + ndesc - 1; in service_ofldq()
3029 if (last_desc >= q->q.size) in service_ofldq()
3030 last_desc -= q->q.size; in service_ofldq()
3031 q->q.sdesc[last_desc].skb = skb; in service_ofldq()
3034 txq_advance(&q->q, ndesc); in service_ofldq()
3037 cxgb4_ring_tx_db(q->adap, &q->q, written); in service_ofldq()
3046 spin_lock(&q->sendq.lock); in service_ofldq()
3047 __skb_unlink(skb, &q->sendq); in service_ofldq()
3052 cxgb4_ring_tx_db(q->adap, &q->q, written); in service_ofldq()
3057 q->service_ofldq_running = false; in service_ofldq()
3061 * ofld_xmit - send a packet through an offload queue
3069 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ in ofld_xmit()
3070 spin_lock(&q->sendq.lock); in ofld_xmit()
3080 __skb_queue_tail(&q->sendq, skb); in ofld_xmit()
3081 if (q->sendq.qlen == 1) in ofld_xmit()
3084 spin_unlock(&q->sendq.lock); in ofld_xmit()
3089 * restart_ofldq - restart a suspended offload queue
3098 spin_lock(&q->sendq.lock); in restart_ofldq()
3099 q->full = 0; /* the queue actually is completely empty now */ in restart_ofldq()
3101 spin_unlock(&q->sendq.lock); in restart_ofldq()
3105 * skb_txq - return the Tx queue an offload packet should use
3109 * 1-15 in the packet's queue_mapping.
3113 return skb->queue_mapping >> 1; in skb_txq()
3117 * is_ctrl_pkt - return whether an offload packet is a control packet
3125 return skb->queue_mapping & 1; in is_ctrl_pkt()
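As skb_txq() and is_ctrl_pkt() above show, offload packets encode their routing in queue_mapping: bit 0 selects a control queue and bits 1-15 carry the queue index. A small sketch of encoding and decoding that field (the pack helper is illustrative; the driver itself only decodes here):

#include <assert.h>
#include <stdint.h>

static uint16_t ofld_pack_qmap(uint16_t txq, int is_ctrl)
{
	return (uint16_t)((txq << 1) | (is_ctrl ? 1 : 0));
}

static uint16_t ofld_txq_from_qmap(uint16_t qmap)	{ return qmap >> 1; }
static int      ofld_is_ctrl_pkt(uint16_t qmap)		{ return qmap & 1; }

int main(void)
{
	uint16_t qmap = ofld_pack_qmap(5, 1);

	assert(ofld_txq_from_qmap(qmap) == 5);
	assert(ofld_is_ctrl_pkt(qmap));
	return 0;
}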
3137 if (adap->tids.nsftids) in uld_send()
3139 return ctrl_xmit(&adap->sge.ctrlq[idx], skb); in uld_send()
3142 txq_info = adap->sge.uld_txq_info[tx_uld_type]; in uld_send()
3149 txq = &txq_info->uldtxq[idx]; in uld_send()
3154 * t4_ofld_send - send an offload packet
3160 * should be sent as regular or control, bits 1-15 select the queue.
3173 * cxgb4_ofld_send - send an offload packet
3190 int left = (void *)q->stat - pos; in inline_tx_header()
3198 memcpy(q->desc, src + left, length - left); in inline_tx_header()
3199 pos = (void *)q->desc + (length - left); in inline_tx_header()
3201 /* 0-pad to multiple of 16 */ in inline_tx_header()
3211 * ofld_xmit_direct - copy a WR into offload queue
3225 /* Use the lower limit as the cut-off */ in ofld_xmit_direct()
3235 if (!spin_trylock(&q->sendq.lock)) in ofld_xmit_direct()
3238 if (q->full || !skb_queue_empty(&q->sendq) || in ofld_xmit_direct()
3239 q->service_ofldq_running) { in ofld_xmit_direct()
3240 spin_unlock(&q->sendq.lock); in ofld_xmit_direct()
3244 credits = txq_avail(&q->q) - ndesc; in ofld_xmit_direct()
3245 pos = (u64 *)&q->q.desc[q->q.pidx]; in ofld_xmit_direct()
3247 /* ofldtxq_stop modifies WR header in-situ */ in ofld_xmit_direct()
3248 inline_tx_header(src, &q->q, pos, len); in ofld_xmit_direct()
3251 txq_advance(&q->q, ndesc); in ofld_xmit_direct()
3252 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); in ofld_xmit_direct()
3254 spin_unlock(&q->sendq.lock); in ofld_xmit_direct()
3269 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in cxgb4_immdata_send()
3275 txq = &txq_info->uldtxq[idx]; in cxgb4_immdata_send()
3284 * t4_crypto_send - send crypto packet
3290 * should be sent as regular or control, bits 1-15 select the queue.
3303 * cxgb4_crypto_send - send crypto packet
3322 __skb_fill_page_desc(skb, 0, gl->frags[0].page, in copy_frags()
3323 gl->frags[0].offset + offset, in copy_frags()
3324 gl->frags[0].size - offset); in copy_frags()
3325 skb_shinfo(skb)->nr_frags = gl->nfrags; in copy_frags()
3326 for (i = 1; i < gl->nfrags; i++) in copy_frags()
3327 __skb_fill_page_desc(skb, i, gl->frags[i].page, in copy_frags()
3328 gl->frags[i].offset, in copy_frags()
3329 gl->frags[i].size); in copy_frags()
3332 get_page(gl->frags[gl->nfrags - 1].page); in copy_frags()
3336 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
3354 if (gl->tot_len <= RX_COPY_THRES) { in cxgb4_pktgl_to_skb()
3355 skb = dev_alloc_skb(gl->tot_len); in cxgb4_pktgl_to_skb()
3358 __skb_put(skb, gl->tot_len); in cxgb4_pktgl_to_skb()
3359 skb_copy_to_linear_data(skb, gl->va, gl->tot_len); in cxgb4_pktgl_to_skb()
3365 skb_copy_to_linear_data(skb, gl->va, pull_len); in cxgb4_pktgl_to_skb()
3368 skb->len = gl->tot_len; in cxgb4_pktgl_to_skb()
3369 skb->data_len = skb->len - pull_len; in cxgb4_pktgl_to_skb()
3370 skb->truesize += skb->data_len; in cxgb4_pktgl_to_skb()
3377 * t4_pktgl_free - free a packet gather list
3388 for (p = gl->frags, n = gl->nfrags - 1; n--; p++) in t4_pktgl_free()
3389 put_page(p->page); in t4_pktgl_free()
3393 * Process an MPS trace packet. Give it an unused protocol number so it won't
3407 if (is_t4(adap->params.chip)) in handle_trace_pkt()
3413 skb->protocol = htons(0xffff); in handle_trace_pkt()
3414 skb->dev = adap->port[0]; in handle_trace_pkt()
3420 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
3425 * Every ingress queue entry has the 60-bit timestamp, convert that timestamp
3433 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2); in cxgb4_sgetim_to_hwtstamp()
3435 ns = div_u64(tmp, adap->params.vpd.cclk); in cxgb4_sgetim_to_hwtstamp()
3438 hwtstamps->hwtstamp = ns_to_ktime(ns); in cxgb4_sgetim_to_hwtstamp()
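The conversion above turns an SGE timestamp counted in core-clock cycles into nanoseconds: assuming cclk is the core clock in kHz, as the 10^6 scaling implies, one cycle lasts 10^6/cclk ns, and adding cclk/2 before the division rounds to the nearest nanosecond. A worked standalone version of that arithmetic (the 62,500 kHz clock is just an example value):

#include <assert.h>
#include <stdint.h>

/* Cycles of a core clock given in kHz -> nanoseconds, rounded to nearest. */
static uint64_t cycles_to_ns(uint64_t cycles, uint32_t cclk_khz)
{
	return (cycles * 1000 * 1000 + cclk_khz / 2) / cclk_khz;
}

int main(void)
{
	/* 62,500 kHz clock: one cycle is exactly 16 ns. */
	assert(cycles_to_ns(1, 62500) == 16);
	assert(cycles_to_ns(1000, 62500) == 16000);
	return 0;
}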
3444 struct adapter *adapter = rxq->rspq.adap; in do_gro()
3445 struct sge *s = &adapter->sge; in do_gro()
3450 skb = napi_get_frags(&rxq->rspq.napi); in do_gro()
3453 rxq->stats.rx_drops++; in do_gro()
3457 copy_frags(skb, gl, s->pktshift); in do_gro()
3459 skb->csum_level = 1; in do_gro()
3460 skb->len = gl->tot_len - s->pktshift; in do_gro()
3461 skb->data_len = skb->len; in do_gro()
3462 skb->truesize += skb->data_len; in do_gro()
3463 skb->ip_summed = CHECKSUM_UNNECESSARY; in do_gro()
3464 skb_record_rx_queue(skb, rxq->rspq.idx); in do_gro()
3465 pi = netdev_priv(skb->dev); in do_gro()
3466 if (pi->rxtstamp) in do_gro()
3468 gl->sgetstamp); in do_gro()
3469 if (rxq->rspq.netdev->features & NETIF_F_RXHASH) in do_gro()
3470 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, in do_gro()
3473 if (unlikely(pkt->vlan_ex)) { in do_gro()
3474 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); in do_gro()
3475 rxq->stats.vlan_ex++; in do_gro()
3477 ret = napi_gro_frags(&rxq->rspq.napi); in do_gro()
3479 rxq->stats.lro_pkts++; in do_gro()
3481 rxq->stats.lro_merged++; in do_gro()
3482 rxq->stats.pkts++; in do_gro()
3483 rxq->stats.rx_cso++; in do_gro()
3493 * t4_systim_to_hwstamp - read hardware time stamp
3508 cpl = (struct cpl_rx_mps_pkt *)skb->data; in t4_systim_to_hwstamp()
3509 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) & in t4_systim_to_hwstamp()
3513 data = skb->data + sizeof(*cpl); in t4_systim_to_hwstamp()
3515 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN; in t4_systim_to_hwstamp()
3516 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short)) in t4_systim_to_hwstamp()
3521 hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data)); in t4_systim_to_hwstamp()
3527 * t4_rx_hststamp - Recv PTP Event Message
3541 !is_t4(adapter->params.chip))) { in t4_rx_hststamp()
3545 rxq->stats.rx_drops++; in t4_rx_hststamp()
3553 * t4_tx_hststamp - Loopback PTP Transmit Event Message
3565 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) { in t4_tx_hststamp()
3574 * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
3592 u8 opcode = ((const struct rss_header *)rsp)->opcode; in t4_tx_completion_handler()
3593 struct port_info *pi = netdev_priv(rspq->netdev); in t4_tx_completion_handler()
3594 struct adapter *adapter = rspq->adap; in t4_tx_completion_handler()
3595 struct sge *s = &adapter->sge; in t4_tx_completion_handler()
3604 ((const struct cpl_fw4_msg *)rsp)->type == in t4_tx_completion_handler()
3607 opcode = ((const struct rss_header *)rsp)->opcode; in t4_tx_completion_handler()
3617 txq = &s->ethtxq[pi->first_qset + rspq->idx]; in t4_tx_completion_handler()
3630 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) { in t4_tx_completion_handler()
3634 WRITE_ONCE(txq->q.stat->cidx, egr->cidx); in t4_tx_completion_handler()
3637 t4_sge_eth_txq_egress_update(adapter, txq, -1); in t4_tx_completion_handler()
3642 struct adapter *adap = pi->adapter; in cxgb4_validate_lb_pkt()
3644 struct sge *s = &adap->sge; in cxgb4_validate_lb_pkt()
3649 netdev = adap->port[pi->port_id]; in cxgb4_validate_lb_pkt()
3650 lb = &pi->ethtool_lb; in cxgb4_validate_lb_pkt()
3651 data = si->va + s->pktshift; in cxgb4_validate_lb_pkt()
3654 if (!ether_addr_equal(data + i, netdev->dev_addr)) in cxgb4_validate_lb_pkt()
3655 return -1; in cxgb4_validate_lb_pkt()
3659 lb->result = -EIO; in cxgb4_validate_lb_pkt()
3661 complete(&lb->completion); in cxgb4_validate_lb_pkt()
3666 * t4_ethrx_handler - process an ingress ethernet packet
3680 struct adapter *adapter = q->adap; in t4_ethrx_handler()
3681 struct sge *s = &q->adap->sge; in t4_ethrx_handler()
3682 int cpl_trace_pkt = is_t4(q->adap->params.chip) ? in t4_ethrx_handler()
3688 pi = netdev_priv(q->netdev); in t4_ethrx_handler()
3699 return handle_trace_pkt(q->adap, si); in t4_ethrx_handler()
3703 if (q->adap->params.tp.rx_pkt_encap) { in t4_ethrx_handler()
3704 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); in t4_ethrx_handler()
3705 tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec)); in t4_ethrx_handler()
3707 err_vec = be16_to_cpu(pkt->err_vec); in t4_ethrx_handler()
3710 csum_ok = pkt->csum_calc && !err_vec && in t4_ethrx_handler()
3711 (q->netdev->features & NETIF_F_RXCSUM); in t4_ethrx_handler()
3714 rxq->stats.bad_rx_pkts++; in t4_ethrx_handler()
3716 if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) { in t4_ethrx_handler()
3722 if (((pkt->l2info & htonl(RXF_TCP_F)) || in t4_ethrx_handler()
3724 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { in t4_ethrx_handler()
3732 rxq->stats.rx_drops++; in t4_ethrx_handler()
3737 if (unlikely(pi->ptp_enable)) { in t4_ethrx_handler()
3743 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */ in t4_ethrx_handler()
3746 if (unlikely(pi->ptp_enable && !ret && in t4_ethrx_handler()
3747 (pkt->l2info & htonl(RXF_UDP_F)) && in t4_ethrx_handler()
3749 if (!t4_tx_hststamp(adapter, skb, q->netdev)) in t4_ethrx_handler()
3753 skb->protocol = eth_type_trans(skb, q->netdev); in t4_ethrx_handler()
3754 skb_record_rx_queue(skb, q->idx); in t4_ethrx_handler()
3755 if (skb->dev->features & NETIF_F_RXHASH) in t4_ethrx_handler()
3756 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, in t4_ethrx_handler()
3759 rxq->stats.pkts++; in t4_ethrx_handler()
3761 if (pi->rxtstamp) in t4_ethrx_handler()
3762 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb), in t4_ethrx_handler()
3763 si->sgetstamp); in t4_ethrx_handler()
3764 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { in t4_ethrx_handler()
3765 if (!pkt->ip_frag) { in t4_ethrx_handler()
3766 skb->ip_summed = CHECKSUM_UNNECESSARY; in t4_ethrx_handler()
3767 rxq->stats.rx_cso++; in t4_ethrx_handler()
3768 } else if (pkt->l2info & htonl(RXF_IP_F)) { in t4_ethrx_handler()
3769 __sum16 c = (__force __sum16)pkt->csum; in t4_ethrx_handler()
3770 skb->csum = csum_unfold(c); in t4_ethrx_handler()
3773 skb->ip_summed = CHECKSUM_UNNECESSARY; in t4_ethrx_handler()
3774 skb->csum_level = 1; in t4_ethrx_handler()
3776 skb->ip_summed = CHECKSUM_COMPLETE; in t4_ethrx_handler()
3778 rxq->stats.rx_cso++; in t4_ethrx_handler()
3786 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { in t4_ethrx_handler()
3787 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && in t4_ethrx_handler()
3788 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { in t4_ethrx_handler()
3789 if (q->adap->params.tp.rx_pkt_encap) in t4_ethrx_handler()
3795 skb->ip_summed = CHECKSUM_UNNECESSARY; in t4_ethrx_handler()
3803 if (unlikely(pkt->vlan_ex)) { in t4_ethrx_handler()
3804 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); in t4_ethrx_handler()
3805 rxq->stats.vlan_ex++; in t4_ethrx_handler()
3807 skb_mark_napi_id(skb, &q->napi); in t4_ethrx_handler()
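/*
 * Editor's sketch (not part of the driver): for IP fragments the handler
 * above passes the hardware's raw checksum up as CHECKSUM_COMPLETE, i.e.
 * what appears to be a 16-bit ones'-complement sum over the payload. A
 * minimal userspace model of such a sum, under the hypothetical name
 * demo_ones_complement_sum():
 */
#include <stdint.h>
#include <stddef.h>

static uint16_t demo_ones_complement_sum(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	if (len & 1)				/* odd trailing byte */
		sum += (uint32_t)buf[len - 1] << 8;
	while (sum >> 16)			/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}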
3813 * restore_rx_bufs - put back a packet's Rx buffers
3832 while (frags--) { in restore_rx_bufs()
3833 if (q->cidx == 0) in restore_rx_bufs()
3834 q->cidx = q->size - 1; in restore_rx_bufs()
3836 q->cidx--; in restore_rx_bufs()
3837 d = &q->sdesc[q->cidx]; in restore_rx_bufs()
3838 d->page = si->frags[frags].page; in restore_rx_bufs()
3839 d->dma_addr |= RX_UNMAPPED_BUF; in restore_rx_bufs()
3840 q->avail++; in restore_rx_bufs()
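/*
 * Editor's sketch (not part of the driver): restore_rx_bufs() above walks
 * the free-list consumer index backwards, wrapping from 0 to size - 1.
 * The wrap-around decrement can be modelled on its own as:
 */
static unsigned int demo_ring_index_dec(unsigned int idx, unsigned int size)
{
	return idx ? idx - 1 : size - 1;
}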
3845 * is_new_response - check if a response is newly written
3855 return (r->type_gen >> RSPD_GEN_S) == q->gen; in is_new_response()
3859 * rspq_next - advance to the next entry in a response queue
3866 q->cur_desc = (void *)q->cur_desc + q->iqe_len; in rspq_next()
3867 if (unlikely(++q->cidx == q->size)) { in rspq_next()
3868 q->cidx = 0; in rspq_next()
3869 q->gen ^= 1; in rspq_next()
3870 q->cur_desc = q->desc; in rspq_next()
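/*
 * Editor's sketch (not part of the driver): is_new_response() and
 * rspq_next() above use a generation bit to tell freshly written entries
 * from stale ones: the expected bit flips every time the consumer index
 * wraps, so only entries written since the last wrap compare equal.
 * struct demo_rspq and the demo_* helpers are hypothetical.
 */
#include <stdbool.h>

struct demo_rspq {
	unsigned int cidx;	/* consumer index */
	unsigned int size;	/* number of ring entries */
	unsigned int gen;	/* expected generation bit, 0 or 1 */
};

static bool demo_entry_is_new(const struct demo_rspq *q, unsigned int entry_gen)
{
	return entry_gen == q->gen;
}

static void demo_rspq_next(struct demo_rspq *q)
{
	if (++q->cidx == q->size) {
		q->cidx = 0;
		q->gen ^= 1;	/* wrapped: expect the flipped bit from now on */
	}
}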
3875 * process_responses - process responses from an SGE response queue
3893 struct adapter *adapter = q->adap; in process_responses()
3894 struct sge *s = &adapter->sge; in process_responses()
3897 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); in process_responses()
3899 if (q->flush_handler) in process_responses()
3900 q->flush_handler(q); in process_responses()
3905 rsp_type = RSPD_TYPE_G(rc->type_gen); in process_responses()
3910 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; in process_responses()
3913 if (likely(q->offset > 0)) { in process_responses()
3914 free_rx_bufs(q->adap, &rxq->fl, 1); in process_responses()
3915 q->offset = 0; in process_responses()
3923 rsd = &rxq->fl.sdesc[rxq->fl.cidx]; in process_responses()
3925 fp->page = rsd->page; in process_responses()
3926 fp->offset = q->offset; in process_responses()
3927 fp->size = min(bufsz, len); in process_responses()
3928 len -= fp->size; in process_responses()
3931 unmap_rx_buf(q->adap, &rxq->fl); in process_responses()
3935 be64_to_cpu(rc->last_flit)); in process_responses()
3940 dma_sync_single_for_cpu(q->adap->pdev_dev, in process_responses()
3942 fp->size, DMA_FROM_DEVICE); in process_responses()
3949 ret = q->handler(q, q->cur_desc, &si); in process_responses()
3951 q->offset += ALIGN(fp->size, s->fl_align); in process_responses()
3953 restore_rx_bufs(&si, &rxq->fl, frags); in process_responses()
3955 ret = q->handler(q, q->cur_desc, NULL); in process_responses()
3957 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); in process_responses()
3962 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX); in process_responses()
3967 budget_left--; in process_responses()
3970 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16) in process_responses()
3971 __refill_fl(q->adap, &rxq->fl); in process_responses()
3972 return budget - budget_left; in process_responses()
3976 * napi_rx_handler - the NAPI handler for Rx processing
3983 * is not a concern at all with MSI-X as non-data interrupts then have
3998 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); in napi_rx_handler()
4000 if (q->adaptive_rx) { in napi_rx_handler()
4005 timer_index = timer_index - 1; in napi_rx_handler()
4007 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); in napi_rx_handler()
4008 q->next_intr_params = in napi_rx_handler()
4011 params = q->next_intr_params; in napi_rx_handler()
4013 params = q->next_intr_params; in napi_rx_handler()
4014 q->next_intr_params = q->intr_params; in napi_rx_handler()
4024 if (unlikely(q->bar2_addr == NULL)) { in napi_rx_handler()
4025 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), in napi_rx_handler()
4026 val | INGRESSQID_V((u32)q->cntxt_id)); in napi_rx_handler()
4028 writel(val | INGRESSQID_V(q->bar2_qid), in napi_rx_handler()
4029 q->bar2_addr + SGE_UDB_GTS); in napi_rx_handler()
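/*
 * Editor's sketch (not part of the driver): the adaptive-Rx branch above
 * nudges the interrupt holdoff timer index up after a busy poll round and
 * down after a quiet one, clamped to the timers the SGE provides. The
 * demo_* names and the busy-threshold parameter are illustrative only.
 */
#define DEMO_NTIMERS 6

static int demo_next_timer_idx(int cur_idx, int work_done, int busy_thresh)
{
	int idx = work_done > busy_thresh ? cur_idx + 1 : cur_idx - 1;

	if (idx < 0)
		idx = 0;
	else if (idx > DEMO_NTIMERS - 1)
		idx = DEMO_NTIMERS - 1;
	return idx;
}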
4041 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_restart()
4042 pktcount = eosw_txq->cidx - eosw_txq->last_cidx; in cxgb4_ethofld_restart()
4044 pktcount += eosw_txq->ndesc; in cxgb4_ethofld_restart()
4047 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev), in cxgb4_ethofld_restart()
4049 eosw_txq->inuse -= pktcount; in cxgb4_ethofld_restart()
4055 ethofld_xmit(eosw_txq->netdev, eosw_txq); in cxgb4_ethofld_restart()
4056 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_restart()
4059 /* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
4070 u8 opcode = ((const struct rss_header *)rsp)->opcode; in cxgb4_ethofld_rx_handler()
4085 eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) - in cxgb4_ethofld_rx_handler()
4086 q->adap->tids.eotid_base; in cxgb4_ethofld_rx_handler()
4087 entry = cxgb4_lookup_eotid(&q->adap->tids, eotid); in cxgb4_ethofld_rx_handler()
4091 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_rx_handler()
4095 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4096 credits = cpl->credits; in cxgb4_ethofld_rx_handler()
4098 skb = eosw_txq->desc[eosw_txq->cidx].skb; in cxgb4_ethofld_rx_handler()
4102 if (unlikely((eosw_txq->state == in cxgb4_ethofld_rx_handler()
4104 eosw_txq->state == in cxgb4_ethofld_rx_handler()
4106 eosw_txq->cidx == eosw_txq->flowc_idx)) { in cxgb4_ethofld_rx_handler()
4107 flits = DIV_ROUND_UP(skb->len, 8); in cxgb4_ethofld_rx_handler()
4108 if (eosw_txq->state == in cxgb4_ethofld_rx_handler()
4110 eosw_txq->state = CXGB4_EO_STATE_ACTIVE; in cxgb4_ethofld_rx_handler()
4112 eosw_txq->state = CXGB4_EO_STATE_CLOSED; in cxgb4_ethofld_rx_handler()
4113 complete(&eosw_txq->completion); in cxgb4_ethofld_rx_handler()
4115 hdr_len = eth_get_headlen(eosw_txq->netdev, in cxgb4_ethofld_rx_handler()
4116 skb->data, in cxgb4_ethofld_rx_handler()
4118 flits = ethofld_calc_tx_flits(q->adap, skb, in cxgb4_ethofld_rx_handler()
4121 eosw_txq_advance_index(&eosw_txq->cidx, 1, in cxgb4_ethofld_rx_handler()
4122 eosw_txq->ndesc); in cxgb4_ethofld_rx_handler()
4124 credits -= wrlen16; in cxgb4_ethofld_rx_handler()
4127 eosw_txq->cred += cpl->credits; in cxgb4_ethofld_rx_handler()
4128 eosw_txq->ncompl--; in cxgb4_ethofld_rx_handler()
4130 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4135 tasklet_schedule(&eosw_txq->qresume_tsk); in cxgb4_ethofld_rx_handler()
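/*
 * Editor's sketch (not part of the driver): the completion path above
 * converts each pending work request into 8-byte flits and subtracts its
 * cost, in what appear to be 16-byte credit units (wrlen16), from the
 * credits returned by the firmware ACK. The helpers below only illustrate
 * that unit conversion; their names are hypothetical.
 */
static unsigned int demo_len_to_flits(unsigned int len_bytes)
{
	return (len_bytes + 7) / 8;		/* DIV_ROUND_UP(len, 8) */
}

static unsigned int demo_flits_to_credits16(unsigned int flits)
{
	return (flits * 8 + 15) / 16;		/* flits -> 16-byte credit units */
}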
4143 * The MSI-X interrupt handler for an SGE response queue.
4149 napi_schedule(&q->napi); in t4_sge_intr_msix()
4161 struct sge_rspq *q = &adap->sge.intrq; in process_intrq()
4164 spin_lock(&adap->sge.intrq_lock); in process_intrq()
4166 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); in process_intrq()
4171 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) { in process_intrq()
4172 unsigned int qid = ntohl(rc->pldbuflen_qid); in process_intrq()
4174 qid -= adap->sge.ingr_start; in process_intrq()
4175 napi_schedule(&adap->sge.ingr_map[qid]->napi); in process_intrq()
4181 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); in process_intrq()
4186 if (unlikely(q->bar2_addr == NULL)) { in process_intrq()
4188 val | INGRESSQID_V(q->cntxt_id)); in process_intrq()
4190 writel(val | INGRESSQID_V(q->bar2_qid), in process_intrq()
4191 q->bar2_addr + SGE_UDB_GTS); in process_intrq()
4194 spin_unlock(&adap->sge.intrq_lock); in process_intrq()
4206 if (adap->flags & CXGB4_MASTER_PF) in t4_intr_msi()
4222 if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) | in t4_intr_intx()
4229 * t4_intr_handler - select the top-level interrupt handler
4232 * Selects the top-level interrupt handler based on the type of interrupts
4233 * (MSI-X, MSI, or INTx).
4237 if (adap->flags & CXGB4_USING_MSIX) in t4_intr_handler()
4239 if (adap->flags & CXGB4_USING_MSI) in t4_intr_handler()
4249 struct sge *s = &adap->sge; in sge_rx_timer_cb()
4251 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) in sge_rx_timer_cb()
4252 for (m = s->starving_fl[i]; m; m &= m - 1) { in sge_rx_timer_cb()
4255 struct sge_fl *fl = s->egr_map[id]; in sge_rx_timer_cb()
4257 clear_bit(id, s->starving_fl); in sge_rx_timer_cb()
4262 if (napi_reschedule(&rxq->rspq.napi)) in sge_rx_timer_cb()
4263 fl->starving++; in sge_rx_timer_cb()
4265 set_bit(id, s->starving_fl); in sge_rx_timer_cb()
4272 if (!(adap->flags & CXGB4_MASTER_PF)) in sge_rx_timer_cb()
4275 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); in sge_rx_timer_cb()
4278 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); in sge_rx_timer_cb()
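/*
 * Editor's sketch (not part of the driver): the "m &= m - 1" loop in
 * sge_rx_timer_cb() above visits only the set bits of each starving-FL
 * bitmap word, clearing the lowest set bit on every pass. Standalone
 * illustration (uses the GCC/Clang __builtin_ctzl intrinsic):
 */
#include <stdio.h>

static void demo_for_each_set_bit(unsigned long word)
{
	while (word) {
		printf("bit %d is set\n", __builtin_ctzl(word));
		word &= word - 1;	/* clear the lowest set bit */
	}
}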
4284 struct sge *s = &adap->sge; in sge_tx_timer_cb()
4288 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) in sge_tx_timer_cb()
4289 for (m = s->txq_maperr[i]; m; m &= m - 1) { in sge_tx_timer_cb()
4291 struct sge_uld_txq *txq = s->egr_map[id]; in sge_tx_timer_cb()
4293 clear_bit(id, s->txq_maperr); in sge_tx_timer_cb()
4294 tasklet_schedule(&txq->qresume_tsk); in sge_tx_timer_cb()
4297 if (!is_t4(adap->params.chip)) { in sge_tx_timer_cb()
4298 struct sge_eth_txq *q = &s->ptptxq; in sge_tx_timer_cb()
4301 spin_lock(&adap->ptp_lock); in sge_tx_timer_cb()
4302 avail = reclaimable(&q->q); in sge_tx_timer_cb()
4305 free_tx_desc(adap, &q->q, avail, false); in sge_tx_timer_cb()
4306 q->q.in_use -= avail; in sge_tx_timer_cb()
4308 spin_unlock(&adap->ptp_lock); in sge_tx_timer_cb()
4312 i = s->ethtxq_rover; in sge_tx_timer_cb()
4314 budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i], in sge_tx_timer_cb()
4319 if (++i >= s->ethqsets) in sge_tx_timer_cb()
4321 } while (i != s->ethtxq_rover); in sge_tx_timer_cb()
4322 s->ethtxq_rover = i; in sge_tx_timer_cb()
4336 mod_timer(&s->tx_timer, jiffies + period); in sge_tx_timer_cb()
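/*
 * Editor's sketch (not part of the driver): sge_tx_timer_cb() above spreads
 * a fixed per-tick budget over the Ethernet Tx queues, resuming at the
 * queue where the previous tick stopped (the "rover") so every queue is
 * eventually serviced. demo_service() stands in for the per-queue work and
 * is hypothetical.
 */
static int demo_service(int qidx)
{
	(void)qidx;
	return 1;			/* pretend one unit of budget was used */
}

static void demo_rover_round_robin(int nqueues, int *rover, int budget)
{
	int i = *rover;

	do {
		budget -= demo_service(i);
		if (budget <= 0)
			break;
		if (++i >= nqueues)
			i = 0;
	} while (i != *rover);

	*rover = i;			/* next tick starts here */
}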
4340 * bar2_address - return the BAR2 address for an SGE Queue's Registers
4365 return adapter->bar2 + bar2_qoffset; in bar2_address()
4368 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
4369 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
4378 struct sge *s = &adap->sge; in t4_sge_alloc_rxq()
4380 int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING); in t4_sge_alloc_rxq()
4383 iq->size = roundup(iq->size, 16); in t4_sge_alloc_rxq()
4385 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, in t4_sge_alloc_rxq()
4386 &iq->phys_addr, NULL, 0, in t4_sge_alloc_rxq()
4387 dev_to_node(adap->pdev_dev)); in t4_sge_alloc_rxq()
4388 if (!iq->desc) in t4_sge_alloc_rxq()
4389 return -ENOMEM; in t4_sge_alloc_rxq()
4394 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0)); in t4_sge_alloc_rxq()
4398 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | in t4_sge_alloc_rxq()
4402 -intr_idx - 1)); in t4_sge_alloc_rxq()
4403 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | in t4_sge_alloc_rxq()
4405 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | in t4_sge_alloc_rxq()
4406 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); in t4_sge_alloc_rxq()
4407 c.iqsize = htons(iq->size); in t4_sge_alloc_rxq()
4408 c.iqaddr = cpu_to_be64(iq->phys_addr); in t4_sge_alloc_rxq()
4416 CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_rxq()
4423 * (fl_starve_thres - 1). in t4_sge_alloc_rxq()
4425 if (fl->size < s->fl_starve_thres - 1 + 2 * 8) in t4_sge_alloc_rxq()
4426 fl->size = s->fl_starve_thres - 1 + 2 * 8; in t4_sge_alloc_rxq()
4427 fl->size = roundup(fl->size, 8); in t4_sge_alloc_rxq()
4428 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), in t4_sge_alloc_rxq()
4429 sizeof(struct rx_sw_desc), &fl->addr, in t4_sge_alloc_rxq()
4430 &fl->sdesc, s->stat_len, in t4_sge_alloc_rxq()
4431 dev_to_node(adap->pdev_dev)); in t4_sge_alloc_rxq()
4432 if (!fl->desc) in t4_sge_alloc_rxq()
4435 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_rxq()
4449 * Free List pointers are provided, so we use a 128-byte Fetch in t4_sge_alloc_rxq()
4451 * the smaller 64-byte value there). in t4_sge_alloc_rxq()
4461 c.fl0addr = cpu_to_be64(fl->addr); in t4_sge_alloc_rxq()
4464 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_rxq()
4468 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); in t4_sge_alloc_rxq()
4469 iq->cur_desc = iq->desc; in t4_sge_alloc_rxq()
4470 iq->cidx = 0; in t4_sge_alloc_rxq()
4471 iq->gen = 1; in t4_sge_alloc_rxq()
4472 iq->next_intr_params = iq->intr_params; in t4_sge_alloc_rxq()
4473 iq->cntxt_id = ntohs(c.iqid); in t4_sge_alloc_rxq()
4474 iq->abs_id = ntohs(c.physiqid); in t4_sge_alloc_rxq()
4475 iq->bar2_addr = bar2_address(adap, in t4_sge_alloc_rxq()
4476 iq->cntxt_id, in t4_sge_alloc_rxq()
4478 &iq->bar2_qid); in t4_sge_alloc_rxq()
4479 iq->size--; /* subtract status entry */ in t4_sge_alloc_rxq()
4480 iq->netdev = dev; in t4_sge_alloc_rxq()
4481 iq->handler = hnd; in t4_sge_alloc_rxq()
4482 iq->flush_handler = flush_hnd; in t4_sge_alloc_rxq()
4484 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr)); in t4_sge_alloc_rxq()
4485 skb_queue_head_init(&iq->lro_mgr.lroq); in t4_sge_alloc_rxq()
4487 /* set offset to -1 to distinguish ingress queues without FL */ in t4_sge_alloc_rxq()
4488 iq->offset = fl ? 0 : -1; in t4_sge_alloc_rxq()
4490 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; in t4_sge_alloc_rxq()
4493 fl->cntxt_id = ntohs(c.fl0id); in t4_sge_alloc_rxq()
4494 fl->avail = fl->pend_cred = 0; in t4_sge_alloc_rxq()
4495 fl->pidx = fl->cidx = 0; in t4_sge_alloc_rxq()
4496 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; in t4_sge_alloc_rxq()
4497 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; in t4_sge_alloc_rxq()
4502 fl->bar2_addr = bar2_address(adap, in t4_sge_alloc_rxq()
4503 fl->cntxt_id, in t4_sge_alloc_rxq()
4505 &fl->bar2_qid); in t4_sge_alloc_rxq()
4517 if (!is_t4(adap->params.chip) && cong >= 0) { in t4_sge_alloc_rxq()
4520 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; in t4_sge_alloc_rxq()
4524 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id)); in t4_sge_alloc_rxq()
4536 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in t4_sge_alloc_rxq()
4539 dev_warn(adap->pdev_dev, "Failed to set Congestion" in t4_sge_alloc_rxq()
4541 iq->cntxt_id, -ret); in t4_sge_alloc_rxq()
4547 ret = -ENOMEM; in t4_sge_alloc_rxq()
4549 if (iq->desc) { in t4_sge_alloc_rxq()
4550 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, in t4_sge_alloc_rxq()
4551 iq->desc, iq->phys_addr); in t4_sge_alloc_rxq()
4552 iq->desc = NULL; in t4_sge_alloc_rxq()
4554 if (fl && fl->desc) { in t4_sge_alloc_rxq()
4555 kfree(fl->sdesc); in t4_sge_alloc_rxq()
4556 fl->sdesc = NULL; in t4_sge_alloc_rxq()
4557 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), in t4_sge_alloc_rxq()
4558 fl->desc, fl->addr); in t4_sge_alloc_rxq()
4559 fl->desc = NULL; in t4_sge_alloc_rxq()
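/*
 * Editor's sketch (not part of the driver): the free-list sizing in
 * t4_sge_alloc_rxq() above enforces a floor of (fl_starve_thres - 1) plus
 * two 8-buffer groups and then rounds up to a multiple of 8, matching the
 * fl->size / 8 conversion used for flsz a few lines further up.
 * demo_fl_size() is an illustrative helper only.
 */
static unsigned int demo_fl_size(unsigned int requested,
				 unsigned int starve_thres)
{
	unsigned int floor = starve_thres - 1 + 2 * 8;
	unsigned int size = requested < floor ? floor : requested;

	return (size + 7) & ~7u;	/* roundup(size, 8) */
}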
4566 q->cntxt_id = id; in init_txq()
4567 q->bar2_addr = bar2_address(adap, in init_txq()
4568 q->cntxt_id, in init_txq()
4570 &q->bar2_qid); in init_txq()
4571 q->in_use = 0; in init_txq()
4572 q->cidx = q->pidx = 0; in init_txq()
4573 q->stops = q->restarts = 0; in init_txq()
4574 q->stat = (void *)&q->desc[q->size]; in init_txq()
4575 spin_lock_init(&q->db_lock); in init_txq()
4576 adap->sge.egr_map[id - adap->sge.egr_start] = q; in init_txq()
4580 * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
4592 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_eth_txq()
4594 struct sge *s = &adap->sge; in t4_sge_alloc_eth_txq()
4599 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_eth_txq()
4601 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_eth_txq()
4603 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, in t4_sge_alloc_eth_txq()
4605 if (!txq->q.desc) in t4_sge_alloc_eth_txq()
4606 return -ENOMEM; in t4_sge_alloc_eth_txq()
4611 FW_EQ_ETH_CMD_PFN_V(adap->pf) | in t4_sge_alloc_eth_txq()
4625 FW_EQ_ETH_CMD_VIID_V(pi->viid)); in t4_sge_alloc_eth_txq()
4631 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | in t4_sge_alloc_eth_txq()
4644 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4648 * ethtool -C tx-usecs {Timer Val} command. Note that the SGE in t4_sge_alloc_eth_txq()
4655 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix)); in t4_sge_alloc_eth_txq()
4657 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_eth_txq()
4659 kfree(txq->q.sdesc); in t4_sge_alloc_eth_txq()
4660 txq->q.sdesc = NULL; in t4_sge_alloc_eth_txq()
4661 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_eth_txq()
4663 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4664 txq->q.desc = NULL; in t4_sge_alloc_eth_txq()
4668 txq->q.q_type = CXGB4_TXQ_ETH; in t4_sge_alloc_eth_txq()
4669 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_eth_txq()
4670 txq->txq = netdevq; in t4_sge_alloc_eth_txq()
4671 txq->tso = 0; in t4_sge_alloc_eth_txq()
4672 txq->uso = 0; in t4_sge_alloc_eth_txq()
4673 txq->tx_cso = 0; in t4_sge_alloc_eth_txq()
4674 txq->vlan_ins = 0; in t4_sge_alloc_eth_txq()
4675 txq->mapping_err = 0; in t4_sge_alloc_eth_txq()
4676 txq->dbqt = dbqt; in t4_sge_alloc_eth_txq()
4685 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_ctrl_txq()
4687 struct sge *s = &adap->sge; in t4_sge_alloc_ctrl_txq()
4692 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ctrl_txq()
4694 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, in t4_sge_alloc_ctrl_txq()
4695 sizeof(struct tx_desc), 0, &txq->q.phys_addr, in t4_sge_alloc_ctrl_txq()
4696 NULL, 0, dev_to_node(adap->pdev_dev)); in t4_sge_alloc_ctrl_txq()
4697 if (!txq->q.desc) in t4_sge_alloc_ctrl_txq()
4698 return -ENOMEM; in t4_sge_alloc_ctrl_txq()
4702 FW_EQ_CTRL_CMD_PFN_V(adap->pf) | in t4_sge_alloc_ctrl_txq()
4710 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | in t4_sge_alloc_ctrl_txq()
4719 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4721 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_ctrl_txq()
4723 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_ctrl_txq()
4725 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4726 txq->q.desc = NULL; in t4_sge_alloc_ctrl_txq()
4730 txq->q.q_type = CXGB4_TXQ_CTRL; in t4_sge_alloc_ctrl_txq()
4731 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); in t4_sge_alloc_ctrl_txq()
4732 txq->adap = adap; in t4_sge_alloc_ctrl_txq()
4733 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_ctrl_txq()
4734 tasklet_setup(&txq->qresume_tsk, restart_ctrlq); in t4_sge_alloc_ctrl_txq()
4735 txq->full = 0; in t4_sge_alloc_ctrl_txq()
4748 return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in t4_sge_mod_ctrl_txq()
4754 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_ofld_txq()
4756 struct sge *s = &adap->sge; in t4_sge_alloc_ofld_txq()
4762 nentries = q->size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ofld_txq()
4763 q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc), in t4_sge_alloc_ofld_txq()
4764 sizeof(struct tx_sw_desc), &q->phys_addr, in t4_sge_alloc_ofld_txq()
4765 &q->sdesc, s->stat_len, NUMA_NO_NODE); in t4_sge_alloc_ofld_txq()
4766 if (!q->desc) in t4_sge_alloc_ofld_txq()
4767 return -ENOMEM; in t4_sge_alloc_ofld_txq()
4777 FW_EQ_OFLD_CMD_PFN_V(adap->pf) | in t4_sge_alloc_ofld_txq()
4783 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | in t4_sge_alloc_ofld_txq()
4790 c.eqaddr = cpu_to_be64(q->phys_addr); in t4_sge_alloc_ofld_txq()
4792 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_ofld_txq()
4794 kfree(q->sdesc); in t4_sge_alloc_ofld_txq()
4795 q->sdesc = NULL; in t4_sge_alloc_ofld_txq()
4796 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_ofld_txq()
4798 q->desc, q->phys_addr); in t4_sge_alloc_ofld_txq()
4799 q->desc = NULL; in t4_sge_alloc_ofld_txq()
4817 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid); in t4_sge_alloc_uld_txq()
4821 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_uld_txq()
4822 txq->adap = adap; in t4_sge_alloc_uld_txq()
4823 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_uld_txq()
4824 tasklet_setup(&txq->qresume_tsk, restart_ofldq); in t4_sge_alloc_uld_txq()
4825 txq->full = 0; in t4_sge_alloc_uld_txq()
4826 txq->mapping_err = 0; in t4_sge_alloc_uld_txq()
4835 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid); in t4_sge_alloc_ethofld_txq()
4839 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_ethofld_txq()
4840 spin_lock_init(&txq->lock); in t4_sge_alloc_ethofld_txq()
4841 txq->adap = adap; in t4_sge_alloc_ethofld_txq()
4842 txq->tso = 0; in t4_sge_alloc_ethofld_txq()
4843 txq->uso = 0; in t4_sge_alloc_ethofld_txq()
4844 txq->tx_cso = 0; in t4_sge_alloc_ethofld_txq()
4845 txq->vlan_ins = 0; in t4_sge_alloc_ethofld_txq()
4846 txq->mapping_err = 0; in t4_sge_alloc_ethofld_txq()
4852 struct sge *s = &adap->sge; in free_txq()
4854 dma_free_coherent(adap->pdev_dev, in free_txq()
4855 q->size * sizeof(struct tx_desc) + s->stat_len, in free_txq()
4856 q->desc, q->phys_addr); in free_txq()
4857 q->cntxt_id = 0; in free_txq()
4858 q->sdesc = NULL; in free_txq()
4859 q->desc = NULL; in free_txq()
4865 struct sge *s = &adap->sge; in free_rspq_fl()
4866 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; in free_rspq_fl()
4868 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; in free_rspq_fl()
4869 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, in free_rspq_fl()
4870 rq->cntxt_id, fl_id, 0xffff); in free_rspq_fl()
4871 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, in free_rspq_fl()
4872 rq->desc, rq->phys_addr); in free_rspq_fl()
4873 netif_napi_del(&rq->napi); in free_rspq_fl()
4874 rq->netdev = NULL; in free_rspq_fl()
4875 rq->cntxt_id = rq->abs_id = 0; in free_rspq_fl()
4876 rq->desc = NULL; in free_rspq_fl()
4879 free_rx_bufs(adap, fl, fl->avail); in free_rspq_fl()
4880 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, in free_rspq_fl()
4881 fl->desc, fl->addr); in free_rspq_fl()
4882 kfree(fl->sdesc); in free_rspq_fl()
4883 fl->sdesc = NULL; in free_rspq_fl()
4884 fl->cntxt_id = 0; in free_rspq_fl()
4885 fl->desc = NULL; in free_rspq_fl()
4890 * t4_free_ofld_rxqs - free a block of consecutive Rx queues
4899 for ( ; n; n--, q++) in t4_free_ofld_rxqs()
4900 if (q->rspq.desc) in t4_free_ofld_rxqs()
4901 free_rspq_fl(adap, &q->rspq, in t4_free_ofld_rxqs()
4902 q->fl.size ? &q->fl : NULL); in t4_free_ofld_rxqs()
4907 if (txq->q.desc) { in t4_sge_free_ethofld_txq()
4908 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, in t4_sge_free_ethofld_txq()
4909 txq->q.cntxt_id); in t4_sge_free_ethofld_txq()
4910 free_tx_desc(adap, &txq->q, txq->q.in_use, false); in t4_sge_free_ethofld_txq()
4911 kfree(txq->q.sdesc); in t4_sge_free_ethofld_txq()
4912 free_txq(adap, &txq->q); in t4_sge_free_ethofld_txq()
4917 * t4_free_sge_resources - free SGE resources
4929 for (i = 0; i < adap->sge.ethqsets; i++) { in t4_free_sge_resources()
4930 eq = &adap->sge.ethrxq[i]; in t4_free_sge_resources()
4931 if (eq->rspq.desc) in t4_free_sge_resources()
4932 t4_iq_stop(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4934 eq->rspq.cntxt_id, in t4_free_sge_resources()
4935 eq->fl.size ? eq->fl.cntxt_id : 0xffff, in t4_free_sge_resources()
4940 for (i = 0; i < adap->sge.ethqsets; i++) { in t4_free_sge_resources()
4941 eq = &adap->sge.ethrxq[i]; in t4_free_sge_resources()
4942 if (eq->rspq.desc) in t4_free_sge_resources()
4943 free_rspq_fl(adap, &eq->rspq, in t4_free_sge_resources()
4944 eq->fl.size ? &eq->fl : NULL); in t4_free_sge_resources()
4945 if (eq->msix) { in t4_free_sge_resources()
4946 cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx); in t4_free_sge_resources()
4947 eq->msix = NULL; in t4_free_sge_resources()
4950 etq = &adap->sge.ethtxq[i]; in t4_free_sge_resources()
4951 if (etq->q.desc) { in t4_free_sge_resources()
4952 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4953 etq->q.cntxt_id); in t4_free_sge_resources()
4954 __netif_tx_lock_bh(etq->txq); in t4_free_sge_resources()
4955 free_tx_desc(adap, &etq->q, etq->q.in_use, true); in t4_free_sge_resources()
4956 __netif_tx_unlock_bh(etq->txq); in t4_free_sge_resources()
4957 kfree(etq->q.sdesc); in t4_free_sge_resources()
4958 free_txq(adap, &etq->q); in t4_free_sge_resources()
4963 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { in t4_free_sge_resources()
4964 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; in t4_free_sge_resources()
4966 if (cq->q.desc) { in t4_free_sge_resources()
4967 tasklet_kill(&cq->qresume_tsk); in t4_free_sge_resources()
4968 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4969 cq->q.cntxt_id); in t4_free_sge_resources()
4970 __skb_queue_purge(&cq->sendq); in t4_free_sge_resources()
4971 free_txq(adap, &cq->q); in t4_free_sge_resources()
4975 if (adap->sge.fw_evtq.desc) { in t4_free_sge_resources()
4976 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); in t4_free_sge_resources()
4977 if (adap->sge.fwevtq_msix_idx >= 0) in t4_free_sge_resources()
4979 adap->sge.fwevtq_msix_idx); in t4_free_sge_resources()
4982 if (adap->sge.nd_msix_idx >= 0) in t4_free_sge_resources()
4983 cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx); in t4_free_sge_resources()
4985 if (adap->sge.intrq.desc) in t4_free_sge_resources()
4986 free_rspq_fl(adap, &adap->sge.intrq, NULL); in t4_free_sge_resources()
4988 if (!is_t4(adap->params.chip)) { in t4_free_sge_resources()
4989 etq = &adap->sge.ptptxq; in t4_free_sge_resources()
4990 if (etq->q.desc) { in t4_free_sge_resources()
4991 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4992 etq->q.cntxt_id); in t4_free_sge_resources()
4993 spin_lock_bh(&adap->ptp_lock); in t4_free_sge_resources()
4994 free_tx_desc(adap, &etq->q, etq->q.in_use, true); in t4_free_sge_resources()
4995 spin_unlock_bh(&adap->ptp_lock); in t4_free_sge_resources()
4996 kfree(etq->q.sdesc); in t4_free_sge_resources()
4997 free_txq(adap, &etq->q); in t4_free_sge_resources()
5002 memset(adap->sge.egr_map, 0, in t4_free_sge_resources()
5003 adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); in t4_free_sge_resources()
5008 adap->sge.ethtxq_rover = 0; in t4_sge_start()
5009 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); in t4_sge_start()
5010 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); in t4_sge_start()
5014 * t4_sge_stop - disable SGE operation
5024 struct sge *s = &adap->sge; in t4_sge_stop()
5026 if (s->rx_timer.function) in t4_sge_stop()
5027 del_timer_sync(&s->rx_timer); in t4_sge_stop()
5028 if (s->tx_timer.function) in t4_sge_stop()
5029 del_timer_sync(&s->tx_timer); in t4_sge_stop()
5034 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in t4_sge_stop()
5036 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop()
5038 for_each_ofldtxq(&adap->sge, i) { in t4_sge_stop()
5039 if (txq->q.desc) in t4_sge_stop()
5040 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
5048 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; in t4_sge_stop()
5050 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop()
5052 for_each_ofldtxq(&adap->sge, i) { in t4_sge_stop()
5053 if (txq->q.desc) in t4_sge_stop()
5054 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
5059 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { in t4_sge_stop()
5060 struct sge_ctrl_txq *cq = &s->ctrlq[i]; in t4_sge_stop()
5062 if (cq->q.desc) in t4_sge_stop()
5063 tasklet_kill(&cq->qresume_tsk); in t4_sge_stop()
5068 * t4_sge_init_soft - grab core SGE values needed by SGE code
5077 struct sge *s = &adap->sge; in t4_sge_init_soft()
5089 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); in t4_sge_init_soft()
5090 return -EINVAL; in t4_sge_init_soft()
5121 (fl_large_pg & (fl_large_pg-1)) != 0) { in t4_sge_init_soft()
5122 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", in t4_sge_init_soft()
5124 return -EINVAL; in t4_sge_init_soft()
5127 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; in t4_sge_init_soft()
5131 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", in t4_sge_init_soft()
5133 return -EINVAL; in t4_sge_init_soft()
5143 s->timer_val[0] = core_ticks_to_us(adap, in t4_sge_init_soft()
5145 s->timer_val[1] = core_ticks_to_us(adap, in t4_sge_init_soft()
5147 s->timer_val[2] = core_ticks_to_us(adap, in t4_sge_init_soft()
5149 s->timer_val[3] = core_ticks_to_us(adap, in t4_sge_init_soft()
5151 s->timer_val[4] = core_ticks_to_us(adap, in t4_sge_init_soft()
5153 s->timer_val[5] = core_ticks_to_us(adap, in t4_sge_init_soft()
5157 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); in t4_sge_init_soft()
5158 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); in t4_sge_init_soft()
5159 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); in t4_sge_init_soft()
5160 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); in t4_sge_init_soft()
5166 * t4_sge_init - initialize SGE
5169 * Perform low-level SGE code initialization needed every time after a
5174 struct sge *s = &adap->sge; in t4_sge_init()
5183 s->pktshift = PKTSHIFT_G(sge_control); in t4_sge_init()
5184 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; in t4_sge_init()
5186 s->fl_align = t4_fl_pkt_align(adap); in t4_sge_init()
5204 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { in t4_sge_init()
5215 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n", in t4_sge_init()
5216 CHELSIO_CHIP_VERSION(adap->params.chip)); in t4_sge_init()
5217 return -EINVAL; in t4_sge_init()
5219 s->fl_starve_thres = 2*egress_threshold + 1; in t4_sge_init()
5221 t4_idma_monitor_init(adap, &s->idma_monitor); in t4_sge_init()
5226 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0); in t4_sge_init()
5227 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0); in t4_sge_init()
5229 spin_lock_init(&s->intrq_lock); in t4_sge_init()