Lines Matching +full:tx +full:- +full:termination +full:- +full:fix

4  * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
16 * - Redistributions of source code must retain the above
20 * - Redistributions in binary form must reproduce the above
40 #include <linux/dma-mapping.h>
68 # define FL_PG_ORDER (16 - PAGE_SHIFT)
82 * Max number of Tx descriptors we clean up at a time. Should be modest as
103 * Period of the Tx queue check timer.
108 * Max number of Tx descriptors to be reclaimed by the Tx timer.
115 #define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
118 * Suspension threshold for non-Ethernet Tx queues. We require enough room
124 * Max Tx descriptor space we allow for an Ethernet packet to be inlined
130 * Max size of a WR sent through a control Tx queue.
141 * buffer). We currently only support two sizes for 1500- and 9000-byte MTUs.
151 struct sge *s = &adapter->sge; in fl_mtu_bufsize()
153 return ALIGN(s->pktshift + ETH_HLEN + VLAN_HLEN + mtu, s->fl_align); in fl_mtu_bufsize()
165 * Buffers which we allocate have the bottom 5 bits free (0) -- i.e. are
166 * 32-byte or a power of 2 greater in alignment. Since the SGE's minimal
193 return d->dma_addr & ~(dma_addr_t)RX_BUF_FLAGS; in get_buf_addr()
198 return !(d->dma_addr & RX_UNMAPPED_BUF); in is_buf_mapped()
202 * txq_avail - return the number of available slots in a Tx queue
203 * @q: the Tx queue
205 * Returns the number of descriptors in a Tx queue available to write new
210 return q->size - 1 - q->in_use; in txq_avail()
214 * fl_cap - return the capacity of a free-buffer list
217 * Returns the capacity of a free-buffer list. The capacity is less than
223 return fl->size - 8; /* 1 descriptor = 8 buffers */ in fl_cap()
227 * fl_starving - return whether a Free List is starving.
238 const struct sge *s = &adapter->sge; in fl_starving()
240 return fl->avail - fl->pend_cred <= s->fl_starve_thres; in fl_starving()
249 *addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); in cxgb4_map_skb()
254 end = &si->frags[si->nr_frags]; in cxgb4_map_skb()
256 for (fp = si->frags; fp < end; fp++) { in cxgb4_map_skb()
265 while (fp-- > si->frags) in cxgb4_map_skb()
266 dma_unmap_page(dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); in cxgb4_map_skb()
268 dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE); in cxgb4_map_skb()
270 return -ENOMEM; in cxgb4_map_skb()
283 end = &si->frags[si->nr_frags]; in unmap_skb()
284 for (fp = si->frags; fp < end; fp++) in unmap_skb()
290 * deferred_unmap_destructor - unmap a packet when it is freed
293 * This is the packet destructor used for Tx packets that need to remain
294 * mapped until they are freed rather than until their Tx descriptors are
299 unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head); in deferred_unmap_destructor()
304 * free_tx_desc - reclaims Tx descriptors and their buffers
306 * @q: the Tx queue to reclaim descriptors from
310 * Reclaims Tx descriptors from an SGE Tx queue and frees the associated
311 * Tx buffers. Called with the Tx queue lock held.
316 unsigned int cidx = q->cidx; in free_tx_desc()
319 d = &q->sdesc[cidx]; in free_tx_desc()
320 while (n--) { in free_tx_desc()
321 if (d->skb) { /* an SGL is present */ in free_tx_desc()
322 if (unmap && d->addr[0]) { in free_tx_desc()
323 unmap_skb(adap->pdev_dev, d->skb, d->addr); in free_tx_desc()
324 memset(d->addr, 0, sizeof(d->addr)); in free_tx_desc()
326 dev_consume_skb_any(d->skb); in free_tx_desc()
327 d->skb = NULL; in free_tx_desc()
330 if (++cidx == q->size) { in free_tx_desc()
332 d = q->sdesc; in free_tx_desc()
335 q->cidx = cidx; in free_tx_desc()
339 * Return the number of reclaimable descriptors in a Tx queue.
343 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); in reclaimable()
344 hw_cidx -= q->cidx; in reclaimable()
345 return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx; in reclaimable()
349 * reclaim_completed_tx - reclaims completed TX Descriptors
351 * @q: the Tx queue to reclaim completed descriptors from
352 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
355 * Reclaims Tx Descriptors that the SGE has indicated it has processed,
356 * and frees the associated buffers if possible. If @maxreclaim == -1, then
357 * we'll use a default maximum. Called with the TX Queue locked.
367 * the Tx lock hold time O(1). in reclaim_completed_tx()
375 q->in_use -= reclaim; in reclaim_completed_tx()
382 * cxgb4_reclaim_completed_tx - reclaims completed Tx descriptors
384 * @q: the Tx queue to reclaim completed descriptors from
387 * Reclaims Tx descriptors that the SGE has indicated it has processed,
388 * and frees the associated buffers if possible. Called with the Tx
394 (void)reclaim_completed_tx(adap, q, -1, unmap); in cxgb4_reclaim_completed_tx()
401 struct sge *s = &adapter->sge; in get_buf_size()
402 unsigned int rx_buf_size_idx = d->dma_addr & RX_BUF_SIZE; in get_buf_size()
411 buf_size = PAGE_SIZE << s->fl_pg_order; in get_buf_size()
430 * free_rx_bufs - free the Rx buffers on an SGE free list
435 * Release the next @n buffers on an SGE free-buffer Rx queue. The
440 while (n--) { in free_rx_bufs()
441 struct rx_sw_desc *d = &q->sdesc[q->cidx]; in free_rx_bufs()
444 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), in free_rx_bufs()
447 put_page(d->page); in free_rx_bufs()
448 d->page = NULL; in free_rx_bufs()
449 if (++q->cidx == q->size) in free_rx_bufs()
450 q->cidx = 0; in free_rx_bufs()
451 q->avail--; in free_rx_bufs()
456 * unmap_rx_buf - unmap the current Rx buffer on an SGE free list
460 * Unmap the current buffer on an SGE free-buffer Rx queue. The
468 struct rx_sw_desc *d = &q->sdesc[q->cidx]; in unmap_rx_buf()
471 dma_unmap_page(adap->pdev_dev, get_buf_addr(d), in unmap_rx_buf()
473 d->page = NULL; in unmap_rx_buf()
474 if (++q->cidx == q->size) in unmap_rx_buf()
475 q->cidx = 0; in unmap_rx_buf()
476 q->avail--; in unmap_rx_buf()
481 if (q->pend_cred >= 8) { in ring_fl_db()
482 u32 val = adap->params.arch.sge_fl_db; in ring_fl_db()
484 if (is_t4(adap->params.chip)) in ring_fl_db()
485 val |= PIDX_V(q->pend_cred / 8); in ring_fl_db()
487 val |= PIDX_T5_V(q->pend_cred / 8); in ring_fl_db()
498 if (unlikely(q->bar2_addr == NULL)) { in ring_fl_db()
500 val | QID_V(q->cntxt_id)); in ring_fl_db()
502 writel(val | QID_V(q->bar2_qid), in ring_fl_db()
503 q->bar2_addr + SGE_UDB_KDOORBELL); in ring_fl_db()
510 q->pend_cred &= 7; in ring_fl_db()
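
A quick numeric illustration of the free-list credit bookkeeping above (derived from the PIDX_V(q->pend_cred / 8) and q->pend_cred &= 7 lines; the concrete numbers are only an example):

	/* Example: q->pend_cred == 27 at doorbell time.
	 * The doorbell posts 27 / 8 = 3 descriptors' worth of credits
	 * (24 buffers, since 1 free-list descriptor = 8 buffers), and
	 * q->pend_cred &= 7 leaves the remaining 3 buffers pending until
	 * at least 8 have accumulated again.
	 */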
517 sd->page = pg; in set_rx_sw_desc()
518 sd->dma_addr = mapping; /* includes size low bits */ in set_rx_sw_desc()
522 * refill_fl - refill an SGE Rx buffer ring
528 * (Re)populate an SGE free-buffer queue with up to @n new packet buffers,
538 struct sge *s = &adap->sge; in refill_fl()
541 unsigned int cred = q->avail; in refill_fl()
542 __be64 *d = &q->desc[q->pidx]; in refill_fl()
543 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; in refill_fl()
547 if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl)) in refill_fl()
552 node = dev_to_node(adap->pdev_dev); in refill_fl()
554 if (s->fl_pg_order == 0) in refill_fl()
561 pg = alloc_pages_node(node, gfp | __GFP_COMP, s->fl_pg_order); in refill_fl()
563 q->large_alloc_failed++; in refill_fl()
567 mapping = dma_map_page(adap->pdev_dev, pg, 0, in refill_fl()
568 PAGE_SIZE << s->fl_pg_order, in refill_fl()
570 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { in refill_fl()
571 __free_pages(pg, s->fl_pg_order); in refill_fl()
572 q->mapping_err++; in refill_fl()
581 q->avail++; in refill_fl()
582 if (++q->pidx == q->size) { in refill_fl()
583 q->pidx = 0; in refill_fl()
584 sd = q->sdesc; in refill_fl()
585 d = q->desc; in refill_fl()
587 n--; in refill_fl()
591 while (n--) { in refill_fl()
594 q->alloc_failed++; in refill_fl()
598 mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, in refill_fl()
600 if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { in refill_fl()
602 q->mapping_err++; in refill_fl()
610 q->avail++; in refill_fl()
611 if (++q->pidx == q->size) { in refill_fl()
612 q->pidx = 0; in refill_fl()
613 sd = q->sdesc; in refill_fl()
614 d = q->desc; in refill_fl()
618 out: cred = q->avail - cred; in refill_fl()
619 q->pend_cred += cred; in refill_fl()
624 q->low++; in refill_fl()
625 set_bit(q->cntxt_id - adap->sge.egr_start, in refill_fl()
626 adap->sge.starving_fl); in refill_fl()
634 refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail), in __refill_fl()
639 * alloc_ring - allocate resources for an SGE descriptor ring
649 * Allocates resources for an SGE descriptor ring, such as Tx queues,
681 * sgl_len - calculates the size of an SGL of the given capacity
689 /* A Direct Scatter Gather List uses 32-bit lengths and 64-bit PCI DMA in sgl_len()
690 * addresses. The DSGL Work Request starts off with a 32-bit DSGL in sgl_len()
693 * Address[i+1] } (this ensures that all addresses are on 64-bit in sgl_len()
700 * Address0; the "(3*(n-1))/2" covers the main body of list entries (3 in sgl_len()
701 * flits for every pair of the remaining N-1 addresses); and in sgl_len()
702 * finally the "+((n-1)&1)" adds the one remaining flit needed if in sgl_len()
703 * (n-1) is odd ... in sgl_len()
705 n--; in sgl_len()
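
To make the formula above concrete, a few worked values (illustrative only; 1 flit = 8 bytes, n = number of SGL entries):

	/* sgl_len(n) = (3 * (n - 1)) / 2 + ((n - 1) & 1) + 2
	 *
	 *	n = 1:  0 + 0 + 2 =  2 flits  (DSGL header + Length0/Address0)
	 *	n = 2:  1 + 1 + 2 =  4 flits  (one trailing odd entry)
	 *	n = 3:  3 + 0 + 2 =  5 flits  (one full {len,len,addr,addr} group)
	 *	n = 9: 12 + 0 + 2 = 14 flits
	 */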
710 * flits_to_desc - returns the num of Tx descriptors for the given flits
713 * Returns the number of Tx descriptors needed for the supplied number
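
The body of flits_to_desc() is not among the matched lines; a minimal sketch of the documented conversion, assuming each Tx descriptor holds 8 flits (64 bytes), might look like:

	/* Sketch only: round a flit count up to whole 8-flit (64-byte)
	 * Tx descriptors.
	 */
	static inline unsigned int flits_to_desc_sketch(unsigned int n)
	{
		return DIV_ROUND_UP(n, 8);
	}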
723 * is_eth_imm - can an Ethernet packet be sent as immediate data?
734 if (skb->encapsulation && skb_shinfo(skb)->gso_size && in is_eth_imm()
738 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in is_eth_imm()
741 hdrlen = skb_shinfo(skb)->gso_size ? in is_eth_imm()
745 if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen) in is_eth_imm()
751 * calc_tx_flits - calculate the number of flits for a packet Tx WR
755 * Returns the number of flits needed for a Tx WR for the given Ethernet
766 * TX Packet header plus the skb data in the Work Request. in calc_tx_flits()
770 return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)); in calc_tx_flits()
774 * for the TX Packet Work Request and CPL. We always have a firmware in calc_tx_flits()
776 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL in calc_tx_flits()
778 * with an embedded TX Packet Write CPL message. in calc_tx_flits()
780 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); in calc_tx_flits()
781 if (skb_shinfo(skb)->gso_size) { in calc_tx_flits()
782 if (skb->encapsulation && chip_ver > CHELSIO_T5) { in calc_tx_flits()
785 } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in calc_tx_flits()
788 pkt_hdrlen = eth_get_headlen(skb->dev, skb->data, in calc_tx_flits()
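
As a concrete, purely illustrative application of the accounting above, consider a non-GSO packet whose payload sits in the linear area plus three page fragments, and assume fw_eth_tx_pkt_wr and cpl_tx_pkt_core each occupy 2 flits (16 bytes):

	/* Illustrative flit budget (assumed 2-flit WR header + 2-flit CPL):
	 *
	 *	SGL:    sgl_len(3 + 1) = 7 flits
	 *	WR+CPL: 2 + 2          = 4 flits
	 *	total:                  11 flits -> 2 Tx descriptors (8 flits each)
	 */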
807 * calc_tx_descs - calculate the number of Tx descriptors for a packet
811 * Returns the number of Tx descriptors needed for the given Ethernet
821 * cxgb4_write_sgl - populate a scatter/gather list for a packet
823 * @q: the Tx queue we are writing into
826 * @start: start offset into skb main-body data to include in the SGL
832 * main body except for the first @start bytes. @sgl must be 16-byte
833 * aligned and within a Tx descriptor with available space. @end points
844 unsigned int nfrags = si->nr_frags; in cxgb4_write_sgl()
847 len = skb_headlen(skb) - start; in cxgb4_write_sgl()
849 sgl->len0 = htonl(len); in cxgb4_write_sgl()
850 sgl->addr0 = cpu_to_be64(addr[0] + start); in cxgb4_write_sgl()
853 sgl->len0 = htonl(skb_frag_size(&si->frags[0])); in cxgb4_write_sgl()
854 sgl->addr0 = cpu_to_be64(addr[1]); in cxgb4_write_sgl()
857 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | in cxgb4_write_sgl()
859 if (likely(--nfrags == 0)) in cxgb4_write_sgl()
866 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; in cxgb4_write_sgl()
868 for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) { in cxgb4_write_sgl()
869 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); in cxgb4_write_sgl()
870 to->len[1] = cpu_to_be32(skb_frag_size(&si->frags[++i])); in cxgb4_write_sgl()
871 to->addr[0] = cpu_to_be64(addr[i]); in cxgb4_write_sgl()
872 to->addr[1] = cpu_to_be64(addr[++i]); in cxgb4_write_sgl()
875 to->len[0] = cpu_to_be32(skb_frag_size(&si->frags[i])); in cxgb4_write_sgl()
876 to->len[1] = cpu_to_be32(0); in cxgb4_write_sgl()
877 to->addr[0] = cpu_to_be64(addr[i + 1]); in cxgb4_write_sgl()
879 if (unlikely((u8 *)end > (u8 *)q->stat)) { in cxgb4_write_sgl()
880 unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; in cxgb4_write_sgl()
883 memcpy(sgl->sge, buf, part0); in cxgb4_write_sgl()
884 part1 = (u8 *)end - (u8 *)q->stat; in cxgb4_write_sgl()
885 memcpy(q->desc, (u8 *)buf + part0, part1); in cxgb4_write_sgl()
886 end = (void *)q->desc + part1; in cxgb4_write_sgl()
888 if ((uintptr_t)end & 8) /* 0-pad to multiple of 16 */ in cxgb4_write_sgl()
893 /* cxgb4_write_partial_sgl - populate SGL for partial packet
895 * @q: the Tx queue we are writing into
920 frag_size = min(len, skb_linear_data_len - start); in cxgb4_write_partial_sgl()
921 sgl->len0 = htonl(frag_size); in cxgb4_write_partial_sgl()
922 sgl->addr0 = cpu_to_be64(addr[0] + start); in cxgb4_write_partial_sgl()
923 len -= frag_size; in cxgb4_write_partial_sgl()
926 start -= skb_linear_data_len; in cxgb4_write_partial_sgl()
927 frag = &si->frags[frag_idx]; in cxgb4_write_partial_sgl()
931 start -= frag_size; in cxgb4_write_partial_sgl()
933 frag = &si->frags[frag_idx]; in cxgb4_write_partial_sgl()
937 frag_size = min(len, skb_frag_size(frag) - start); in cxgb4_write_partial_sgl()
938 sgl->len0 = cpu_to_be32(frag_size); in cxgb4_write_partial_sgl()
939 sgl->addr0 = cpu_to_be64(addr[frag_idx + 1] + start); in cxgb4_write_partial_sgl()
940 len -= frag_size; in cxgb4_write_partial_sgl()
955 to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge; in cxgb4_write_partial_sgl()
962 frag_size = min(len, skb_frag_size(&si->frags[frag_idx])); in cxgb4_write_partial_sgl()
963 to->len[i & 1] = cpu_to_be32(frag_size); in cxgb4_write_partial_sgl()
964 to->addr[i & 1] = cpu_to_be64(addr[frag_idx + 1]); in cxgb4_write_partial_sgl()
970 len -= frag_size; in cxgb4_write_partial_sgl()
977 to->len[1] = cpu_to_be32(0); in cxgb4_write_partial_sgl()
979 /* Copy from temporary buffer to Tx ring, in case we hit the in cxgb4_write_partial_sgl()
982 if (unlikely((u8 *)end > (u8 *)q->stat)) { in cxgb4_write_partial_sgl()
983 u32 part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1; in cxgb4_write_partial_sgl()
986 memcpy(sgl->sge, buf, part0); in cxgb4_write_partial_sgl()
987 part1 = (u8 *)end - (u8 *)q->stat; in cxgb4_write_partial_sgl()
988 memcpy(q->desc, (u8 *)buf + part0, part1); in cxgb4_write_partial_sgl()
989 end = (void *)q->desc + part1; in cxgb4_write_partial_sgl()
992 /* 0-pad to multiple of 16 */ in cxgb4_write_partial_sgl()
996 sgl->cmd_nsge = htonl(ULPTX_CMD_V(ULP_TX_SC_DSGL) | in cxgb4_write_partial_sgl()
1013 count--; in cxgb_pio_copy()
1018 * cxgb4_ring_tx_db - check and potentially ring a Tx queue's doorbell
1020 * @q: the Tx queue
1023 * Ring the doorbell for a Tx queue.
1027 /* Make sure that all writes to the TX Descriptors are committed in cxgb4_ring_tx_db()
1035 if (unlikely(q->bar2_addr == NULL)) { in cxgb4_ring_tx_db()
1042 spin_lock_irqsave(&q->db_lock, flags); in cxgb4_ring_tx_db()
1043 if (!q->db_disabled) in cxgb4_ring_tx_db()
1045 QID_V(q->cntxt_id) | val); in cxgb4_ring_tx_db()
1047 q->db_pidx_inc += n; in cxgb4_ring_tx_db()
1048 q->db_pidx = q->pidx; in cxgb4_ring_tx_db()
1049 spin_unlock_irqrestore(&q->db_lock, flags); in cxgb4_ring_tx_db()
1061 /* If we're only writing a single TX Descriptor and we can use in cxgb4_ring_tx_db()
1065 if (n == 1 && q->bar2_qid == 0) { in cxgb4_ring_tx_db()
1066 int index = (q->pidx in cxgb4_ring_tx_db()
1067 ? (q->pidx - 1) in cxgb4_ring_tx_db()
1068 : (q->size - 1)); in cxgb4_ring_tx_db()
1069 u64 *wr = (u64 *)&q->desc[index]; in cxgb4_ring_tx_db()
1072 (q->bar2_addr + SGE_UDB_WCDOORBELL), in cxgb4_ring_tx_db()
1075 writel(val | QID_V(q->bar2_qid), in cxgb4_ring_tx_db()
1076 q->bar2_addr + SGE_UDB_KDOORBELL); in cxgb4_ring_tx_db()
1095 * cxgb4_inline_tx_skb - inline a packet's data into Tx descriptors
1097 * @q: the Tx queue where the packet will be inlined
1098 * @pos: starting position in the Tx queue where to inline the packet
1100 * Inline a packet's contents directly into Tx descriptors, starting at
1101 * the given position within the Tx DMA ring.
1108 int left = (void *)q->stat - pos; in cxgb4_inline_tx_skb()
1111 if (likely(skb->len <= left)) { in cxgb4_inline_tx_skb()
1112 if (likely(!skb->data_len)) in cxgb4_inline_tx_skb()
1113 skb_copy_from_linear_data(skb, pos, skb->len); in cxgb4_inline_tx_skb()
1115 skb_copy_bits(skb, 0, pos, skb->len); in cxgb4_inline_tx_skb()
1116 pos += skb->len; in cxgb4_inline_tx_skb()
1119 skb_copy_bits(skb, left, q->desc, skb->len - left); in cxgb4_inline_tx_skb()
1120 pos = (void *)q->desc + (skb->len - left); in cxgb4_inline_tx_skb()
1123 /* 0-pad to multiple of 16 */ in cxgb4_inline_tx_skb()
1135 int left = (void *)q->stat - pos; in inline_tx_skb_header()
1138 memcpy(pos, skb->data, length); in inline_tx_skb_header()
1141 memcpy(pos, skb->data, left); in inline_tx_skb_header()
1142 memcpy(q->desc, skb->data + left, length - left); in inline_tx_skb_header()
1143 pos = (void *)q->desc + (length - left); in inline_tx_skb_header()
1145 /* 0-pad to multiple of 16 */ in inline_tx_skb_header()
1164 if (skb->encapsulation && in hwcsum()
1169 ver = inner_ip_hdr(skb)->version; in hwcsum()
1170 proto = (ver == 4) ? inner_ip_hdr(skb)->protocol : in hwcsum()
1171 inner_ipv6_hdr(skb)->nexthdr; in hwcsum()
1173 ver = ip_hdr(skb)->version; in hwcsum()
1174 proto = (ver == 4) ? ip_hdr(skb)->protocol : in hwcsum()
1175 ipv6_hdr(skb)->nexthdr; in hwcsum()
1211 eth_hdr_len = skb_inner_network_offset(skb) - ETH_HLEN; in hwcsum()
1214 eth_hdr_len = skb_network_offset(skb) - ETH_HLEN; in hwcsum()
1228 TXPKT_CSUM_LOC_V(start + skb->csum_offset); in hwcsum()
1234 netif_tx_stop_queue(q->txq); in eth_txq_stop()
1235 q->q.stops++; in eth_txq_stop()
1240 q->in_use += n; in txq_advance()
1241 q->pidx += n; in txq_advance()
1242 if (q->pidx >= q->size) in txq_advance()
1243 q->pidx -= q->size; in txq_advance()
1251 const struct cxgb_fcoe *fcoe = &pi->fcoe; in cxgb_fcoe_offload()
1253 if (!(fcoe->flags & CXGB_FCOE_ENABLED)) in cxgb_fcoe_offload()
1256 if (skb->protocol != htons(ETH_P_FCOE)) in cxgb_fcoe_offload()
1260 skb->mac_len = sizeof(struct ethhdr); in cxgb_fcoe_offload()
1262 skb_set_network_header(skb, skb->mac_len); in cxgb_fcoe_offload()
1263 skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr)); in cxgb_fcoe_offload()
1266 return -ENOTSUPP; in cxgb_fcoe_offload()
1285 struct port_info *pi = netdev_priv(skb->dev); in cxgb_encap_offload_supported()
1286 struct adapter *adapter = pi->adapter; in cxgb_encap_offload_supported()
1288 if (skb->inner_protocol_type != ENCAP_TYPE_ETHER || in cxgb_encap_offload_supported()
1289 skb->inner_protocol != htons(ETH_P_TEB)) in cxgb_encap_offload_supported()
1294 l4_hdr = ip_hdr(skb)->protocol; in cxgb_encap_offload_supported()
1297 l4_hdr = ipv6_hdr(skb)->nexthdr; in cxgb_encap_offload_supported()
1305 if (adapter->vxlan_port == udp_hdr(skb)->dest) in cxgb_encap_offload_supported()
1307 else if (adapter->geneve_port == udp_hdr(skb)->dest) in cxgb_encap_offload_supported()
1324 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; in t6_fill_tnl_lso()
1326 bool v6 = (ip_hdr(skb)->version == 6); in t6_fill_tnl_lso()
1337 tnl_lso->op_to_IpIdSplitOut = htonl(val); in t6_fill_tnl_lso()
1339 tnl_lso->IpIdOffsetOut = 0; in t6_fill_tnl_lso()
1342 val = skb_inner_mac_header(skb) - skb_mac_header(skb); in t6_fill_tnl_lso()
1343 in_eth_xtra_len = skb_inner_network_header(skb) - in t6_fill_tnl_lso()
1344 skb_inner_mac_header(skb) - ETH_HLEN; in t6_fill_tnl_lso()
1349 tnl_lso->UdpLenSetOut_to_TnlHdrLen = in t6_fill_tnl_lso()
1354 tnl_lso->UdpLenSetOut_to_TnlHdrLen = 0; in t6_fill_tnl_lso()
1358 tnl_lso->UdpLenSetOut_to_TnlHdrLen |= in t6_fill_tnl_lso()
1362 tnl_lso->r1 = 0; in t6_fill_tnl_lso()
1365 CPL_TX_TNL_LSO_IPV6_V(inner_ip_hdr(skb)->version == 6) | in t6_fill_tnl_lso()
1368 tnl_lso->Flow_to_TcpHdrLen = htonl(val); in t6_fill_tnl_lso()
1370 tnl_lso->IpIdOffset = htons(0); in t6_fill_tnl_lso()
1372 tnl_lso->IpIdSplit_to_Mss = htons(CPL_TX_TNL_LSO_MSS_V(ssi->gso_size)); in t6_fill_tnl_lso()
1373 tnl_lso->TCPSeqOffset = htonl(0); in t6_fill_tnl_lso()
1374 tnl_lso->EthLenOffset_Size = htonl(CPL_TX_TNL_LSO_SIZE_V(skb->len)); in t6_fill_tnl_lso()
1380 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; in write_tso_wr()
1386 if (ssi->gso_type & SKB_GSO_TCPV6) in write_tso_wr()
1389 lso->lso_ctrl = htonl(LSO_OPCODE_V(CPL_TX_PKT_LSO) | in write_tso_wr()
1394 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); in write_tso_wr()
1395 lso->ipid_ofst = htons(0); in write_tso_wr()
1396 lso->mss = htons(ssi->gso_size); in write_tso_wr()
1397 lso->seqno_offset = htonl(0); in write_tso_wr()
1398 if (is_t4(adap->params.chip)) in write_tso_wr()
1399 lso->len = htonl(skb->len); in write_tso_wr()
1401 lso->len = htonl(LSO_T5_XFER_SIZE_V(skb->len)); in write_tso_wr()
1407 * t4_sge_eth_txq_egress_update - handle Ethernet TX Queue update
1409 * @eq: the Ethernet TX Queue
1410 * @maxreclaim: the maximum number of TX Descriptors to reclaim or -1
1412 * We're typically called here to update the state of an Ethernet TX
1413 * Queue with respect to the hardware's progress in consuming the TX
1416 * in regular timer-based Ethernet TX Queue maintenance.
1422 struct sge_txq *q = &eq->q; in t4_sge_eth_txq_egress_update()
1425 if (!q->in_use || !__netif_tx_trylock(eq->txq)) in t4_sge_eth_txq_egress_update()
1428 /* Reclaim pending completed TX Descriptors. */ in t4_sge_eth_txq_egress_update()
1429 reclaimed = reclaim_completed_tx(adap, &eq->q, maxreclaim, true); in t4_sge_eth_txq_egress_update()
1431 hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); in t4_sge_eth_txq_egress_update()
1432 hw_in_use = q->pidx - hw_cidx; in t4_sge_eth_txq_egress_update()
1434 hw_in_use += q->size; in t4_sge_eth_txq_egress_update()
1436 /* If the TX Queue is currently stopped and there's now more than half in t4_sge_eth_txq_egress_update()
1439 * currently buffered Coalesced TX Work Request. in t4_sge_eth_txq_egress_update()
1441 if (netif_tx_queue_stopped(eq->txq) && hw_in_use < (q->size / 2)) { in t4_sge_eth_txq_egress_update()
1442 netif_tx_wake_queue(eq->txq); in t4_sge_eth_txq_egress_update()
1443 eq->q.restarts++; in t4_sge_eth_txq_egress_update()
1446 __netif_tx_unlock(eq->txq); in t4_sge_eth_txq_egress_update()
1460 if (unlikely(skb->len < min_pkt_len)) in cxgb4_validate_skb()
1461 return -EINVAL; in cxgb4_validate_skb()
1464 max_pkt_len = ETH_HLEN + dev->mtu; in cxgb4_validate_skb()
1469 if (!skb_shinfo(skb)->gso_size && (unlikely(skb->len > max_pkt_len))) in cxgb4_validate_skb()
1470 return -EINVAL; in cxgb4_validate_skb()
1478 wr->u.udpseg.type = FW_ETH_TX_EO_TYPE_UDPSEG; in write_eo_udp_wr()
1479 wr->u.udpseg.ethlen = skb_network_offset(skb); in write_eo_udp_wr()
1480 wr->u.udpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); in write_eo_udp_wr()
1481 wr->u.udpseg.udplen = sizeof(struct udphdr); in write_eo_udp_wr()
1482 wr->u.udpseg.rtplen = 0; in write_eo_udp_wr()
1483 wr->u.udpseg.r4 = 0; in write_eo_udp_wr()
1484 if (skb_shinfo(skb)->gso_size) in write_eo_udp_wr()
1485 wr->u.udpseg.mss = cpu_to_be16(skb_shinfo(skb)->gso_size); in write_eo_udp_wr()
1487 wr->u.udpseg.mss = cpu_to_be16(skb->len - hdr_len); in write_eo_udp_wr()
1488 wr->u.udpseg.schedpktsize = wr->u.udpseg.mss; in write_eo_udp_wr()
1489 wr->u.udpseg.plen = cpu_to_be32(skb->len - hdr_len); in write_eo_udp_wr()
1495 * cxgb4_eth_xmit - add a packet to an Ethernet Tx queue
1499 * Add a packet to an SGE Ethernet Tx queue. Runs with softirqs disabled.
1525 adap = pi->adapter; in cxgb4_eth_xmit()
1528 if (xfrm_offload(skb) && !ssi->gso_size) in cxgb4_eth_xmit()
1529 return adap->uld[CXGB4_ULD_IPSEC].tx_handler(skb, dev); in cxgb4_eth_xmit()
1534 (skb->len - (skb_transport_offset(skb) + tcp_hdrlen(skb)))) in cxgb4_eth_xmit()
1535 return adap->uld[CXGB4_ULD_KTLS].tx_handler(skb, dev); in cxgb4_eth_xmit()
1540 if (!(adap->ptp_tx_skb)) { in cxgb4_eth_xmit()
1541 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in cxgb4_eth_xmit()
1542 adap->ptp_tx_skb = skb_get(skb); in cxgb4_eth_xmit()
1546 q = &adap->sge.ptptxq; in cxgb4_eth_xmit()
1548 q = &adap->sge.ethtxq[qidx + pi->first_qset]; in cxgb4_eth_xmit()
1552 reclaim_completed_tx(adap, &q->q, -1, true); in cxgb4_eth_xmit()
1557 if (unlikely(ret == -EOPNOTSUPP)) in cxgb4_eth_xmit()
1561 chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in cxgb4_eth_xmit()
1564 credits = txq_avail(&q->q) - ndesc; in cxgb4_eth_xmit()
1568 dev_err(adap->pdev_dev, in cxgb4_eth_xmit()
1569 "%s: Tx ring %u full while queue awake!\n", in cxgb4_eth_xmit()
1570 dev->name, qidx); in cxgb4_eth_xmit()
1577 if (skb->encapsulation && chip_ver > CHELSIO_T5) in cxgb4_eth_xmit()
1580 last_desc = q->q.pidx + ndesc - 1; in cxgb4_eth_xmit()
1581 if (last_desc >= q->q.size) in cxgb4_eth_xmit()
1582 last_desc -= q->q.size; in cxgb4_eth_xmit()
1583 sgl_sdesc = &q->q.sdesc[last_desc]; in cxgb4_eth_xmit()
1586 unlikely(cxgb4_map_skb(adap->pdev_dev, skb, sgl_sdesc->addr) < 0)) { in cxgb4_eth_xmit()
1587 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); in cxgb4_eth_xmit()
1588 q->mapping_err++; in cxgb4_eth_xmit()
1595 * packet, we'll be below our "stop threshold" so stop the TX in cxgb4_eth_xmit()
1606 wr = (void *)&q->q.desc[q->q.pidx]; in cxgb4_eth_xmit()
1607 eowr = (void *)&q->q.desc[q->q.pidx]; in cxgb4_eth_xmit()
1608 wr->equiq_to_len16 = htonl(wr_mid); in cxgb4_eth_xmit()
1609 wr->r3 = cpu_to_be64(0); in cxgb4_eth_xmit()
1610 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) in cxgb4_eth_xmit()
1615 len = immediate ? skb->len : 0; in cxgb4_eth_xmit()
1617 if (ssi->gso_size && !(ssi->gso_type & SKB_GSO_UDP_L4)) { in cxgb4_eth_xmit()
1626 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | in cxgb4_eth_xmit()
1636 if (iph->version == 4) { in cxgb4_eth_xmit()
1637 iph->check = 0; in cxgb4_eth_xmit()
1638 iph->tot_len = 0; in cxgb4_eth_xmit()
1639 iph->check = ~ip_fast_csum((u8 *)iph, iph->ihl); in cxgb4_eth_xmit()
1641 if (skb->ip_summed == CHECKSUM_PARTIAL) in cxgb4_eth_xmit()
1642 cntrl = hwcsum(adap->params.chip, skb); in cxgb4_eth_xmit()
1645 cntrl = hwcsum(adap->params.chip, skb); in cxgb4_eth_xmit()
1648 q->tso++; in cxgb4_eth_xmit()
1649 q->tx_cso += ssi->gso_segs; in cxgb4_eth_xmit()
1650 } else if (ssi->gso_size) { in cxgb4_eth_xmit()
1654 hdrlen = eth_get_headlen(dev, skb->data, skb_headlen(skb)); in cxgb4_eth_xmit()
1656 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | in cxgb4_eth_xmit()
1659 cntrl = hwcsum(adap->params.chip, skb); in cxgb4_eth_xmit()
1662 sgl = (u64 *)inline_tx_skb_header(skb, &q->q, (void *)start, in cxgb4_eth_xmit()
1665 left = (u8 *)end - (u8 *)q->q.stat; in cxgb4_eth_xmit()
1666 end = (void *)q->q.desc + left; in cxgb4_eth_xmit()
1669 q->uso++; in cxgb4_eth_xmit()
1670 q->tx_cso += ssi->gso_segs; in cxgb4_eth_xmit()
1676 wr->op_immdlen = htonl(FW_WR_OP_V(op) | in cxgb4_eth_xmit()
1680 if (skb->ip_summed == CHECKSUM_PARTIAL) { in cxgb4_eth_xmit()
1681 cntrl = hwcsum(adap->params.chip, skb) | in cxgb4_eth_xmit()
1683 q->tx_cso++; in cxgb4_eth_xmit()
1687 if (unlikely((u8 *)sgl >= (u8 *)q->q.stat)) { in cxgb4_eth_xmit()
1692 left = (u8 *)end - (u8 *)q->q.stat; in cxgb4_eth_xmit()
1693 end = (void *)q->q.desc + left; in cxgb4_eth_xmit()
1694 sgl = (void *)q->q.desc; in cxgb4_eth_xmit()
1698 q->vlan_ins++; in cxgb4_eth_xmit()
1701 if (skb->protocol == htons(ETH_P_FCOE)) in cxgb4_eth_xmit()
1703 ((skb->priority & 0x7) << VLAN_PRIO_SHIFT)); in cxgb4_eth_xmit()
1707 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_INTF_V(pi->tx_chan) | in cxgb4_eth_xmit()
1708 TXPKT_PF_V(adap->pf); in cxgb4_eth_xmit()
1712 if (is_t4(adap->params.chip)) in cxgb4_eth_xmit()
1713 ctrl0 |= TXPKT_OVLAN_IDX_V(q->dcb_prio); in cxgb4_eth_xmit()
1715 ctrl0 |= TXPKT_T5_OVLAN_IDX_V(q->dcb_prio); in cxgb4_eth_xmit()
1717 cpl->ctrl0 = htonl(ctrl0); in cxgb4_eth_xmit()
1718 cpl->pack = htons(0); in cxgb4_eth_xmit()
1719 cpl->len = htons(skb->len); in cxgb4_eth_xmit()
1720 cpl->ctrl1 = cpu_to_be64(cntrl); in cxgb4_eth_xmit()
1723 cxgb4_inline_tx_skb(skb, &q->q, sgl); in cxgb4_eth_xmit()
1726 cxgb4_write_sgl(skb, &q->q, (void *)sgl, end, sgl_off, in cxgb4_eth_xmit()
1727 sgl_sdesc->addr); in cxgb4_eth_xmit()
1729 sgl_sdesc->skb = skb; in cxgb4_eth_xmit()
1732 txq_advance(&q->q, ndesc); in cxgb4_eth_xmit()
1734 cxgb4_ring_tx_db(adap, &q->q, ndesc); in cxgb4_eth_xmit()
1748 * 64-bit PCI DMA addresses.
1760 * t4vf_is_eth_imm - can an Ethernet packet be sent as immediate data?
1778 * t4vf_calc_tx_flits - calculate the number of flits for a packet TX WR
1781 * Returns the number of flits needed for a TX Work Request for the
1790 * TX Packet header plus the skb data in the Work Request. in t4vf_calc_tx_flits()
1793 return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), in t4vf_calc_tx_flits()
1798 * for the TX Packet Work Request and CPL. We always have a firmware in t4vf_calc_tx_flits()
1800 * cpl_tx_pkt structures), followed by either a TX Packet Write CPL in t4vf_calc_tx_flits()
1802 * with an embedded TX Packet Write CPL message. in t4vf_calc_tx_flits()
1804 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1); in t4vf_calc_tx_flits()
1805 if (skb_shinfo(skb)->gso_size) in t4vf_calc_tx_flits()
1816 * cxgb4_vf_eth_xmit - add a packet to an Ethernet TX queue
1820 * Add a packet to an SGE Ethernet TX queue. Runs with softirqs disabled.
1843 fw_hdr_copy_len = sizeof(wr->ethmacdst) + sizeof(wr->ethmacsrc) + in cxgb4_vf_eth_xmit()
1844 sizeof(wr->ethtype) + sizeof(wr->vlantci); in cxgb4_vf_eth_xmit()
1849 /* Figure out which TX Queue we're going to use. */ in cxgb4_vf_eth_xmit()
1851 adapter = pi->adapter; in cxgb4_vf_eth_xmit()
1853 WARN_ON(qidx >= pi->nqsets); in cxgb4_vf_eth_xmit()
1854 txq = &adapter->sge.ethtxq[pi->first_qset + qidx]; in cxgb4_vf_eth_xmit()
1856 /* Take this opportunity to reclaim any TX Descriptors whose DMA in cxgb4_vf_eth_xmit()
1859 reclaim_completed_tx(adapter, &txq->q, -1, true); in cxgb4_vf_eth_xmit()
1861 /* Calculate the number of flits and TX Descriptors we're going to in cxgb4_vf_eth_xmit()
1862 * need along with how many TX Descriptors will be left over after in cxgb4_vf_eth_xmit()
1867 credits = txq_avail(&txq->q) - ndesc; in cxgb4_vf_eth_xmit()
1871 * TX Queue and return a "busy" condition. The queue will get in cxgb4_vf_eth_xmit()
1876 dev_err(adapter->pdev_dev, in cxgb4_vf_eth_xmit()
1877 "%s: TX ring %u full while queue awake!\n", in cxgb4_vf_eth_xmit()
1878 dev->name, qidx); in cxgb4_vf_eth_xmit()
1882 last_desc = txq->q.pidx + ndesc - 1; in cxgb4_vf_eth_xmit()
1883 if (last_desc >= txq->q.size) in cxgb4_vf_eth_xmit()
1884 last_desc -= txq->q.size; in cxgb4_vf_eth_xmit()
1885 sgl_sdesc = &txq->q.sdesc[last_desc]; in cxgb4_vf_eth_xmit()
1888 unlikely(cxgb4_map_skb(adapter->pdev_dev, skb, in cxgb4_vf_eth_xmit()
1889 sgl_sdesc->addr) < 0)) { in cxgb4_vf_eth_xmit()
1891 * be in-lined directly into the Work Request) and the mapping in cxgb4_vf_eth_xmit()
1894 memset(sgl_sdesc->addr, 0, sizeof(sgl_sdesc->addr)); in cxgb4_vf_eth_xmit()
1895 txq->mapping_err++; in cxgb4_vf_eth_xmit()
1902 * packet, we'll be below our "stop threshold" so stop the TX in cxgb4_vf_eth_xmit()
1914 * the WR Header wrapping around the TX Descriptor Ring. If our in cxgb4_vf_eth_xmit()
1915 * maximum header size ever exceeds one TX Descriptor, we'll need to in cxgb4_vf_eth_xmit()
1919 wr = (void *)&txq->q.desc[txq->q.pidx]; in cxgb4_vf_eth_xmit()
1920 wr->equiq_to_len16 = cpu_to_be32(wr_mid); in cxgb4_vf_eth_xmit()
1921 wr->r3[0] = cpu_to_be32(0); in cxgb4_vf_eth_xmit()
1922 wr->r3[1] = cpu_to_be32(0); in cxgb4_vf_eth_xmit()
1923 skb_copy_from_linear_data(skb, (void *)wr->ethmacdst, fw_hdr_copy_len); in cxgb4_vf_eth_xmit()
1927 * message with an encapsulated TX Packet CPL message. Otherwise we in cxgb4_vf_eth_xmit()
1928 * just use a TX Packet CPL message. in cxgb4_vf_eth_xmit()
1931 if (ssi->gso_size) { in cxgb4_vf_eth_xmit()
1933 bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0; in cxgb4_vf_eth_xmit()
1935 int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN; in cxgb4_vf_eth_xmit()
1937 wr->op_immdlen = in cxgb4_vf_eth_xmit()
1942 lso->lso_ctrl = in cxgb4_vf_eth_xmit()
1949 LSO_TCPHDR_LEN_V(tcp_hdr(skb)->doff)); in cxgb4_vf_eth_xmit()
1950 lso->ipid_ofst = cpu_to_be16(0); in cxgb4_vf_eth_xmit()
1951 lso->mss = cpu_to_be16(ssi->gso_size); in cxgb4_vf_eth_xmit()
1952 lso->seqno_offset = cpu_to_be32(0); in cxgb4_vf_eth_xmit()
1953 if (is_t4(adapter->params.chip)) in cxgb4_vf_eth_xmit()
1954 lso->len = cpu_to_be32(skb->len); in cxgb4_vf_eth_xmit()
1956 lso->len = cpu_to_be32(LSO_T5_XFER_SIZE_V(skb->len)); in cxgb4_vf_eth_xmit()
1958 /* Set up TX Packet CPL pointer, control word and perform in cxgb4_vf_eth_xmit()
1963 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) in cxgb4_vf_eth_xmit()
1971 txq->tso++; in cxgb4_vf_eth_xmit()
1972 txq->tx_cso += ssi->gso_segs; in cxgb4_vf_eth_xmit()
1977 ? skb->len + sizeof(*cpl) in cxgb4_vf_eth_xmit()
1979 wr->op_immdlen = in cxgb4_vf_eth_xmit()
1983 /* Set up TX Packet CPL pointer, control word and perform in cxgb4_vf_eth_xmit()
1987 if (skb->ip_summed == CHECKSUM_PARTIAL) { in cxgb4_vf_eth_xmit()
1988 cntrl = hwcsum(adapter->params.chip, skb) | in cxgb4_vf_eth_xmit()
1990 txq->tx_cso++; in cxgb4_vf_eth_xmit()
2000 txq->vlan_ins++; in cxgb4_vf_eth_xmit()
2004 /* Fill in the TX Packet CPL message header. */ in cxgb4_vf_eth_xmit()
2005 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | in cxgb4_vf_eth_xmit()
2006 TXPKT_INTF_V(pi->port_id) | in cxgb4_vf_eth_xmit()
2008 cpl->pack = cpu_to_be16(0); in cxgb4_vf_eth_xmit()
2009 cpl->len = cpu_to_be16(skb->len); in cxgb4_vf_eth_xmit()
2010 cpl->ctrl1 = cpu_to_be64(cntrl); in cxgb4_vf_eth_xmit()
2012 /* Fill in the body of the TX Packet CPL message with either in-lined in cxgb4_vf_eth_xmit()
2016 /* In-line the packet's data and free the skb since we don't in cxgb4_vf_eth_xmit()
2019 cxgb4_inline_tx_skb(skb, &txq->q, cpl + 1); in cxgb4_vf_eth_xmit()
2022 /* Write the skb's Scatter/Gather list into the TX Packet CPL in cxgb4_vf_eth_xmit()
2025 * in the Software Descriptor corresponding to the last TX in cxgb4_vf_eth_xmit()
2028 * The retained skb will be freed when the corresponding TX in cxgb4_vf_eth_xmit()
2032 * completion notifications to us and we mostly perform TX in cxgb4_vf_eth_xmit()
2036 * TX packets arriving to run the destructors of completed in cxgb4_vf_eth_xmit()
2038 * Sometimes we do not get such new packets causing TX to in cxgb4_vf_eth_xmit()
2046 * extra memory is reasonable (limited by the number of TX in cxgb4_vf_eth_xmit()
2059 struct sge_txq *tq = &txq->q; in cxgb4_vf_eth_xmit()
2061 /* If the Work Request header was an exact multiple of our TX in cxgb4_vf_eth_xmit()
2063 * pointer lines up exactly with the end of our TX Descriptor in cxgb4_vf_eth_xmit()
2067 if (unlikely((void *)sgl == (void *)tq->stat)) { in cxgb4_vf_eth_xmit()
2068 sgl = (void *)tq->desc; in cxgb4_vf_eth_xmit()
2069 end = (void *)((void *)tq->desc + in cxgb4_vf_eth_xmit()
2070 ((void *)end - (void *)tq->stat)); in cxgb4_vf_eth_xmit()
2073 cxgb4_write_sgl(skb, tq, sgl, end, 0, sgl_sdesc->addr); in cxgb4_vf_eth_xmit()
2075 sgl_sdesc->skb = skb; in cxgb4_vf_eth_xmit()
2078 /* Advance our internal TX Queue state, tell the hardware about in cxgb4_vf_eth_xmit()
2079 * the new TX descriptors and return success. in cxgb4_vf_eth_xmit()
2081 txq_advance(&txq->q, ndesc); in cxgb4_vf_eth_xmit()
2083 cxgb4_ring_tx_db(adapter, &txq->q, ndesc); in cxgb4_vf_eth_xmit()
2087 /* An error of some sort happened. Free the TX skb and tell the in cxgb4_vf_eth_xmit()
2095 * reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
2096 * @q: the SGE control Tx queue
2099 * for Tx queues that send only immediate data (presently just
2104 int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); in reclaim_completed_tx_imm()
2105 int reclaim = hw_cidx - q->cidx; in reclaim_completed_tx_imm()
2108 reclaim += q->size; in reclaim_completed_tx_imm()
2110 q->in_use -= reclaim; in reclaim_completed_tx_imm()
2111 q->cidx = hw_cidx; in reclaim_completed_tx_imm()
2119 val -= max; in eosw_txq_advance_index()
2129 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
2130 while (ndesc--) { in cxgb4_eosw_txq_free_desc()
2131 if (d->skb) { in cxgb4_eosw_txq_free_desc()
2132 if (d->addr[0]) { in cxgb4_eosw_txq_free_desc()
2133 unmap_skb(adap->pdev_dev, d->skb, d->addr); in cxgb4_eosw_txq_free_desc()
2134 memset(d->addr, 0, sizeof(d->addr)); in cxgb4_eosw_txq_free_desc()
2136 dev_consume_skb_any(d->skb); in cxgb4_eosw_txq_free_desc()
2137 d->skb = NULL; in cxgb4_eosw_txq_free_desc()
2139 eosw_txq_advance_index(&eosw_txq->last_cidx, 1, in cxgb4_eosw_txq_free_desc()
2140 eosw_txq->ndesc); in cxgb4_eosw_txq_free_desc()
2141 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
2147 eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc); in eosw_txq_advance()
2148 eosw_txq->inuse += n; in eosw_txq_advance()
2154 if (eosw_txq->inuse == eosw_txq->ndesc) in eosw_txq_enqueue()
2155 return -ENOMEM; in eosw_txq_enqueue()
2157 eosw_txq->desc[eosw_txq->pidx].skb = skb; in eosw_txq_enqueue()
2163 return eosw_txq->desc[eosw_txq->last_pidx].skb; in eosw_txq_peek()
2173 if (skb_shinfo(skb)->gso_size && in ethofld_calc_tx_flits()
2174 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) in ethofld_calc_tx_flits()
2182 if (skb_shinfo(skb)->nr_frags > 0) { in ethofld_calc_tx_flits()
2183 if (skb_headlen(skb) - hdr_len) in ethofld_calc_tx_flits()
2184 nsgl = sgl_len(skb_shinfo(skb)->nr_frags + 1); in ethofld_calc_tx_flits()
2186 nsgl = sgl_len(skb_shinfo(skb)->nr_frags); in ethofld_calc_tx_flits()
2187 } else if (skb->len - hdr_len) { in ethofld_calc_tx_flits()
2204 ver = ip_hdr(skb)->version; in write_eo_wr()
2205 proto = (ver == 6) ? ipv6_hdr(skb)->nexthdr : ip_hdr(skb)->protocol; in write_eo_wr()
2209 if (skb_shinfo(skb)->gso_size && in write_eo_wr()
2210 !(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)) in write_eo_wr()
2214 if (!eosw_txq->ncompl || in write_eo_wr()
2215 (eosw_txq->last_compl + wrlen16) >= in write_eo_wr()
2216 (adap->params.ofldq_wr_cred / 2)) { in write_eo_wr()
2218 eosw_txq->ncompl++; in write_eo_wr()
2219 eosw_txq->last_compl = 0; in write_eo_wr()
2222 wr->op_immdlen = cpu_to_be32(FW_WR_OP_V(FW_ETH_TX_EO_WR) | in write_eo_wr()
2225 wr->equiq_to_len16 = cpu_to_be32(FW_WR_LEN16_V(wrlen16) | in write_eo_wr()
2226 FW_WR_FLOWID_V(eosw_txq->hwtid)); in write_eo_wr()
2227 wr->r3 = 0; in write_eo_wr()
2231 wr->u.tcpseg.type = FW_ETH_TX_EO_TYPE_TCPSEG; in write_eo_wr()
2232 wr->u.tcpseg.ethlen = skb_network_offset(skb); in write_eo_wr()
2233 wr->u.tcpseg.iplen = cpu_to_be16(skb_network_header_len(skb)); in write_eo_wr()
2234 wr->u.tcpseg.tcplen = tcp_hdrlen(skb); in write_eo_wr()
2235 wr->u.tcpseg.tsclk_tsoff = 0; in write_eo_wr()
2236 wr->u.tcpseg.r4 = 0; in write_eo_wr()
2237 wr->u.tcpseg.r5 = 0; in write_eo_wr()
2238 wr->u.tcpseg.plen = cpu_to_be32(skb->len - hdr_len); in write_eo_wr()
2240 if (ssi->gso_size) { in write_eo_wr()
2243 wr->u.tcpseg.mss = cpu_to_be16(ssi->gso_size); in write_eo_wr()
2246 wr->u.tcpseg.mss = cpu_to_be16(0xffff); in write_eo_wr()
2251 eosw_txq->cred -= wrlen16; in write_eo_wr()
2252 eosw_txq->last_compl += wrlen16; in write_eo_wr()
2273 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid]; in ethofld_hard_xmit()
2274 spin_lock(&eohw_txq->lock); in ethofld_hard_xmit()
2275 reclaim_completed_tx_imm(&eohw_txq->q); in ethofld_hard_xmit()
2277 d = &eosw_txq->desc[eosw_txq->last_pidx]; in ethofld_hard_xmit()
2278 skb = d->skb; in ethofld_hard_xmit()
2281 wr = (struct fw_eth_tx_eo_wr *)&eohw_txq->q.desc[eohw_txq->q.pidx]; in ethofld_hard_xmit()
2282 if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE && in ethofld_hard_xmit()
2283 eosw_txq->last_pidx == eosw_txq->flowc_idx)) { in ethofld_hard_xmit()
2284 hdr_len = skb->len; in ethofld_hard_xmit()
2287 if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND) in ethofld_hard_xmit()
2293 hdr_len = eth_get_headlen(dev, skb->data, skb_headlen(skb)); in ethofld_hard_xmit()
2294 data_len = skb->len - hdr_len; in ethofld_hard_xmit()
2301 left = txq_avail(&eohw_txq->q) - ndesc; in ethofld_hard_xmit()
2308 * credits and invoke the Tx path again. in ethofld_hard_xmit()
2310 if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) { in ethofld_hard_xmit()
2311 ret = -ENOMEM; in ethofld_hard_xmit()
2317 eosw_txq->state = next_state; in ethofld_hard_xmit()
2318 eosw_txq->cred -= wrlen16; in ethofld_hard_xmit()
2319 eosw_txq->ncompl++; in ethofld_hard_xmit()
2320 eosw_txq->last_compl = 0; in ethofld_hard_xmit()
2325 cntrl = hwcsum(adap->params.chip, skb); in ethofld_hard_xmit()
2329 cpl->ctrl0 = cpu_to_be32(TXPKT_OPCODE_V(CPL_TX_PKT_XT) | in ethofld_hard_xmit()
2330 TXPKT_INTF_V(pi->tx_chan) | in ethofld_hard_xmit()
2331 TXPKT_PF_V(adap->pf)); in ethofld_hard_xmit()
2332 cpl->pack = 0; in ethofld_hard_xmit()
2333 cpl->len = cpu_to_be16(skb->len); in ethofld_hard_xmit()
2334 cpl->ctrl1 = cpu_to_be64(cntrl); in ethofld_hard_xmit()
2339 sgl = (u64 *)inline_tx_skb_header(skb, &eohw_txq->q, (void *)start, in ethofld_hard_xmit()
2342 ret = cxgb4_map_skb(adap->pdev_dev, skb, d->addr); in ethofld_hard_xmit()
2344 memset(d->addr, 0, sizeof(d->addr)); in ethofld_hard_xmit()
2345 eohw_txq->mapping_err++; in ethofld_hard_xmit()
2351 left = (u8 *)end - (u8 *)eohw_txq->q.stat; in ethofld_hard_xmit()
2352 end = (void *)eohw_txq->q.desc + left; in ethofld_hard_xmit()
2355 if (unlikely((u8 *)sgl >= (u8 *)eohw_txq->q.stat)) { in ethofld_hard_xmit()
2360 left = (u8 *)end - (u8 *)eohw_txq->q.stat; in ethofld_hard_xmit()
2362 end = (void *)eohw_txq->q.desc + left; in ethofld_hard_xmit()
2363 sgl = (void *)eohw_txq->q.desc; in ethofld_hard_xmit()
2366 cxgb4_write_sgl(skb, &eohw_txq->q, (void *)sgl, end, hdr_len, in ethofld_hard_xmit()
2367 d->addr); in ethofld_hard_xmit()
2370 if (skb_shinfo(skb)->gso_size) { in ethofld_hard_xmit()
2371 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) in ethofld_hard_xmit()
2372 eohw_txq->uso++; in ethofld_hard_xmit()
2374 eohw_txq->tso++; in ethofld_hard_xmit()
2375 eohw_txq->tx_cso += skb_shinfo(skb)->gso_segs; in ethofld_hard_xmit()
2376 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { in ethofld_hard_xmit()
2377 eohw_txq->tx_cso++; in ethofld_hard_xmit()
2381 eohw_txq->vlan_ins++; in ethofld_hard_xmit()
2383 txq_advance(&eohw_txq->q, ndesc); in ethofld_hard_xmit()
2384 cxgb4_ring_tx_db(adap, &eohw_txq->q, ndesc); in ethofld_hard_xmit()
2385 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc); in ethofld_hard_xmit()
2388 spin_unlock(&eohw_txq->lock); in ethofld_hard_xmit()
2397 switch (eosw_txq->state) { in ethofld_xmit()
2401 pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in ethofld_xmit()
2403 pktcount += eosw_txq->ndesc; in ethofld_xmit()
2412 while (pktcount--) { in ethofld_xmit()
2415 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, in ethofld_xmit()
2416 eosw_txq->ndesc); in ethofld_xmit()
2440 tc_port_mqprio = &adap->tc_mqprio->port_mqprio[pi->port_id]; in cxgb4_ethofld_xmit()
2441 qid = skb_get_queue_mapping(skb) - pi->nqsets; in cxgb4_ethofld_xmit()
2442 eosw_txq = &tc_port_mqprio->eosw_txq[qid]; in cxgb4_ethofld_xmit()
2443 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2444 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_xmit()
2459 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2463 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2474 if (unlikely(pi->eth_flags & PRIV_FLAG_PORT_TX_VM)) in t4_start_xmit()
2477 if (unlikely(qid >= pi->nqsets)) in t4_start_xmit()
2484 spin_lock(&adap->ptp_lock); in t4_start_xmit()
2486 spin_unlock(&adap->ptp_lock); in t4_start_xmit()
2495 int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in eosw_txq_flush_pending_skbs()
2496 int pidx = eosw_txq->pidx; in eosw_txq_flush_pending_skbs()
2503 pktcount += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2505 while (pktcount--) { in eosw_txq_flush_pending_skbs()
2506 pidx--; in eosw_txq_flush_pending_skbs()
2508 pidx += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2510 skb = eosw_txq->desc[pidx].skb; in eosw_txq_flush_pending_skbs()
2513 eosw_txq->desc[pidx].skb = NULL; in eosw_txq_flush_pending_skbs()
2514 eosw_txq->inuse--; in eosw_txq_flush_pending_skbs()
2518 eosw_txq->pidx = eosw_txq->last_pidx + 1; in eosw_txq_flush_pending_skbs()
2522 * cxgb4_ethofld_send_flowc - Send ETHOFLD flowc request to bind eotid to tc.
2547 entry = cxgb4_lookup_eotid(&adap->tids, eotid); in cxgb4_ethofld_send_flowc()
2549 return -ENOMEM; in cxgb4_ethofld_send_flowc()
2551 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_send_flowc()
2553 return -ENOMEM; in cxgb4_ethofld_send_flowc()
2557 return -ENOMEM; in cxgb4_ethofld_send_flowc()
2559 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2561 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED) in cxgb4_ethofld_send_flowc()
2566 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_send_flowc()
2575 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid]; in cxgb4_ethofld_send_flowc()
2576 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(len16) | in cxgb4_ethofld_send_flowc()
2577 FW_WR_FLOWID_V(eosw_txq->hwtid)); in cxgb4_ethofld_send_flowc()
2578 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | in cxgb4_ethofld_send_flowc()
2581 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; in cxgb4_ethofld_send_flowc()
2582 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V(adap->pf)); in cxgb4_ethofld_send_flowc()
2583 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; in cxgb4_ethofld_send_flowc()
2584 flowc->mnemval[1].val = cpu_to_be32(pi->tx_chan); in cxgb4_ethofld_send_flowc()
2585 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; in cxgb4_ethofld_send_flowc()
2586 flowc->mnemval[2].val = cpu_to_be32(pi->tx_chan); in cxgb4_ethofld_send_flowc()
2587 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; in cxgb4_ethofld_send_flowc()
2588 flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id); in cxgb4_ethofld_send_flowc()
2589 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; in cxgb4_ethofld_send_flowc()
2590 flowc->mnemval[4].val = cpu_to_be32(tc); in cxgb4_ethofld_send_flowc()
2591 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_EOSTATE; in cxgb4_ethofld_send_flowc()
2592 flowc->mnemval[5].val = cpu_to_be32(tc == FW_SCHED_CLS_NONE ? in cxgb4_ethofld_send_flowc()
2597 * termination FLOWC. in cxgb4_ethofld_send_flowc()
2608 eosw_txq->state = next_state; in cxgb4_ethofld_send_flowc()
2609 eosw_txq->flowc_idx = eosw_txq->pidx; in cxgb4_ethofld_send_flowc()
2614 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2619 * is_imm - check whether a packet can be sent as immediate data
2626 return skb->len <= MAX_CTRL_WR_LEN; in is_imm()
2630 * ctrlq_check_stop - check if a control queue is full and should stop
2641 reclaim_completed_tx_imm(&q->q); in ctrlq_check_stop()
2642 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { in ctrlq_check_stop()
2643 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); in ctrlq_check_stop()
2644 q->q.stops++; in ctrlq_check_stop()
2645 q->full = 1; in ctrlq_check_stop()
2654 struct adapter *adap = pi->adapter; in cxgb4_selftest_lb_pkt()
2669 lb = &pi->ethtool_lb; in cxgb4_selftest_lb_pkt()
2670 lb->loopback = 1; in cxgb4_selftest_lb_pkt()
2672 q = &adap->sge.ethtxq[pi->first_qset]; in cxgb4_selftest_lb_pkt()
2673 __netif_tx_lock(q->txq, smp_processor_id()); in cxgb4_selftest_lb_pkt()
2675 reclaim_completed_tx(adap, &q->q, -1, true); in cxgb4_selftest_lb_pkt()
2676 credits = txq_avail(&q->q) - ndesc; in cxgb4_selftest_lb_pkt()
2678 __netif_tx_unlock(q->txq); in cxgb4_selftest_lb_pkt()
2679 return -ENOMEM; in cxgb4_selftest_lb_pkt()
2682 wr = (void *)&q->q.desc[q->q.pidx]; in cxgb4_selftest_lb_pkt()
2685 wr->op_immdlen = htonl(FW_WR_OP_V(FW_ETH_TX_PKT_WR) | in cxgb4_selftest_lb_pkt()
2688 wr->equiq_to_len16 = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(flits, 2))); in cxgb4_selftest_lb_pkt()
2689 wr->r3 = cpu_to_be64(0); in cxgb4_selftest_lb_pkt()
2694 ctrl0 = TXPKT_OPCODE_V(CPL_TX_PKT_XT) | TXPKT_PF_V(adap->pf) | in cxgb4_selftest_lb_pkt()
2695 TXPKT_INTF_V(pi->tx_chan + 4); in cxgb4_selftest_lb_pkt()
2697 cpl->ctrl0 = htonl(ctrl0); in cxgb4_selftest_lb_pkt()
2698 cpl->pack = htons(0); in cxgb4_selftest_lb_pkt()
2699 cpl->len = htons(pkt_len); in cxgb4_selftest_lb_pkt()
2700 cpl->ctrl1 = cpu_to_be64(TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F); in cxgb4_selftest_lb_pkt()
2704 ether_addr_copy(&sgl[i], netdev->dev_addr); in cxgb4_selftest_lb_pkt()
2710 init_completion(&lb->completion); in cxgb4_selftest_lb_pkt()
2711 txq_advance(&q->q, ndesc); in cxgb4_selftest_lb_pkt()
2712 cxgb4_ring_tx_db(adap, &q->q, ndesc); in cxgb4_selftest_lb_pkt()
2713 __netif_tx_unlock(q->txq); in cxgb4_selftest_lb_pkt()
2716 ret = wait_for_completion_timeout(&lb->completion, 10 * HZ); in cxgb4_selftest_lb_pkt()
2718 ret = -ETIMEDOUT; in cxgb4_selftest_lb_pkt()
2720 ret = lb->result; in cxgb4_selftest_lb_pkt()
2722 lb->loopback = 0; in cxgb4_selftest_lb_pkt()
2728 * ctrl_xmit - send a packet through an SGE control Tx queue
2732 * Send a packet through an SGE control Tx queue. Packets sent through
2746 ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc)); in ctrl_xmit()
2747 spin_lock(&q->sendq.lock); in ctrl_xmit()
2749 if (unlikely(q->full)) { in ctrl_xmit()
2750 skb->priority = ndesc; /* save for restart */ in ctrl_xmit()
2751 __skb_queue_tail(&q->sendq, skb); in ctrl_xmit()
2752 spin_unlock(&q->sendq.lock); in ctrl_xmit()
2756 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; in ctrl_xmit()
2757 cxgb4_inline_tx_skb(skb, &q->q, wr); in ctrl_xmit()
2759 txq_advance(&q->q, ndesc); in ctrl_xmit()
2760 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) in ctrl_xmit()
2763 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); in ctrl_xmit()
2764 spin_unlock(&q->sendq.lock); in ctrl_xmit()
2771 * restart_ctrlq - restart a suspended control queue
2774 * Resumes transmission on a suspended Tx control queue.
2782 spin_lock(&q->sendq.lock); in restart_ctrlq()
2783 reclaim_completed_tx_imm(&q->q); in restart_ctrlq()
2784 BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES); /* q should be empty */ in restart_ctrlq()
2786 while ((skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
2788 unsigned int ndesc = skb->priority; /* previously saved */ in restart_ctrlq()
2792 * wait times. q->full is still set so new skbs will be queued. in restart_ctrlq()
2794 wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx]; in restart_ctrlq()
2795 txq_advance(&q->q, ndesc); in restart_ctrlq()
2796 spin_unlock(&q->sendq.lock); in restart_ctrlq()
2798 cxgb4_inline_tx_skb(skb, &q->q, wr); in restart_ctrlq()
2801 if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) { in restart_ctrlq()
2802 unsigned long old = q->q.stops; in restart_ctrlq()
2805 if (q->q.stops != old) { /* suspended anew */ in restart_ctrlq()
2806 spin_lock(&q->sendq.lock); in restart_ctrlq()
2811 cxgb4_ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
2814 spin_lock(&q->sendq.lock); in restart_ctrlq()
2816 q->full = 0; in restart_ctrlq()
2819 cxgb4_ring_tx_db(q->adap, &q->q, written); in restart_ctrlq()
2820 spin_unlock(&q->sendq.lock); in restart_ctrlq()
2824 * t4_mgmt_tx - send a management message
2835 ret = ctrl_xmit(&adap->sge.ctrlq[0], skb); in t4_mgmt_tx()
2841 * is_ofld_imm - check whether a packet can be sent as immediate data
2849 struct work_request_hdr *req = (struct work_request_hdr *)skb->data; in is_ofld_imm()
2850 unsigned long opcode = FW_WR_OP_G(ntohl(req->wr_hi)); in is_ofld_imm()
2853 return skb->len <= SGE_MAX_WR_LEN; in is_ofld_imm()
2855 return skb->len <= MAX_IMM_TX_PKT_LEN; in is_ofld_imm()
2859 * calc_tx_flits_ofld - calculate # of flits for an offload packet
2871 return DIV_ROUND_UP(skb->len, 8); in calc_tx_flits_ofld()
2874 cnt = skb_shinfo(skb)->nr_frags; in calc_tx_flits_ofld()
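
Only two lines of calc_tx_flits_ofld() are matched above; a hedged sketch of how the immediate and scatter/gather paths fit together (the skb_transport_header() test and the exact header accounting are assumptions, not a verbatim copy of the driver):

	/* Sketch: immediate offload WRs are just skb->len rounded up to
	 * flits; otherwise the headers are sent inline and each fragment
	 * (plus the linear payload, if any) becomes one SGL entry.
	 */
	static unsigned int calc_tx_flits_ofld_sketch(const struct sk_buff *skb)
	{
		unsigned int flits, cnt;

		if (is_ofld_imm(skb))
			return DIV_ROUND_UP(skb->len, 8);

		flits = skb_transport_offset(skb) / 8U;	/* inlined headers */
		cnt = skb_shinfo(skb)->nr_frags;
		if (skb_tail_pointer(skb) != skb_transport_header(skb))
			cnt++;		/* linear area carries payload too */
		return flits + sgl_len(cnt);
	}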
2881 * txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
2884 * Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
2890 q->mapping_err++; in txq_stop_maperr()
2891 q->q.stops++; in txq_stop_maperr()
2892 set_bit(q->q.cntxt_id - q->adap->sge.egr_start, in txq_stop_maperr()
2893 q->adap->sge.txq_maperr); in txq_stop_maperr()
2897 * ofldtxq_stop - stop an offload Tx queue that has become full
2901 * Stops an offload Tx queue that has become full and modifies the packet
2906 wr->lo |= htonl(FW_WR_EQUEQ_F | FW_WR_EQUIQ_F); in ofldtxq_stop()
2907 q->q.stops++; in ofldtxq_stop()
2908 q->full = 1; in ofldtxq_stop()
2912 * service_ofldq - service/restart a suspended offload queue
2915 * Services an offload Tx queue by moving packets from its Pending Send
2916 * Queue to the Hardware TX ring. The function starts and ends with the
2918 * head of the Send Queue onto the Hardware TX Ring. Dropping the lock
2930 __must_hold(&q->sendq.lock) in service_ofldq()
2946 if (q->service_ofldq_running) in service_ofldq()
2948 q->service_ofldq_running = true; in service_ofldq()
2950 while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) { in service_ofldq()
2954 * this one. We don't need to lock to guard the TX Ring in service_ofldq()
2958 spin_unlock(&q->sendq.lock); in service_ofldq()
2960 cxgb4_reclaim_completed_tx(q->adap, &q->q, false); in service_ofldq()
2962 flits = skb->priority; /* previously saved */ in service_ofldq()
2964 credits = txq_avail(&q->q) - ndesc; in service_ofldq()
2967 ofldtxq_stop(q, (struct fw_wr_hdr *)skb->data); in service_ofldq()
2969 pos = (u64 *)&q->q.desc[q->q.pidx]; in service_ofldq()
2971 cxgb4_inline_tx_skb(skb, &q->q, pos); in service_ofldq()
2972 else if (cxgb4_map_skb(q->adap->pdev_dev, skb, in service_ofldq()
2973 (dma_addr_t *)skb->head)) { in service_ofldq()
2975 spin_lock(&q->sendq.lock); in service_ofldq()
2981 * So we need to deal with wrap-around here. in service_ofldq()
2985 txq = &q->q; in service_ofldq()
2986 pos = (void *)inline_tx_skb_header(skb, &q->q, in service_ofldq()
2990 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
2991 end = (void *)txq->desc + left; in service_ofldq()
2998 if (pos == (u64 *)txq->stat) { in service_ofldq()
2999 left = (u8 *)end - (u8 *)txq->stat; in service_ofldq()
3000 end = (void *)txq->desc + left; in service_ofldq()
3001 pos = (void *)txq->desc; in service_ofldq()
3004 cxgb4_write_sgl(skb, &q->q, (void *)pos, in service_ofldq()
3006 (dma_addr_t *)skb->head); in service_ofldq()
3008 skb->dev = q->adap->port[0]; in service_ofldq()
3009 skb->destructor = deferred_unmap_destructor; in service_ofldq()
3011 last_desc = q->q.pidx + ndesc - 1; in service_ofldq()
3012 if (last_desc >= q->q.size) in service_ofldq()
3013 last_desc -= q->q.size; in service_ofldq()
3014 q->q.sdesc[last_desc].skb = skb; in service_ofldq()
3017 txq_advance(&q->q, ndesc); in service_ofldq()
3020 cxgb4_ring_tx_db(q->adap, &q->q, written); in service_ofldq()
3025 * skb we've just successfully transferred to the TX Ring and in service_ofldq()
3029 spin_lock(&q->sendq.lock); in service_ofldq()
3030 __skb_unlink(skb, &q->sendq); in service_ofldq()
3035 cxgb4_ring_tx_db(q->adap, &q->q, written); in service_ofldq()
3040 q->service_ofldq_running = false; in service_ofldq()
3044 * ofld_xmit - send a packet through an offload queue
3045 * @q: the Tx offload queue
3052 skb->priority = calc_tx_flits_ofld(skb); /* save for restart */ in ofld_xmit()
3053 spin_lock(&q->sendq.lock); in ofld_xmit()
3061 * or filling the Hardware TX Ring. in ofld_xmit()
3063 __skb_queue_tail(&q->sendq, skb); in ofld_xmit()
3064 if (q->sendq.qlen == 1) in ofld_xmit()
3067 spin_unlock(&q->sendq.lock); in ofld_xmit()
3072 * restart_ofldq - restart a suspended offload queue
3075 * Resumes transmission on a suspended Tx offload queue.
3081 spin_lock(&q->sendq.lock); in restart_ofldq()
3082 q->full = 0; /* the queue actually is completely empty now */ in restart_ofldq()
3084 spin_unlock(&q->sendq.lock); in restart_ofldq()
3088 * skb_txq - return the Tx queue an offload packet should use
3091 * Returns the Tx queue an offload packet should use as indicated by bits
3092 * 1-15 in the packet's queue_mapping.
3096 return skb->queue_mapping >> 1; in skb_txq()
3100 * is_ctrl_pkt - return whether an offload packet is a control packet
3104 * Tx queue as indicated by bit 0 in the packet's queue_mapping.
3108 return skb->queue_mapping & 1; in is_ctrl_pkt()
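
Given the decoding in skb_txq() and is_ctrl_pkt() above (bit 0 selects control vs. data, bits 1-15 select the queue), the matching encode step on the send side would look roughly like this sketch (the helper name here is hypothetical; the driver keeps its own equivalent in a header):

	/* Sketch: pack the control/data flag into bit 0 and the queue index
	 * into bits 1-15 of skb->queue_mapping so the helpers above can
	 * recover them in uld_send()/t4_ofld_send().
	 */
	static inline void encode_ofld_txq_sketch(struct sk_buff *skb,
						  bool ctrl, u16 qid)
	{
		skb_set_queue_mapping(skb, (qid << 1) | (ctrl ? 1 : 0));
	}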
3120 if (adap->tids.nsftids) in uld_send()
3122 return ctrl_xmit(&adap->sge.ctrlq[idx], skb); in uld_send()
3125 txq_info = adap->sge.uld_txq_info[tx_uld_type]; in uld_send()
3132 txq = &txq_info->uldtxq[idx]; in uld_send()
3137 * t4_ofld_send - send an offload packet
3142 * appropriate Tx queue as follows: bit 0 indicates whether the packet
3143 * should be sent as regular or control, bits 1-15 select the queue.
3156 * cxgb4_ofld_send - send an offload packet
3173 int left = (void *)q->stat - pos; in inline_tx_header()
3181 memcpy(q->desc, src + left, length - left); in inline_tx_header()
3182 pos = (void *)q->desc + (length - left); in inline_tx_header()
3184 /* 0-pad to multiple of 16 */ in inline_tx_header()
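/* Editor's sketch (illustrative, not part of the driver): how an immediate
 * work request is copied into a descriptor ring whose descriptors are
 * followed by a status page, as inline_tx_header() does above.  "stat"
 * points just past the last descriptor; a copy that would run past it is
 * split, the remainder wraps to the head of the ring, and the write
 * position is zero-padded to the next 16-byte boundary.  The corner case
 * where the copy ends exactly on "stat" is ignored here for brevity, and
 * the toy_* types are stand-ins.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct toy_txring {
	uint8_t *desc;			/* start of descriptor memory */
	uint8_t *stat;			/* just past the last descriptor */
};

static void *toy_inline_copy(const struct toy_txring *q, void *pos,
			     const void *src, unsigned int len)
{
	ptrdiff_t left = q->stat - (uint8_t *)pos;
	uint8_t *p = pos;

	if ((ptrdiff_t)len <= left) {
		memcpy(p, src, len);
		p += len;
	} else {
		/* split copy: tail of the ring, then wrap to the head */
		memcpy(p, src, left);
		memcpy(q->desc, (const uint8_t *)src + left, len - left);
		p = q->desc + (len - left);
	}
	if (len & 0xf) {		/* 0-pad to a multiple of 16 */
		memset(p, 0, 16 - (len & 0xf));
		p += 16 - (len & 0xf);
	}
	return p;			/* next free write position */
}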
3194 * ofld_xmit_direct - copy a WR into offload queue
3195 * @q: the Tx offload queue
3208 /* Use the lower limit as the cut-off */ in ofld_xmit_direct()
3218 if (!spin_trylock(&q->sendq.lock)) in ofld_xmit_direct()
3221 if (q->full || !skb_queue_empty(&q->sendq) || in ofld_xmit_direct()
3222 q->service_ofldq_running) { in ofld_xmit_direct()
3223 spin_unlock(&q->sendq.lock); in ofld_xmit_direct()
3227 credits = txq_avail(&q->q) - ndesc; in ofld_xmit_direct()
3228 pos = (u64 *)&q->q.desc[q->q.pidx]; in ofld_xmit_direct()
3230 /* ofldtxq_stop modifies WR header in-situ */ in ofld_xmit_direct()
3231 inline_tx_header(src, &q->q, pos, len); in ofld_xmit_direct()
3234 txq_advance(&q->q, ndesc); in ofld_xmit_direct()
3235 cxgb4_ring_tx_db(q->adap, &q->q, ndesc); in ofld_xmit_direct()
3237 spin_unlock(&q->sendq.lock); in ofld_xmit_direct()
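/* Editor's sketch (illustrative, not part of the driver): the opportunistic
 * direct-send pattern used by ofld_xmit_direct() above.  Take the queue
 * lock with a trylock; if the queue is stopped, already has a backlog, is
 * currently being serviced, or lacks descriptor credits, back out so the
 * caller can fall through to the normal queued path.  The struct and the
 * pthread spinlock are stand-ins (the lock must have been set up with
 * pthread_spin_init() elsewhere).
 */
#include <pthread.h>
#include <stdbool.h>

struct toy_ofld_txq {
	pthread_spinlock_t lock;
	bool full, servicing;
	unsigned int backlog;		/* packets waiting in the send queue */
	unsigned int credits;		/* free Tx descriptors */
};

static int toy_xmit_direct(struct toy_ofld_txq *q, unsigned int ndesc)
{
	int ret = -1;			/* "busy": caller should queue instead */

	if (pthread_spin_trylock(&q->lock))
		return ret;

	if (!q->full && !q->backlog && !q->servicing && q->credits >= ndesc) {
		q->credits -= ndesc;	/* write the WR and ring the doorbell here */
		ret = 0;
	}

	pthread_spin_unlock(&q->lock);
	return ret;
}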
3252 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in cxgb4_immdata_send()
3258 txq = &txq_info->uldtxq[idx]; in cxgb4_immdata_send()
3267 * t4_crypto_send - send crypto packet
3272 * appropriate Tx queue as follows: bit 0 indicates whether the packet
3273 * should be sent as regular or control, bits 1-15 select the queue.
3286 * cxgb4_crypto_send - send crypto packet
3305 __skb_fill_page_desc(skb, 0, gl->frags[0].page, in copy_frags()
3306 gl->frags[0].offset + offset, in copy_frags()
3307 gl->frags[0].size - offset); in copy_frags()
3308 skb_shinfo(skb)->nr_frags = gl->nfrags; in copy_frags()
3309 for (i = 1; i < gl->nfrags; i++) in copy_frags()
3310 __skb_fill_page_desc(skb, i, gl->frags[i].page, in copy_frags()
3311 gl->frags[i].offset, in copy_frags()
3312 gl->frags[i].size); in copy_frags()
3315 get_page(gl->frags[gl->nfrags - 1].page); in copy_frags()
3319 * cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
3337 if (gl->tot_len <= RX_COPY_THRES) { in cxgb4_pktgl_to_skb()
3338 skb = dev_alloc_skb(gl->tot_len); in cxgb4_pktgl_to_skb()
3341 __skb_put(skb, gl->tot_len); in cxgb4_pktgl_to_skb()
3342 skb_copy_to_linear_data(skb, gl->va, gl->tot_len); in cxgb4_pktgl_to_skb()
3348 skb_copy_to_linear_data(skb, gl->va, pull_len); in cxgb4_pktgl_to_skb()
3351 skb->len = gl->tot_len; in cxgb4_pktgl_to_skb()
3352 skb->data_len = skb->len - pull_len; in cxgb4_pktgl_to_skb()
3353 skb->truesize += skb->data_len; in cxgb4_pktgl_to_skb()
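/* Editor's sketch (illustrative, not part of the driver): the
 * copy-vs-attach decision made by cxgb4_pktgl_to_skb() above.  Small
 * packets are copied whole into the skb's linear area; larger ones get
 * only their headers copied ("pull_len") while the rest stays attached as
 * page fragments.  The two thresholds below are illustrative stand-ins
 * for the driver's copy threshold and pull length.
 */
#define TOY_RX_COPY_THRES	256
#define TOY_RX_PULL_LEN		128

static void toy_rx_split(unsigned int tot_len,
			 unsigned int *linear_len, unsigned int *frag_len)
{
	if (tot_len <= TOY_RX_COPY_THRES) {
		*linear_len = tot_len;			/* copy everything */
		*frag_len = 0;
	} else {
		*linear_len = TOY_RX_PULL_LEN;		/* copy headers only */
		*frag_len = tot_len - TOY_RX_PULL_LEN;
	}
}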
3360 * t4_pktgl_free - free a packet gather list
3371 for (p = gl->frags, n = gl->nfrags - 1; n--; p++) in t4_pktgl_free()
3372 put_page(p->page); in t4_pktgl_free()
3390 if (is_t4(adap->params.chip)) in handle_trace_pkt()
3396 skb->protocol = htons(0xffff); in handle_trace_pkt()
3397 skb->dev = adap->port[0]; in handle_trace_pkt()
3403 * cxgb4_sgetim_to_hwtstamp - convert sge time stamp to hw time stamp
3408 * Every ingress queue entry has a 60-bit timestamp; convert that timestamp
3416 u64 tmp = (sgetstamp * 1000 * 1000 + adap->params.vpd.cclk / 2); in cxgb4_sgetim_to_hwtstamp()
3418 ns = div_u64(tmp, adap->params.vpd.cclk); in cxgb4_sgetim_to_hwtstamp()
3421 hwtstamps->hwtstamp = ns_to_ktime(ns); in cxgb4_sgetim_to_hwtstamp()
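/* Editor's sketch (illustrative, not part of the driver): the conversion
 * performed just above.  The SGE timestamp counts core-clock ticks and the
 * core clock (cclk) is expressed in kHz, as the arithmetic above implies,
 * so nanoseconds are ticks * 10^6 / cclk; adding cclk / 2 before the
 * division rounds to the nearest nanosecond.
 */
#include <stdint.h>

static uint64_t toy_ticks_to_ns(uint64_t ticks, uint32_t cclk_khz)
{
	return (ticks * 1000000ULL + cclk_khz / 2) / cclk_khz;
}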
3427 struct adapter *adapter = rxq->rspq.adap; in do_gro()
3428 struct sge *s = &adapter->sge; in do_gro()
3433 skb = napi_get_frags(&rxq->rspq.napi); in do_gro()
3436 rxq->stats.rx_drops++; in do_gro()
3440 copy_frags(skb, gl, s->pktshift); in do_gro()
3442 skb->csum_level = 1; in do_gro()
3443 skb->len = gl->tot_len - s->pktshift; in do_gro()
3444 skb->data_len = skb->len; in do_gro()
3445 skb->truesize += skb->data_len; in do_gro()
3446 skb->ip_summed = CHECKSUM_UNNECESSARY; in do_gro()
3447 skb_record_rx_queue(skb, rxq->rspq.idx); in do_gro()
3448 pi = netdev_priv(skb->dev); in do_gro()
3449 if (pi->rxtstamp) in do_gro()
3451 gl->sgetstamp); in do_gro()
3452 if (rxq->rspq.netdev->features & NETIF_F_RXHASH) in do_gro()
3453 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, in do_gro()
3456 if (unlikely(pkt->vlan_ex)) { in do_gro()
3457 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); in do_gro()
3458 rxq->stats.vlan_ex++; in do_gro()
3460 ret = napi_gro_frags(&rxq->rspq.napi); in do_gro()
3462 rxq->stats.lro_pkts++; in do_gro()
3464 rxq->stats.lro_merged++; in do_gro()
3465 rxq->stats.pkts++; in do_gro()
3466 rxq->stats.rx_cso++; in do_gro()
3476 * t4_systim_to_hwstamp - read hardware time stamp
3491 cpl = (struct cpl_rx_mps_pkt *)skb->data; in t4_systim_to_hwstamp()
3492 if (!(CPL_RX_MPS_PKT_TYPE_G(ntohl(cpl->op_to_r1_hi)) & in t4_systim_to_hwstamp()
3496 data = skb->data + sizeof(*cpl); in t4_systim_to_hwstamp()
3498 offset = ETH_HLEN + IPV4_HLEN(skb->data) + UDP_HLEN; in t4_systim_to_hwstamp()
3499 if (skb->len < offset + OFF_PTP_SEQUENCE_ID + sizeof(short)) in t4_systim_to_hwstamp()
3504 hwtstamps->hwtstamp = ns_to_ktime(get_unaligned_be64(data)); in t4_systim_to_hwstamp()
3510 * t4_rx_hststamp - Recv PTP Event Message
3524 !is_t4(adapter->params.chip))) { in t4_rx_hststamp()
3528 rxq->stats.rx_drops++; in t4_rx_hststamp()
3536 * t4_tx_hststamp - Loopback PTP Transmit Event Message
3541 * Read hardware timestamp for the loopback PTP Tx event message
3548 if (!is_t4(adapter->params.chip) && adapter->ptp_tx_skb) { in t4_tx_hststamp()
3557 * t4_tx_completion_handler - handle CPL_SGE_EGR_UPDATE messages
3558 * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
3563 * we configure the Ethernet TX Queues to send CIDX Updates to the
3569 * of TX Data outstanding before receiving DMA Completions.
3575 u8 opcode = ((const struct rss_header *)rsp)->opcode; in t4_tx_completion_handler()
3576 struct port_info *pi = netdev_priv(rspq->netdev); in t4_tx_completion_handler()
3577 struct adapter *adapter = rspq->adap; in t4_tx_completion_handler()
3578 struct sge *s = &adapter->sge; in t4_tx_completion_handler()
3587 ((const struct cpl_fw4_msg *)rsp)->type == in t4_tx_completion_handler()
3590 opcode = ((const struct rss_header *)rsp)->opcode; in t4_tx_completion_handler()
3600 txq = &s->ethtxq[pi->first_qset + rspq->idx]; in t4_tx_completion_handler()
3601 t4_sge_eth_txq_egress_update(adapter, txq, -1); in t4_tx_completion_handler()
3606 struct adapter *adap = pi->adapter; in cxgb4_validate_lb_pkt()
3608 struct sge *s = &adap->sge; in cxgb4_validate_lb_pkt()
3613 netdev = adap->port[pi->port_id]; in cxgb4_validate_lb_pkt()
3614 lb = &pi->ethtool_lb; in cxgb4_validate_lb_pkt()
3615 data = si->va + s->pktshift; in cxgb4_validate_lb_pkt()
3618 if (!ether_addr_equal(data + i, netdev->dev_addr)) in cxgb4_validate_lb_pkt()
3619 return -1; in cxgb4_validate_lb_pkt()
3623 lb->result = -EIO; in cxgb4_validate_lb_pkt()
3625 complete(&lb->completion); in cxgb4_validate_lb_pkt()
3630 * t4_ethrx_handler - process an ingress ethernet packet
3644 struct adapter *adapter = q->adap; in t4_ethrx_handler()
3645 struct sge *s = &q->adap->sge; in t4_ethrx_handler()
3646 int cpl_trace_pkt = is_t4(q->adap->params.chip) ? in t4_ethrx_handler()
3652 pi = netdev_priv(q->netdev); in t4_ethrx_handler()
3653 /* If we're looking at TX Queue CIDX Update, handle that separately in t4_ethrx_handler()
3663 return handle_trace_pkt(q->adap, si); in t4_ethrx_handler()
3667 if (q->adap->params.tp.rx_pkt_encap) { in t4_ethrx_handler()
3668 err_vec = T6_COMPR_RXERR_VEC_G(be16_to_cpu(pkt->err_vec)); in t4_ethrx_handler()
3669 tnl_hdr_len = T6_RX_TNLHDR_LEN_G(ntohs(pkt->err_vec)); in t4_ethrx_handler()
3671 err_vec = be16_to_cpu(pkt->err_vec); in t4_ethrx_handler()
3674 csum_ok = pkt->csum_calc && !err_vec && in t4_ethrx_handler()
3675 (q->netdev->features & NETIF_F_RXCSUM); in t4_ethrx_handler()
3678 rxq->stats.bad_rx_pkts++; in t4_ethrx_handler()
3680 if (unlikely(pi->ethtool_lb.loopback && pkt->iff >= NCHAN)) { in t4_ethrx_handler()
3686 if (((pkt->l2info & htonl(RXF_TCP_F)) || in t4_ethrx_handler()
3688 (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) { in t4_ethrx_handler()
3696 rxq->stats.rx_drops++; in t4_ethrx_handler()
3701 if (unlikely(pi->ptp_enable)) { in t4_ethrx_handler()
3707 __skb_pull(skb, s->pktshift); /* remove ethernet header pad */ in t4_ethrx_handler()
3709 /* Handle the PTP Event Tx Loopback packet */ in t4_ethrx_handler()
3710 if (unlikely(pi->ptp_enable && !ret && in t4_ethrx_handler()
3711 (pkt->l2info & htonl(RXF_UDP_F)) && in t4_ethrx_handler()
3713 if (!t4_tx_hststamp(adapter, skb, q->netdev)) in t4_ethrx_handler()
3717 skb->protocol = eth_type_trans(skb, q->netdev); in t4_ethrx_handler()
3718 skb_record_rx_queue(skb, q->idx); in t4_ethrx_handler()
3719 if (skb->dev->features & NETIF_F_RXHASH) in t4_ethrx_handler()
3720 skb_set_hash(skb, (__force u32)pkt->rsshdr.hash_val, in t4_ethrx_handler()
3723 rxq->stats.pkts++; in t4_ethrx_handler()
3725 if (pi->rxtstamp) in t4_ethrx_handler()
3726 cxgb4_sgetim_to_hwtstamp(q->adap, skb_hwtstamps(skb), in t4_ethrx_handler()
3727 si->sgetstamp); in t4_ethrx_handler()
3728 if (csum_ok && (pkt->l2info & htonl(RXF_UDP_F | RXF_TCP_F))) { in t4_ethrx_handler()
3729 if (!pkt->ip_frag) { in t4_ethrx_handler()
3730 skb->ip_summed = CHECKSUM_UNNECESSARY; in t4_ethrx_handler()
3731 rxq->stats.rx_cso++; in t4_ethrx_handler()
3732 } else if (pkt->l2info & htonl(RXF_IP_F)) { in t4_ethrx_handler()
3733 __sum16 c = (__force __sum16)pkt->csum; in t4_ethrx_handler()
3734 skb->csum = csum_unfold(c); in t4_ethrx_handler()
3737 skb->ip_summed = CHECKSUM_UNNECESSARY; in t4_ethrx_handler()
3738 skb->csum_level = 1; in t4_ethrx_handler()
3740 skb->ip_summed = CHECKSUM_COMPLETE; in t4_ethrx_handler()
3742 rxq->stats.rx_cso++; in t4_ethrx_handler()
3750 if (!(pkt->l2info & cpu_to_be32(CPL_RX_PKT_FLAGS))) { in t4_ethrx_handler()
3751 if ((pkt->l2info & cpu_to_be32(RXF_FCOE_F)) && in t4_ethrx_handler()
3752 (pi->fcoe.flags & CXGB_FCOE_ENABLED)) { in t4_ethrx_handler()
3753 if (q->adap->params.tp.rx_pkt_encap) in t4_ethrx_handler()
3759 skb->ip_summed = CHECKSUM_UNNECESSARY; in t4_ethrx_handler()
3767 if (unlikely(pkt->vlan_ex)) { in t4_ethrx_handler()
3768 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(pkt->vlan)); in t4_ethrx_handler()
3769 rxq->stats.vlan_ex++; in t4_ethrx_handler()
3771 skb_mark_napi_id(skb, &q->napi); in t4_ethrx_handler()
3777 * restore_rx_bufs - put back a packet's Rx buffers
3796 while (frags--) { in restore_rx_bufs()
3797 if (q->cidx == 0) in restore_rx_bufs()
3798 q->cidx = q->size - 1; in restore_rx_bufs()
3800 q->cidx--; in restore_rx_bufs()
3801 d = &q->sdesc[q->cidx]; in restore_rx_bufs()
3802 d->page = si->frags[frags].page; in restore_rx_bufs()
3803 d->dma_addr |= RX_UNMAPPED_BUF; in restore_rx_bufs()
3804 q->avail++; in restore_rx_bufs()
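/* Editor's sketch (illustrative, not part of the driver): the backward
 * walk used by restore_rx_bufs() above.  Returning buffers to the free
 * list means stepping the consumer index backwards, wrapping from 0 to
 * size - 1, and counting each returned buffer as available again.
 */
static void toy_restore_bufs(unsigned int *cidx, unsigned int *avail,
			     unsigned int size, unsigned int nbufs)
{
	while (nbufs--) {
		if (*cidx == 0)
			*cidx = size - 1;
		else
			(*cidx)--;
		(*avail)++;
	}
}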
3809 * is_new_response - check if a response is newly written
3819 return (r->type_gen >> RSPD_GEN_S) == q->gen; in is_new_response()
3823 * rspq_next - advance to the next entry in a response queue
3830 q->cur_desc = (void *)q->cur_desc + q->iqe_len; in rspq_next()
3831 if (unlikely(++q->cidx == q->size)) { in rspq_next()
3832 q->cidx = 0; in rspq_next()
3833 q->gen ^= 1; in rspq_next()
3834 q->cur_desc = q->desc; in rspq_next()
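/* Editor's sketch (illustrative, not part of the driver): the
 * generation-bit scheme behind is_new_response()/rspq_next() above.  Each
 * response carries the generation bit the hardware wrote it with; the
 * consumer keeps the generation it expects and flips it every time it
 * wraps, so "entry generation == expected generation" means "newly
 * written" without having to read a producer index.  toy_* types are
 * stand-ins.
 */
#include <stdbool.h>
#include <stdint.h>

struct toy_rsp {
	uint8_t type_gen;		/* bit 0: generation written by producer */
};

struct toy_rspq {
	struct toy_rsp *desc;
	unsigned int size, cidx;
	uint8_t gen;			/* generation the consumer expects */
};

static bool toy_is_new_response(const struct toy_rspq *q)
{
	return (q->desc[q->cidx].type_gen & 1) == q->gen;
}

static void toy_rspq_next(struct toy_rspq *q)
{
	if (++q->cidx == q->size) {
		q->cidx = 0;
		q->gen ^= 1;		/* the next lap expects the other value */
	}
}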
3839 * process_responses - process responses from an SGE response queue
3857 struct adapter *adapter = q->adap; in process_responses()
3858 struct sge *s = &adapter->sge; in process_responses()
3861 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); in process_responses()
3863 if (q->flush_handler) in process_responses()
3864 q->flush_handler(q); in process_responses()
3869 rsp_type = RSPD_TYPE_G(rc->type_gen); in process_responses()
3874 u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags; in process_responses()
3877 if (likely(q->offset > 0)) { in process_responses()
3878 free_rx_bufs(q->adap, &rxq->fl, 1); in process_responses()
3879 q->offset = 0; in process_responses()
3887 rsd = &rxq->fl.sdesc[rxq->fl.cidx]; in process_responses()
3889 fp->page = rsd->page; in process_responses()
3890 fp->offset = q->offset; in process_responses()
3891 fp->size = min(bufsz, len); in process_responses()
3892 len -= fp->size; in process_responses()
3895 unmap_rx_buf(q->adap, &rxq->fl); in process_responses()
3899 be64_to_cpu(rc->last_flit)); in process_responses()
3904 dma_sync_single_for_cpu(q->adap->pdev_dev, in process_responses()
3906 fp->size, DMA_FROM_DEVICE); in process_responses()
3913 ret = q->handler(q, q->cur_desc, &si); in process_responses()
3915 q->offset += ALIGN(fp->size, s->fl_align); in process_responses()
3917 restore_rx_bufs(&si, &rxq->fl, frags); in process_responses()
3919 ret = q->handler(q, q->cur_desc, NULL); in process_responses()
3921 ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN); in process_responses()
3926 q->next_intr_params = QINTR_TIMER_IDX_V(NOMEM_TMR_IDX); in process_responses()
3931 budget_left--; in process_responses()
3934 if (q->offset >= 0 && fl_cap(&rxq->fl) - rxq->fl.avail >= 16) in process_responses()
3935 __refill_fl(q->adap, &rxq->fl); in process_responses()
3936 return budget - budget_left; in process_responses()
3940 * napi_rx_handler - the NAPI handler for Rx processing
3947 * is not a concern at all with MSI-X as non-data interrupts then have
3962 timer_index = QINTR_TIMER_IDX_G(q->next_intr_params); in napi_rx_handler()
3964 if (q->adaptive_rx) { in napi_rx_handler()
3969 timer_index = timer_index - 1; in napi_rx_handler()
3971 timer_index = clamp(timer_index, 0, SGE_TIMERREGS - 1); in napi_rx_handler()
3972 q->next_intr_params = in napi_rx_handler()
3975 params = q->next_intr_params; in napi_rx_handler()
3977 params = q->next_intr_params; in napi_rx_handler()
3978 q->next_intr_params = q->intr_params; in napi_rx_handler()
3988 if (unlikely(q->bar2_addr == NULL)) { in napi_rx_handler()
3989 t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A), in napi_rx_handler()
3990 val | INGRESSQID_V((u32)q->cntxt_id)); in napi_rx_handler()
3992 writel(val | INGRESSQID_V(q->bar2_qid), in napi_rx_handler()
3993 q->bar2_addr + SGE_UDB_GTS); in napi_rx_handler()
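/* Editor's sketch (illustrative, not part of the driver): the adaptive
 * holdoff adjustment visible above.  After each poll, move to a longer
 * holdoff timer when the queue was busy and a shorter one when it was
 * quiet, clamped to the valid timer indices.  busy_thresh stands in for
 * the per-index packet quota the driver compares work_done against.
 */
#define TOY_NTIMERS	6

static int toy_next_timer_idx(int cur, int work_done, int busy_thresh)
{
	int idx = work_done > busy_thresh ? cur + 1 : cur - 1;

	if (idx < 0)
		idx = 0;
	else if (idx > TOY_NTIMERS - 1)
		idx = TOY_NTIMERS - 1;
	return idx;
}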
4005 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_restart()
4006 pktcount = eosw_txq->cidx - eosw_txq->last_cidx; in cxgb4_ethofld_restart()
4008 pktcount += eosw_txq->ndesc; in cxgb4_ethofld_restart()
4011 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev), in cxgb4_ethofld_restart()
4013 eosw_txq->inuse -= pktcount; in cxgb4_ethofld_restart()
4019 ethofld_xmit(eosw_txq->netdev, eosw_txq); in cxgb4_ethofld_restart()
4020 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_restart()
4023 /* cxgb4_ethofld_rx_handler - Process ETHOFLD Tx completions
4028 * Process an ETHOFLD Tx completion. Increment the cidx here, but
4034 u8 opcode = ((const struct rss_header *)rsp)->opcode; in cxgb4_ethofld_rx_handler()
4049 eotid = CPL_FW4_ACK_FLOWID_G(ntohl(OPCODE_TID(cpl))) - in cxgb4_ethofld_rx_handler()
4050 q->adap->tids.eotid_base; in cxgb4_ethofld_rx_handler()
4051 entry = cxgb4_lookup_eotid(&q->adap->tids, eotid); in cxgb4_ethofld_rx_handler()
4055 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_rx_handler()
4059 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4060 credits = cpl->credits; in cxgb4_ethofld_rx_handler()
4062 skb = eosw_txq->desc[eosw_txq->cidx].skb; in cxgb4_ethofld_rx_handler()
4066 if (unlikely((eosw_txq->state == in cxgb4_ethofld_rx_handler()
4068 eosw_txq->state == in cxgb4_ethofld_rx_handler()
4070 eosw_txq->cidx == eosw_txq->flowc_idx)) { in cxgb4_ethofld_rx_handler()
4071 flits = DIV_ROUND_UP(skb->len, 8); in cxgb4_ethofld_rx_handler()
4072 if (eosw_txq->state == in cxgb4_ethofld_rx_handler()
4074 eosw_txq->state = CXGB4_EO_STATE_ACTIVE; in cxgb4_ethofld_rx_handler()
4076 eosw_txq->state = CXGB4_EO_STATE_CLOSED; in cxgb4_ethofld_rx_handler()
4077 complete(&eosw_txq->completion); in cxgb4_ethofld_rx_handler()
4079 hdr_len = eth_get_headlen(eosw_txq->netdev, in cxgb4_ethofld_rx_handler()
4080 skb->data, in cxgb4_ethofld_rx_handler()
4082 flits = ethofld_calc_tx_flits(q->adap, skb, in cxgb4_ethofld_rx_handler()
4085 eosw_txq_advance_index(&eosw_txq->cidx, 1, in cxgb4_ethofld_rx_handler()
4086 eosw_txq->ndesc); in cxgb4_ethofld_rx_handler()
4088 credits -= wrlen16; in cxgb4_ethofld_rx_handler()
4091 eosw_txq->cred += cpl->credits; in cxgb4_ethofld_rx_handler()
4092 eosw_txq->ncompl--; in cxgb4_ethofld_rx_handler()
4094 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4096 /* Schedule a tasklet to reclaim SKBs and restart ETHOFLD Tx, in cxgb4_ethofld_rx_handler()
4099 tasklet_schedule(&eosw_txq->qresume_tsk); in cxgb4_ethofld_rx_handler()
4107 * The MSI-X interrupt handler for an SGE response queue.
4113 napi_schedule(&q->napi); in t4_sge_intr_msix()
4125 struct sge_rspq *q = &adap->sge.intrq; in process_intrq()
4128 spin_lock(&adap->sge.intrq_lock); in process_intrq()
4130 rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc)); in process_intrq()
4135 if (RSPD_TYPE_G(rc->type_gen) == RSPD_TYPE_INTR_X) { in process_intrq()
4136 unsigned int qid = ntohl(rc->pldbuflen_qid); in process_intrq()
4138 qid -= adap->sge.ingr_start; in process_intrq()
4139 napi_schedule(&adap->sge.ingr_map[qid]->napi); in process_intrq()
4145 val = CIDXINC_V(credits) | SEINTARM_V(q->intr_params); in process_intrq()
4150 if (unlikely(q->bar2_addr == NULL)) { in process_intrq()
4152 val | INGRESSQID_V(q->cntxt_id)); in process_intrq()
4154 writel(val | INGRESSQID_V(q->bar2_qid), in process_intrq()
4155 q->bar2_addr + SGE_UDB_GTS); in process_intrq()
4158 spin_unlock(&adap->sge.intrq_lock); in process_intrq()
4170 if (adap->flags & CXGB4_MASTER_PF) in t4_intr_msi()
4186 if (((adap->flags & CXGB4_MASTER_PF) && t4_slow_intr_handler(adap)) | in t4_intr_intx()
4193 * t4_intr_handler - select the top-level interrupt handler
4196 * Selects the top-level interrupt handler based on the type of interrupts
4197 * (MSI-X, MSI, or INTx).
4201 if (adap->flags & CXGB4_USING_MSIX) in t4_intr_handler()
4203 if (adap->flags & CXGB4_USING_MSI) in t4_intr_handler()
4213 struct sge *s = &adap->sge; in sge_rx_timer_cb()
4215 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) in sge_rx_timer_cb()
4216 for (m = s->starving_fl[i]; m; m &= m - 1) { in sge_rx_timer_cb()
4219 struct sge_fl *fl = s->egr_map[id]; in sge_rx_timer_cb()
4221 clear_bit(id, s->starving_fl); in sge_rx_timer_cb()
4226 if (napi_reschedule(&rxq->rspq.napi)) in sge_rx_timer_cb()
4227 fl->starving++; in sge_rx_timer_cb()
4229 set_bit(id, s->starving_fl); in sge_rx_timer_cb()
4236 if (!(adap->flags & CXGB4_MASTER_PF)) in sge_rx_timer_cb()
4239 t4_idma_monitor(adap, &s->idma_monitor, HZ, RX_QCHECK_PERIOD); in sge_rx_timer_cb()
4242 mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD); in sge_rx_timer_cb()
4248 struct sge *s = &adap->sge; in sge_tx_timer_cb()
4252 for (i = 0; i < BITS_TO_LONGS(s->egr_sz); i++) in sge_tx_timer_cb()
4253 for (m = s->txq_maperr[i]; m; m &= m - 1) { in sge_tx_timer_cb()
4255 struct sge_uld_txq *txq = s->egr_map[id]; in sge_tx_timer_cb()
4257 clear_bit(id, s->txq_maperr); in sge_tx_timer_cb()
4258 tasklet_schedule(&txq->qresume_tsk); in sge_tx_timer_cb()
4261 if (!is_t4(adap->params.chip)) { in sge_tx_timer_cb()
4262 struct sge_eth_txq *q = &s->ptptxq; in sge_tx_timer_cb()
4265 spin_lock(&adap->ptp_lock); in sge_tx_timer_cb()
4266 avail = reclaimable(&q->q); in sge_tx_timer_cb()
4269 free_tx_desc(adap, &q->q, avail, false); in sge_tx_timer_cb()
4270 q->q.in_use -= avail; in sge_tx_timer_cb()
4272 spin_unlock(&adap->ptp_lock); in sge_tx_timer_cb()
4276 i = s->ethtxq_rover; in sge_tx_timer_cb()
4278 budget -= t4_sge_eth_txq_egress_update(adap, &s->ethtxq[i], in sge_tx_timer_cb()
4283 if (++i >= s->ethqsets) in sge_tx_timer_cb()
4285 } while (i != s->ethtxq_rover); in sge_tx_timer_cb()
4286 s->ethtxq_rover = i; in sge_tx_timer_cb()
4294 /* We reclaimed all reclaimable TX Descriptors, so reschedule in sge_tx_timer_cb()
4300 mod_timer(&s->tx_timer, jiffies + period); in sge_tx_timer_cb()
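/* Editor's sketch (illustrative, not part of the driver): the round-robin
 * sweep performed by the Tx timer above.  Spend a bounded reclaim budget
 * across the Ethernet Tx queues, remember where the sweep stopped
 * ("rover"), and resume from the next queue on the next tick; the
 * driver's exact budget bookkeeping differs slightly.
 */
static unsigned int toy_tx_sweep(unsigned int nqueues, unsigned int rover,
				 int budget,
				 int (*reclaim)(unsigned int qidx, int budget))
{
	unsigned int i = rover;

	do {
		budget -= reclaim(i, budget);
		if (budget <= 0)
			break;
		if (++i >= nqueues)
			i = 0;
	} while (i != rover);

	return i;			/* new rover for the next timer tick */
}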
4304 * bar2_address - return the BAR2 address for an SGE Queue's Registers
4329 return adapter->bar2 + bar2_qoffset; in bar2_address()
4332 /* @intr_idx: MSI/MSI-X vector if >=0, -(absolute qid + 1) if < 0
4333 * @cong: < 0 -> no congestion feedback, >= 0 -> congestion channel map
4342 struct sge *s = &adap->sge; in t4_sge_alloc_rxq()
4344 int relaxed = !(adap->flags & CXGB4_ROOT_NO_RELAXED_ORDERING); in t4_sge_alloc_rxq()
4347 iq->size = roundup(iq->size, 16); in t4_sge_alloc_rxq()
4349 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, in t4_sge_alloc_rxq()
4350 &iq->phys_addr, NULL, 0, in t4_sge_alloc_rxq()
4351 dev_to_node(adap->pdev_dev)); in t4_sge_alloc_rxq()
4352 if (!iq->desc) in t4_sge_alloc_rxq()
4353 return -ENOMEM; in t4_sge_alloc_rxq()
4358 FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0)); in t4_sge_alloc_rxq()
4362 FW_IQ_CMD_IQASYNCH_V(fwevtq) | FW_IQ_CMD_VIID_V(pi->viid) | in t4_sge_alloc_rxq()
4366 -intr_idx - 1)); in t4_sge_alloc_rxq()
4367 c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH_V(pi->tx_chan) | in t4_sge_alloc_rxq()
4369 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | in t4_sge_alloc_rxq()
4370 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); in t4_sge_alloc_rxq()
4371 c.iqsize = htons(iq->size); in t4_sge_alloc_rxq()
4372 c.iqaddr = cpu_to_be64(iq->phys_addr); in t4_sge_alloc_rxq()
4380 CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_rxq()
4387 * (fl_starve_thres - 1). in t4_sge_alloc_rxq()
4389 if (fl->size < s->fl_starve_thres - 1 + 2 * 8) in t4_sge_alloc_rxq()
4390 fl->size = s->fl_starve_thres - 1 + 2 * 8; in t4_sge_alloc_rxq()
4391 fl->size = roundup(fl->size, 8); in t4_sge_alloc_rxq()
4392 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), in t4_sge_alloc_rxq()
4393 sizeof(struct rx_sw_desc), &fl->addr, in t4_sge_alloc_rxq()
4394 &fl->sdesc, s->stat_len, in t4_sge_alloc_rxq()
4395 dev_to_node(adap->pdev_dev)); in t4_sge_alloc_rxq()
4396 if (!fl->desc) in t4_sge_alloc_rxq()
4399 flsz = fl->size / 8 + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_rxq()
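/* Editor's sketch (illustrative, not part of the driver): the free-list
 * sizing just computed.  The list must hold at least fl_starve_thres - 1
 * buffers plus two full 8-buffer hardware descriptors, is rounded up to a
 * whole number of 8-buffer descriptors, and the size handed to the
 * hardware is counted in those descriptors plus the trailing status page.
 * Parameter names are illustrative.
 */
static unsigned int toy_fl_hw_size(unsigned int want_bufs,
				   unsigned int starve_thres,
				   unsigned int stat_len,
				   unsigned int desc_len)
{
	unsigned int bufs = want_bufs;

	if (bufs < starve_thres - 1 + 2 * 8)
		bufs = starve_thres - 1 + 2 * 8;
	bufs = (bufs + 7) & ~7U;	/* round up to whole 8-buffer descriptors */

	return bufs / 8 + stat_len / desc_len;	/* descriptors + status page */
}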
4413 * Free List pointers are provided, so we use a 128-byte Fetch in t4_sge_alloc_rxq()
4415 * the smaller 64-byte value there). in t4_sge_alloc_rxq()
4425 c.fl0addr = cpu_to_be64(fl->addr); in t4_sge_alloc_rxq()
4428 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_rxq()
4432 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); in t4_sge_alloc_rxq()
4433 iq->cur_desc = iq->desc; in t4_sge_alloc_rxq()
4434 iq->cidx = 0; in t4_sge_alloc_rxq()
4435 iq->gen = 1; in t4_sge_alloc_rxq()
4436 iq->next_intr_params = iq->intr_params; in t4_sge_alloc_rxq()
4437 iq->cntxt_id = ntohs(c.iqid); in t4_sge_alloc_rxq()
4438 iq->abs_id = ntohs(c.physiqid); in t4_sge_alloc_rxq()
4439 iq->bar2_addr = bar2_address(adap, in t4_sge_alloc_rxq()
4440 iq->cntxt_id, in t4_sge_alloc_rxq()
4442 &iq->bar2_qid); in t4_sge_alloc_rxq()
4443 iq->size--; /* subtract status entry */ in t4_sge_alloc_rxq()
4444 iq->netdev = dev; in t4_sge_alloc_rxq()
4445 iq->handler = hnd; in t4_sge_alloc_rxq()
4446 iq->flush_handler = flush_hnd; in t4_sge_alloc_rxq()
4448 memset(&iq->lro_mgr, 0, sizeof(struct t4_lro_mgr)); in t4_sge_alloc_rxq()
4449 skb_queue_head_init(&iq->lro_mgr.lroq); in t4_sge_alloc_rxq()
4451 /* set offset to -1 to distinguish ingress queues without FL */ in t4_sge_alloc_rxq()
4452 iq->offset = fl ? 0 : -1; in t4_sge_alloc_rxq()
4454 adap->sge.ingr_map[iq->cntxt_id - adap->sge.ingr_start] = iq; in t4_sge_alloc_rxq()
4457 fl->cntxt_id = ntohs(c.fl0id); in t4_sge_alloc_rxq()
4458 fl->avail = fl->pend_cred = 0; in t4_sge_alloc_rxq()
4459 fl->pidx = fl->cidx = 0; in t4_sge_alloc_rxq()
4460 fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0; in t4_sge_alloc_rxq()
4461 adap->sge.egr_map[fl->cntxt_id - adap->sge.egr_start] = fl; in t4_sge_alloc_rxq()
4466 fl->bar2_addr = bar2_address(adap, in t4_sge_alloc_rxq()
4467 fl->cntxt_id, in t4_sge_alloc_rxq()
4469 &fl->bar2_qid); in t4_sge_alloc_rxq()
4478 * a lot easier to fix in one place ... For now we do something very in t4_sge_alloc_rxq()
4481 if (!is_t4(adap->params.chip) && cong >= 0) { in t4_sge_alloc_rxq()
4484 u16 cng_ch_bits_log = adap->params.arch.cng_ch_bits_log; in t4_sge_alloc_rxq()
4488 FW_PARAMS_PARAM_YZ_V(iq->cntxt_id)); in t4_sge_alloc_rxq()
4500 ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1, in t4_sge_alloc_rxq()
4503 dev_warn(adap->pdev_dev, "Failed to set Congestion" in t4_sge_alloc_rxq()
4505 iq->cntxt_id, -ret); in t4_sge_alloc_rxq()
4511 ret = -ENOMEM; in t4_sge_alloc_rxq()
4513 if (iq->desc) { in t4_sge_alloc_rxq()
4514 dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len, in t4_sge_alloc_rxq()
4515 iq->desc, iq->phys_addr); in t4_sge_alloc_rxq()
4516 iq->desc = NULL; in t4_sge_alloc_rxq()
4518 if (fl && fl->desc) { in t4_sge_alloc_rxq()
4519 kfree(fl->sdesc); in t4_sge_alloc_rxq()
4520 fl->sdesc = NULL; in t4_sge_alloc_rxq()
4521 dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc), in t4_sge_alloc_rxq()
4522 fl->desc, fl->addr); in t4_sge_alloc_rxq()
4523 fl->desc = NULL; in t4_sge_alloc_rxq()
4530 q->cntxt_id = id; in init_txq()
4531 q->bar2_addr = bar2_address(adap, in init_txq()
4532 q->cntxt_id, in init_txq()
4534 &q->bar2_qid); in init_txq()
4535 q->in_use = 0; in init_txq()
4536 q->cidx = q->pidx = 0; in init_txq()
4537 q->stops = q->restarts = 0; in init_txq()
4538 q->stat = (void *)&q->desc[q->size]; in init_txq()
4539 spin_lock_init(&q->db_lock); in init_txq()
4540 adap->sge.egr_map[id - adap->sge.egr_start] = q; in init_txq()
4544 * t4_sge_alloc_eth_txq - allocate an Ethernet TX Queue
4546 * @txq: the SGE Ethernet TX Queue to initialize
4548 * @netdevq: the corresponding Linux TX Queue
4550 * @dbqt: whether this TX Queue will use the SGE Doorbell Queue Timers
4556 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_eth_txq()
4558 struct sge *s = &adap->sge; in t4_sge_alloc_eth_txq()
4563 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_eth_txq()
4565 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, in t4_sge_alloc_eth_txq()
4567 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len, in t4_sge_alloc_eth_txq()
4569 if (!txq->q.desc) in t4_sge_alloc_eth_txq()
4570 return -ENOMEM; in t4_sge_alloc_eth_txq()
4575 FW_EQ_ETH_CMD_PFN_V(adap->pf) | in t4_sge_alloc_eth_txq()
4580 /* For TX Ethernet Queues using the SGE Doorbell Queue Timer in t4_sge_alloc_eth_txq()
4582 * Index Updates on the TX Queue. Otherwise we have the Hardware in t4_sge_alloc_eth_txq()
4584 * TX Queue. in t4_sge_alloc_eth_txq()
4587 FW_EQ_ETH_CMD_VIID_V(pi->viid)); in t4_sge_alloc_eth_txq()
4591 FW_EQ_ETH_CMD_PCIECHN_V(pi->tx_chan) | in t4_sge_alloc_eth_txq()
4603 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4607 * ethtool -C tx-usecs {Timer Val} command. Note that the SGE in t4_sge_alloc_eth_txq()
4614 FW_EQ_ETH_CMD_TIMERIX_V(txq->dbqtimerix)); in t4_sge_alloc_eth_txq()
4616 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_eth_txq()
4618 kfree(txq->q.sdesc); in t4_sge_alloc_eth_txq()
4619 txq->q.sdesc = NULL; in t4_sge_alloc_eth_txq()
4620 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_eth_txq()
4622 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_eth_txq()
4623 txq->q.desc = NULL; in t4_sge_alloc_eth_txq()
4627 txq->q.q_type = CXGB4_TXQ_ETH; in t4_sge_alloc_eth_txq()
4628 init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_G(ntohl(c.eqid_pkd))); in t4_sge_alloc_eth_txq()
4629 txq->txq = netdevq; in t4_sge_alloc_eth_txq()
4630 txq->tso = 0; in t4_sge_alloc_eth_txq()
4631 txq->uso = 0; in t4_sge_alloc_eth_txq()
4632 txq->tx_cso = 0; in t4_sge_alloc_eth_txq()
4633 txq->vlan_ins = 0; in t4_sge_alloc_eth_txq()
4634 txq->mapping_err = 0; in t4_sge_alloc_eth_txq()
4635 txq->dbqt = dbqt; in t4_sge_alloc_eth_txq()
4644 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_ctrl_txq()
4646 struct sge *s = &adap->sge; in t4_sge_alloc_ctrl_txq()
4651 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ctrl_txq()
4653 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, in t4_sge_alloc_ctrl_txq()
4654 sizeof(struct tx_desc), 0, &txq->q.phys_addr, in t4_sge_alloc_ctrl_txq()
4655 NULL, 0, dev_to_node(adap->pdev_dev)); in t4_sge_alloc_ctrl_txq()
4656 if (!txq->q.desc) in t4_sge_alloc_ctrl_txq()
4657 return -ENOMEM; in t4_sge_alloc_ctrl_txq()
4661 FW_EQ_CTRL_CMD_PFN_V(adap->pf) | in t4_sge_alloc_ctrl_txq()
4669 FW_EQ_CTRL_CMD_PCIECHN_V(pi->tx_chan) | in t4_sge_alloc_ctrl_txq()
4678 c.eqaddr = cpu_to_be64(txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4680 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_ctrl_txq()
4682 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_ctrl_txq()
4684 txq->q.desc, txq->q.phys_addr); in t4_sge_alloc_ctrl_txq()
4685 txq->q.desc = NULL; in t4_sge_alloc_ctrl_txq()
4689 txq->q.q_type = CXGB4_TXQ_CTRL; in t4_sge_alloc_ctrl_txq()
4690 init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_G(ntohl(c.cmpliqid_eqid))); in t4_sge_alloc_ctrl_txq()
4691 txq->adap = adap; in t4_sge_alloc_ctrl_txq()
4692 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_ctrl_txq()
4693 tasklet_setup(&txq->qresume_tsk, restart_ctrlq); in t4_sge_alloc_ctrl_txq()
4694 txq->full = 0; in t4_sge_alloc_ctrl_txq()
4707 return t4_set_params(adap, adap->mbox, adap->pf, 0, 1, &param, &val); in t4_sge_mod_ctrl_txq()
4713 unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip); in t4_sge_alloc_ofld_txq()
4715 struct sge *s = &adap->sge; in t4_sge_alloc_ofld_txq()
4721 nentries = q->size + s->stat_len / sizeof(struct tx_desc); in t4_sge_alloc_ofld_txq()
4722 q->desc = alloc_ring(adap->pdev_dev, q->size, sizeof(struct tx_desc), in t4_sge_alloc_ofld_txq()
4723 sizeof(struct tx_sw_desc), &q->phys_addr, in t4_sge_alloc_ofld_txq()
4724 &q->sdesc, s->stat_len, NUMA_NO_NODE); in t4_sge_alloc_ofld_txq()
4725 if (!q->desc) in t4_sge_alloc_ofld_txq()
4726 return -ENOMEM; in t4_sge_alloc_ofld_txq()
4736 FW_EQ_OFLD_CMD_PFN_V(adap->pf) | in t4_sge_alloc_ofld_txq()
4742 FW_EQ_OFLD_CMD_PCIECHN_V(pi->tx_chan) | in t4_sge_alloc_ofld_txq()
4749 c.eqaddr = cpu_to_be64(q->phys_addr); in t4_sge_alloc_ofld_txq()
4751 ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c); in t4_sge_alloc_ofld_txq()
4753 kfree(q->sdesc); in t4_sge_alloc_ofld_txq()
4754 q->sdesc = NULL; in t4_sge_alloc_ofld_txq()
4755 dma_free_coherent(adap->pdev_dev, in t4_sge_alloc_ofld_txq()
4757 q->desc, q->phys_addr); in t4_sge_alloc_ofld_txq()
4758 q->desc = NULL; in t4_sge_alloc_ofld_txq()
4776 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, cmd, iqid); in t4_sge_alloc_uld_txq()
4780 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_uld_txq()
4781 txq->adap = adap; in t4_sge_alloc_uld_txq()
4782 skb_queue_head_init(&txq->sendq); in t4_sge_alloc_uld_txq()
4783 tasklet_setup(&txq->qresume_tsk, restart_ofldq); in t4_sge_alloc_uld_txq()
4784 txq->full = 0; in t4_sge_alloc_uld_txq()
4785 txq->mapping_err = 0; in t4_sge_alloc_uld_txq()
4794 ret = t4_sge_alloc_ofld_txq(adap, &txq->q, dev, FW_EQ_OFLD_CMD, iqid); in t4_sge_alloc_ethofld_txq()
4798 txq->q.q_type = CXGB4_TXQ_ULD; in t4_sge_alloc_ethofld_txq()
4799 spin_lock_init(&txq->lock); in t4_sge_alloc_ethofld_txq()
4800 txq->adap = adap; in t4_sge_alloc_ethofld_txq()
4801 txq->tso = 0; in t4_sge_alloc_ethofld_txq()
4802 txq->uso = 0; in t4_sge_alloc_ethofld_txq()
4803 txq->tx_cso = 0; in t4_sge_alloc_ethofld_txq()
4804 txq->vlan_ins = 0; in t4_sge_alloc_ethofld_txq()
4805 txq->mapping_err = 0; in t4_sge_alloc_ethofld_txq()
4811 struct sge *s = &adap->sge; in free_txq()
4813 dma_free_coherent(adap->pdev_dev, in free_txq()
4814 q->size * sizeof(struct tx_desc) + s->stat_len, in free_txq()
4815 q->desc, q->phys_addr); in free_txq()
4816 q->cntxt_id = 0; in free_txq()
4817 q->sdesc = NULL; in free_txq()
4818 q->desc = NULL; in free_txq()
4824 struct sge *s = &adap->sge; in free_rspq_fl()
4825 unsigned int fl_id = fl ? fl->cntxt_id : 0xffff; in free_rspq_fl()
4827 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; in free_rspq_fl()
4828 t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP, in free_rspq_fl()
4829 rq->cntxt_id, fl_id, 0xffff); in free_rspq_fl()
4830 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, in free_rspq_fl()
4831 rq->desc, rq->phys_addr); in free_rspq_fl()
4832 netif_napi_del(&rq->napi); in free_rspq_fl()
4833 rq->netdev = NULL; in free_rspq_fl()
4834 rq->cntxt_id = rq->abs_id = 0; in free_rspq_fl()
4835 rq->desc = NULL; in free_rspq_fl()
4838 free_rx_bufs(adap, fl, fl->avail); in free_rspq_fl()
4839 dma_free_coherent(adap->pdev_dev, fl->size * 8 + s->stat_len, in free_rspq_fl()
4840 fl->desc, fl->addr); in free_rspq_fl()
4841 kfree(fl->sdesc); in free_rspq_fl()
4842 fl->sdesc = NULL; in free_rspq_fl()
4843 fl->cntxt_id = 0; in free_rspq_fl()
4844 fl->desc = NULL; in free_rspq_fl()
4849 * t4_free_ofld_rxqs - free a block of consecutive Rx queues
4858 for ( ; n; n--, q++) in t4_free_ofld_rxqs()
4859 if (q->rspq.desc) in t4_free_ofld_rxqs()
4860 free_rspq_fl(adap, &q->rspq, in t4_free_ofld_rxqs()
4861 q->fl.size ? &q->fl : NULL); in t4_free_ofld_rxqs()
4866 if (txq->q.desc) { in t4_sge_free_ethofld_txq()
4867 t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0, in t4_sge_free_ethofld_txq()
4868 txq->q.cntxt_id); in t4_sge_free_ethofld_txq()
4869 free_tx_desc(adap, &txq->q, txq->q.in_use, false); in t4_sge_free_ethofld_txq()
4870 kfree(txq->q.sdesc); in t4_sge_free_ethofld_txq()
4871 free_txq(adap, &txq->q); in t4_sge_free_ethofld_txq()
4876 * t4_free_sge_resources - free SGE resources
4888 for (i = 0; i < adap->sge.ethqsets; i++) { in t4_free_sge_resources()
4889 eq = &adap->sge.ethrxq[i]; in t4_free_sge_resources()
4890 if (eq->rspq.desc) in t4_free_sge_resources()
4891 t4_iq_stop(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4893 eq->rspq.cntxt_id, in t4_free_sge_resources()
4894 eq->fl.size ? eq->fl.cntxt_id : 0xffff, in t4_free_sge_resources()
4898 /* clean up Ethernet Tx/Rx queues */ in t4_free_sge_resources()
4899 for (i = 0; i < adap->sge.ethqsets; i++) { in t4_free_sge_resources()
4900 eq = &adap->sge.ethrxq[i]; in t4_free_sge_resources()
4901 if (eq->rspq.desc) in t4_free_sge_resources()
4902 free_rspq_fl(adap, &eq->rspq, in t4_free_sge_resources()
4903 eq->fl.size ? &eq->fl : NULL); in t4_free_sge_resources()
4904 if (eq->msix) { in t4_free_sge_resources()
4905 cxgb4_free_msix_idx_in_bmap(adap, eq->msix->idx); in t4_free_sge_resources()
4906 eq->msix = NULL; in t4_free_sge_resources()
4909 etq = &adap->sge.ethtxq[i]; in t4_free_sge_resources()
4910 if (etq->q.desc) { in t4_free_sge_resources()
4911 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4912 etq->q.cntxt_id); in t4_free_sge_resources()
4913 __netif_tx_lock_bh(etq->txq); in t4_free_sge_resources()
4914 free_tx_desc(adap, &etq->q, etq->q.in_use, true); in t4_free_sge_resources()
4915 __netif_tx_unlock_bh(etq->txq); in t4_free_sge_resources()
4916 kfree(etq->q.sdesc); in t4_free_sge_resources()
4917 free_txq(adap, &etq->q); in t4_free_sge_resources()
4921 /* clean up control Tx queues */ in t4_free_sge_resources()
4922 for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) { in t4_free_sge_resources()
4923 struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i]; in t4_free_sge_resources()
4925 if (cq->q.desc) { in t4_free_sge_resources()
4926 tasklet_kill(&cq->qresume_tsk); in t4_free_sge_resources()
4927 t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4928 cq->q.cntxt_id); in t4_free_sge_resources()
4929 __skb_queue_purge(&cq->sendq); in t4_free_sge_resources()
4930 free_txq(adap, &cq->q); in t4_free_sge_resources()
4934 if (adap->sge.fw_evtq.desc) { in t4_free_sge_resources()
4935 free_rspq_fl(adap, &adap->sge.fw_evtq, NULL); in t4_free_sge_resources()
4936 if (adap->sge.fwevtq_msix_idx >= 0) in t4_free_sge_resources()
4938 adap->sge.fwevtq_msix_idx); in t4_free_sge_resources()
4941 if (adap->sge.nd_msix_idx >= 0) in t4_free_sge_resources()
4942 cxgb4_free_msix_idx_in_bmap(adap, adap->sge.nd_msix_idx); in t4_free_sge_resources()
4944 if (adap->sge.intrq.desc) in t4_free_sge_resources()
4945 free_rspq_fl(adap, &adap->sge.intrq, NULL); in t4_free_sge_resources()
4947 if (!is_t4(adap->params.chip)) { in t4_free_sge_resources()
4948 etq = &adap->sge.ptptxq; in t4_free_sge_resources()
4949 if (etq->q.desc) { in t4_free_sge_resources()
4950 t4_eth_eq_free(adap, adap->mbox, adap->pf, 0, in t4_free_sge_resources()
4951 etq->q.cntxt_id); in t4_free_sge_resources()
4952 spin_lock_bh(&adap->ptp_lock); in t4_free_sge_resources()
4953 free_tx_desc(adap, &etq->q, etq->q.in_use, true); in t4_free_sge_resources()
4954 spin_unlock_bh(&adap->ptp_lock); in t4_free_sge_resources()
4955 kfree(etq->q.sdesc); in t4_free_sge_resources()
4956 free_txq(adap, &etq->q); in t4_free_sge_resources()
4961 memset(adap->sge.egr_map, 0, in t4_free_sge_resources()
4962 adap->sge.egr_sz * sizeof(*adap->sge.egr_map)); in t4_free_sge_resources()
4967 adap->sge.ethtxq_rover = 0; in t4_sge_start()
4968 mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD); in t4_sge_start()
4969 mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD); in t4_sge_start()
4973 * t4_sge_stop - disable SGE operation
4983 struct sge *s = &adap->sge; in t4_sge_stop()
4985 if (s->rx_timer.function) in t4_sge_stop()
4986 del_timer_sync(&s->rx_timer); in t4_sge_stop()
4987 if (s->tx_timer.function) in t4_sge_stop()
4988 del_timer_sync(&s->tx_timer); in t4_sge_stop()
4993 txq_info = adap->sge.uld_txq_info[CXGB4_TX_OFLD]; in t4_sge_stop()
4995 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop()
4997 for_each_ofldtxq(&adap->sge, i) { in t4_sge_stop()
4998 if (txq->q.desc) in t4_sge_stop()
4999 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
5007 txq_info = adap->sge.uld_txq_info[CXGB4_TX_CRYPTO]; in t4_sge_stop()
5009 struct sge_uld_txq *txq = txq_info->uldtxq; in t4_sge_stop()
5011 for_each_ofldtxq(&adap->sge, i) { in t4_sge_stop()
5012 if (txq->q.desc) in t4_sge_stop()
5013 tasklet_kill(&txq->qresume_tsk); in t4_sge_stop()
5018 for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) { in t4_sge_stop()
5019 struct sge_ctrl_txq *cq = &s->ctrlq[i]; in t4_sge_stop()
5021 if (cq->q.desc) in t4_sge_stop()
5022 tasklet_kill(&cq->qresume_tsk); in t4_sge_stop()
5027 * t4_sge_init_soft - grab core SGE values needed by SGE code
5036 struct sge *s = &adap->sge; in t4_sge_init_soft()
5048 dev_err(adap->pdev_dev, "bad SGE CPL MODE\n"); in t4_sge_init_soft()
5049 return -EINVAL; in t4_sge_init_soft()
5080 (fl_large_pg & (fl_large_pg-1)) != 0) { in t4_sge_init_soft()
5081 dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n", in t4_sge_init_soft()
5083 return -EINVAL; in t4_sge_init_soft()
5086 s->fl_pg_order = ilog2(fl_large_pg) - PAGE_SHIFT; in t4_sge_init_soft()
5090 dev_err(adap->pdev_dev, "bad SGE FL MTU sizes [%d, %d]\n", in t4_sge_init_soft()
5092 return -EINVAL; in t4_sge_init_soft()
5102 s->timer_val[0] = core_ticks_to_us(adap, in t4_sge_init_soft()
5104 s->timer_val[1] = core_ticks_to_us(adap, in t4_sge_init_soft()
5106 s->timer_val[2] = core_ticks_to_us(adap, in t4_sge_init_soft()
5108 s->timer_val[3] = core_ticks_to_us(adap, in t4_sge_init_soft()
5110 s->timer_val[4] = core_ticks_to_us(adap, in t4_sge_init_soft()
5112 s->timer_val[5] = core_ticks_to_us(adap, in t4_sge_init_soft()
5116 s->counter_val[0] = THRESHOLD_0_G(ingress_rx_threshold); in t4_sge_init_soft()
5117 s->counter_val[1] = THRESHOLD_1_G(ingress_rx_threshold); in t4_sge_init_soft()
5118 s->counter_val[2] = THRESHOLD_2_G(ingress_rx_threshold); in t4_sge_init_soft()
5119 s->counter_val[3] = THRESHOLD_3_G(ingress_rx_threshold); in t4_sge_init_soft()
5125 * t4_sge_init - initialize SGE
5128 * Perform low-level SGE code initialization needed every time after a
5133 struct sge *s = &adap->sge; in t4_sge_init()
5142 s->pktshift = PKTSHIFT_G(sge_control); in t4_sge_init()
5143 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_F) ? 128 : 64; in t4_sge_init()
5145 s->fl_align = t4_fl_pkt_align(adap); in t4_sge_init()
5163 switch (CHELSIO_CHIP_VERSION(adap->params.chip)) { in t4_sge_init()
5174 dev_err(adap->pdev_dev, "Unsupported Chip version %d\n", in t4_sge_init()
5175 CHELSIO_CHIP_VERSION(adap->params.chip)); in t4_sge_init()
5176 return -EINVAL; in t4_sge_init()
5178 s->fl_starve_thres = 2*egress_threshold + 1; in t4_sge_init()
5180 t4_idma_monitor_init(adap, &s->idma_monitor); in t4_sge_init()
5182 /* Set up timers used for recurring callbacks to process RX and TX in t4_sge_init()
5185 timer_setup(&s->rx_timer, sge_rx_timer_cb, 0); in t4_sge_init()
5186 timer_setup(&s->tx_timer, sge_tx_timer_cb, 0); in t4_sge_init()
5188 spin_lock_init(&s->intrq_lock); in t4_sge_init()