Lines Matching +full:1 +full:q
66 #define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
94 TXQ_RUNNING = 1 << 0, /* fetch engine is running */
95 TXQ_LAST_PKT_DB = 1 << 1, /* last packet rang the doorbell */
139 dma_addr_t addr[MAX_SKB_FRAGS + 1];
146 * desc = 1 + (flits - 2) / (WR_FLITS - 1).
152 #if SGE_NUM_GENBITS == 1
153 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
158 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
163 # error "SGE_NUM_GENBITS must be 1 or 2"
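
Aside on the formula quoted above: "desc = 1 + (flits - 2) / (WR_FLITS - 1)" gives the number of Tx descriptors a work request of a given flit count occupies; the driver reads the answer from a small lookup table (its rows of 1s are the hits just above) rather than evaluating the division each time. A minimal restatement of the formula as code, treating WR_FLITS as a parameter — an illustrative sketch, not driver code:

static unsigned int flits_to_desc_by_formula(unsigned int flits,
					     unsigned int wr_flits)
{
	if (flits <= 2)
		return 1;	/* a small work request fits in one descriptor */
	return 1 + (flits - 2) / (wr_flits - 1);
}
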
167 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx) in fl_to_qset() argument
169 return container_of(q, struct sge_qset, fl[qidx]); in fl_to_qset()
172 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) in rspq_to_qset() argument
174 return container_of(q, struct sge_qset, rspq); in rspq_to_qset()
177 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) in txq_to_qset() argument
179 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
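
The three helpers above (fl_to_qset(), rspq_to_qset(), txq_to_qset()) all use container_of() to step from a pointer to an embedded queue back to the struct sge_qset that contains it. A generic sketch of that idiom with hypothetical structure names:

#include <linux/kernel.h>	/* pulls in container_of() */

struct demo_inner {
	int value;
};

struct demo_outer {
	int id;
	struct demo_inner inner;	/* embedded member, not a pointer */
};

/* Recover the enclosing demo_outer from a pointer to its embedded member. */
static inline struct demo_outer *inner_to_outer(struct demo_inner *p)
{
	return container_of(p, struct demo_outer, inner);
}
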
185 * @q: the response queue to replenish
192 const struct sge_rspq *q, unsigned int credits) in refill_rspq() argument
196 V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); in refill_rspq()
208 return 1; in need_skb_unmap()
217 * @q: the Tx queue containing Tx descriptors for the packet
236 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, in unmap_skb() argument
240 struct tx_sw_desc *d = &q->sdesc[cidx]; in unmap_skb()
243 sgp = (struct sg_ent *)&q->desc[cidx].flit[d->sflit]; in unmap_skb()
249 j = 1; in unmap_skb()
252 curflit = d->sflit + 1 + j; in unmap_skb()
259 j ^= 1; in unmap_skb()
269 d = cidx + 1 == q->size ? q->sdesc : d + 1; in unmap_skb()
272 d->sflit = curflit - WR_FLITS - j; /* sflit can be -1 */ in unmap_skb()
279 * @q: the Tx queue to reclaim descriptors from
285 static void free_tx_desc(struct adapter *adapter, struct sge_txq *q, in free_tx_desc() argument
290 unsigned int cidx = q->cidx; in free_tx_desc()
293 q->cntxt_id >= FW_TUNNEL_SGEEC_START; in free_tx_desc()
295 d = &q->sdesc[cidx]; in free_tx_desc()
299 unmap_skb(d->skb, q, cidx, pdev); in free_tx_desc()
306 if (++cidx == q->size) { in free_tx_desc()
308 d = q->sdesc; in free_tx_desc()
311 q->cidx = cidx; in free_tx_desc()
317 * @q: the Tx queue to reclaim completed descriptors from
325 struct sge_txq *q, in reclaim_completed_tx() argument
328 unsigned int reclaim = q->processed - q->cleaned; in reclaim_completed_tx()
332 free_tx_desc(adapter, q, reclaim); in reclaim_completed_tx()
333 q->cleaned += reclaim; in reclaim_completed_tx()
334 q->in_use -= reclaim; in reclaim_completed_tx()
336 return q->processed - q->cleaned; in reclaim_completed_tx()
341 * @q: the Tx queue
345 static inline int should_restart_tx(const struct sge_txq *q) in should_restart_tx() argument
347 unsigned int r = q->processed - q->cleaned; in should_restart_tx()
349 return q->in_use - r < (q->size >> 1); in should_restart_tx()
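
The fragments from free_tx_desc(), reclaim_completed_tx() and should_restart_tx() above share one accounting scheme: q->processed counts descriptors the hardware has completed, q->cleaned those the driver has already reclaimed, and q->in_use those still outstanding. A simplified sketch of that bookkeeping with a hypothetical structure; the per-call clamp is inferred from the TX_RECLAIM_CHUNK argument seen elsewhere in this listing:

struct demo_txq {
	unsigned int size;	/* descriptors in the ring */
	unsigned int in_use;	/* descriptors handed to hardware */
	unsigned int processed;	/* completed by hardware */
	unsigned int cleaned;	/* already reclaimed by the driver */
};

/* Reclaim up to 'chunk' completed descriptors; return what is still unreclaimed. */
static unsigned int demo_reclaim_completed(struct demo_txq *q, unsigned int chunk)
{
	unsigned int reclaim = q->processed - q->cleaned;

	if (reclaim > chunk)
		reclaim = chunk;	/* bound the work done per call */
	q->cleaned += reclaim;
	q->in_use -= reclaim;
	return q->processed - q->cleaned;
}

/* A stopped queue is worth restarting once half the ring would be free again. */
static int demo_should_restart(const struct demo_txq *q)
{
	unsigned int reclaimable = q->processed - q->cleaned;

	return q->in_use - reclaimable < (q->size >> 1);
}
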
352 static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, in clear_rx_desc() argument
355 if (q->use_pages && d->pg_chunk.page) { in clear_rx_desc()
359 q->alloc_size, DMA_FROM_DEVICE); in clear_rx_desc()
365 q->buf_size, DMA_FROM_DEVICE); in clear_rx_desc()
374 * @q: the SGE free list to clean up
379 static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q) in free_rx_bufs() argument
381 unsigned int cidx = q->cidx; in free_rx_bufs()
383 while (q->credits--) { in free_rx_bufs()
384 struct rx_sw_desc *d = &q->sdesc[cidx]; in free_rx_bufs()
387 clear_rx_desc(pdev, q, d); in free_rx_bufs()
388 if (++cidx == q->size) in free_rx_bufs()
392 if (q->pg_chunk.page) { in free_rx_bufs()
393 __free_pages(q->pg_chunk.page, q->order); in free_rx_bufs()
394 q->pg_chunk.page = NULL; in free_rx_bufs()
441 static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, in alloc_pg_chunk() argument
445 if (!q->pg_chunk.page) { in alloc_pg_chunk()
448 q->pg_chunk.page = alloc_pages(gfp, order); in alloc_pg_chunk()
449 if (unlikely(!q->pg_chunk.page)) in alloc_pg_chunk()
451 q->pg_chunk.va = page_address(q->pg_chunk.page); in alloc_pg_chunk()
452 q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - in alloc_pg_chunk()
454 q->pg_chunk.offset = 0; in alloc_pg_chunk()
455 mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page, in alloc_pg_chunk()
456 0, q->alloc_size, DMA_FROM_DEVICE); in alloc_pg_chunk()
458 __free_pages(q->pg_chunk.page, order); in alloc_pg_chunk()
459 q->pg_chunk.page = NULL; in alloc_pg_chunk()
462 q->pg_chunk.mapping = mapping; in alloc_pg_chunk()
464 sd->pg_chunk = q->pg_chunk; in alloc_pg_chunk()
468 q->pg_chunk.offset += q->buf_size; in alloc_pg_chunk()
469 if (q->pg_chunk.offset == (PAGE_SIZE << order)) in alloc_pg_chunk()
470 q->pg_chunk.page = NULL; in alloc_pg_chunk()
472 q->pg_chunk.va += q->buf_size; in alloc_pg_chunk()
473 get_page(q->pg_chunk.page); in alloc_pg_chunk()
477 *sd->pg_chunk.p_cnt = 1; in alloc_pg_chunk()
479 *sd->pg_chunk.p_cnt += 1; in alloc_pg_chunk()
484 static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q) in ring_fl_db() argument
486 if (q->pend_cred >= q->credits / 4) { in ring_fl_db()
487 q->pend_cred = 0; in ring_fl_db()
489 t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id)); in ring_fl_db()
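
ring_fl_db() above defers the free-list doorbell until the pending credits reach a quarter of the credits currently posted; the same quarter-of-the-queue batching appears later for response-queue credits, where refill_rspq() is called once q->credits reaches q->size / 4. A minimal sketch of the pattern with hypothetical names:

struct demo_fl {
	unsigned int credits;	/* buffers currently posted to the free list */
	unsigned int pend_cred;	/* posted but not yet announced to hardware */
};

/* One doorbell write covers a whole batch of newly posted buffers. */
static void demo_ring_db(struct demo_fl *q, void (*write_doorbell)(void))
{
	if (q->pend_cred >= q->credits / 4) {
		q->pend_cred = 0;
		write_doorbell();
	}
}
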
496 * @q: the free-list to refill
504 static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) in refill_fl() argument
506 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; in refill_fl()
507 struct rx_desc *d = &q->desc[q->pidx]; in refill_fl()
514 if (q->use_pages) { in refill_fl()
515 if (unlikely(alloc_pg_chunk(adap, q, sd, gfp, in refill_fl()
516 q->order))) { in refill_fl()
517 nomem: q->alloc_failed++; in refill_fl()
523 add_one_rx_chunk(mapping, d, q->gen); in refill_fl()
525 q->buf_size - SGE_PG_RSVD, in refill_fl()
530 struct sk_buff *skb = alloc_skb(q->buf_size, gfp); in refill_fl()
536 err = add_one_rx_buf(buf_start, q->buf_size, d, sd, in refill_fl()
537 q->gen, adap->pdev); in refill_fl()
539 clear_rx_desc(adap->pdev, q, sd); in refill_fl()
546 if (++q->pidx == q->size) { in refill_fl()
547 q->pidx = 0; in refill_fl()
548 q->gen ^= 1; in refill_fl()
549 sd = q->sdesc; in refill_fl()
550 d = q->desc; in refill_fl()
555 q->credits += count; in refill_fl()
556 q->pend_cred += count; in refill_fl()
557 ring_fl_db(adap, q); in refill_fl()
571 * @q: the SGE free list
577 static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q, in recycle_rx_buf() argument
580 struct rx_desc *from = &q->desc[idx]; in recycle_rx_buf()
581 struct rx_desc *to = &q->desc[q->pidx]; in recycle_rx_buf()
583 q->sdesc[q->pidx] = q->sdesc[idx]; in recycle_rx_buf()
587 to->len_gen = cpu_to_be32(V_FLD_GEN1(q->gen)); in recycle_rx_buf()
588 to->gen2 = cpu_to_be32(V_FLD_GEN2(q->gen)); in recycle_rx_buf()
590 if (++q->pidx == q->size) { in recycle_rx_buf()
591 q->pidx = 0; in recycle_rx_buf()
592 q->gen ^= 1; in recycle_rx_buf()
595 q->credits++; in recycle_rx_buf()
596 q->pend_cred++; in recycle_rx_buf()
597 ring_fl_db(adap, q); in recycle_rx_buf()
640 * @q: the queue set
646 static void t3_reset_qset(struct sge_qset *q) in t3_reset_qset() argument
648 if (q->adap && in t3_reset_qset()
649 !(q->adap->flags & NAPI_INIT)) { in t3_reset_qset()
650 memset(q, 0, sizeof(*q)); in t3_reset_qset()
654 q->adap = NULL; in t3_reset_qset()
655 memset(&q->rspq, 0, sizeof(q->rspq)); in t3_reset_qset()
656 memset(q->fl, 0, sizeof(struct sge_fl) * SGE_RXQ_PER_SET); in t3_reset_qset()
657 memset(q->txq, 0, sizeof(struct sge_txq) * SGE_TXQ_PER_SET); in t3_reset_qset()
658 q->txq_stopped = 0; in t3_reset_qset()
659 q->tx_reclaim_timer.function = NULL; /* for t3_stop_sge_timers() */ in t3_reset_qset()
660 q->rx_reclaim_timer.function = NULL; in t3_reset_qset()
661 q->nomem = 0; in t3_reset_qset()
662 napi_free_frags(&q->napi); in t3_reset_qset()
669 * @q: the queue set
675 static void t3_free_qset(struct adapter *adapter, struct sge_qset *q) in t3_free_qset() argument
681 if (q->fl[i].desc) { in t3_free_qset()
683 t3_sge_disable_fl(adapter, q->fl[i].cntxt_id); in t3_free_qset()
685 free_rx_bufs(pdev, &q->fl[i]); in t3_free_qset()
686 kfree(q->fl[i].sdesc); in t3_free_qset()
688 q->fl[i].size * in t3_free_qset()
689 sizeof(struct rx_desc), q->fl[i].desc, in t3_free_qset()
690 q->fl[i].phys_addr); in t3_free_qset()
694 if (q->txq[i].desc) { in t3_free_qset()
696 t3_sge_enable_ecntxt(adapter, q->txq[i].cntxt_id, 0); in t3_free_qset()
698 if (q->txq[i].sdesc) { in t3_free_qset()
699 free_tx_desc(adapter, &q->txq[i], in t3_free_qset()
700 q->txq[i].in_use); in t3_free_qset()
701 kfree(q->txq[i].sdesc); in t3_free_qset()
704 q->txq[i].size * in t3_free_qset()
706 q->txq[i].desc, q->txq[i].phys_addr); in t3_free_qset()
707 __skb_queue_purge(&q->txq[i].sendq); in t3_free_qset()
710 if (q->rspq.desc) { in t3_free_qset()
712 t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id); in t3_free_qset()
715 q->rspq.size * sizeof(struct rsp_desc), in t3_free_qset()
716 q->rspq.desc, q->rspq.phys_addr); in t3_free_qset()
719 t3_reset_qset(q); in t3_free_qset()
733 qs->fl[1].cntxt_id = 2 * id + 1; in init_qset_cntxt()
750 /* alternatively: 3 * (n / 2) + 2 * (n & 1) */ in sgl_len()
751 return (3 * n) / 2 + (n & 1); in sgl_len()
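
sgl_len() above converts a scatter/gather entry count into flits: a pair of entries packs two 8-byte addresses and two 4-byte lengths into 24 bytes, i.e. three 8-byte flits, and a leftover single entry rounds up to two flits. A quick user-space check of the formula, illustrative only:

#include <assert.h>

static unsigned int sgl_len_check(unsigned int n)
{
	return (3 * n) / 2 + (n & 1);
}

int main(void)
{
	assert(sgl_len_check(1) == 2);	/* lone entry rounds up to 2 flits */
	assert(sgl_len_check(2) == 3);	/* one full pair: 3 flits */
	assert(sgl_len_check(3) == 5);	/* a pair plus a single */
	assert(sgl_len_check(4) == 6);	/* two full pairs */
	return 0;
}
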
810 refill_fl(adap, fl, min(MAX_RX_REFILL, fl->size - fl->credits - 1), in get_packet()
827 * @q: the queue
843 struct sge_rspq *q, unsigned int len, in get_packet_pg() argument
851 newskb = skb = q->pg_skb; in get_packet_pg()
866 q->rx_recycle_buf++; in get_packet_pg()
870 if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres))) in get_packet_pg()
945 return 1; in calc_tx_descs()
947 flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 2; in calc_tx_descs()
991 dma_unmap_single(&pdev->dev, addr[-1], skb_headlen(skb), in map_skb()
1026 j ^= 1; in write_sgl()
1038 * @q: the Tx queue
1047 static inline void check_ring_tx_db(struct adapter *adap, struct sge_txq *q) in check_ring_tx_db() argument
1050 clear_bit(TXQ_LAST_PKT_DB, &q->flags); in check_ring_tx_db()
1051 if (test_and_set_bit(TXQ_RUNNING, &q->flags) == 0) { in check_ring_tx_db()
1052 set_bit(TXQ_LAST_PKT_DB, &q->flags); in check_ring_tx_db()
1054 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in check_ring_tx_db()
1059 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in check_ring_tx_db()
1066 d->flit[TX_DESC_FLITS - 1] = cpu_to_be64(gen); in wr_gen2()
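
wr_gen2() above plants the generation value in the descriptor's final flit. The point of writing it last is ordering: the hardware should only see a matching generation once every earlier flit of the descriptor is already visible in memory. A sketch of that idea; the explicit barrier is an assumption of this sketch, not something shown in the listing:

/* Kernel context assumed for dma_wmb() and cpu_to_be64(). */
static void demo_finalize_desc(__be64 *flit, unsigned int nflits, unsigned int gen)
{
	/* flit[0 .. nflits-2] are assumed to be filled in already */
	dma_wmb();				/* order payload flits before the gen flit */
	flit[nflits - 1] = cpu_to_be64(gen);	/* descriptor becomes valid last */
}
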
1076 * @q: the SGE Tx queue
1091 const struct sge_txq *q, in write_wr_hdr_sgl() argument
1098 struct tx_sw_desc *sd = &q->sdesc[pidx]; in write_wr_hdr_sgl()
1107 if (likely(ndesc == 1)) { in write_wr_hdr_sgl()
1108 sd->eop = 1; in write_wr_hdr_sgl()
1109 wrp->wr_hi = htonl(F_WR_SOP | F_WR_EOP | V_WR_DATATYPE(1) | in write_wr_hdr_sgl()
1120 wrp->wr_hi = htonl(F_WR_SOP | V_WR_DATATYPE(1) | in write_wr_hdr_sgl()
1138 if (++pidx == q->size) { in write_wr_hdr_sgl()
1140 gen ^= 1; in write_wr_hdr_sgl()
1141 d = q->desc; in write_wr_hdr_sgl()
1142 sd = q->sdesc; in write_wr_hdr_sgl()
1147 wrp->wr_hi = htonl(V_WR_DATATYPE(1) | in write_wr_hdr_sgl()
1148 V_WR_SGLSFLT(1)) | wr_hi; in write_wr_hdr_sgl()
1150 sgl_flits + 1)) | in write_wr_hdr_sgl()
1153 flits = 1; in write_wr_hdr_sgl()
1155 sd->eop = 1; in write_wr_hdr_sgl()
1171 * @q: the Tx queue
1181 struct sge_txq *q, unsigned int ndesc, in write_tx_pkt_wr() argument
1185 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; in write_tx_pkt_wr()
1186 struct tx_desc *d = &q->desc[pidx]; in write_tx_pkt_wr()
1217 q->sdesc[pidx].skb = NULL; in write_tx_pkt_wr()
1230 V_WR_TID(q->token)); in write_tx_pkt_wr()
1239 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; in write_tx_pkt_wr()
1242 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, in write_tx_pkt_wr()
1244 htonl(V_WR_TID(q->token))); in write_tx_pkt_wr()
1248 struct sge_qset *qs, struct sge_txq *q) in t3_stop_tx_queue() argument
1252 q->stops++; in t3_stop_tx_queue()
1270 struct sge_txq *q; in t3_eth_xmit() local
1271 dma_addr_t addr[MAX_SKB_FRAGS + 1]; in t3_eth_xmit()
1284 q = &qs->txq[TXQ_ETH]; in t3_eth_xmit()
1287 reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in t3_eth_xmit()
1289 credits = q->size - q->in_use; in t3_eth_xmit()
1293 t3_stop_tx_queue(txq, qs, q); in t3_eth_xmit()
1296 dev->name, q->cntxt_id & 7); in t3_eth_xmit()
1308 q->in_use += ndesc; in t3_eth_xmit()
1309 if (unlikely(credits - ndesc < q->stop_thres)) { in t3_eth_xmit()
1310 t3_stop_tx_queue(txq, qs, q); in t3_eth_xmit()
1312 if (should_restart_tx(q) && in t3_eth_xmit()
1314 q->restarts++; in t3_eth_xmit()
1319 gen = q->gen; in t3_eth_xmit()
1320 q->unacked += ndesc; in t3_eth_xmit()
1321 compl = (q->unacked & 8) << (S_WR_COMPL - 3); in t3_eth_xmit()
1322 q->unacked &= 7; in t3_eth_xmit()
1323 pidx = q->pidx; in t3_eth_xmit()
1324 q->pidx += ndesc; in t3_eth_xmit()
1325 if (q->pidx >= q->size) { in t3_eth_xmit()
1326 q->pidx -= q->size; in t3_eth_xmit()
1327 q->gen ^= 1; in t3_eth_xmit()
1365 write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); in t3_eth_xmit()
1366 check_ring_tx_db(adap, q); in t3_eth_xmit()
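
In the t3_eth_xmit() fragments above, q->unacked accumulates descriptors and compl = (q->unacked & 8) << (S_WR_COMPL - 3) lifts bit 3 of that counter into the work request's completion-request flag, so a completion is asked for roughly once every eight descriptors before the counter is reduced modulo 8. A parameterised restatement, assuming S_WR_COMPL is the flag's bit position as the shift suggests:

static unsigned int demo_compl_flag(unsigned int *unacked, unsigned int ndesc,
				    unsigned int s_wr_compl)
{
	unsigned int compl;

	*unacked += ndesc;
	compl = (*unacked & 8) << (s_wr_compl - 3);	/* bit 3 -> completion bit */
	*unacked &= 7;					/* keep the remainder */
	return compl;
}
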
1389 memcpy(&to[1], &from[1], len - sizeof(*from)); in write_imm()
1391 skb_copy_bits(skb, sizeof(*from), &to[1], len - sizeof(*from)); in write_imm()
1405 * @q: the send queue
1415 * Returns 0 if enough descriptors are available, 1 if there aren't
1420 static inline int check_desc_avail(struct adapter *adap, struct sge_txq *q, in check_desc_avail() argument
1424 if (unlikely(!skb_queue_empty(&q->sendq))) { in check_desc_avail()
1425 addq_exit:__skb_queue_tail(&q->sendq, skb); in check_desc_avail()
1426 return 1; in check_desc_avail()
1428 if (unlikely(q->size - q->in_use < ndesc)) { in check_desc_avail()
1429 struct sge_qset *qs = txq_to_qset(q, qid); in check_desc_avail()
1434 if (should_restart_tx(q) && in check_desc_avail()
1438 q->stops++; in check_desc_avail()
1446 * @q: the SGE control Tx queue
1452 static inline void reclaim_completed_tx_imm(struct sge_txq *q) in reclaim_completed_tx_imm() argument
1454 unsigned int reclaim = q->processed - q->cleaned; in reclaim_completed_tx_imm()
1456 q->in_use -= reclaim; in reclaim_completed_tx_imm()
1457 q->cleaned += reclaim; in reclaim_completed_tx_imm()
1468 * @q: the control queue
1475 static int ctrl_xmit(struct adapter *adap, struct sge_txq *q, in ctrl_xmit() argument
1482 WARN_ON(1); in ctrl_xmit()
1488 wrp->wr_lo = htonl(V_WR_TID(q->token)); in ctrl_xmit()
1490 spin_lock(&q->lock); in ctrl_xmit()
1491 again:reclaim_completed_tx_imm(q); in ctrl_xmit()
1493 ret = check_desc_avail(adap, q, skb, 1, TXQ_CTRL); in ctrl_xmit()
1495 if (ret == 1) { in ctrl_xmit()
1496 spin_unlock(&q->lock); in ctrl_xmit()
1502 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in ctrl_xmit()
1504 q->in_use++; in ctrl_xmit()
1505 if (++q->pidx >= q->size) { in ctrl_xmit()
1506 q->pidx = 0; in ctrl_xmit()
1507 q->gen ^= 1; in ctrl_xmit()
1509 spin_unlock(&q->lock); in ctrl_xmit()
1512 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in ctrl_xmit()
1527 struct sge_txq *q = &qs->txq[TXQ_CTRL]; in restart_ctrlq() local
1529 spin_lock(&q->lock); in restart_ctrlq()
1530 again:reclaim_completed_tx_imm(q); in restart_ctrlq()
1532 while (q->in_use < q->size && in restart_ctrlq()
1533 (skb = __skb_dequeue(&q->sendq)) != NULL) { in restart_ctrlq()
1535 write_imm(&q->desc[q->pidx], skb, skb->len, q->gen); in restart_ctrlq()
1537 if (++q->pidx >= q->size) { in restart_ctrlq()
1538 q->pidx = 0; in restart_ctrlq()
1539 q->gen ^= 1; in restart_ctrlq()
1541 q->in_use++; in restart_ctrlq()
1544 if (!skb_queue_empty(&q->sendq)) { in restart_ctrlq()
1548 if (should_restart_tx(q) && in restart_ctrlq()
1551 q->stops++; in restart_ctrlq()
1554 spin_unlock(&q->lock); in restart_ctrlq()
1557 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in restart_ctrlq()
1612 *p++ = be64_to_cpu(sgl->addr[1]); in setup_deferred_unmapping()
1622 * @q: the Tx queue
1632 struct sge_txq *q, unsigned int pidx, in write_ofld_wr() argument
1638 struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; in write_ofld_wr()
1639 struct tx_desc *d = &q->desc[pidx]; in write_ofld_wr()
1642 q->sdesc[pidx].skb = NULL; in write_ofld_wr()
1650 memcpy(&d->flit[1], &from[1], in write_ofld_wr()
1654 sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; in write_ofld_wr()
1663 write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, in write_ofld_wr()
1679 return 1; /* packet fits as immediate data */ in calc_tx_descs_ofld()
1691 * @q: the Tx offload queue
1696 static int ofld_xmit(struct adapter *adap, struct sge_txq *q, in ofld_xmit() argument
1702 spin_lock(&q->lock); in ofld_xmit()
1703 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in ofld_xmit()
1705 ret = check_desc_avail(adap, q, skb, ndesc, TXQ_OFLD); in ofld_xmit()
1707 if (ret == 1) { in ofld_xmit()
1709 spin_unlock(&q->lock); in ofld_xmit()
1717 spin_unlock(&q->lock); in ofld_xmit()
1721 gen = q->gen; in ofld_xmit()
1722 q->in_use += ndesc; in ofld_xmit()
1723 pidx = q->pidx; in ofld_xmit()
1724 q->pidx += ndesc; in ofld_xmit()
1725 if (q->pidx >= q->size) { in ofld_xmit()
1726 q->pidx -= q->size; in ofld_xmit()
1727 q->gen ^= 1; in ofld_xmit()
1729 spin_unlock(&q->lock); in ofld_xmit()
1731 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); in ofld_xmit()
1732 check_ring_tx_db(adap, q); in ofld_xmit()
1747 struct sge_txq *q = &qs->txq[TXQ_OFLD]; in restart_offloadq() local
1752 spin_lock(&q->lock); in restart_offloadq()
1753 again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); in restart_offloadq()
1755 while ((skb = skb_peek(&q->sendq)) != NULL) { in restart_offloadq()
1759 if (unlikely(q->size - q->in_use < ndesc)) { in restart_offloadq()
1763 if (should_restart_tx(q) && in restart_offloadq()
1766 q->stops++; in restart_offloadq()
1774 gen = q->gen; in restart_offloadq()
1775 q->in_use += ndesc; in restart_offloadq()
1776 pidx = q->pidx; in restart_offloadq()
1777 q->pidx += ndesc; in restart_offloadq()
1779 if (q->pidx >= q->size) { in restart_offloadq()
1780 q->pidx -= q->size; in restart_offloadq()
1781 q->gen ^= 1; in restart_offloadq()
1783 __skb_unlink(skb, &q->sendq); in restart_offloadq()
1784 spin_unlock(&q->lock); in restart_offloadq()
1786 write_ofld_wr(adap, skb, q, pidx, gen, ndesc, in restart_offloadq()
1788 spin_lock(&q->lock); in restart_offloadq()
1790 spin_unlock(&q->lock); in restart_offloadq()
1793 set_bit(TXQ_RUNNING, &q->flags); in restart_offloadq()
1794 set_bit(TXQ_LAST_PKT_DB, &q->flags); in restart_offloadq()
1799 F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); in restart_offloadq()
1807 * set is carried in bits 1-3 in the packet's priority.
1811 return skb->priority >> 1; in queue_set()
1823 return skb->priority & 1; in is_ctrl_pkt()
1833 * should be sent as regular or control, bits 1-3 select the queue set.
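
queue_set() and is_ctrl_pkt() above decode an offload packet's priority field: bit 0 says whether the packet goes to the control queue, bits 1-3 pick the queue set. An illustrative encoder for the same layout — a hypothetical helper, not from the driver:

static inline unsigned int mk_ofld_priority(unsigned int qset, unsigned int is_ctrl)
{
	/* queue_set() recovers qset, is_ctrl_pkt() recovers is_ctrl */
	return (qset << 1) | (is_ctrl & 1);
}
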
1848 * @q: the SGE response queue
1855 static inline void offload_enqueue(struct sge_rspq *q, struct sk_buff *skb) in offload_enqueue() argument
1857 int was_empty = skb_queue_empty(&q->rx_queue); in offload_enqueue()
1859 __skb_queue_tail(&q->rx_queue, skb); in offload_enqueue()
1862 struct sge_qset *qs = rspq_to_qset(q); in offload_enqueue()
1871 * @q: the SGE response queue that assembled the bundle
1878 struct sge_rspq *q, in deliver_partial_bundle() argument
1882 q->offload_bundles++; in deliver_partial_bundle()
1901 struct sge_rspq *q = &qs->rspq; in ofld_poll() local
1910 spin_lock_irq(&q->lock); in ofld_poll()
1912 skb_queue_splice_init(&q->rx_queue, &queue); in ofld_poll()
1915 spin_unlock_irq(&q->lock); in ofld_poll()
1918 spin_unlock_irq(&q->lock); in ofld_poll()
1930 q->offload_bundles++; in ofld_poll()
1938 spin_lock_irq(&q->lock); in ofld_poll()
1939 skb_queue_splice(&queue, &q->rx_queue); in ofld_poll()
1940 spin_unlock_irq(&q->lock); in ofld_poll()
1942 deliver_partial_bundle(&adapter->tdev, q, skbs, ngathered); in ofld_poll()
2040 arp_ptr = (unsigned char *)(arp + 1); in cxgb3_arp_process()
2284 * @q: the response queue
2290 const struct sge_rspq *q) in is_new_response() argument
2292 return (r->intr_gen & F_RSPD_GEN2) == q->gen; in is_new_response()
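
is_new_response() above is the response-ring side of the generation-bit handshake that recurs throughout this listing (q->gen ^= 1 on every index wrap). A stripped-down sketch of the protocol on a hypothetical ring:

struct demo_rspq {
	unsigned int cidx;	/* consumer index */
	unsigned int size;	/* entries in the ring */
	unsigned int gen;	/* current generation, 0 or 1 */
};

/* An entry is new only while the generation the hardware wrote matches ours. */
static int demo_is_new(unsigned int entry_gen, const struct demo_rspq *q)
{
	return entry_gen == q->gen;
}

static void demo_consume_one(struct demo_rspq *q)
{
	if (++q->cidx == q->size) {
		q->cidx = 0;
		q->gen ^= 1;	/* entries from the previous lap now look stale */
	}
}
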
2295 static inline void clear_rspq_bufstate(struct sge_rspq * const q) in clear_rspq_bufstate() argument
2297 q->pg_skb = NULL; in clear_rspq_bufstate()
2298 q->rx_recycle_buf = 0; in clear_rspq_bufstate()
2328 struct sge_rspq *q = &qs->rspq; in process_responses() local
2329 struct rsp_desc *r = &q->desc[q->cidx]; in process_responses()
2335 q->next_holdoff = q->holdoff_tmr; in process_responses()
2337 while (likely(budget_left && is_new_response(r, q))) { in process_responses()
2358 q->async_notif++; in process_responses()
2363 q->next_holdoff = NOMEM_INTR_DELAY; in process_responses()
2364 q->nomem++; in process_responses()
2369 q->imm_data++; in process_responses()
2376 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0]; in process_responses()
2389 skb = get_packet_pg(adap, fl, q, in process_responses()
2393 q->pg_skb = skb; in process_responses()
2400 q->rx_drops++; in process_responses()
2407 q->pure_rsps++; in process_responses()
2415 if (unlikely(++q->cidx == q->size)) { in process_responses()
2416 q->cidx = 0; in process_responses()
2417 q->gen ^= 1; in process_responses()
2418 r = q->desc; in process_responses()
2422 if (++q->credits >= (q->size / 4)) { in process_responses()
2423 refill_rspq(adap, q, q->credits); in process_responses()
2424 q->credits = 0; in process_responses()
2433 rx_eth(adap, q, skb, ethpad, lro); in process_responses()
2435 q->offload_pkts++; in process_responses()
2439 ngathered = rx_offload(&adap->tdev, q, skb, in process_responses()
2445 clear_rspq_bufstate(q); in process_responses()
2450 deliver_partial_bundle(&adap->tdev, q, offload_skbs, ngathered); in process_responses()
2527 * Returns 1 if it encounters a valid data-carrying response, 0 otherwise.
2532 struct sge_rspq *q = &qs->rspq; in process_pure_responses() local
2539 if (unlikely(++q->cidx == q->size)) { in process_pure_responses()
2540 q->cidx = 0; in process_pure_responses()
2541 q->gen ^= 1; in process_pure_responses()
2542 r = q->desc; in process_pure_responses()
2551 q->pure_rsps++; in process_pure_responses()
2552 if (++q->credits >= (q->size / 4)) { in process_pure_responses()
2553 refill_rspq(adap, q, q->credits); in process_pure_responses()
2554 q->credits = 0; in process_pure_responses()
2556 if (!is_new_response(r, q)) in process_pure_responses()
2568 return is_new_response(r, q); in process_pure_responses()
2574 * @q: the response queue
2577 * new SGE responses. If there are no new responses it returns -1. If
2581 * signaling responses it schedules the NAPI handler. Returns 1 if it
2586 static inline int handle_responses(struct adapter *adap, struct sge_rspq *q) in handle_responses() argument
2588 struct sge_qset *qs = rspq_to_qset(q); in handle_responses()
2589 struct rsp_desc *r = &q->desc[q->cidx]; in handle_responses()
2591 if (!is_new_response(r, q)) in handle_responses()
2592 return -1; in handle_responses()
2595 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in handle_responses()
2596 V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx)); in handle_responses()
2600 return 1; in handle_responses()
2611 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix() local
2613 spin_lock(&q->lock); in t3_sge_intr_msix()
2614 if (process_responses(adap, qs, -1) == 0) in t3_sge_intr_msix()
2615 q->unhandled_irqs++; in t3_sge_intr_msix()
2616 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_sge_intr_msix()
2617 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); in t3_sge_intr_msix()
2618 spin_unlock(&q->lock); in t3_sge_intr_msix()
2629 struct sge_rspq *q = &qs->rspq; in t3_sge_intr_msix_napi() local
2631 spin_lock(&q->lock); in t3_sge_intr_msix_napi()
2633 if (handle_responses(qs->adap, q) < 0) in t3_sge_intr_msix_napi()
2634 q->unhandled_irqs++; in t3_sge_intr_msix_napi()
2635 spin_unlock(&q->lock); in t3_sge_intr_msix_napi()
2649 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi() local
2651 spin_lock(&q->lock); in t3_intr_msi()
2653 if (process_responses(adap, &adap->sge.qs[0], -1)) { in t3_intr_msi()
2654 t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) | in t3_intr_msi()
2655 V_NEWTIMER(q->next_holdoff) | V_NEWINDEX(q->cidx)); in t3_intr_msi()
2656 new_packets = 1; in t3_intr_msi()
2660 process_responses(adap, &adap->sge.qs[1], -1)) { in t3_intr_msi()
2661 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; in t3_intr_msi()
2666 new_packets = 1; in t3_intr_msi()
2670 q->unhandled_irqs++; in t3_intr_msi()
2672 spin_unlock(&q->lock); in t3_intr_msi()
2678 struct sge_rspq *q = &qs->rspq; in rspq_check_napi() local
2681 is_new_response(&q->desc[q->cidx], q)) { in rspq_check_napi()
2683 return 1; in rspq_check_napi()
2699 struct sge_rspq *q = &adap->sge.qs[0].rspq; in t3_intr_msi_napi() local
2701 spin_lock(&q->lock); in t3_intr_msi_napi()
2705 new_packets += rspq_check_napi(&adap->sge.qs[1]); in t3_intr_msi_napi()
2707 q->unhandled_irqs++; in t3_intr_msi_napi()
2709 spin_unlock(&q->lock); in t3_intr_msi_napi()
2721 work = process_responses(adap, rspq_to_qset(rq), -1); in process_responses_gts()
2738 struct sge_rspq *q1 = &adap->sge.qs[1].rspq; in t3_intr()
2788 if (likely(map & 1)) in t3b_intr()
2792 process_responses_gts(adap, &adap->sge.qs[1].rspq); in t3b_intr()
2823 if (likely(map & 1)) in t3b_intr_napi()
2827 napi_schedule(&adap->sge.qs[1].napi); in t3b_intr_napi()
2983 if (status & (1 << qs->rspq.cntxt_id)) { in sge_timer_rx()
2987 refill_rspq(adap, &qs->rspq, 1); in sge_timer_rx()
2990 1 << qs->rspq.cntxt_id); in sge_timer_rx()
2997 if (qs->fl[1].credits < qs->fl[1].size) in sge_timer_rx()
2998 __refill_fl(adap, &qs->fl[1]); in sge_timer_rx()
3016 qs->rspq.holdoff_tmr = max(p->coalesce_usecs * 10, 1U);/* can't be 0 */ in t3_update_qset_coalesce()
3043 struct sge_qset *q = &adapter->sge.qs[id]; in t3_sge_alloc_qset() local
3045 init_qset_cntxt(q, id); in t3_sge_alloc_qset()
3046 timer_setup(&q->tx_reclaim_timer, sge_timer_tx, 0); in t3_sge_alloc_qset()
3047 timer_setup(&q->rx_reclaim_timer, sge_timer_rx, 0); in t3_sge_alloc_qset()
3049 q->fl[0].desc = alloc_ring(adapter->pdev, p->fl_size, in t3_sge_alloc_qset()
3052 &q->fl[0].phys_addr, &q->fl[0].sdesc); in t3_sge_alloc_qset()
3053 if (!q->fl[0].desc) in t3_sge_alloc_qset()
3056 q->fl[1].desc = alloc_ring(adapter->pdev, p->jumbo_size, in t3_sge_alloc_qset()
3059 &q->fl[1].phys_addr, &q->fl[1].sdesc); in t3_sge_alloc_qset()
3060 if (!q->fl[1].desc) in t3_sge_alloc_qset()
3063 q->rspq.desc = alloc_ring(adapter->pdev, p->rspq_size, in t3_sge_alloc_qset()
3065 &q->rspq.phys_addr, NULL); in t3_sge_alloc_qset()
3066 if (!q->rspq.desc) in t3_sge_alloc_qset()
3076 q->txq[i].desc = alloc_ring(adapter->pdev, p->txq_size[i], in t3_sge_alloc_qset()
3078 &q->txq[i].phys_addr, in t3_sge_alloc_qset()
3079 &q->txq[i].sdesc); in t3_sge_alloc_qset()
3080 if (!q->txq[i].desc) in t3_sge_alloc_qset()
3083 q->txq[i].gen = 1; in t3_sge_alloc_qset()
3084 q->txq[i].size = p->txq_size[i]; in t3_sge_alloc_qset()
3085 spin_lock_init(&q->txq[i].lock); in t3_sge_alloc_qset()
3086 skb_queue_head_init(&q->txq[i].sendq); in t3_sge_alloc_qset()
3089 INIT_WORK(&q->txq[TXQ_OFLD].qresume_task, restart_offloadq); in t3_sge_alloc_qset()
3090 INIT_WORK(&q->txq[TXQ_CTRL].qresume_task, restart_ctrlq); in t3_sge_alloc_qset()
3092 q->fl[0].gen = q->fl[1].gen = 1; in t3_sge_alloc_qset()
3093 q->fl[0].size = p->fl_size; in t3_sge_alloc_qset()
3094 q->fl[1].size = p->jumbo_size; in t3_sge_alloc_qset()
3096 q->rspq.gen = 1; in t3_sge_alloc_qset()
3097 q->rspq.size = p->rspq_size; in t3_sge_alloc_qset()
3098 spin_lock_init(&q->rspq.lock); in t3_sge_alloc_qset()
3099 skb_queue_head_init(&q->rspq.rx_queue); in t3_sge_alloc_qset()
3101 q->txq[TXQ_ETH].stop_thres = nports * in t3_sge_alloc_qset()
3102 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); in t3_sge_alloc_qset()
3105 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE; in t3_sge_alloc_qset()
3107 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data); in t3_sge_alloc_qset()
3110 q->fl[1].buf_size = FL1_PG_CHUNK_SIZE; in t3_sge_alloc_qset()
3112 q->fl[1].buf_size = is_offload(adapter) ? in t3_sge_alloc_qset()
3117 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0; in t3_sge_alloc_qset()
3118 q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0; in t3_sge_alloc_qset()
3119 q->fl[0].order = FL0_PG_ORDER; in t3_sge_alloc_qset()
3120 q->fl[1].order = FL1_PG_ORDER; in t3_sge_alloc_qset()
3121 q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE; in t3_sge_alloc_qset()
3122 q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE; in t3_sge_alloc_qset()
3127 ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx, in t3_sge_alloc_qset()
3128 q->rspq.phys_addr, q->rspq.size, in t3_sge_alloc_qset()
3129 q->fl[0].buf_size - SGE_PG_RSVD, 1, 0); in t3_sge_alloc_qset()
3134 ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0, in t3_sge_alloc_qset()
3135 q->fl[i].phys_addr, q->fl[i].size, in t3_sge_alloc_qset()
3136 q->fl[i].buf_size - SGE_PG_RSVD, in t3_sge_alloc_qset()
3137 p->cong_thres, 1, 0); in t3_sge_alloc_qset()
3142 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_ETH].cntxt_id, USE_GTS, in t3_sge_alloc_qset()
3143 SGE_CNTXT_ETH, id, q->txq[TXQ_ETH].phys_addr, in t3_sge_alloc_qset()
3144 q->txq[TXQ_ETH].size, q->txq[TXQ_ETH].token, in t3_sge_alloc_qset()
3145 1, 0); in t3_sge_alloc_qset()
3149 if (ntxq > 1) { in t3_sge_alloc_qset()
3150 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_OFLD].cntxt_id, in t3_sge_alloc_qset()
3152 q->txq[TXQ_OFLD].phys_addr, in t3_sge_alloc_qset()
3153 q->txq[TXQ_OFLD].size, 0, 1, 0); in t3_sge_alloc_qset()
3159 ret = t3_sge_init_ecntxt(adapter, q->txq[TXQ_CTRL].cntxt_id, 0, in t3_sge_alloc_qset()
3161 q->txq[TXQ_CTRL].phys_addr, in t3_sge_alloc_qset()
3162 q->txq[TXQ_CTRL].size, in t3_sge_alloc_qset()
3163 q->txq[TXQ_CTRL].token, 1, 0); in t3_sge_alloc_qset()
3170 q->adap = adapter; in t3_sge_alloc_qset()
3171 q->netdev = dev; in t3_sge_alloc_qset()
3172 q->tx_q = netdevq; in t3_sge_alloc_qset()
3173 t3_update_qset_coalesce(q, p); in t3_sge_alloc_qset()
3175 avail = refill_fl(adapter, &q->fl[0], q->fl[0].size, in t3_sge_alloc_qset()
3182 if (avail < q->fl[0].size) in t3_sge_alloc_qset()
3186 avail = refill_fl(adapter, &q->fl[1], q->fl[1].size, in t3_sge_alloc_qset()
3188 if (avail < q->fl[1].size) in t3_sge_alloc_qset()
3189 CH_WARN(adapter, "free list queue 1 enabled with %d credits\n", in t3_sge_alloc_qset()
3191 refill_rspq(adapter, &q->rspq, q->rspq.size - 1); in t3_sge_alloc_qset()
3193 t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) | in t3_sge_alloc_qset()
3194 V_NEWTIMER(q->rspq.holdoff_tmr)); in t3_sge_alloc_qset()
3201 t3_free_qset(adapter, q); in t3_sge_alloc_qset()
3216 struct sge_qset *q = &adap->sge.qs[i]; in t3_start_sge_timers() local
3218 if (q->tx_reclaim_timer.function) in t3_start_sge_timers()
3219 mod_timer(&q->tx_reclaim_timer, in t3_start_sge_timers()
3222 if (q->rx_reclaim_timer.function) in t3_start_sge_timers()
3223 mod_timer(&q->rx_reclaim_timer, in t3_start_sge_timers()
3239 struct sge_qset *q = &adap->sge.qs[i]; in t3_stop_sge_timers() local
3241 if (q->tx_reclaim_timer.function) in t3_stop_sge_timers()
3242 del_timer_sync(&q->tx_reclaim_timer); in t3_stop_sge_timers()
3243 if (q->rx_reclaim_timer.function) in t3_stop_sge_timers()
3244 del_timer_sync(&q->rx_reclaim_timer); in t3_stop_sge_timers()
3332 V_USERSPACESIZE(ups ? ups - 1 : 0) | F_ISCSICOALESCING; in t3_sge_init()
3333 #if SGE_NUM_GENBITS == 1 in t3_sge_init()
3372 struct qset_params *q = p->qset + i; in t3_sge_prep() local
3374 q->polling = adap->params.rev > 0; in t3_sge_prep()
3375 q->coalesce_usecs = 5; in t3_sge_prep()
3376 q->rspq_size = 1024; in t3_sge_prep()
3377 q->fl_size = 1024; in t3_sge_prep()
3378 q->jumbo_size = 512; in t3_sge_prep()
3379 q->txq_size[TXQ_ETH] = 1024; in t3_sge_prep()
3380 q->txq_size[TXQ_OFLD] = 1024; in t3_sge_prep()
3381 q->txq_size[TXQ_CTRL] = 256; in t3_sge_prep()
3382 q->cong_thres = 0; in t3_sge_prep()