Lines Matching refs:eosw_txq
2125 struct sge_eosw_txq *eosw_txq, u32 ndesc) in cxgb4_eosw_txq_free_desc() argument
2129 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
2139 eosw_txq_advance_index(&eosw_txq->last_cidx, 1, in cxgb4_eosw_txq_free_desc()
2140 eosw_txq->ndesc); in cxgb4_eosw_txq_free_desc()
2141 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
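
Note: the lines above are the consumer-side reclaim loop: last_cidx trails behind, naming the oldest descriptor the hardware has finished with, and each iteration frees one descriptor and steps the index around the ring via eosw_txq_advance_index(). A minimal user-space model of that walk, assuming a simplified descriptor that only carries an skb pointer (the real cxgb4_eosw_txq_free_desc() also unmaps DMA buffers and frees the skb); all names here are illustrative:

struct eosw_desc { void *skb; };

struct eosw_ring {
        struct eosw_desc *desc;
        unsigned int ndesc;     /* ring size */
        unsigned int last_cidx; /* next descriptor to reclaim */
};

/* Modeled on eosw_txq_advance_index(): step an index, wrapping at max. */
static void advance_index(unsigned int *idx, unsigned int n, unsigned int max)
{
        unsigned int val = *idx + n;

        if (val >= max)
                val -= max;
        *idx = val;
}

static void ring_free_desc(struct eosw_ring *q, unsigned int ndesc)
{
        while (ndesc--) {
                struct eosw_desc *d = &q->desc[q->last_cidx];

                d->skb = NULL;  /* stands in for freeing skb + DMA unmap */
                advance_index(&q->last_cidx, 1, q->ndesc);
        }
}
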
2145 static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n) in eosw_txq_advance() argument
2147 eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc); in eosw_txq_advance()
2148 eosw_txq->inuse += n; in eosw_txq_advance()
2151 static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq, in eosw_txq_enqueue() argument
2154 if (eosw_txq->inuse == eosw_txq->ndesc) in eosw_txq_enqueue()
2157 eosw_txq->desc[eosw_txq->pidx].skb = skb; in eosw_txq_enqueue()
2161 static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq) in eosw_txq_peek() argument
2163 return eosw_txq->desc[eosw_txq->last_pidx].skb; in eosw_txq_peek()
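
Note: eosw_txq_advance(), eosw_txq_enqueue() and eosw_txq_peek() are the producer side of the same software ring: enqueue parks an skb at pidx (failing when inuse has reached ndesc), advance publishes n slots and grows inuse, and peek reads the oldest queued-but-unsent entry at last_pidx. A self-contained sketch of the trio, with simplified types standing in for the cxgb4 structures:

struct sw_desc { void *skb; };

struct sw_txq {
        struct sw_desc *desc;
        unsigned int ndesc;     /* ring capacity */
        unsigned int inuse;     /* occupied slots */
        unsigned int pidx;      /* producer: next free slot */
        unsigned int last_pidx; /* oldest queued-but-unsent slot */
};

static void sw_advance_index(unsigned int *idx, unsigned int n,
                             unsigned int max)
{
        unsigned int val = *idx + n;

        if (val >= max)
                val -= max;     /* wrap around the circular ring */
        *idx = val;
}

/* Mirrors eosw_txq_enqueue(): fail when the ring is full, else park
 * the skb at pidx; -1 stands in for the kernel's error code. */
static int sw_enqueue(struct sw_txq *q, void *skb)
{
        if (q->inuse == q->ndesc)
                return -1;

        q->desc[q->pidx].skb = skb;
        return 0;
}

/* Mirrors eosw_txq_advance(): publish n descriptors to the sender. */
static void sw_advance(struct sw_txq *q, unsigned int n)
{
        sw_advance_index(&q->pidx, n, q->ndesc);
        q->inuse += n;
}

/* Mirrors eosw_txq_peek(): look at the next skb awaiting transmission. */
static void *sw_peek(const struct sw_txq *q)
{
        return q->desc[q->last_pidx].skb;
}
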
2194 static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq, in write_eo_wr() argument
2214 if (!eosw_txq->ncompl || in write_eo_wr()
2215 (eosw_txq->last_compl + wrlen16) >= in write_eo_wr()
2218 eosw_txq->ncompl++; in write_eo_wr()
2219 eosw_txq->last_compl = 0; in write_eo_wr()
2226 FW_WR_FLOWID_V(eosw_txq->hwtid)); in write_eo_wr()
2251 eosw_txq->cred -= wrlen16; in write_eo_wr()
2252 eosw_txq->last_compl += wrlen16; in write_eo_wr()
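
Note: write_eo_wr() charges each work request against the queue's 16-byte credit pool (cred) and decides whether to request a hardware completion: always when none is outstanding (ncompl == 0), and again whenever last_compl, the credits spent since the last completion, would cross a threshold. The threshold itself sits on a line not matched above, so the sketch below takes it as a parameter; eo_charge_wr() and struct eo_credits are illustrative names:

#include <stdbool.h>

struct eo_credits {
        unsigned int cred;       /* remaining 16-byte credits */
        unsigned int ncompl;     /* completions outstanding */
        unsigned int last_compl; /* credits used since last completion */
};

/* Returns true when this WR should carry a completion request. */
static bool eo_charge_wr(struct eo_credits *c, unsigned int wrlen16,
                         unsigned int compl_threshold)
{
        bool request_compl = false;

        if (!c->ncompl || c->last_compl + wrlen16 >= compl_threshold) {
                request_compl = true;
                c->ncompl++;
                c->last_compl = 0;
        }

        c->cred -= wrlen16;        /* charge the WR against the queue */
        c->last_compl += wrlen16;
        return request_compl;
}
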
2257 struct sge_eosw_txq *eosw_txq) in ethofld_hard_xmit() argument
2273 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid]; in ethofld_hard_xmit()
2277 d = &eosw_txq->desc[eosw_txq->last_pidx]; in ethofld_hard_xmit()
2282 if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE && in ethofld_hard_xmit()
2283 eosw_txq->last_pidx == eosw_txq->flowc_idx)) { in ethofld_hard_xmit()
2287 if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND) in ethofld_hard_xmit()
2310 if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) { in ethofld_hard_xmit()
2317 eosw_txq->state = next_state; in ethofld_hard_xmit()
2318 eosw_txq->cred -= wrlen16; in ethofld_hard_xmit()
2319 eosw_txq->ncompl++; in ethofld_hard_xmit()
2320 eosw_txq->last_compl = 0; in ethofld_hard_xmit()
2324 cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen); in ethofld_hard_xmit()
2385 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc); in ethofld_hard_xmit()
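
Note: in ethofld_hard_xmit(), reaching the descriptor stored at flowc_idx while the queue is mid-transition means the skb being sent is the FLOWC work request itself, so the queue moves from its *_SEND state to the matching *_REPLY state and the FLOWC's credits are charged up front; the unlikely(left < 0 || wrlen16 > cred) test stops transmission until the restart tasklet returns credits. A sketch of the transition; the enum mirrors the driver's CXGB4_EO_STATE_* names and eo_flowc_sent_state() is an illustrative helper:

enum eo_state {
        EO_STATE_CLOSED,
        EO_STATE_ACTIVE,
        EO_STATE_FLOWC_OPEN_SEND,
        EO_STATE_FLOWC_OPEN_REPLY,
        EO_STATE_FLOWC_CLOSE_SEND,
        EO_STATE_FLOWC_CLOSE_REPLY,
};

/* State taken once the FLOWC WR has been handed to hardware:
 * OPEN_SEND -> OPEN_REPLY, otherwise CLOSE_SEND -> CLOSE_REPLY. */
static enum eo_state eo_flowc_sent_state(enum eo_state cur)
{
        return cur == EO_STATE_FLOWC_OPEN_SEND ?
               EO_STATE_FLOWC_OPEN_REPLY : EO_STATE_FLOWC_CLOSE_REPLY;
}
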
2392 static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq) in ethofld_xmit() argument
2397 switch (eosw_txq->state) { in ethofld_xmit()
2401 pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in ethofld_xmit()
2403 pktcount += eosw_txq->ndesc; in ethofld_xmit()
2413 skb = eosw_txq_peek(eosw_txq); in ethofld_xmit()
2415 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, in ethofld_xmit()
2416 eosw_txq->ndesc); in ethofld_xmit()
2420 ret = ethofld_hard_xmit(dev, eosw_txq); in ethofld_xmit()
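
Note: ethofld_xmit() first counts the descriptors sitting between last_pidx and pidx, correcting for wraparound when the producer index has lapped the ring, then drains them one at a time through ethofld_hard_xmit(). The wrap-corrected count in isolation (eo_pending() is an illustrative name):

/* Pending descriptors between the producer index and the send cursor. */
static unsigned int eo_pending(unsigned int pidx, unsigned int last_pidx,
                               unsigned int ndesc)
{
        int pktcount = (int)pidx - (int)last_pidx;

        if (pktcount < 0)
                pktcount += (int)ndesc; /* producer wrapped past cursor */
        return (unsigned int)pktcount;
}
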
2432 struct sge_eosw_txq *eosw_txq; in cxgb4_ethofld_xmit() local
2442 eosw_txq = &tc_port_mqprio->eosw_txq[qid]; in cxgb4_ethofld_xmit()
2443 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2444 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_xmit()
2447 ret = eosw_txq_enqueue(eosw_txq, skb); in cxgb4_ethofld_xmit()
2457 eosw_txq_advance(eosw_txq, 1); in cxgb4_ethofld_xmit()
2458 ethofld_xmit(dev, eosw_txq); in cxgb4_ethofld_xmit()
2459 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2463 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
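
Note: cxgb4_ethofld_xmit() is the transmit entry point: the per-queue lock is held across the whole enqueue-publish-kick sequence, and an skb is accepted only while the queue is in CXGB4_EO_STATE_ACTIVE. A skeleton of that ordering, using a pthread mutex in place of spin_lock_bh() and caller-supplied stubs for the three helpers; all names are illustrative:

#include <pthread.h>

enum txq_state { TXQ_CLOSED, TXQ_ACTIVE };

struct txq {
        pthread_mutex_t lock;   /* stands in for the BH spinlock */
        enum txq_state state;
        /* ring fields elided */
};

/* enqueue/advance/send stand in for eosw_txq_enqueue(),
 * eosw_txq_advance() and ethofld_xmit(). */
static int txq_xmit(struct txq *q, void *skb,
                    int (*enqueue)(struct txq *, void *),
                    void (*advance)(struct txq *, unsigned int),
                    void (*send)(struct txq *))
{
        int ret = -1;

        pthread_mutex_lock(&q->lock);
        if (q->state != TXQ_ACTIVE)
                goto out;       /* only an open queue accepts traffic */

        ret = enqueue(q, skb);
        if (ret)
                goto out;       /* ring full */

        advance(q, 1);          /* publish the descriptor */
        send(q);                /* push pending work toward hardware */
out:
        pthread_mutex_unlock(&q->lock);
        return ret;
}
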
2493 static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq) in eosw_txq_flush_pending_skbs() argument
2495 int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in eosw_txq_flush_pending_skbs()
2496 int pidx = eosw_txq->pidx; in eosw_txq_flush_pending_skbs()
2503 pktcount += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2508 pidx += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2510 skb = eosw_txq->desc[pidx].skb; in eosw_txq_flush_pending_skbs()
2513 eosw_txq->desc[pidx].skb = NULL; in eosw_txq_flush_pending_skbs()
2514 eosw_txq->inuse--; in eosw_txq_flush_pending_skbs()
2518 eosw_txq->pidx = eosw_txq->last_pidx + 1; in eosw_txq_flush_pending_skbs()
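
Note: eosw_txq_flush_pending_skbs() drops every skb that was enqueued but never handed to hardware, i.e. the wrap-corrected span from last_pidx up to pidx, walking backwards from pidx before the producer index is reset. A user-space model, with free() standing in for the kernel's skb-freeing call:

#include <stdlib.h>

struct fl_desc { void *skb; };

struct fl_txq {
        struct fl_desc *desc;
        int ndesc;
        int pidx;       /* producer index */
        int last_pidx;  /* first unsent descriptor */
        int inuse;
};

static void fl_flush_pending(struct fl_txq *q)
{
        int pktcount = q->pidx - q->last_pidx;
        int pidx = q->pidx;

        if (pktcount < 0)
                pktcount += q->ndesc;   /* producer wrapped past cursor */

        while (pktcount--) {
                if (--pidx < 0)
                        pidx += q->ndesc;

                if (q->desc[pidx].skb) {
                        free(q->desc[pidx].skb);
                        q->desc[pidx].skb = NULL;
                        q->inuse--;
                }
        }
}
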
2536 struct sge_eosw_txq *eosw_txq; in cxgb4_ethofld_send_flowc() local
2551 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_send_flowc()
2552 if (!eosw_txq) in cxgb4_ethofld_send_flowc()
2559 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2561 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED) in cxgb4_ethofld_send_flowc()
2566 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_send_flowc()
2575 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid]; in cxgb4_ethofld_send_flowc()
2577 FW_WR_FLOWID_V(eosw_txq->hwtid)); in cxgb4_ethofld_send_flowc()
2600 eosw_txq_flush_pending_skbs(eosw_txq); in cxgb4_ethofld_send_flowc()
2602 ret = eosw_txq_enqueue(eosw_txq, skb); in cxgb4_ethofld_send_flowc()
2608 eosw_txq->state = next_state; in cxgb4_ethofld_send_flowc()
2609 eosw_txq->flowc_idx = eosw_txq->pidx; in cxgb4_ethofld_send_flowc()
2610 eosw_txq_advance(eosw_txq, 1); in cxgb4_ethofld_send_flowc()
2611 ethofld_xmit(dev, eosw_txq); in cxgb4_ethofld_send_flowc()
2614 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
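
Note: cxgb4_ethofld_send_flowc() gates the transition on current state: a FLOWC may only open a queue that is CLOSED and close one that is ACTIVE (the two checks above), and before a close it flushes any still-unsent skbs. Once the FLOWC is queued, flowc_idx records which ring slot holds it so the completion handler can recognize the reply later. A sketch of that bookkeeping; the opening flag and flowc_mark_sent() are illustrative, abstracting however the caller distinguishes bind from unbind:

enum flowc_state {
        FLOWC_CLOSED,
        FLOWC_ACTIVE,
        FLOWC_OPEN_SEND,
        FLOWC_CLOSE_SEND,
};

struct flowc_txq {
        enum flowc_state state;
        unsigned int pidx;
        unsigned int flowc_idx; /* ring slot holding the FLOWC WR */
};

static void flowc_mark_sent(struct flowc_txq *q, int opening)
{
        /* Open is only legal from CLOSED, close only from ACTIVE;
         * the driver validates this before getting here. */
        q->state = opening ? FLOWC_OPEN_SEND : FLOWC_CLOSE_SEND;
        q->flowc_idx = q->pidx; /* completions later match on this slot */
}
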
4001 struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t, in cxgb4_ethofld_restart() local
4005 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_restart()
4006 pktcount = eosw_txq->cidx - eosw_txq->last_cidx; in cxgb4_ethofld_restart()
4008 pktcount += eosw_txq->ndesc; in cxgb4_ethofld_restart()
4011 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev), in cxgb4_ethofld_restart()
4012 eosw_txq, pktcount); in cxgb4_ethofld_restart()
4013 eosw_txq->inuse -= pktcount; in cxgb4_ethofld_restart()
4019 ethofld_xmit(eosw_txq->netdev, eosw_txq); in cxgb4_ethofld_restart()
4020 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_restart()
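
Note: the cxgb4_ethofld_restart() tasklet is the resume path: the wrap-corrected span between last_cidx and cidx is everything the hardware has completed since the last pass, so it is freed through cxgb4_eosw_txq_free_desc(), inuse shrinks by the same amount, and ethofld_xmit() is retried with the reclaimed room. A sketch of the reclaim pass; struct rc_txq and the free_desc callback are illustrative stand-ins:

struct rc_txq {
        unsigned int ndesc;     /* ring size */
        unsigned int inuse;     /* occupied descriptors */
        unsigned int cidx;      /* completions reported by hardware */
        unsigned int last_cidx; /* completions already reclaimed */
};

/* free_desc() stands in for cxgb4_eosw_txq_free_desc(). */
static void rc_reclaim(struct rc_txq *q,
                       void (*free_desc)(struct rc_txq *, unsigned int))
{
        int pktcount = (int)q->cidx - (int)q->last_cidx;

        if (pktcount < 0)
                pktcount += (int)q->ndesc; /* completion index wrapped */

        if (pktcount) {
                free_desc(q, (unsigned int)pktcount);
                q->inuse -= (unsigned int)pktcount;
        }
}
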
4041 struct sge_eosw_txq *eosw_txq; in cxgb4_ethofld_rx_handler() local
4055 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_rx_handler()
4056 if (!eosw_txq) in cxgb4_ethofld_rx_handler()
4059 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4062 skb = eosw_txq->desc[eosw_txq->cidx].skb; in cxgb4_ethofld_rx_handler()
4066 if (unlikely((eosw_txq->state == in cxgb4_ethofld_rx_handler()
4068 eosw_txq->state == in cxgb4_ethofld_rx_handler()
4070 eosw_txq->cidx == eosw_txq->flowc_idx)) { in cxgb4_ethofld_rx_handler()
4072 if (eosw_txq->state == in cxgb4_ethofld_rx_handler()
4074 eosw_txq->state = CXGB4_EO_STATE_ACTIVE; in cxgb4_ethofld_rx_handler()
4076 eosw_txq->state = CXGB4_EO_STATE_CLOSED; in cxgb4_ethofld_rx_handler()
4077 complete(&eosw_txq->completion); in cxgb4_ethofld_rx_handler()
4079 hdr_len = eth_get_headlen(eosw_txq->netdev, in cxgb4_ethofld_rx_handler()
4085 eosw_txq_advance_index(&eosw_txq->cidx, 1, in cxgb4_ethofld_rx_handler()
4086 eosw_txq->ndesc); in cxgb4_ethofld_rx_handler()
4091 eosw_txq->cred += cpl->credits; in cxgb4_ethofld_rx_handler()
4092 eosw_txq->ncompl--; in cxgb4_ethofld_rx_handler()
4094 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4099 tasklet_schedule(&eosw_txq->qresume_tsk); in cxgb4_ethofld_rx_handler()
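
Note: cxgb4_ethofld_rx_handler() processes completion messages from hardware: completed skbs are freed as cidx advances, the FLOWC descriptor (cidx == flowc_idx) resolves a pending open to ACTIVE or a pending close to CLOSED (waking any waiter via complete()), the credits the firmware returned are added back to cred, ncompl drops, and the qresume tasklet is scheduled to restart transmission. A sketch of the state-and-credit step; the enum mirrors the driver's CXGB4_EO_STATE_* names and eo_rx_complete() is an illustrative helper:

enum eo_rx_state {
        EO_RX_CLOSED,
        EO_RX_ACTIVE,
        EO_RX_FLOWC_OPEN_REPLY,
        EO_RX_FLOWC_CLOSE_REPLY,
};

struct eo_rx_txq {
        enum eo_rx_state state;
        unsigned int cidx;       /* next completion to process */
        unsigned int flowc_idx;  /* slot that held the FLOWC WR */
        unsigned int cred;       /* 16-byte credits available */
        unsigned int ncompl;     /* completions still expected */
};

static void eo_rx_complete(struct eo_rx_txq *q, unsigned int wr_credits)
{
        /* The FLOWC reply resolves the pending state transition. */
        if ((q->state == EO_RX_FLOWC_OPEN_REPLY ||
             q->state == EO_RX_FLOWC_CLOSE_REPLY) &&
            q->cidx == q->flowc_idx) {
                q->state = (q->state == EO_RX_FLOWC_OPEN_REPLY) ?
                           EO_RX_ACTIVE : EO_RX_CLOSED;
                /* the driver also signals waiters via complete() here */
        }

        q->cred += wr_credits;  /* hardware returned these credits */
        q->ncompl--;
        /* the driver then schedules the qresume tasklet to resume Tx */
}
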