Lines matching refs: eosw_txq
2129 struct sge_eosw_txq *eosw_txq, u32 ndesc) in cxgb4_eosw_txq_free_desc() argument
2133 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
2143 eosw_txq_advance_index(&eosw_txq->last_cidx, 1, in cxgb4_eosw_txq_free_desc()
2144 eosw_txq->ndesc); in cxgb4_eosw_txq_free_desc()
2145 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
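
The fragments from cxgb4_eosw_txq_free_desc() above show the consumer-side cleanup: starting at last_cidx, each descriptor's skb is released and the index advances with wrap-around until ndesc entries are freed. A minimal userspace sketch of that walk; the struct is a stand-in, free() replaces the driver's DMA unmap plus dev_consume_skb_any(), and the wrap is written as a modulo since the body of eosw_txq_advance_index() is outside this listing:

        #include <stdlib.h>

        struct eosw_desc { void *skb; };        /* stand-in descriptor */
        struct eosw_ring {
                struct eosw_desc *desc;
                unsigned int last_cidx;         /* next entry to free */
                unsigned int ndesc;             /* ring size */
        };

        /* Models eosw_txq_advance_index(): advance an index with wrap. */
        static void advance_index(unsigned int *idx, unsigned int n,
                                  unsigned int max)
        {
                *idx = (*idx + n) % max;
        }

        static void ring_free_desc(struct eosw_ring *q, unsigned int n)
        {
                struct eosw_desc *d = &q->desc[q->last_cidx];

                while (n--) {
                        free(d->skb);           /* driver: unmap DMA, consume skb */
                        d->skb = NULL;
                        advance_index(&q->last_cidx, 1, q->ndesc);
                        d = &q->desc[q->last_cidx];
                }
        }
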
2149 static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n) in eosw_txq_advance() argument
2151 eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc); in eosw_txq_advance()
2152 eosw_txq->inuse += n; in eosw_txq_advance()
2155 static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq, in eosw_txq_enqueue() argument
2158 if (eosw_txq->inuse == eosw_txq->ndesc) in eosw_txq_enqueue()
2161 eosw_txq->desc[eosw_txq->pidx].skb = skb; in eosw_txq_enqueue()
2165 static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq) in eosw_txq_peek() argument
2167 return eosw_txq->desc[eosw_txq->last_pidx].skb; in eosw_txq_peek()
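
eosw_txq_advance(), eosw_txq_enqueue() and eosw_txq_peek() are the producer-side primitives: advance publishes n new entries and grows inuse, enqueue refuses an skb once every slot is occupied, and peek returns the next skb awaiting hard transmit. A compact model with void * standing in for struct sk_buff; the -ENOMEM return is an assumption, since the listing does not show enqueue's error value:

        #include <errno.h>

        struct eosw_ring {
                void **skb;             /* one skb slot per descriptor */
                unsigned int pidx;      /* producer index */
                unsigned int last_pidx; /* next entry to hand to hardware */
                unsigned int ndesc, inuse;
        };

        static void ring_advance(struct eosw_ring *q, unsigned int n)
        {
                q->pidx = (q->pidx + n) % q->ndesc;
                q->inuse += n;          /* held until freed on completion */
        }

        static int ring_enqueue(struct eosw_ring *q, void *skb)
        {
                if (q->inuse == q->ndesc)
                        return -ENOMEM; /* ring full */
                q->skb[q->pidx] = skb;  /* caller advances pidx separately */
                return 0;
        }

        static void *ring_peek(struct eosw_ring *q)
        {
                return q->skb[q->last_pidx];
        }
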
2198 static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq, in write_eo_wr() argument
2218 if (!eosw_txq->ncompl || in write_eo_wr()
2219 (eosw_txq->last_compl + wrlen16) >= in write_eo_wr()
2222 eosw_txq->ncompl++; in write_eo_wr()
2223 eosw_txq->last_compl = 0; in write_eo_wr()
2230 FW_WR_FLOWID_V(eosw_txq->hwtid)); in write_eo_wr()
2255 eosw_txq->cred -= wrlen16; in write_eo_wr()
2256 eosw_txq->last_compl += wrlen16; in write_eo_wr()
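
write_eo_wr() charges each work request against the queue's pool of 16-byte credits and decides when to ask firmware for a completion: when none is outstanding (ncompl is zero) or when the credits consumed since the last completion would cross a threshold (the right-hand side of that comparison falls outside this listing). A sketch of the bookkeeping; COMPL_THRESHOLD is a placeholder, not the driver's value:

        #include <stdbool.h>

        #define COMPL_THRESHOLD 128u    /* placeholder threshold */

        struct eosw_credits {
                unsigned int cred;      /* 16-byte credits left */
                unsigned int ncompl;    /* completions outstanding */
                unsigned int last_compl;/* credits used since last completion */
        };

        /* Returns true when the WR should request a completion
         * (FW_WR_COMPL_F in the driver). */
        static bool charge_wr(struct eosw_credits *c, unsigned int wrlen16)
        {
                bool want_compl = !c->ncompl ||
                                  c->last_compl + wrlen16 >= COMPL_THRESHOLD;

                if (want_compl) {
                        c->ncompl++;    /* firmware will answer with a CPL */
                        c->last_compl = 0;
                }
                c->cred -= wrlen16;
                c->last_compl += wrlen16;
                return want_compl;
        }
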
2261 struct sge_eosw_txq *eosw_txq) in ethofld_hard_xmit() argument
2277 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid]; in ethofld_hard_xmit()
2281 d = &eosw_txq->desc[eosw_txq->last_pidx]; in ethofld_hard_xmit()
2286 if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE && in ethofld_hard_xmit()
2287 eosw_txq->last_pidx == eosw_txq->flowc_idx)) { in ethofld_hard_xmit()
2291 if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND) in ethofld_hard_xmit()
2314 if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) { in ethofld_hard_xmit()
2321 eosw_txq->state = next_state; in ethofld_hard_xmit()
2322 eosw_txq->cred -= wrlen16; in ethofld_hard_xmit()
2323 eosw_txq->ncompl++; in ethofld_hard_xmit()
2324 eosw_txq->last_compl = 0; in ethofld_hard_xmit()
2328 cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen); in ethofld_hard_xmit()
2389 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc); in ethofld_hard_xmit()
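
ethofld_hard_xmit() treats the descriptor at flowc_idx specially: when the queue is not ACTIVE and last_pidx has reached that slot, the pending skb is the queued FLOWC control message, and sending it moves the state machine from a *_SEND state to the matching reply-wait state while charging credits as above. It also bails out with the lock held when the hardware queue lacks descriptors or the flow lacks credits. A sketch of the state step; the *_REPLY names are inferred from the *_SEND names in the listing, not confirmed by it:

        #include <stdbool.h>

        enum eo_state {                 /* models CXGB4_EO_STATE_* */
                EO_ACTIVE,
                EO_FLOWC_OPEN_SEND,
                EO_FLOWC_OPEN_REPLY,
                EO_FLOWC_CLOSE_SEND,
                EO_FLOWC_CLOSE_REPLY,
                EO_CLOSED,
        };

        /* True when the entry about to be sent is the queued FLOWC. */
        static bool at_flowc(enum eo_state state, unsigned int last_pidx,
                             unsigned int flowc_idx)
        {
                return state != EO_ACTIVE && last_pidx == flowc_idx;
        }

        static enum eo_state flowc_next_state(enum eo_state cur)
        {
                return cur == EO_FLOWC_OPEN_SEND ? EO_FLOWC_OPEN_REPLY
                                                 : EO_FLOWC_CLOSE_REPLY;
        }
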
2396 static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq) in ethofld_xmit() argument
2401 switch (eosw_txq->state) { in ethofld_xmit()
2405 pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in ethofld_xmit()
2407 pktcount += eosw_txq->ndesc; in ethofld_xmit()
2417 skb = eosw_txq_peek(eosw_txq); in ethofld_xmit()
2419 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, in ethofld_xmit()
2420 eosw_txq->ndesc); in ethofld_xmit()
2424 ret = ethofld_hard_xmit(dev, eosw_txq); in ethofld_xmit()
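
ethofld_xmit() computes how many entries sit between last_pidx and pidx; the subtraction goes negative once the producer has wrapped, and adding ndesc corrects it. The same idiom reappears in eosw_txq_flush_pending_skbs() and cxgb4_ethofld_restart() below. Standalone:

        /* Entries between a trailing and a leading ring index, wrap-safe. */
        static int pending_count(int leading, int trailing, int ndesc)
        {
                int n = leading - trailing;

                if (n < 0)
                        n += ndesc;     /* leading index wrapped past trailing */
                return n;
        }
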
2436 struct sge_eosw_txq *eosw_txq; in cxgb4_ethofld_xmit() local
2446 eosw_txq = &tc_port_mqprio->eosw_txq[qid]; in cxgb4_ethofld_xmit()
2447 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2448 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_xmit()
2451 ret = eosw_txq_enqueue(eosw_txq, skb); in cxgb4_ethofld_xmit()
2461 eosw_txq_advance(eosw_txq, 1); in cxgb4_ethofld_xmit()
2462 ethofld_xmit(dev, eosw_txq); in cxgb4_ethofld_xmit()
2463 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2467 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
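
cxgb4_ethofld_xmit() brackets the whole enqueue-advance-transmit sequence with the per-queue lock (spin_lock_bh in the driver) and rejects traffic unless the queue is ACTIVE. The control flow, with a pthread mutex standing in for the spinlock and stubbed helpers; error handling is simplified (the driver frees the rejected skb and still reports transmit success):

        #include <errno.h>
        #include <pthread.h>

        enum eo_state { EO_ACTIVE, EO_CLOSED /* FLOWC states omitted */ };

        struct eosw_queue {
                pthread_mutex_t lock;
                enum eo_state state;
        };

        /* Stubs for the ring helpers modeled in the earlier sketches. */
        static int ring_enqueue(struct eosw_queue *q, void *skb)
        { (void)q; (void)skb; return 0; }
        static void ring_advance(struct eosw_queue *q, unsigned int n)
        { (void)q; (void)n; }
        static void push_to_hw(struct eosw_queue *q) { (void)q; }

        static int eosw_xmit(struct eosw_queue *q, void *skb)
        {
                int ret;

                pthread_mutex_lock(&q->lock);
                if (q->state != EO_ACTIVE) {    /* closed or mid-FLOWC */
                        ret = -EBUSY;
                        goto out;
                }
                ret = ring_enqueue(q, skb);
                if (ret)
                        goto out;
                ring_advance(q, 1);             /* publish the entry */
                push_to_hw(q);                  /* ethofld_xmit() in the driver */
        out:
                pthread_mutex_unlock(&q->lock);
                return ret;
        }
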
2497 static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq) in eosw_txq_flush_pending_skbs() argument
2499 int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in eosw_txq_flush_pending_skbs()
2500 int pidx = eosw_txq->pidx; in eosw_txq_flush_pending_skbs()
2507 pktcount += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2512 pidx += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2514 skb = eosw_txq->desc[pidx].skb; in eosw_txq_flush_pending_skbs()
2517 eosw_txq->desc[pidx].skb = NULL; in eosw_txq_flush_pending_skbs()
2518 eosw_txq->inuse--; in eosw_txq_flush_pending_skbs()
2522 eosw_txq->pidx = eosw_txq->last_pidx + 1; in eosw_txq_flush_pending_skbs()
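
eosw_txq_flush_pending_skbs() drops every skb that was enqueued but never handed to hardware: it counts the entries between last_pidx and pidx, walks that range backwards freeing skbs and shrinking inuse, then rewinds pidx to last_pidx + 1 so the FLOWC enqueued next lands right behind the in-flight entries. A sketch mirroring the listing; free() again stands in for dev_consume_skb_any(), and the final assignment is copied as shown (its wrap handling is outside this listing):

        #include <stdlib.h>

        struct eosw_desc { void *skb; };
        struct eosw_ring {
                struct eosw_desc *desc;
                int pidx, last_pidx, ndesc;
                unsigned int inuse;
        };

        static void flush_pending(struct eosw_ring *q)
        {
                int pktcount = q->pidx - q->last_pidx;
                int pidx = q->pidx;

                if (!pktcount)
                        return;
                if (pktcount < 0)
                        pktcount += q->ndesc;   /* producer wrapped */

                while (pktcount--) {
                        pidx--;                 /* walk back toward last_pidx */
                        if (pidx < 0)
                                pidx += q->ndesc;
                        if (q->desc[pidx].skb) {
                                free(q->desc[pidx].skb);
                                q->desc[pidx].skb = NULL;
                                q->inuse--;
                        }
                }
                q->pidx = q->last_pidx + 1;     /* rewind past in-flight entry */
        }
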
2540 struct sge_eosw_txq *eosw_txq; in cxgb4_ethofld_send_flowc() local
2555 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_send_flowc()
2556 if (!eosw_txq) in cxgb4_ethofld_send_flowc()
2561 complete(&eosw_txq->completion); in cxgb4_ethofld_send_flowc()
2569 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2571 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED) in cxgb4_ethofld_send_flowc()
2576 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_send_flowc()
2585 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid]; in cxgb4_ethofld_send_flowc()
2587 FW_WR_FLOWID_V(eosw_txq->hwtid)); in cxgb4_ethofld_send_flowc()
2610 eosw_txq_flush_pending_skbs(eosw_txq); in cxgb4_ethofld_send_flowc()
2612 ret = eosw_txq_enqueue(eosw_txq, skb); in cxgb4_ethofld_send_flowc()
2616 eosw_txq->state = next_state; in cxgb4_ethofld_send_flowc()
2617 eosw_txq->flowc_idx = eosw_txq->pidx; in cxgb4_ethofld_send_flowc()
2618 eosw_txq_advance(eosw_txq, 1); in cxgb4_ethofld_send_flowc()
2619 ethofld_xmit(dev, eosw_txq); in cxgb4_ethofld_send_flowc()
2621 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2626 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
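
cxgb4_ethofld_send_flowc() looks the queue up via entry->data, waits on the queue's completion when needed, and only admits a FLOWC when the state machine allows it: an open requires a CLOSED queue, a close requires an ACTIVE one. It then flushes pending skbs, enqueues the FLOWC, records its slot in flowc_idx, and pushes it through the same ethofld_xmit() path. The admission check as a standalone function; the -EBUSY code is an assumption:

        #include <errno.h>
        #include <stdbool.h>

        enum eo_state { EO_ACTIVE, EO_CLOSED /* FLOWC states omitted */ };

        static int flowc_admissible(enum eo_state cur, bool opening)
        {
                if (opening)
                        return cur == EO_CLOSED ? 0 : -EBUSY;
                return cur == EO_ACTIVE ? 0 : -EBUSY;
        }
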
4037 struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t, in cxgb4_ethofld_restart() local
4041 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_restart()
4042 pktcount = eosw_txq->cidx - eosw_txq->last_cidx; in cxgb4_ethofld_restart()
4044 pktcount += eosw_txq->ndesc; in cxgb4_ethofld_restart()
4047 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev), in cxgb4_ethofld_restart()
4048 eosw_txq, pktcount); in cxgb4_ethofld_restart()
4049 eosw_txq->inuse -= pktcount; in cxgb4_ethofld_restart()
4055 ethofld_xmit(eosw_txq->netdev, eosw_txq); in cxgb4_ethofld_restart()
4056 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_restart()
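
The qresume tasklet in cxgb4_ethofld_restart() reclaims everything firmware has completed, the wrap-safe distance from last_cidx to cidx, frees those descriptors, shrinks inuse, and re-enters ethofld_xmit() so skbs that stalled earlier on credits get another try. A self-contained model of the reclaim step; the driver calls are noted in comments:

        struct eosw_ring {
                int cidx;               /* completed by firmware */
                int last_cidx;          /* freed so far */
                int ndesc;
                unsigned int inuse;
        };

        /* Runs in tasklet context after firmware returns credits. */
        static void eosw_restart(struct eosw_ring *q)
        {
                int done = q->cidx - q->last_cidx;

                if (done < 0)
                        done += q->ndesc;       /* wrap-safe distance */
                if (done) {
                        /* driver: cxgb4_eosw_txq_free_desc(adap, q, done) */
                        q->last_cidx = (q->last_cidx + done) % q->ndesc;
                        q->inuse -= done;
                }
                /* driver: ethofld_xmit() retries skbs blocked on credits */
        }
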
4077 struct sge_eosw_txq *eosw_txq; in cxgb4_ethofld_rx_handler() local
4091 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_rx_handler()
4092 if (!eosw_txq) in cxgb4_ethofld_rx_handler()
4095 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4098 skb = eosw_txq->desc[eosw_txq->cidx].skb; in cxgb4_ethofld_rx_handler()
4102 if (unlikely((eosw_txq->state == in cxgb4_ethofld_rx_handler()
4104 eosw_txq->state == in cxgb4_ethofld_rx_handler()
4106 eosw_txq->cidx == eosw_txq->flowc_idx)) { in cxgb4_ethofld_rx_handler()
4108 if (eosw_txq->state == in cxgb4_ethofld_rx_handler()
4110 eosw_txq->state = CXGB4_EO_STATE_ACTIVE; in cxgb4_ethofld_rx_handler()
4112 eosw_txq->state = CXGB4_EO_STATE_CLOSED; in cxgb4_ethofld_rx_handler()
4113 complete(&eosw_txq->completion); in cxgb4_ethofld_rx_handler()
4115 hdr_len = eth_get_headlen(eosw_txq->netdev, in cxgb4_ethofld_rx_handler()
4121 eosw_txq_advance_index(&eosw_txq->cidx, 1, in cxgb4_ethofld_rx_handler()
4122 eosw_txq->ndesc); in cxgb4_ethofld_rx_handler()
4127 eosw_txq->cred += cpl->credits; in cxgb4_ethofld_rx_handler()
4128 eosw_txq->ncompl--; in cxgb4_ethofld_rx_handler()
4130 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4135 tasklet_schedule(&eosw_txq->qresume_tsk); in cxgb4_ethofld_rx_handler()
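
cxgb4_ethofld_rx_handler() closes the loop: a completion CPL walks cidx over the finished skbs, FLOWC replies flip the state once cidx reaches flowc_idx (OPEN_REPLY to ACTIVE, CLOSE_REPLY to CLOSED, waking the waiter via complete()), the returned credits are added back to cred, ncompl drops, and the qresume tasklet is scheduled so transmit can resume. A sketch of the credit-return step; the REPLY state names carry over the inference made earlier, and the flowc_idx guard is folded into the comments:

        enum eo_state {
                EO_ACTIVE, EO_FLOWC_OPEN_REPLY, EO_FLOWC_CLOSE_REPLY, EO_CLOSED,
        };

        struct eosw_ring {
                enum eo_state state;
                unsigned int cred;      /* 16-byte WR credits */
                unsigned int ncompl;    /* completions outstanding */
        };

        /* Invoked per completion CPL; 'credits' comes from the CPL payload.
         * The driver only flips state when cidx has reached flowc_idx. */
        static void on_completion(struct eosw_ring *q, unsigned int credits)
        {
                if (q->state == EO_FLOWC_OPEN_REPLY)
                        q->state = EO_ACTIVE;   /* FLOWC open acknowledged */
                else if (q->state == EO_FLOWC_CLOSE_REPLY)
                        q->state = EO_CLOSED;   /* driver also complete()s waiter */

                q->cred += credits;             /* firmware hands credits back */
                q->ncompl--;
                /* driver: tasklet_schedule(&q->qresume_tsk) */
        }
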