Lines Matching refs: eosw_txq
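
All hits are in the Chelsio cxgb4 driver's sge.c and revolve around the ETHOFLD (TC-MQPRIO offload) software Tx queue, struct sge_eosw_txq: a ring of descriptors driven by a pidx/cidx producer/consumer pair, where last_pidx marks the next entry to hand to hardware and last_cidx the next entry to reclaim. cred, ncompl and last_compl track the firmware's 16-byte work-request credits.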

2131 			      struct sge_eosw_txq *eosw_txq, u32 ndesc)  in cxgb4_eosw_txq_free_desc()  argument
2135 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
2145 eosw_txq_advance_index(&eosw_txq->last_cidx, 1, in cxgb4_eosw_txq_free_desc()
2146 eosw_txq->ndesc); in cxgb4_eosw_txq_free_desc()
2147 d = &eosw_txq->desc[eosw_txq->last_cidx]; in cxgb4_eosw_txq_free_desc()
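
cxgb4_eosw_txq_free_desc() reclaims ndesc completed entries starting at last_cidx. A sketch of the loop these matches belong to; the unmap/free details on the elided lines 2136-2144 are assumptions:

    struct tx_sw_desc *d;

    d = &eosw_txq->desc[eosw_txq->last_cidx];
    while (ndesc--) {
            if (d->skb) {
                    /* assumed: DMA unmap precedes the free */
                    dev_consume_skb_any(d->skb);
                    d->skb = NULL;
            }
            eosw_txq_advance_index(&eosw_txq->last_cidx, 1,
                                   eosw_txq->ndesc);
            d = &eosw_txq->desc[eosw_txq->last_cidx];
    }
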
2151 static inline void eosw_txq_advance(struct sge_eosw_txq *eosw_txq, u32 n) in eosw_txq_advance() argument
2153 eosw_txq_advance_index(&eosw_txq->pidx, n, eosw_txq->ndesc); in eosw_txq_advance()
2154 eosw_txq->inuse += n; in eosw_txq_advance()
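
eosw_txq_advance() moves the producer index and the inuse count together. The index helper it calls, eosw_txq_advance_index(), takes no eosw_txq argument and therefore never shows up in this listing; from its call sites it can only be a wrap-around add. A minimal sketch, assuming u32 indices:

    static inline void eosw_txq_advance_index(u32 *idx, u32 n, u32 max)
    {
            u32 val = *idx + n;

            if (val >= max)
                    val -= max;     /* wrap within the ring */
            *idx = val;
    }
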
2157 static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq, in eosw_txq_enqueue() argument
2160 if (eosw_txq->inuse == eosw_txq->ndesc) in eosw_txq_enqueue()
2163 eosw_txq->desc[eosw_txq->pidx].skb = skb; in eosw_txq_enqueue()
2167 static inline struct sk_buff *eosw_txq_peek(struct sge_eosw_txq *eosw_txq) in eosw_txq_peek() argument
2169 return eosw_txq->desc[eosw_txq->last_pidx].skb; in eosw_txq_peek()
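
eosw_txq_enqueue() is the producer-side store, and eosw_txq_peek() reads the entry that transmit will service next (last_pidx, not pidx, since packets can sit queued behind a stalled send). Filling in the return paths elided at lines 2161 and 2164, which are presumably a full-ring error and success:

    static inline int eosw_txq_enqueue(struct sge_eosw_txq *eosw_txq,
                                       struct sk_buff *skb)
    {
            if (eosw_txq->inuse == eosw_txq->ndesc)
                    return -ENOMEM;         /* assumed errno */

            eosw_txq->desc[eosw_txq->pidx].skb = skb;
            return 0;
    }
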
2200 static void *write_eo_wr(struct adapter *adap, struct sge_eosw_txq *eosw_txq, in write_eo_wr() argument
2220 if (!eosw_txq->ncompl || in write_eo_wr()
2221 (eosw_txq->last_compl + wrlen16) >= in write_eo_wr()
2224 eosw_txq->ncompl++; in write_eo_wr()
2225 eosw_txq->last_compl = 0; in write_eo_wr()
2232 FW_WR_FLOWID_V(eosw_txq->hwtid)); in write_eo_wr()
2257 eosw_txq->cred -= wrlen16; in write_eo_wr()
2258 eosw_txq->last_compl += wrlen16; in write_eo_wr()
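
write_eo_wr() builds the Ethernet-offload work request. The matched lines show the completion-request policy: ask firmware for a completion on the queue's very first WR (ncompl == 0), or once the 16-byte credits spent since the last completion reach a threshold; the right-hand side of the comparison (line 2222) is elided here and presumed to be a fraction of the per-queue WR credit. In outline, with compl and THRESHOLD as stand-ins:

    bool compl = false;

    if (!eosw_txq->ncompl ||
        (eosw_txq->last_compl + wrlen16) >= THRESHOLD) {
            compl = true;                   /* request a firmware completion */
            eosw_txq->ncompl++;
            eosw_txq->last_compl = 0;
    }

    /* ... WR header setup, including FW_WR_FLOWID_V(eosw_txq->hwtid) ... */

    eosw_txq->cred -= wrlen16;              /* charge the credits now */
    eosw_txq->last_compl += wrlen16;        /* accumulate toward next completion */
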
2263 struct sge_eosw_txq *eosw_txq) in ethofld_hard_xmit() argument
2279 eohw_txq = &adap->sge.eohw_txq[eosw_txq->hwqid]; in ethofld_hard_xmit()
2283 d = &eosw_txq->desc[eosw_txq->last_pidx]; in ethofld_hard_xmit()
2288 if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE && in ethofld_hard_xmit()
2289 eosw_txq->last_pidx == eosw_txq->flowc_idx)) { in ethofld_hard_xmit()
2293 if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND) in ethofld_hard_xmit()
2316 if (unlikely(left < 0 || wrlen16 > eosw_txq->cred)) { in ethofld_hard_xmit()
2323 eosw_txq->state = next_state; in ethofld_hard_xmit()
2324 eosw_txq->cred -= wrlen16; in ethofld_hard_xmit()
2325 eosw_txq->ncompl++; in ethofld_hard_xmit()
2326 eosw_txq->last_compl = 0; in ethofld_hard_xmit()
2330 cpl = write_eo_wr(adap, eosw_txq, skb, wr, hdr_len, wrlen); in ethofld_hard_xmit()
2391 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, eosw_txq->ndesc); in ethofld_hard_xmit()
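
ethofld_hard_xmit() takes the skb at last_pidx and pushes it onto the hardware queue chosen at allocation time (hwqid). The test at lines 2288-2289 intercepts the FLOWC descriptor during a state transition, and the bookkeeping at lines 2323-2326 commits the transition once the credit check passes. A sketch of both halves; the *_REPLY state names are inferred from the Rx handler at lines 4110-4114:

    if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE &&
                 eosw_txq->last_pidx == eosw_txq->flowc_idx)) {
            /* next descriptor is the queued FLOWC, not a data packet */
            if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_SEND)
                    next_state = CXGB4_EO_STATE_FLOWC_OPEN_REPLY;
            else
                    next_state = CXGB4_EO_STATE_FLOWC_CLOSE_REPLY;
    }

    /* after the credit check at line 2316 succeeds: */
    if (unlikely(eosw_txq->state != CXGB4_EO_STATE_ACTIVE)) {
            eosw_txq->state = next_state;
            eosw_txq->cred -= wrlen16;      /* FLOWC consumes credits too */
            eosw_txq->ncompl++;             /* and always requests a reply */
            eosw_txq->last_compl = 0;
    }
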
2398 static void ethofld_xmit(struct net_device *dev, struct sge_eosw_txq *eosw_txq) in ethofld_xmit() argument
2403 switch (eosw_txq->state) { in ethofld_xmit()
2407 pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in ethofld_xmit()
2409 pktcount += eosw_txq->ndesc; in ethofld_xmit()
2419 skb = eosw_txq_peek(eosw_txq); in ethofld_xmit()
2421 eosw_txq_advance_index(&eosw_txq->last_pidx, 1, in ethofld_xmit()
2422 eosw_txq->ndesc); in ethofld_xmit()
2426 ret = ethofld_hard_xmit(dev, eosw_txq); in ethofld_xmit()
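
ethofld_xmit() drains everything between last_pidx and pidx; the switch at line 2403 presumably only lets the ACTIVE and FLOWC_*_SEND states through. The distance computation at lines 2407-2409 is the usual wrap-safe idiom, and descriptors whose skb is already gone (NULL) are skipped. The loop skeleton, reconstructed from these matches:

    pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
    if (pktcount < 0)
            pktcount += eosw_txq->ndesc;    /* pidx wrapped past last_pidx */

    while (pktcount--) {
            skb = eosw_txq_peek(eosw_txq);
            if (!skb) {
                    eosw_txq_advance_index(&eosw_txq->last_pidx, 1,
                                           eosw_txq->ndesc);
                    continue;
            }

            if (ethofld_hard_xmit(dev, eosw_txq))
                    break;                  /* out of credits; retried later */
    }
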
2438 struct sge_eosw_txq *eosw_txq; in cxgb4_ethofld_xmit() local
2448 eosw_txq = &tc_port_mqprio->eosw_txq[qid]; in cxgb4_ethofld_xmit()
2449 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2450 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_xmit()
2453 ret = eosw_txq_enqueue(eosw_txq, skb); in cxgb4_ethofld_xmit()
2463 eosw_txq_advance(eosw_txq, 1); in cxgb4_ethofld_xmit()
2464 ethofld_xmit(dev, eosw_txq); in cxgb4_ethofld_xmit()
2465 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
2469 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_xmit()
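
cxgb4_ethofld_xmit() is the transmit entry point: map the skb's queue id to the per-class software queue via tc_port_mqprio->eosw_txq[qid], then, under the BH-safe lock, check state, enqueue, publish, and try to push immediately. The unlocks at lines 2465 and 2469 are the success and bail-out paths. In outline, with the label name assumed:

    spin_lock_bh(&eosw_txq->lock);
    if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
            goto out_unlock;                /* flow not (yet) bound to a class */

    if (eosw_txq_enqueue(eosw_txq, skb))
            goto out_unlock;                /* software ring is full */

    eosw_txq_advance(eosw_txq, 1);          /* publish to the ring */
    ethofld_xmit(dev, eosw_txq);            /* kick transmission right away */
    spin_unlock_bh(&eosw_txq->lock);
    return NETDEV_TX_OK;

    out_unlock:
    spin_unlock_bh(&eosw_txq->lock);
    /* assumed: the skb is dropped and NETDEV_TX_OK returned here too */
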
2499 static void eosw_txq_flush_pending_skbs(struct sge_eosw_txq *eosw_txq) in eosw_txq_flush_pending_skbs() argument
2501 int pktcount = eosw_txq->pidx - eosw_txq->last_pidx; in eosw_txq_flush_pending_skbs()
2502 int pidx = eosw_txq->pidx; in eosw_txq_flush_pending_skbs()
2509 pktcount += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2514 pidx += eosw_txq->ndesc; in eosw_txq_flush_pending_skbs()
2516 skb = eosw_txq->desc[pidx].skb; in eosw_txq_flush_pending_skbs()
2519 eosw_txq->desc[pidx].skb = NULL; in eosw_txq_flush_pending_skbs()
2520 eosw_txq->inuse--; in eosw_txq_flush_pending_skbs()
2524 eosw_txq->pidx = eosw_txq->last_pidx + 1; in eosw_txq_flush_pending_skbs()
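
eosw_txq_flush_pending_skbs() discards everything queued but not yet transmitted, walking backwards from pidx toward last_pidx, then rewinds pidx so the next enqueue (the FLOWC) lands right after the last transmitted entry, as line 2524 shows. Reconstructed from the matches; the free helper is an assumption:

    int pktcount = eosw_txq->pidx - eosw_txq->last_pidx;
    int pidx = eosw_txq->pidx;
    struct sk_buff *skb;

    if (pktcount < 0)
            pktcount += eosw_txq->ndesc;

    while (pktcount--) {
            pidx--;
            if (pidx < 0)
                    pidx += eosw_txq->ndesc;

            skb = eosw_txq->desc[pidx].skb;
            if (skb) {
                    dev_consume_skb_any(skb);       /* assumed */
                    eosw_txq->desc[pidx].skb = NULL;
                    eosw_txq->inuse--;
            }
    }

    eosw_txq->pidx = eosw_txq->last_pidx + 1;
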
2542 struct sge_eosw_txq *eosw_txq; in cxgb4_ethofld_send_flowc() local
2557 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_send_flowc()
2558 if (!eosw_txq) in cxgb4_ethofld_send_flowc()
2563 complete(&eosw_txq->completion); in cxgb4_ethofld_send_flowc()
2571 spin_lock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2573 if (eosw_txq->state != CXGB4_EO_STATE_CLOSED) in cxgb4_ethofld_send_flowc()
2578 if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE) in cxgb4_ethofld_send_flowc()
2587 rxq = &adap->sge.eohw_rxq[eosw_txq->hwqid]; in cxgb4_ethofld_send_flowc()
2589 FW_WR_FLOWID_V(eosw_txq->hwtid)); in cxgb4_ethofld_send_flowc()
2612 eosw_txq_flush_pending_skbs(eosw_txq); in cxgb4_ethofld_send_flowc()
2614 ret = eosw_txq_enqueue(eosw_txq, skb); in cxgb4_ethofld_send_flowc()
2618 eosw_txq->state = next_state; in cxgb4_ethofld_send_flowc()
2619 eosw_txq->flowc_idx = eosw_txq->pidx; in cxgb4_ethofld_send_flowc()
2620 eosw_txq_advance(eosw_txq, 1); in cxgb4_ethofld_send_flowc()
2621 ethofld_xmit(dev, eosw_txq); in cxgb4_ethofld_send_flowc()
2623 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
2628 spin_unlock_bh(&eosw_txq->lock); in cxgb4_ethofld_send_flowc()
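
cxgb4_ethofld_send_flowc() is the control path for binding a flow to, or unbinding it from, a traffic class: look up the eotid entry, complete eosw_txq->completion early in one error path so waiters don't stall (line 2563), pick the next state from the current one, flush pending packets on a close, then queue the FLOWC through the same enqueue/advance/xmit machinery as data, recording its slot in flowc_idx (line 2619) so both the transmit and completion paths can recognize it. The state selection, reconstructed; the exact open-vs-close test and the label are assumptions:

    if (tc != FW_SCHED_CLS_NONE) {          /* opening: bind to a class */
            if (eosw_txq->state != CXGB4_EO_STATE_CLOSED)
                    goto out_unlock;        /* line 2573 */
            next_state = CXGB4_EO_STATE_FLOWC_OPEN_SEND;
    } else {                                /* closing the flow */
            if (eosw_txq->state != CXGB4_EO_STATE_ACTIVE)
                    goto out_unlock;        /* line 2578 */
            next_state = CXGB4_EO_STATE_FLOWC_CLOSE_SEND;
    }
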
4039 struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t, in cxgb4_ethofld_restart() local
4043 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_restart()
4044 pktcount = eosw_txq->cidx - eosw_txq->last_cidx; in cxgb4_ethofld_restart()
4046 pktcount += eosw_txq->ndesc; in cxgb4_ethofld_restart()
4049 cxgb4_eosw_txq_free_desc(netdev2adap(eosw_txq->netdev), in cxgb4_ethofld_restart()
4050 eosw_txq, pktcount); in cxgb4_ethofld_restart()
4051 eosw_txq->inuse -= pktcount; in cxgb4_ethofld_restart()
4057 ethofld_xmit(eosw_txq->netdev, eosw_txq); in cxgb4_ethofld_restart()
4058 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_restart()
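
cxgb4_ethofld_restart() is the tasklet that the Rx handler schedules once credits come back. The from_tasklet() call at line 4039 looks truncated here only because its final argument doesn't mention eosw_txq; line 4137 shows the tasklet member is qresume_tsk, so the full statement is:

    struct sge_eosw_txq *eosw_txq = from_tasklet(eosw_txq, t,
                                                 qresume_tsk);

The body then reclaims what hardware has confirmed (cidx minus last_cidx, wrap-adjusted), drops inuse by the same amount, and re-enters ethofld_xmit() to push packets that previously stalled for credits.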
4079 struct sge_eosw_txq *eosw_txq; in cxgb4_ethofld_rx_handler() local
4093 eosw_txq = (struct sge_eosw_txq *)entry->data; in cxgb4_ethofld_rx_handler()
4094 if (!eosw_txq) in cxgb4_ethofld_rx_handler()
4097 spin_lock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4100 skb = eosw_txq->desc[eosw_txq->cidx].skb; in cxgb4_ethofld_rx_handler()
4104 if (unlikely((eosw_txq->state == in cxgb4_ethofld_rx_handler()
4106 eosw_txq->state == in cxgb4_ethofld_rx_handler()
4108 eosw_txq->cidx == eosw_txq->flowc_idx)) { in cxgb4_ethofld_rx_handler()
4110 if (eosw_txq->state == in cxgb4_ethofld_rx_handler()
4112 eosw_txq->state = CXGB4_EO_STATE_ACTIVE; in cxgb4_ethofld_rx_handler()
4114 eosw_txq->state = CXGB4_EO_STATE_CLOSED; in cxgb4_ethofld_rx_handler()
4115 complete(&eosw_txq->completion); in cxgb4_ethofld_rx_handler()
4117 hdr_len = eth_get_headlen(eosw_txq->netdev, in cxgb4_ethofld_rx_handler()
4123 eosw_txq_advance_index(&eosw_txq->cidx, 1, in cxgb4_ethofld_rx_handler()
4124 eosw_txq->ndesc); in cxgb4_ethofld_rx_handler()
4129 eosw_txq->cred += cpl->credits; in cxgb4_ethofld_rx_handler()
4130 eosw_txq->ncompl--; in cxgb4_ethofld_rx_handler()
4132 spin_unlock(&eosw_txq->lock); in cxgb4_ethofld_rx_handler()
4137 tasklet_schedule(&eosw_txq->qresume_tsk); in cxgb4_ethofld_rx_handler()
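
cxgb4_ethofld_rx_handler() consumes the completions requested earlier. For data descriptors it re-derives each WR's size from the stored skb (the eth_get_headlen() call at line 4117) to account credits; when the completed descriptor is the FLOWC (cidx == flowc_idx), it flips the state machine instead: OPEN_REPLY becomes ACTIVE, CLOSE_REPLY becomes CLOSED, and waiters on eosw_txq->completion are released. A sketch of that branch plus the credit-return tail; cpl stands for the ack CPL the handler was given:

    if (unlikely((eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_REPLY ||
                  eosw_txq->state == CXGB4_EO_STATE_FLOWC_CLOSE_REPLY) &&
                 eosw_txq->cidx == eosw_txq->flowc_idx)) {
            if (eosw_txq->state == CXGB4_EO_STATE_FLOWC_OPEN_REPLY)
                    eosw_txq->state = CXGB4_EO_STATE_ACTIVE;
            else
                    eosw_txq->state = CXGB4_EO_STATE_CLOSED;
            complete(&eosw_txq->completion);
    }

    eosw_txq->cred += cpl->credits;         /* return the 16-byte credits */
    eosw_txq->ncompl--;                     /* one outstanding completion done */

    spin_unlock(&eosw_txq->lock);

    /* resume stalled transmits outside the lock */
    tasklet_schedule(&eosw_txq->qresume_tsk);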