Lines Matching full:tx

190 * The tx request, once initialized, is manipulated with calls to
199 * in the tx. Memory locations added with sdma_txadd_page()
201 * to the tx and unmapped as part of the progress processing in the
205 * tx. A typical use case is a pre-allocated
212 * a tx to the ring after the appropriate number of
219 * long as the tx isn't in flight.
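
A hedged sketch of the lifecycle those fragments describe (initialize, add each disjoint memory location, submit): the sdma_txinit()/sdma_txadd_*() calls follow the signatures visible in this listing, while sdma_send_txreq() and the sde/wait plumbing are assumptions, and the submit signature varies across kernel versions:

	static void one_tx_done(struct sdma_txreq *tx, int status)
	{
		/* memory added with sdma_txadd_daddr() must be unmapped here */
	}

	static int one_tx(struct hfi1_devdata *dd, struct sdma_engine *sde,
			  struct iowait *wait, struct sdma_txreq *tx,
			  void *hdr, u16 hdrlen,
			  struct page *payload, unsigned long off, u16 paylen)
	{
		int ret;

		ret = sdma_txinit(tx, 0, hdrlen + paylen, one_tx_done);
		if (ret)
			return ret;
		ret = sdma_txadd_kvaddr(dd, tx, hdr, hdrlen);
		if (ret)
			return ret;	/* the add path cleans the tx on failure */
		ret = sdma_txadd_page(dd, tx, payload, off, paylen);
		if (ret)
			return ret;
		return sdma_send_txreq(sde, wait, tx);	/* assumed submit API */
	}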
440 struct sdma_txreq *tx,
448 * @tx: tx request to initialize
479 * being submitted. The callback will be provided this tx and a status.
499 struct sdma_txreq *tx, in sdma_txinit_ahg() argument
512 tx->desc_limit = ARRAY_SIZE(tx->descs); in sdma_txinit_ahg()
513 tx->descp = &tx->descs[0]; in sdma_txinit_ahg()
514 INIT_LIST_HEAD(&tx->list); in sdma_txinit_ahg()
515 tx->num_desc = 0; in sdma_txinit_ahg()
516 tx->flags = flags; in sdma_txinit_ahg()
517 tx->complete = cb; in sdma_txinit_ahg()
518 tx->coalesce_buf = NULL; in sdma_txinit_ahg()
519 tx->wait = NULL; in sdma_txinit_ahg()
520 tx->packet_len = tlen; in sdma_txinit_ahg()
521 tx->tlen = tx->packet_len; in sdma_txinit_ahg()
522 tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG; in sdma_txinit_ahg()
523 tx->descs[0].qw[1] = 0; in sdma_txinit_ahg()
525 tx->descs[0].qw[1] |= in sdma_txinit_ahg()
531 _sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen); in sdma_txinit_ahg()
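
For the AHG variant, a hedged call sketch: SDMA_TXREQ_F_USE_AHG and the argument order are assumptions drawn from hfi1's sdma_txreq.h and the sdma_txinit() delegation shown further down, and the ahg[] contents are placeholders:

	u32 ahg[9];	/* hypothetical AHG update words, filled by the caller */

	ret = sdma_txinit_ahg(tx, SDMA_TXREQ_F_USE_AHG, tlen,
			      ahg_entry, num_ahg, ahg, ahg_hlen,
			      one_tx_done);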
537 * @tx: tx request to initialize
561 * The callback, if non-NULL, will be provided this tx and a status. The
567 struct sdma_txreq *tx, in sdma_txinit() argument
572 return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb); in sdma_txinit()
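
The delegation above, together with the "this tx and a status" comment, implies a callback type of void (*cb)(struct sdma_txreq *, int). A minimal handler sketch; struct my_request is a hypothetical container, and SDMA_TXREQ_S_OK is assumed from sdma_txreq.h:

	struct my_request {			/* hypothetical container */
		struct sdma_txreq txreq;
		struct completion done;
	};

	static void my_tx_done(struct sdma_txreq *tx, int status)
	{
		struct my_request *req =
			container_of(tx, struct my_request, txreq);

		if (status != SDMA_TXREQ_S_OK)	/* assumed status code */
			pr_debug("sdma tx failed: %d\n", status);
		complete(&req->done);
	}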
595 struct sdma_txreq *tx, in make_tx_sdma_desc() argument
600 struct sdma_desc *desc = &tx->descp[tx->num_desc]; in make_tx_sdma_desc()
602 if (!tx->num_desc) { in make_tx_sdma_desc()
618 int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
624 static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx) in sdma_txclean() argument
626 if (tx->num_desc) in sdma_txclean()
627 __sdma_txclean(dd, tx); in sdma_txclean()
632 struct sdma_txreq *tx) in _sdma_close_tx() argument
634 tx->descp[tx->num_desc].qw[0] |= in _sdma_close_tx()
636 tx->descp[tx->num_desc].qw[1] |= in _sdma_close_tx()
638 if (tx->flags & SDMA_TXREQ_F_URGENT) in _sdma_close_tx()
639 tx->descp[tx->num_desc].qw[1] |= in _sdma_close_tx()
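
Note: the matcher elides the continuation lines carrying the flag constants. Functionally, _sdma_close_tx() marks the final descriptor as the last one of the packet and sets its interrupt-request bits, with SDMA_TXREQ_F_URGENT requests additionally asking for an immediate head update so completion is signaled as soon as possible.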
647 struct sdma_txreq *tx, in _sdma_txadd_daddr() argument
654 tx, in _sdma_txadd_daddr()
657 WARN_ON(len > tx->tlen); in _sdma_txadd_daddr()
658 tx->tlen -= len; in _sdma_txadd_daddr()
660 if (!tx->tlen) { in _sdma_txadd_daddr()
661 if (tx->packet_len & (sizeof(u32) - 1)) { in _sdma_txadd_daddr()
662 rval = _pad_sdma_tx_descs(dd, tx); in _sdma_txadd_daddr()
666 _sdma_close_tx(dd, tx); in _sdma_txadd_daddr()
669 tx->num_desc++; in _sdma_txadd_daddr()
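
The close-out path above pads any packet whose byte length is not a multiple of 4; a quick check of the mask arithmetic:

	/* sizeof(u32) - 1 == 3, so the test is packet_len % 4 != 0:
	 *	packet_len = 64 -> 64 & 3 == 0 -> no pad descriptor
	 *	packet_len = 46 -> 46 & 3 == 2 -> _pad_sdma_tx_descs() runs
	 */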
676 * @tx: tx request to which the page is added
691 struct sdma_txreq *tx, in sdma_txadd_page() argument
699 if (unlikely(tx->num_desc == tx->desc_limit)) { in sdma_txadd_page()
700 rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE, in sdma_txadd_page()
714 __sdma_txclean(dd, tx); in sdma_txadd_page()
719 dd, SDMA_MAP_PAGE, tx, addr, len); in sdma_txadd_page()
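
A hedged usage sketch for sdma_txadd_page(), assuming the (dd, tx, page, offset, len) order suggested by the fragments above (newer kernels take additional arguments):

	/* map one page fragment into the tx; names are illustrative */
	ret = sdma_txadd_page(dd, tx, page, offset, frag_len);
	if (ret)
		return ret;	/* __sdma_txclean() already ran on failure */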
725 * @tx: sdma_txreq to which the page is added
740 struct sdma_txreq *tx, in sdma_txadd_daddr() argument
746 if (unlikely(tx->num_desc == tx->desc_limit)) { in sdma_txadd_daddr()
747 rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE, in sdma_txadd_daddr()
753 return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len); in sdma_txadd_daddr()
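
Tying this to the dma_pool use case from the header comment; the pool and lengths here are hypothetical, and per that comment the caller owns the unmap/free in its completion callback:

	dma_addr_t hdr_dma;
	void *hdr = dma_pool_alloc(pool, GFP_ATOMIC, &hdr_dma);

	if (!hdr)
		return -ENOMEM;
	ret = sdma_txadd_daddr(dd, tx, hdr_dma, hdr_len);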
759 * @tx: sdma_txreq to which the page is added
774 struct sdma_txreq *tx, in sdma_txadd_kvaddr() argument
781 if (unlikely(tx->num_desc == tx->desc_limit)) { in sdma_txadd_kvaddr()
782 rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE, in sdma_txadd_kvaddr()
795 __sdma_txclean(dd, tx); in sdma_txadd_kvaddr()
800 dd, SDMA_MAP_SINGLE, tx, addr, len); in sdma_txadd_kvaddr()
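
The kernel-virtual variant is symmetric; the core dma-maps the buffer (SDMA_MAP_SINGLE above) and unmaps it during progress processing. Names here are illustrative:

	ret = sdma_txadd_kvaddr(dd, tx, req->hdr, sizeof(*req->hdr));
	if (ret)
		return ret;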
807 struct sdma_txreq *tx,
847 * @tx: txreq for which we need to check descriptor availability
858 struct sdma_txreq *tx) in sdma_progress() argument
862 if (tx->num_desc > sde->desc_avail) in sdma_progress()
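
The fragment above is the heart of the retry decision: sdma_progress() compares the txreq's descriptor count against the engine's free descriptors. A sketch of the sleep-path pattern it appears to support, with the seqcount capture and queuing elided as assumptions:

	/* seq was read from the engine's head seqlock before queuing */
	if (sdma_progress(sde, seq, tx))
		return -EAGAIN;	/* ring advanced and has room; retry the send */
	/* otherwise queue the iowait and sleep until a completion */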