
Searched refs:txreq (Results 1 – 13 of 13) sorted by relevance

/Linux-v5.4/drivers/infiniband/hw/hfi1/
vnic_sdma.c
71 struct sdma_txreq txreq; member
80 static void vnic_sdma_complete(struct sdma_txreq *txreq, in vnic_sdma_complete() argument
83 struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq); in vnic_sdma_complete()
86 sdma_txclean(vnic_sdma->dd, txreq); in vnic_sdma_complete()
98 &tx->txreq, in build_vnic_ulp_payload()
109 &tx->txreq, in build_vnic_ulp_payload()
118 ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq, in build_vnic_ulp_payload()
134 &tx->txreq, in build_vnic_tx_desc()
149 &tx->txreq, in build_vnic_tx_desc()
197 &tx->txreq, vnic_sdma->pkts_sent); in hfi1_vnic_send_dma()
[all …]
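
The vnic_sdma.c hits show the embedded-txreq idiom used throughout these drivers: a driver-private request embeds struct sdma_txreq by value, and the completion callback gets the outer structure back with container_of() before cleaning up the descriptors. A minimal sketch of that shape; the back-pointer field (sdma) and the second callback argument are assumptions not visible in the hits:

	struct vnic_txreq {
		struct sdma_txreq txreq;	/* embedded by value, as above */
		struct hfi1_vnic_sdma *sdma;	/* assumed back-pointer */
	};

	static void vnic_sdma_complete(struct sdma_txreq *txreq, int status)
	{
		/* Recover the containing request from the embedded member. */
		struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
		struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;

		/* Release the descriptors that were added with sdma_txadd_*(). */
		sdma_txclean(vnic_sdma->dd, txreq);
	}
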
verbs_txreq.h
60 struct sdma_txreq txreq; member
95 tx->txreq.num_desc = 0; in get_txreq()
98 tx->txreq.flags = 0; in get_txreq()
104 return &tx->txreq; in get_sdma_txreq()
113 return container_of(stx, struct verbs_txreq, txreq); in get_waiting_verbs_txreq()
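
verbs_txreq.h packages the same idiom as a pair of inverse inline helpers: one hands out the embedded sdma_txreq for the engine to queue, the other maps a queued sdma_txreq back to its verbs_txreq. A sketch of that pair; fields other than txreq are omitted and the helper signatures beyond what the hits show are assumptions:

	struct verbs_txreq {
		struct sdma_txreq txreq;	/* embedded member, per the hit above */
		/* ... verbs-specific state ... */
	};

	/* Outward: the piece the SDMA engine queues and later completes. */
	static inline struct sdma_txreq *get_sdma_txreq(struct verbs_txreq *tx)
	{
		return &tx->txreq;
	}

	/* Inward: recover the verbs request from a waiting sdma_txreq. */
	static inline struct verbs_txreq *get_waiting_verbs_txreq(struct sdma_txreq *stx)
	{
		return container_of(stx, struct verbs_txreq, txreq);
	}
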
user_sdma.c
80 static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
104 struct sdma_txreq *txreq,
127 struct sdma_txreq *txreq, in defer_packet_queue() argument
135 if (sdma_progress(sde, seq, txreq)) in defer_packet_queue()
703 ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY, in user_sdma_txadd_ahg()
708 ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr)); in user_sdma_txadd_ahg()
710 sdma_txclean(pq->dd, &tx->txreq); in user_sdma_txadd_ahg()
734 ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx], in user_sdma_txadd()
866 ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) + in user_sdma_send_pkts()
902 list_add_tail(&tx->txreq.list, &req->txps); in user_sdma_send_pkts()
[all …]
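
user_sdma.c shows the build side of a request: initialize the embedded txreq, add the header descriptor, clean up on failure, and queue the finished txreq on the request's packet list. A rough sketch of that sequence, assuming the completion callback is the final sdma_txinit() argument (suggested by the user_sdma_txreq_cb declaration above) and eliding the AHG and paged-payload paths:

	static int build_one_packet(struct hfi1_user_sdma_pkt_q *pq,
				    struct user_sdma_request *req,
				    struct user_sdma_txreq *tx, u32 datalen)
	{
		int ret;

		/* Reserve descriptor state for header + payload. */
		ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) + datalen,
				  user_sdma_txreq_cb);
		if (ret)
			return ret;

		/* First descriptor: the packet header, by kernel virtual address. */
		ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));
		if (ret) {
			sdma_txclean(pq->dd, &tx->txreq);	/* undo the partial build */
			return ret;
		}

		/* Hand the finished txreq to the request's packet list. */
		list_add_tail(&tx->txreq.list, &req->txps);
		return 0;
	}
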
verbs.c
629 container_of(cookie, struct verbs_txreq, txreq); in verbs_sdma_complete()
676 list_add_tail(&ps->s_txreq->txreq.list, in wait_kmem()
710 &tx->txreq, in build_verbs_ulp_payload()
779 &tx->txreq, in build_verbs_tx_desc()
793 &tx->txreq, in build_verbs_tx_desc()
800 &tx->txreq, in build_verbs_tx_desc()
820 ret = sdma_txadd_daddr(sde->dd, &tx->txreq, in build_verbs_tx_desc()
862 if (!sdma_txreq_built(&tx->txreq)) { in hfi1_verbs_send_dma()
891 ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent); in hfi1_verbs_send_dma()
939 list_add_tail(&ps->s_txreq->txreq.list, in pio_wait()
[all …]
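
verbs.c is the submit side: a txreq may only be handed to the engine once it is fully built, and when memory or PIO credits run out the request is parked on a wait list through its txreq.list member (wait_kmem() and pio_wait() above). A hedged outline of that decision; the wait-list head and the error handling here are assumptions:

	static int send_one(struct hfi1_pkt_state *ps, struct verbs_txreq *tx,
			    struct list_head *wait_list)
	{
		int ret;

		/* Only a fully built txreq may be submitted to the ring. */
		if (!sdma_txreq_built(&tx->txreq))
			return -EINVAL;			/* real code builds it here */

		ret = sdma_send_txreq(tx->sde, ps->wait, &tx->txreq, ps->pkts_sent);
		if (ret)
			/* Out of resources: park the request until space frees up. */
			list_add_tail(&ps->s_txreq->txreq.list, wait_list);

		return ret;
	}
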
verbs_txreq.c
69 sdma_txclean(dd_from_dev(dev), &tx->txreq); in hfi1_put_txreq()
user_sdma.h
238 struct sdma_txreq txreq; member
qp.c
160 container_of(tx, struct verbs_txreq, txreq)); in flush_list_head()
495 struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq); in iowait_sleep()
rc.c
391 ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP; in make_rc_ack()
tid_rdma.c
5296 ps->s_txreq->txreq.flags |= SDMA_TXREQ_F_VIP; in make_tid_rdma_ack()
/Linux-v5.4/drivers/net/xen-netback/
netback.c
799 struct xen_netif_tx_request txreq; in xenvif_tx_build_gops() local
826 RING_COPY_REQUEST(&queue->tx, idx, &txreq); in xenvif_tx_build_gops()
829 if (txreq.size > queue->remaining_credit && in xenvif_tx_build_gops()
830 tx_credit_exceeded(queue, txreq.size)) in xenvif_tx_build_gops()
833 queue->remaining_credit -= txreq.size; in xenvif_tx_build_gops()
840 if (txreq.flags & XEN_NETTXF_extra_info) { in xenvif_tx_build_gops()
855 make_tx_response(queue, &txreq, extra_count, in xenvif_tx_build_gops()
869 make_tx_response(queue, &txreq, extra_count, in xenvif_tx_build_gops()
875 ret = xenvif_count_requests(queue, &txreq, extra_count, in xenvif_tx_build_gops()
882 if (unlikely(txreq.size < ETH_HLEN)) { in xenvif_tx_build_gops()
[all …]
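
The xen-netback hits involve an unrelated txreq: a guest transmit request copied out of the shared ring and validated before any grant operations are built. A rough sketch of the checks visible above; the extra-info handling, the response paths, and the surrounding loop are omitted:

	static bool fetch_and_check_txreq(struct xenvif_queue *queue, RING_IDX idx,
					  struct xen_netif_tx_request *txreq)
	{
		/* Copy the request out of the shared ring; never re-read it in place. */
		RING_COPY_REQUEST(&queue->tx, idx, txreq);

		/* Rate limiting: charge the frame size against the queue's credit. */
		if (txreq->size > queue->remaining_credit &&
		    tx_credit_exceeded(queue, txreq->size))
			return false;
		queue->remaining_credit -= txreq->size;

		/* A frame shorter than an Ethernet header cannot be valid. */
		if (unlikely(txreq->size < ETH_HLEN))
			return false;

		return true;
	}
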
/Linux-v5.4/drivers/infiniband/hw/qib/
qib_sdma.c
496 tx->txreq.start_idx = 0; in complete_sdma_err_req()
497 tx->txreq.next_descq_idx = 0; in complete_sdma_err_req()
498 list_add_tail(&tx->txreq.list, &ppd->sdma_activelist); in complete_sdma_err_req()
534 if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) { in qib_sdma_verbs_send()
544 make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0); in qib_sdma_verbs_send()
547 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF) in qib_sdma_verbs_send()
563 tx->txreq.start_idx = tail; in qib_sdma_verbs_send()
580 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF) in qib_sdma_verbs_send()
601 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST) in qib_sdma_verbs_send()
603 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ) in qib_sdma_verbs_send()
[all …]
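
In the qib driver the txreq records its own descriptor bookkeeping: sg_count is checked against the ring's free space, start_idx remembers where the request begins, the request is tracked on the port's sdma_activelist, and QIB_SDMA_TXREQ_F_* flag bits steer buffer handling and completion. A small sketch of that bookkeeping; locking and the descriptor writes are elided:

	static int queue_txreq(struct qib_pportdata *ppd, struct qib_verbs_txreq *tx,
			       u16 tail)
	{
		/* Back off if the descriptor ring cannot hold this request. */
		if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd))
			return -EBUSY;			/* real code queues and waits */

		/* Remember where this request's descriptors start. */
		tx->txreq.start_idx = tail;

		/* Track the in-flight request so completion can retire it. */
		list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);
		return 0;
	}
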
qib_verbs.c
574 tx = list_entry(l, struct qib_verbs_txreq, txreq.list); in __get_txreq()
603 tx = list_entry(l, struct qib_verbs_txreq, txreq.list); in get_txreq()
626 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { in qib_put_txreq()
627 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF; in qib_put_txreq()
629 tx->txreq.addr, tx->hdr_dwords << 2, in qib_put_txreq()
637 list_add(&tx->txreq.list, &dev->txreq_free); in qib_put_txreq()
685 if (qpp->s_tx->txreq.sg_count > avail) in qib_verbs_sdma_desc_avail()
687 avail -= qpp->s_tx->txreq.sg_count; in qib_verbs_sdma_desc_avail()
713 container_of(cookie, struct qib_verbs_txreq, txreq); in sdma_complete()
723 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) in sdma_complete()
[all …]
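
qib_verbs.c recycles txreqs through a device-wide free list: get_txreq()/__get_txreq() pop an entry with list_entry(), and qib_put_txreq() optionally unmaps the buffer (QIB_SDMA_TXREQ_F_FREEBUF) before pushing the entry back. A hedged sketch of the put side; the exact unmap call and the locking are not shown in the hits and are left as comments:

	static void put_txreq(struct qib_ibdev *dev, struct qib_verbs_txreq *tx)
	{
		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
			tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
			/* Unmap the DMA buffer here; the hits show tx->txreq.addr and
			 * a length of tx->hdr_dwords << 2 being passed. */
		}

		/* Return the request to the device-wide free list. */
		list_add(&tx->txreq.list, &dev->txreq_free);
	}
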
qib.h
248 struct qib_sdma_txreq txreq; member