/Linux-v4.19/drivers/infiniband/hw/hfi1/ |
D | vnic_sdma.c |
    73   struct sdma_txreq txreq;  member
    84   static void vnic_sdma_complete(struct sdma_txreq *txreq,  in vnic_sdma_complete() argument
    87   struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);  in vnic_sdma_complete()
    90   sdma_txclean(vnic_sdma->dd, txreq);  in vnic_sdma_complete()
    102  &tx->txreq,  in build_vnic_ulp_payload()
    113  &tx->txreq,  in build_vnic_ulp_payload()
    122  ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,  in build_vnic_ulp_payload()
    138  &tx->txreq,  in build_vnic_tx_desc()
    153  &tx->txreq,  in build_vnic_tx_desc()
    201  ret = sdma_send_txreq(sde, &vnic_sdma->wait, &tx->txreq,  in hfi1_vnic_send_dma()
    [all …]
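The pattern running through these hits (and through verbs_txreq.h just below) is an intrusive request object: the driver-private struct vnic_txreq embeds the generic struct sdma_txreq as a member, the SDMA core is only ever handed &tx->txreq, and the completion callback vnic_sdma_complete() recovers the outer struct with container_of(). A minimal standalone sketch of that round trip; struct vnic_txreq_demo and demo_complete() are hypothetical stand-ins, and container_of here is a simplified version of the kernel macro:

    #include <stddef.h>
    #include <stdio.h>

    /* simplified version of the kernel's container_of() */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct sdma_txreq {              /* stand-in for the generic request */
        int num_desc;
    };

    struct vnic_txreq_demo {         /* hypothetical outer, driver-private struct */
        int pkt_len;
        struct sdma_txreq txreq;     /* embedded member, as at vnic_sdma.c line 73 */
    };

    /* the completion callback sees only the embedded member ... */
    static void demo_complete(struct sdma_txreq *txreq, int status)
    {
        /* ... and climbs back out to the enclosing request */
        struct vnic_txreq_demo *tx =
            container_of(txreq, struct vnic_txreq_demo, txreq);
        printf("completed pkt_len=%d status=%d\n", tx->pkt_len, status);
    }

    int main(void)
    {
        struct vnic_txreq_demo tx = { .pkt_len = 1500 };
        demo_complete(&tx.txreq, 0); /* the core only ever holds &tx.txreq */
        return 0;
    }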
|
D | verbs_txreq.h |
    60   struct sdma_txreq txreq;  member
    94   tx->txreq.num_desc = 0;  in get_txreq()
    102  return &tx->txreq;  in get_sdma_txreq()
    112  return container_of(stx, struct verbs_txreq, txreq);  in get_waiting_verbs_txreq()
|
D | user_sdma.c |
    81   static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status);
    105  struct sdma_txreq *txreq,
    128  struct sdma_txreq *txreq,  in defer_packet_queue() argument
    136  container_of(txreq, struct user_sdma_txreq, txreq);  in defer_packet_queue()
    138  if (sdma_progress(sde, seq, txreq)) {  in defer_packet_queue()
    710  ret = sdma_txinit_ahg(&tx->txreq, SDMA_TXREQ_F_AHG_COPY,  in user_sdma_txadd_ahg()
    715  ret = sdma_txadd_kvaddr(pq->dd, &tx->txreq, &tx->hdr, sizeof(tx->hdr));  in user_sdma_txadd_ahg()
    717  sdma_txclean(pq->dd, &tx->txreq);  in user_sdma_txadd_ahg()
    741  ret = sdma_txadd_page(pq->dd, &tx->txreq, iovec->pages[pageidx],  in user_sdma_txadd()
    871  ret = sdma_txinit(&tx->txreq, 0, sizeof(req->hdr) +  in user_sdma_send_pkts()
    [all …]
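user_sdma_txadd_ahg() above shows the build/unwind discipline of this API: a request is started with sdma_txinit()/sdma_txinit_ahg(), descriptors are appended with sdma_txadd_kvaddr()/sdma_txadd_page(), and a failure partway through is unwound with sdma_txclean() before the error is returned. A standalone sketch of that error discipline; the demo_* functions are stubs that stand in for the real hfi1 calls:

    #include <errno.h>
    #include <stdio.h>

    struct demo_txreq { int num_desc; };

    /* stubs standing in for sdma_txinit()/sdma_txadd_*()/sdma_txclean() */
    static int demo_txinit(struct demo_txreq *tx) { tx->num_desc = 0; return 0; }
    static int demo_txadd(struct demo_txreq *tx, int fail)
    {
        if (fail)
            return -ENOMEM;           /* e.g. descriptor allocation failed */
        tx->num_desc++;
        return 0;
    }
    static void demo_txclean(struct demo_txreq *tx)
    {
        printf("unwinding %d descriptor(s)\n", tx->num_desc);
        tx->num_desc = 0;
    }

    static int demo_build(struct demo_txreq *tx, int fail_second_add)
    {
        int ret = demo_txinit(tx);
        if (ret)
            return ret;
        ret = demo_txadd(tx, 0);               /* header, cf. line 715 */
        if (ret)
            goto free_tx;
        ret = demo_txadd(tx, fail_second_add); /* payload page, cf. line 741 */
        if (ret)
            goto free_tx;
        return 0;
    free_tx:
        demo_txclean(tx);                      /* cf. sdma_txclean() at line 717 */
        return ret;
    }

    int main(void)
    {
        struct demo_txreq tx;
        printf("build: %d\n", demo_build(&tx, 1));
        return 0;
    }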
|
D | verbs.c |
    711   container_of(cookie, struct verbs_txreq, txreq);  in verbs_sdma_complete()
    739   list_add_tail(&ps->s_txreq->txreq.list,  in wait_kmem()
    785   &tx->txreq,  in build_verbs_ulp_payload()
    854   &tx->txreq,  in build_verbs_tx_desc()
    868   &tx->txreq,  in build_verbs_tx_desc()
    875   &tx->txreq,  in build_verbs_tx_desc()
    895   ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,  in build_verbs_tx_desc()
    928   if (!sdma_txreq_built(&tx->txreq)) {  in hfi1_verbs_send_dma()
    953   ret = sdma_send_txreq(tx->sde, &priv->s_iowait, &tx->txreq,  in hfi1_verbs_send_dma()
    1003  list_add_tail(&ps->s_txreq->txreq.list,  in pio_wait()
    [all …]
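Two more habits show up here: hfi1_verbs_send_dma() checks sdma_txreq_built() so an already-prepared descriptor list is not rebuilt on retry, and wait_kmem()/pio_wait() park a request that cannot be sent yet on a wait list through the list head embedded in its txreq. A standalone sketch of the park-and-flush idea, using a hand-rolled minimal list rather than the kernel's list.h or the hfi1 iowait machinery (all names hypothetical):

    #include <errno.h>
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* minimal circular doubly-linked list, in the style of include/linux/list.h */
    struct list_head { struct list_head *prev, *next; };

    static void list_init(struct list_head *h) { h->prev = h->next = h; }
    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev; n->next = h;
        h->prev->next = n; h->prev = n;
    }

    struct demo_txreq { struct list_head list; };   /* embedded wait-list node */
    struct demo_verbs_txreq {
        int wqe_id;
        struct demo_txreq txreq;
    };

    static struct list_head waitq;
    static int engine_busy = 1;

    static int demo_send(struct demo_verbs_txreq *tx)
    {
        if (engine_busy) {
            /* cf. list_add_tail(&ps->s_txreq->txreq.list, ...) in wait_kmem() */
            list_add_tail(&tx->txreq.list, &waitq);
            return -EBUSY;
        }
        printf("sent wqe %d\n", tx->wqe_id);
        return 0;
    }

    static void demo_flush_waitq(void)   /* run when resources free up */
    {
        while (waitq.next != &waitq) {
            struct list_head *l = waitq.next;
            struct demo_verbs_txreq *tx =
                container_of(l, struct demo_verbs_txreq, txreq.list);
            l->prev->next = l->next;     /* open-coded list_del() */
            l->next->prev = l->prev;
            printf("resubmitting wqe %d\n", tx->wqe_id);
        }
    }

    int main(void)
    {
        struct demo_verbs_txreq tx = { .wqe_id = 7 };
        list_init(&waitq);
        if (demo_send(&tx) == -EBUSY) {
            engine_busy = 0;             /* pretend a completion freed resources */
            demo_flush_waitq();
        }
        return 0;
    }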
|
D | verbs_txreq.c | 69 sdma_txclean(dd_from_dev(dev), &tx->txreq); in hfi1_put_txreq()
|
D | user_sdma.h | 246 struct sdma_txreq txreq; member
|
D | qp.c |
    150  container_of(tx, struct verbs_txreq, txreq));  in flush_tx_list()
    405  struct verbs_txreq *tx = container_of(stx, struct verbs_txreq, txreq);  in iowait_sleep()
|
/Linux-v4.19/drivers/net/xen-netback/ |
D | netback.c |
    799  struct xen_netif_tx_request txreq;  in xenvif_tx_build_gops() local
    826  RING_COPY_REQUEST(&queue->tx, idx, &txreq);  in xenvif_tx_build_gops()
    829  if (txreq.size > queue->remaining_credit &&  in xenvif_tx_build_gops()
    830  tx_credit_exceeded(queue, txreq.size))  in xenvif_tx_build_gops()
    833  queue->remaining_credit -= txreq.size;  in xenvif_tx_build_gops()
    840  if (txreq.flags & XEN_NETTXF_extra_info) {  in xenvif_tx_build_gops()
    855  make_tx_response(queue, &txreq, extra_count,  in xenvif_tx_build_gops()
    869  make_tx_response(queue, &txreq, extra_count,  in xenvif_tx_build_gops()
    875  ret = xenvif_count_requests(queue, &txreq, extra_count,  in xenvif_tx_build_gops()
    882  if (unlikely(txreq.size < ETH_HLEN)) {  in xenvif_tx_build_gops()
    [all …]
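Two defensive moves are visible in these hits: RING_COPY_REQUEST() copies the request out of the shared ring into the local txreq before any validation, so a hostile frontend cannot rewrite fields mid-check (the hardening introduced for XSA-155), and a per-queue byte credit limits how fast a guest may transmit. A standalone sketch of the credit gate; names are hypothetical and the timer-driven replenish policy is collapsed into a stub:

    #include <stdio.h>

    struct demo_queue {
        unsigned long remaining_credit;
        unsigned long credit_per_period;  /* refilled periodically in netback */
    };

    /* cf. tx_credit_exceeded(): replenish, then see if the request still fits */
    static int demo_credit_exceeded(struct demo_queue *q, unsigned size)
    {
        q->remaining_credit = q->credit_per_period; /* pretend the period expired */
        return size > q->remaining_credit;
    }

    static int demo_admit(struct demo_queue *q, unsigned size)
    {
        /* cf. netback.c lines 829-833 */
        if (size > q->remaining_credit && demo_credit_exceeded(q, size))
            return 0;                     /* over budget: defer this packet */
        q->remaining_credit -= size;
        return 1;
    }

    int main(void)
    {
        struct demo_queue q = { .remaining_credit = 2000,
                                .credit_per_period = 2000 };
        printf("1500B: %s\n", demo_admit(&q, 1500) ? "sent" : "deferred");
        printf("1500B: %s\n", demo_admit(&q, 1500) ? "sent" : "deferred");
        return 0;
    }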
|
/Linux-v4.19/drivers/infiniband/hw/qib/ |
D | qib_sdma.c |
    496  tx->txreq.start_idx = 0;  in complete_sdma_err_req()
    497  tx->txreq.next_descq_idx = 0;  in complete_sdma_err_req()
    498  list_add_tail(&tx->txreq.list, &ppd->sdma_activelist);  in complete_sdma_err_req()
    534  if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) {  in qib_sdma_verbs_send()
    544  make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0);  in qib_sdma_verbs_send()
    547  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)  in qib_sdma_verbs_send()
    563  tx->txreq.start_idx = tail;  in qib_sdma_verbs_send()
    584  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF)  in qib_sdma_verbs_send()
    622  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST)  in qib_sdma_verbs_send()
    624  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ)  in qib_sdma_verbs_send()
    [all …]
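The qib send path steers per-request behavior with flag bits carried on the embedded txreq (QIB_SDMA_TXREQ_F_USELARGEBUF, _HEADTOHOST, _INTREQ in the hits above), testing each bit while descriptors are built. A standalone sketch of the same set/test idiom; the DEMO_* bit values are invented, the real ones live in qib.h:

    #include <stdio.h>

    /* illustrative values only; the real flags are defined in qib.h */
    #define DEMO_TXREQ_F_USELARGEBUF 0x1
    #define DEMO_TXREQ_F_HEADTOHOST  0x2
    #define DEMO_TXREQ_F_INTREQ      0x4

    struct demo_sdma_txreq { unsigned flags; };

    static void demo_build_desc(const struct demo_sdma_txreq *tx)
    {
        /* cf. qib_sdma_verbs_send(): each flag selects a descriptor option */
        if (tx->flags & DEMO_TXREQ_F_USELARGEBUF)
            printf("using large buffer\n");
        if (tx->flags & DEMO_TXREQ_F_HEADTOHOST)
            printf("requesting head write-back to host\n");
        if (tx->flags & DEMO_TXREQ_F_INTREQ)
            printf("requesting completion interrupt\n");
    }

    int main(void)
    {
        struct demo_sdma_txreq tx = {
            .flags = DEMO_TXREQ_F_USELARGEBUF | DEMO_TXREQ_F_INTREQ,
        };
        demo_build_desc(&tx);
        return 0;
    }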
|
D | qib_verbs.c |
    610  tx = list_entry(l, struct qib_verbs_txreq, txreq.list);  in __get_txreq()
    639  tx = list_entry(l, struct qib_verbs_txreq, txreq.list);  in get_txreq()
    662  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {  in qib_put_txreq()
    663  tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;  in qib_put_txreq()
    665  tx->txreq.addr, tx->hdr_dwords << 2,  in qib_put_txreq()
    673  list_add(&tx->txreq.list, &dev->txreq_free);  in qib_put_txreq()
    721  if (qpp->s_tx->txreq.sg_count > avail)  in qib_verbs_sdma_desc_avail()
    723  avail -= qpp->s_tx->txreq.sg_count;  in qib_verbs_sdma_desc_avail()
    749  container_of(cookie, struct qib_verbs_txreq, txreq);  in sdma_complete()
    759  if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)  in sdma_complete()
    [all …]
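qib_verbs.c recycles request structures instead of allocating one per send: get_txreq()/__get_txreq() pop an entry off dev->txreq_free with list_entry() (container_of over the embedded list head), and qib_put_txreq() pushes the finished request back. A standalone sketch of that free-list idiom; names are hypothetical and the locking the real driver needs is omitted:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)

    struct list_head { struct list_head *prev, *next; };
    static void list_init(struct list_head *h) { h->prev = h->next = h; }
    static void list_add(struct list_head *n, struct list_head *h) /* push front */
    {
        n->next = h->next; n->prev = h;
        h->next->prev = n; h->next = n;
    }
    static void list_del(struct list_head *n)
    {
        n->prev->next = n->next; n->next->prev = n->prev;
    }

    struct demo_txreq { struct list_head list; };
    struct demo_verbs_txreq { int id; struct demo_txreq txreq; };

    static struct list_head txreq_free;  /* cf. dev->txreq_free */

    static struct demo_verbs_txreq *demo_get_txreq(void)
    {
        if (txreq_free.next != &txreq_free) {      /* fast path: reuse */
            struct list_head *l = txreq_free.next;
            list_del(l);
            return list_entry(l, struct demo_verbs_txreq, txreq.list);
        }
        return calloc(1, sizeof(struct demo_verbs_txreq)); /* slow path */
    }

    static void demo_put_txreq(struct demo_verbs_txreq *tx)
    {
        list_add(&tx->txreq.list, &txreq_free);    /* cf. qib_put_txreq() line 673 */
    }

    int main(void)
    {
        list_init(&txreq_free);
        struct demo_verbs_txreq *a = demo_get_txreq();
        a->id = 1;
        demo_put_txreq(a);
        struct demo_verbs_txreq *b = demo_get_txreq(); /* same object comes back */
        printf("recycled: %s\n", a == b ? "yes" : "no");
        free(b);
        return 0;
    }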
|
D | qib.h | 247 struct qib_sdma_txreq txreq; member
|