/Linux-v5.4/drivers/infiniband/sw/rdmavt/

trace_tx.h
     95  __field(u64, wr_id)
    116  __entry->wr_id = wqe->wr.wr_id;
    137  __entry->wr_id,
    163  __field(u64, wr_id)
    175  __entry->wr_id = wqe->wr.wr_id;
    191  __entry->wr_id,

trace_cq.h
    116  __field(u64, wr_id)
    127  __entry->wr_id = wc->wr_id;
    140  __entry->wr_id,

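Both trace headers capture wr_id with the standard kernel tracepoint triple: declare the field with __field(u64, wr_id), copy it from the WQE or the work completion in TP_fast_assign, and print it in TP_printk. A minimal sketch of the same pattern, with an illustrative event name, assuming the usual include/trace boilerplate and <rdma/ib_verbs.h> for struct ib_wc:

/* Sketch of a tracepoint recording wr_id, modeled on the
 * __field/__entry/TP_printk triple in trace_tx.h and trace_cq.h above.
 * The event name and arguments are illustrative only. */
TRACE_EVENT(example_wc_wr_id,
	TP_PROTO(struct ib_wc *wc),
	TP_ARGS(wc),
	TP_STRUCT__entry(
		__field(u64, wr_id)
		__field(int, status)
	),
	TP_fast_assign(
		__entry->wr_id = wc->wr_id;   /* same copy as trace_cq.h line 127 */
		__entry->status = wc->status;
	),
	TP_printk("wr_id %llu status %d",
		  (unsigned long long)__entry->wr_id, __entry->status)
);
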
/Linux-v5.4/net/smc/

smc_wr.h
     67  u64 wr_id, temp_wr_id;    in smc_wr_rx_post() local
     70  wr_id = ++link->wr_rx_id; /* tasklet context, thus not atomic */    in smc_wr_rx_post()
     71  temp_wr_id = wr_id;    in smc_wr_rx_post()
     73  link->wr_rx_ibs[index].wr_id = wr_id;    in smc_wr_rx_post()

smc_wr.c
     41  u64 wr_id; /* work request id sent */    member
     53  static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)    in smc_wr_tx_find_pending_index() argument
     58  if (link->wr_tx_pends[i].wr_id == wr_id)    in smc_wr_tx_find_pending_index()
     82  pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);    in smc_wr_tx_process_cqe()
    177  u64 wr_id;    in smc_wr_tx_get_free_slot() local
    200  wr_id = smc_wr_tx_get_next_wr_id(link);    in smc_wr_tx_get_free_slot()
    202  wr_pend->wr_id = wr_id;    in smc_wr_tx_get_free_slot()
    207  wr_ib->wr_id = wr_id;    in smc_wr_tx_get_free_slot()
    263  link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;    in smc_wr_reg_send()
    347  temp_wr_id = wc->wr_id;    in smc_wr_rx_demultiplex()

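Taken together, the smc_wr fragments show a wr_id's whole life cycle: a per-link counter hands out the id at post time (wr_rx_id for receives in smc_wr.h, smc_wr_tx_get_next_wr_id() for sends), the id is stored both in the pending-send slot and in the IB work request, and on completion smc_wr_tx_find_pending_index() scans the pending array for the slot whose wr_id equals wc->wr_id. A self-contained model of that assign-then-match pattern; the structures below are placeholders, not SMC's:

#include <stdint.h>

#define NR_PENDS 16

struct pend_slot {               /* placeholder for link->wr_tx_pends[] */
	uint64_t wr_id;
	int      in_use;
};

struct fake_link {               /* placeholder for struct smc_link */
	uint64_t next_wr_id;     /* counter behind smc_wr_tx_get_next_wr_id() */
	struct pend_slot pends[NR_PENDS];
};

/* Post path: hand out a fresh wr_id and remember it in the pending slot. */
static uint64_t post_assign(struct fake_link *link, int slot)
{
	uint64_t wr_id = ++link->next_wr_id;

	link->pends[slot].wr_id = wr_id;
	link->pends[slot].in_use = 1;
	return wr_id;            /* also written into the IB work request */
}

/* Completion path: locate the pending slot by the wr_id echoed in the CQE,
 * as smc_wr_tx_find_pending_index() does; returns -1 when nothing matches. */
static int find_pending(struct fake_link *link, uint64_t wr_id)
{
	for (int i = 0; i < NR_PENDS; i++)
		if (link->pends[i].in_use && link->pends[i].wr_id == wr_id)
			return i;
	return -1;
}
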
/Linux-v5.4/net/rds/

ib_ring.c
    156  u32 rds_ib_ring_completed(struct rds_ib_work_ring *ring, u32 wr_id, u32 oldest)    in rds_ib_ring_completed() argument
    160  if (oldest <= (unsigned long long)wr_id)    in rds_ib_ring_completed()
    161  ret = (unsigned long long)wr_id - oldest + 1;    in rds_ib_ring_completed()
    163  ret = ring->w_nr - oldest + (unsigned long long)wr_id + 1;    in rds_ib_ring_completed()
    166  wr_id, oldest);    in rds_ib_ring_completed()

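rds_ib_ring_completed() converts a completed wr_id (here a ring position) and the oldest outstanding position into an inclusive count of finished entries, with a second branch for completions that wrapped past the end of the w_nr-entry ring. A standalone model of that arithmetic with two worked checks; the names and ring size are illustrative:

#include <assert.h>
#include <stdint.h>

/* Wrap-aware completion count, mirroring rds_ib_ring_completed():
 * how many entries from "oldest" up to and including "wr_id" finished. */
static uint32_t ring_completed(uint32_t w_nr, uint32_t wr_id, uint32_t oldest)
{
	if (oldest <= wr_id)
		return wr_id - oldest + 1;        /* no wrap-around */
	return w_nr - oldest + wr_id + 1;         /* wrapped past entry w_nr - 1 */
}

int main(void)
{
	assert(ring_completed(256, 10, 5) == 6);   /* entries 5..10          */
	assert(ring_completed(256, 2, 250) == 9);  /* entries 250..255, 0..2 */
	return 0;
}
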
ib_frmr.c
    154  reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;    in rds_ib_post_reg_frmr()
    288  s_wr->wr_id = (unsigned long)(void *)ibmr;    in rds_ib_post_inv()
    324  struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;    in rds_ib_mr_cqe_handler()

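ib_frmr.c uses the plainest wr_id convention: the MR pointer itself is cast into the 64-bit cookie when the register or invalidate work request is posted, and rds_ib_mr_cqe_handler() casts wc->wr_id straight back. A minimal sketch of that round trip, using a placeholder struct and uintptr_t instead of the unsigned long casts in the fragment:

#include <stdint.h>

struct my_mr { int state; };     /* placeholder for struct rds_ib_mr */

/* Post path: fold the object pointer into the wr_id cookie. */
static uint64_t mr_to_wr_id(struct my_mr *mr)
{
	return (uint64_t)(uintptr_t)mr;
}

/* Completion path: recover the object from wc->wr_id, as
 * rds_ib_mr_cqe_handler() does. Only valid for cookies created above. */
static struct my_mr *wr_id_to_mr(uint64_t wr_id)
{
	return (struct my_mr *)(uintptr_t)wr_id;
}
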
/Linux-v5.4/drivers/infiniband/ulp/ipoib/

ipoib_ib.c
    106  priv->rx_wr.wr_id = id | IPOIB_OP_RECV;    in ipoib_ib_post_receive()
    176  unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV;    in ipoib_ib_handle_rx_wc() local
    183  wr_id, wc->status);    in ipoib_ib_handle_rx_wc()
    185  if (unlikely(wr_id >= ipoib_recvq_size)) {    in ipoib_ib_handle_rx_wc()
    187  wr_id, ipoib_recvq_size);    in ipoib_ib_handle_rx_wc()
    191  skb = priv->rx_ring[wr_id].skb;    in ipoib_ib_handle_rx_wc()
    197  wc->status, wr_id, wc->vendor_err);    in ipoib_ib_handle_rx_wc()
    198  ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping);    in ipoib_ib_handle_rx_wc()
    200  priv->rx_ring[wr_id].skb = NULL;    in ipoib_ib_handle_rx_wc()
    204  memcpy(mapping, priv->rx_ring[wr_id].mapping,    in ipoib_ib_handle_rx_wc()
    [all …]

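ipoib stores a receive-ring slot index in wr_id and ORs in IPOIB_OP_RECV (plus IPOIB_OP_CM in connected mode, see ipoib_cm.c below) as type bits; the RX handler masks the bits off, rejects indices at or above ipoib_recvq_size, and only then touches rx_ring[]. A standalone model of the encode, decode and bounds check; the flag value and ring size below are illustrative, not IPoIB's:

#include <stdbool.h>
#include <stdint.h>

#define OP_RECV    (1ULL << 31)   /* illustrative type bit, not IPOIB_OP_RECV's value */
#define RECVQ_SIZE 256u           /* illustrative ring size */

/* Post path: slot index | type bit, as in ipoib_ib_post_receive(). */
static uint64_t encode_rx_wr_id(uint32_t slot)
{
	return (uint64_t)slot | OP_RECV;
}

/* Completion path: strip the type bit and bounds-check before touching
 * the ring, as ipoib_ib_handle_rx_wc() does. */
static bool decode_rx_wr_id(uint64_t wr_id, uint32_t *slot)
{
	uint64_t idx = wr_id & ~OP_RECV;

	if (idx >= RECVQ_SIZE)
		return false;          /* unexpected completion, don't index the ring */
	*slot = (uint32_t)idx;
	return true;
}
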
ipoib_cm.c
     99  priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;    in ipoib_cm_post_receive_srq()
    124  wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;    in ipoib_cm_post_receive_nonsrq()
    226  ipoib_cm_rx_drain_wr.wr_id = IPOIB_CM_RX_DRAIN_WRID;    in ipoib_cm_start_rx_drain()
    564  unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);    in ipoib_cm_handle_rx_wc() local
    574  wr_id, wc->status);    in ipoib_cm_handle_rx_wc()
    576  if (unlikely(wr_id >= ipoib_recvq_size)) {    in ipoib_cm_handle_rx_wc()
    577  if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {    in ipoib_cm_handle_rx_wc()
    585  wr_id, ipoib_recvq_size);    in ipoib_cm_handle_rx_wc()
    594  skb = rx_ring[wr_id].skb;    in ipoib_cm_handle_rx_wc()
    599  wc->status, wr_id, wc->vendor_err);    in ipoib_cm_handle_rx_wc()
    [all …]

/Linux-v5.4/drivers/infiniband/hw/mlx5/

gsi.c
    101  u64 wr_id;    in handle_single_completion() local
    106  wr_id = wr->wc.wr_id;    in handle_single_completion()
    108  wr->wc.wr_id = wr_id;    in handle_single_completion()
    432  gsi_wr->wc.wr_id = wr->wr.wr_id;    in mlx5_ib_add_outstanding_wr()
    449  { .wr_id = wr->wr.wr_id },    in mlx5_ib_gsi_silent_drop()

/Linux-v5.4/drivers/infiniband/hw/i40iw/

i40iw_uk.c
    138  u64 wr_id    in i40iw_qp_get_next_send_wqe() argument
    193  qp->sq_wrtrk_array[*wqe_idx].wrid = wr_id;    in i40iw_qp_get_next_send_wqe()
    274  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);    in i40iw_rdma_write()
    331  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->lo_addr.len, info->wr_id);    in i40iw_rdma_read()
    385  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, total_size, info->wr_id);    in i40iw_send()
    444  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);    in i40iw_inline_rdma_write()
    520  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, wqe_size, op_info->len, info->wr_id);    in i40iw_inline_send()
    582  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);    in i40iw_stag_local_invalidate()
    624  wqe = i40iw_qp_get_next_send_wqe(qp, &wqe_idx, I40IW_QP_WQE_MIN_SIZE, 0, info->wr_id);    in i40iw_mw_bind()
    673  qp->rq_wrid_array[wqe_idx] = info->wr_id;    in i40iw_post_receive()
    [all …]

i40iw_user.h
    238  u64 wr_id;    member
    258  u64 wr_id;    member
    264  u64 wr_id;    member
    410  u64 wr_id
    421  enum i40iw_status_code i40iw_nop(struct i40iw_qp_uk *qp, u64 wr_id,

/Linux-v5.4/drivers/infiniband/hw/bnxt_re/

qplib_fp.c
     696  srqe->wr_id[0] = cpu_to_le32((u32)next);    in bnxt_qplib_post_srq_recv()
     697  srq->swq[next].wr_id = wqe->wr_id;    in bnxt_qplib_post_srq_recv()
    1555  swq->wr_id = wqe->wr_id;    in bnxt_qplib_post_send()
    1780  swq->wr_id = wqe->wr_id;    in bnxt_qplib_post_send()
    1846  rq->swq[sw_prod].wr_id = wqe->wr_id;    in bnxt_qplib_post_recv()
    1871  rqe->wr_id[0] = cpu_to_le32(sw_prod);    in bnxt_qplib_post_recv()
    1877  rq->swq[sw_prod].wr_id = wqe->wr_id;    in bnxt_qplib_post_recv()
    2031  if (sq->swq[sw_cons].wr_id == BNXT_QPLIB_FENCE_WRID) {    in __flush_sq()
    2039  cqe->wr_id = sq->swq[sw_cons].wr_id;    in __flush_sq()
    2088  cqe->wr_id = rq->swq[sw_cons].wr_id;    in __flush_rq()
    [all …]

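bnxt_re keeps a shadow software queue: at post time the caller's wr_id goes into swq[idx].wr_id while the hardware ring entry carries only the slot number (cpu_to_le32(sw_prod)); on completion or flush the driver copies the saved wr_id into the reported CQE, and a reserved sentinel (BNXT_QPLIB_FENCE_WRID) marks internal fence WQEs that must not be reported. A minimal standalone model of that bookkeeping; the names and the sentinel value are placeholders:

#include <stdbool.h>
#include <stdint.h>

#define QDEPTH     64u
#define FENCE_WRID (~0ULL)       /* illustrative sentinel, not BNXT_QPLIB_FENCE_WRID */

struct soft_wqe { uint64_t wr_id; };          /* driver-private shadow entry   */
struct soft_cqe { uint64_t wr_id; };          /* completion reported to caller */

static struct soft_wqe swq[QDEPTH];

/* Post path: keep the caller's wr_id in the shadow slot; the hardware WQE
 * only needs the slot number to point back here. */
static void post_slot(uint32_t slot, uint64_t wr_id)
{
	swq[slot].wr_id = wr_id;
}

/* Completion/flush path: translate the slot back to the caller's wr_id and
 * skip internal fence WQEs, as __flush_sq() does. */
static bool complete_slot(uint32_t slot, struct soft_cqe *cqe)
{
	if (swq[slot].wr_id == FENCE_WRID)
		return false;
	cqe->wr_id = swq[slot].wr_id;
	return true;
}
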
/Linux-v5.4/include/uapi/rdma/

vmw_pvrdma-abi.h
    218  __aligned_u64 wr_id; /* wr id */    member
    226  __aligned_u64 wr_id; /* wr id */    member
    279  __aligned_u64 wr_id;    member

rdma_user_rxe.h
     70  __aligned_u64 wr_id;    member
    149  __aligned_u64 wr_id;    member

mlx5_user_ioctl_verbs.h
     57  __aligned_u64 wr_id;    member

rvt-abi.h
     44  __u64 wr_id;    member

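The uapi headers above declare wr_id as __aligned_u64 (rvt-abi.h uses plain __u64). The aligned type forces 8-byte alignment, so the structure has the same offsets and size for 32-bit and 64-bit userspace sharing these ABI structs. A hypothetical descriptor, not taken from any of the headers, illustrating the point:

#include <linux/types.h>

/* Hypothetical userspace-visible completion record, modeled on the uapi
 * headers above. */
struct example_uapi_cqe {
	__u32         opcode;
	/* With a plain __u64 here, a 32-bit build would typically place wr_id
	 * at offset 4 while a 64-bit build places it at offset 8;
	 * __aligned_u64 pins it to offset 8 on both. */
	__aligned_u64 wr_id;
	__u32         status;
	__u32         byte_len;
};
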
/Linux-v5.4/drivers/infiniband/hw/cxgb4/

cq.c
    469  srq->sw_rq[srq->pidx].wr_id = pwr->wr_id;    in post_pending_srq_wrs()
    476  (unsigned long long)pwr->wr_id);    in post_pending_srq_wrs()
    494  u64 wr_id;    in reap_srq_cqe() local
    497  wr_id = srq->sw_rq[rel_idx].wr_id;    in reap_srq_cqe()
    503  (unsigned long long)srq->sw_rq[rel_idx].wr_id);    in reap_srq_cqe()
    511  srq->sw_rq[srq->cidx].wr_id);    in reap_srq_cqe()
    522  (unsigned long long)srq->sw_rq[rel_idx].wr_id);    in reap_srq_cqe()
    525  return wr_id;    in reap_srq_cqe()
    717  *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;    in poll_cq()
    724  *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;    in poll_cq()
    [all …]

/Linux-v5.4/drivers/infiniband/hw/qedr/

qedr_roce_cm.c
    586  qp->wqe_wr_id[qp->sq.prod].wr_id = wr->wr_id;    in qedr_gsi_post_send()
    590  wr->opcode, in_irq(), irqs_disabled(), wr->wr_id);    in qedr_gsi_post_send()
    656  qp->rqe_wr_id[qp->rq.prod].wr_id = wr->wr_id;    in qedr_gsi_post_recv()
    687  wc[i].wr_id = qp->rqe_wr_id[qp->rq.cons].wr_id;    in qedr_gsi_poll_cq()
    715  wc[i].wr_id = qp->wqe_wr_id[qp->sq.cons].wr_id;    in qedr_gsi_poll_cq()

/Linux-v5.4/drivers/infiniband/hw/mlx4/

mad.c
     645  wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);    in mlx4_ib_send_to_slave()
    1329  recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |    in mlx4_ib_post_pv_qp_buf()
    1445  wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);    in mlx4_ib_send_to_wire()
    1484  struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];    in mlx4_ib_multiplex_mad()
    1485  int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);    in mlx4_ib_multiplex_mad()
    1742  tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];    in mlx4_ib_tunnel_comp_worker()
    1748  wc.wr_id &    in mlx4_ib_tunnel_comp_worker()
    1752  "buf:%lld\n", wc.wr_id);    in mlx4_ib_tunnel_comp_worker()
    1757  wc.wr_id, wc.status);    in mlx4_ib_tunnel_comp_worker()
    1758  rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &    in mlx4_ib_tunnel_comp_worker()
    [all …]

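mad.c packs two values into wr_id: the tunnel buffer index in the low bits (recovered with & (MLX4_NUM_TUNNEL_BUFS - 1)) and a QP selector placed higher up by MLX4_TUN_SET_WRID_QPN() and read back by MLX4_TUN_WRID_QPN(), plus flags such as MLX4_TUN_WRID_RECV. A standalone model of that packing; the field positions and widths below are illustrative and do not match the real MLX4_TUN_* macros:

#include <stdint.h>

/* Illustrative layout: bits 0..8 carry the buffer index, bits 9..10 the
 * QP selector. */
#define NUM_BUFS  512u
#define BUF_MASK  (NUM_BUFS - 1)
#define QPN_SHIFT 9
#define QPN_MASK  0x3u

static uint64_t pack_wr_id(uint32_t buf_ix, uint32_t qp_sel)
{
	return (uint64_t)(buf_ix & BUF_MASK) |
	       ((uint64_t)(qp_sel & QPN_MASK) << QPN_SHIFT);
}

static uint32_t wr_id_buf(uint64_t wr_id)
{
	return (uint32_t)(wr_id & BUF_MASK);                  /* like "& (MLX4_NUM_TUNNEL_BUFS - 1)" */
}

static uint32_t wr_id_qp(uint64_t wr_id)
{
	return (uint32_t)((wr_id >> QPN_SHIFT) & QPN_MASK);   /* like MLX4_TUN_WRID_QPN() */
}
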
/Linux-v5.4/include/rdma/

rdmavt_qp.h
    850  u64 wr_id;    in rvt_qp_complete_swqe() local
    863  wr_id = wqe->wr.wr_id;    in rvt_qp_complete_swqe()
    872  .wr_id = wr_id,    in rvt_qp_complete_swqe()

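rvt_qp_complete_swqe() copies wqe->wr.wr_id into a local (line 863) before the WQE is retired and builds the completion from that copy (the .wr_id = wr_id initializer at line 872), presumably because the slot may be reused as soon as it is marked complete. A small sketch of that ordering with placeholder types:

#include <stdint.h>

struct swqe { uint64_t wr_id; int busy; };     /* placeholder send WQE slot   */
struct cqe  { uint64_t wr_id; int status; };   /* placeholder work completion */

/* Snapshot wr_id, retire the slot, then build the completion from the
 * snapshot, never from the (possibly already reused) slot itself. */
static struct cqe complete_swqe(struct swqe *wqe, int status)
{
	uint64_t wr_id = wqe->wr_id;   /* copy out before the slot is freed */

	wqe->busy = 0;                 /* slot may be reposted from here on */

	return (struct cqe){ .wr_id = wr_id, .status = status };
}
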
/Linux-v5.4/drivers/net/ethernet/cisco/enic/

vnic_rq.h
     76  uint64_t wr_id;    member
    130  buf->wr_id = wrid;    in vnic_rq_post()

vnic_wq.h
     61  uint64_t wr_id; /* Cookie */    member
    146  buf->wr_id = wrid;    in vnic_wq_post()

/Linux-v5.4/drivers/net/ethernet/ibm/ehea/

ehea_qmr.h
     87  u64 wr_id;    member
    126  u64 wr_id; /* work request ID */    member
    149  u64 wr_id; /* work request ID from WQE */    member

/Linux-v5.4/drivers/infiniband/sw/rxe/

rxe_comp.c
    410  wc->wr_id = wqe->wr.wr_id;    in make_send_cqe()
    421  uwc->wr_id = wqe->wr.wr_id;    in make_send_cqe()

/Linux-v5.4/drivers/infiniband/hw/cxgb3/

iwch_qp.c
    282  qhp->wq.rq_size_log2)].wr_id = wr->wr_id;    in build_rdma_recv()
    345  qhp->wq.rq_size_log2)].wr_id = wr->wr_id;    in build_zero_stag_recv()
    437  sqp->wr_id = wr->wr_id;    in iwch_post_send()
    448  __func__, (unsigned long long)wr->wr_id, idx,    in iwch_post_send()
    512  __func__, (unsigned long long)wr->wr_id,    in iwch_post_receive()