References to wr_cqe in Linux v5.4

/Linux-v5.4/net/sunrpc/xprtrdma/
  frwr_ops.c
    402  struct ib_cqe *cqe = wc->wr_cqe;  in frwr_wc_fastreg()
    435  frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;  in frwr_send()
    484  struct ib_cqe *cqe = wc->wr_cqe;  in frwr_wc_localinv()
    503  struct ib_cqe *cqe = wc->wr_cqe;  in frwr_wc_localinv_wake()
    549  last->wr_cqe = &frwr->fr_cqe;  in frwr_unmap_sync()
    605  struct ib_cqe *cqe = wc->wr_cqe;  in frwr_wc_localinv_done()
    652  last->wr_cqe = &frwr->fr_cqe;  in frwr_unmap_async()
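Every hit in this cross-reference is an instance of the same typed-CQE idiom: instead of stashing an opaque wr_id, the consumer embeds a struct ib_cqe (which holds only a done callback) in its per-request context, points the work request's wr_cqe at it, and recovers the context in the completion handler with container_of(), as frwr_wc_fastreg() above does. A minimal sketch of the idiom; the my_request structure and function names are hypothetical, not from the kernel:

#include <rdma/ib_verbs.h>

/* Hypothetical per-request context embedding the typed CQE. */
struct my_request {
        struct ib_cqe   cqe;    /* wc->wr_cqe points here on completion */
        void            *buf;
};

/* Completion handler: recover the request from the embedded ib_cqe. */
static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct my_request *req =
                container_of(wc->wr_cqe, struct my_request, cqe);

        if (wc->status != IB_WC_SUCCESS)
                pr_err("send failed: %s\n", ib_wc_status_msg(wc->status));
        /* ... retire req ... */
}

/* Posting side: wire the callback into the work request. */
static int my_post_send(struct ib_qp *qp, struct my_request *req,
                        struct ib_sge *sge)
{
        struct ib_send_wr wr = {};

        req->cqe.done = my_send_done;
        wr.wr_cqe     = &req->cqe;      /* the union member replacing wr_id */
        wr.sg_list    = sge;
        wr.num_sge    = 1;
        wr.opcode     = IB_WR_SEND;
        wr.send_flags = IB_SEND_SIGNALED;

        return ib_post_send(qp, &wr, NULL);
}

The entries below are grouped by directory; each file lists the line, the referencing statement, and the enclosing function.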
  svc_rdma_recvfrom.c
    139  ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;  in svc_rdma_recv_ctxt_alloc()
    288  struct ib_cqe *cqe = wc->wr_cqe;  in svc_rdma_wc_receive()
  verbs.c
    135  struct ib_cqe *cqe = wc->wr_cqe;  in rpcrdma_wc_send()
    153  struct ib_cqe *cqe = wc->wr_cqe;  in rpcrdma_wc_receive()
    820  sc->sc_wr.wr_cqe = &sc->sc_cqe;  in rpcrdma_sendctx_create()
    1064  rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;  in rpcrdma_rep_create()
  svc_rdma_sendto.c
    149  ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe;  in svc_rdma_send_ctxt_alloc()
    261  struct ib_cqe *cqe = wc->wr_cqe;  in svc_rdma_wc_send()
  svc_rdma_rw.c
    204  struct ib_cqe *cqe = wc->wr_cqe;  in svc_rdma_write_done()
    262  struct ib_cqe *cqe = wc->wr_cqe;  in svc_rdma_wc_read_done()
/Linux-v5.4/net/9p/
  trans_rdma.c
    295  container_of(wc->wr_cqe, struct p9_rdma_context, cqe);  in recv_done()
    346  container_of(wc->wr_cqe, struct p9_rdma_context, cqe);  in send_done()
    402  wr.wr_cqe = &c->cqe;  in post_recv()
    494  wr.wr_cqe = &c->cqe;  in rdma_request()
/Linux-v5.4/drivers/infiniband/core/
  cq.c
    83  if (wc->wr_cqe)  in __ib_process_cq()
    84  wc->wr_cqe->done(cq, wc);  in __ib_process_cq()
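__ib_process_cq() is the single dispatch point for every handler in this list: for each polled completion that carries a typed CQE it calls wc->wr_cqe->done(cq, wc). The context the callback runs in is chosen when the CQ is allocated; a minimal sketch, with arbitrary placeholder values for the queue depth and completion vector:

#include <rdma/ib_verbs.h>

static struct ib_cq *my_alloc_cq(struct ib_device *dev)
{
        /* done() callbacks for WRs completing on this CQ run from
         * softirq context via __ib_process_cq() */
        return ib_alloc_cq(dev, NULL, 128, 0, IB_POLL_SOFTIRQ);
}

IB_POLL_DIRECT and IB_POLL_WORKQUEUE are the other polling contexts available in v5.4.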
  mad.c
    764  wc->wr_cqe = cqe;  in build_smp_wc()
    904  send_wr->wr.wr_cqe, drslid,  in handle_outgoing_dr_smp()
    1110  mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;  in ib_create_send_mad()
    1223  mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe;  in ib_send_mad()
    2260  container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);  in ib_mad_recv_done()
    2511  container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);  in ib_mad_send_done()
    2599  container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);  in ib_mad_send_error()
    2782  local->mad_send_wr->send_wr.wr.wr_cqe,  in local_completions()
    2969  recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe;  in ib_mad_post_receive_mads()
  rw.c
    416  ctx->reg->reg_wr.wr.wr_cqe = NULL;  in rdma_rw_ctx_signature_init()
    523  last_wr->wr_cqe = cqe;  in rdma_rw_ctx_wrs()
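Note that rdma_rw_ctx_wrs() attaches the caller's CQE only to the last WR in the chain (line 523 above), so a single completion signals an entire multi-WR transfer. A hedged sketch of how a ULP drives this API; everything except the rdma_rw_* calls is a hypothetical name:

#include <linux/completion.h>
#include <rdma/rw.h>

/* Hypothetical I/O context; cqe fires once for the whole WR chain. */
struct my_io {
        struct rdma_rw_ctx      rw;
        struct ib_cqe           cqe;
        struct completion       done;
};

static void my_rdma_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct my_io *io = container_of(wc->wr_cqe, struct my_io, cqe);

        /* all WRs in the chain have completed (or flushed in error) */
        complete(&io->done);
}

static int my_start_read(struct ib_qp *qp, u8 port_num, struct my_io *io,
                         struct scatterlist *sgl, u32 sg_cnt,
                         u64 remote_addr, u32 rkey)
{
        int ret;

        ret = rdma_rw_ctx_init(&io->rw, qp, port_num, sgl, sg_cnt, 0,
                               remote_addr, rkey, DMA_FROM_DEVICE);
        if (ret < 0)
                return ret;

        io->cqe.done = my_rdma_done;
        init_completion(&io->done);

        /* only the final WR carries io->cqe and is signaled */
        return rdma_rw_ctx_post(&io->rw, qp, port_num, &io->cqe, NULL);
}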
  verbs.c
    2631  struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,  in ib_drain_qp_done()
    2648  { .wr_cqe = &sdrain.cqe, },  in __ib_drain_sq()
    2693  rwr.wr_cqe = &rdrain.cqe;  in __ib_drain_rq()
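The drain helpers show another use of the mechanism: ib_drain_qp() posts a marker WR whose typed CQE completes a struct completion once everything queued ahead of it has flushed. A condensed sketch of the send-side idiom, with error handling trimmed, mirroring __ib_drain_sq() above (drain_cqe and my_drain_sq are illustrative names):

#include <linux/completion.h>
#include <rdma/ib_verbs.h>

struct drain_cqe {
        struct ib_cqe           cqe;
        struct completion       done;
};

static void drain_done(struct ib_cq *cq, struct ib_wc *wc)
{
        struct drain_cqe *d =
                container_of(wc->wr_cqe, struct drain_cqe, cqe);

        complete(&d->done);
}

static void my_drain_sq(struct ib_qp *qp)
{
        struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
        struct drain_cqe sdrain;
        struct ib_rdma_wr swr = {};

        swr.wr.opcode = IB_WR_RDMA_WRITE;       /* zero-length marker */
        swr.wr.wr_cqe = &sdrain.cqe;
        sdrain.cqe.done = drain_done;
        init_completion(&sdrain.done);

        /* move the QP to error so the marker flushes behind prior WRs */
        if (ib_modify_qp(qp, &attr, IB_QP_STATE))
                return;
        if (ib_post_send(qp, &swr.wr, NULL))
                return;

        wait_for_completion(&sdrain.done);
}

The mlx5 and mlx4 entries further down reimplement this same pattern in their driver-specific drain paths.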
/Linux-v5.4/drivers/infiniband/ulp/iser/
  iser_initiator.c
    562  struct iser_login_desc *desc = iser_login(wc->wr_cqe);  in iser_login_rsp()
    654  struct iser_rx_desc *desc = iser_rx(wc->wr_cqe);  in iser_task_rsp()
    710  struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);  in iser_ctrl_comp()
    726  struct iser_tx_desc *desc = iser_tx(wc->wr_cqe);  in iser_dataout_comp()
  iser_memory.c
    372  inv_wr->wr_cqe = cqe;  in iser_inv_rkey()
    416  wr->wr.wr_cqe = cqe;  in iser_reg_sig_mr()
    463  wr->wr.wr_cqe = cqe;  in iser_fast_reg_mr()
  iser_verbs.c
    984  wr.wr_cqe = &desc->cqe;  in iser_post_recvl()
    1010  wr->wr_cqe = &rx_desc->cqe;  in iser_post_recvm()
    1049  wr->wr_cqe = &tx_desc->cqe;  in iser_post_send()
/Linux-v5.4/drivers/nvme/target/
  rdma.c
    300  c->wr.wr_cqe = &c->cqe;  in nvmet_rdma_alloc_cmd()
    382  r->send_wr.wr_cqe = &r->send_cqe;  in nvmet_rdma_alloc_rsp()
    537  container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe);  in nvmet_rdma_send_done()
    545  wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);  in nvmet_rdma_send_done()
    585  container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe);  in nvmet_rdma_read_data_done()
    600  wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status);  in nvmet_rdma_read_data_done()
    790  container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe);  in nvmet_rdma_recv_done()
    797  wc->wr_cqe, ib_wc_status_msg(wc->status),  in nvmet_rdma_recv_done()
/Linux-v5.4/fs/cifs/
  smbdirect.c
    270  container_of(wc->wr_cqe, struct smbd_request, cqe);  in send_done()
    486  container_of(wc->wr_cqe, struct smbd_response, cqe);  in recv_done()
    749  send_wr.wr_cqe = &request->cqe;  in smbd_post_send_negotiate_req()
    948  send_wr.wr_cqe = &request->cqe;  in smbd_post_send()
    1106  recv_wr.wr_cqe = &response->cqe;  in smbd_post_recv()
    2249  cqe = wc->wr_cqe;  in register_mr_done()
    2516  reg_wr->wr.wr_cqe = &smbdirect_mr->cqe;  in smbd_register_mr()
    2557  cqe = wc->wr_cqe;  in local_inv_done()
    2584  wr->wr_cqe = &smbdirect_mr->cqe;  in smbd_deregister_mr()
/Linux-v5.4/drivers/infiniband/ulp/isert/
  ib_isert.c
    812  rx_wr->wr_cqe = &rx_desc->rx_cqe;  in isert_post_recvm()
    843  rx_wr.wr_cqe = &rx_desc->rx_cqe;  in isert_post_recv()
    868  send_wr.wr_cqe = &tx_desc->tx_cqe;  in isert_login_post_send()
    945  send_wr->wr_cqe = &tx_desc->tx_cqe;  in isert_init_send_wr()
    977  rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;  in isert_login_post_recv()
    1400  struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);  in isert_recv_done()
    1657  struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);  in isert_rdma_write_done()
    1699  struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe);  in isert_rdma_read_done()
    1775  struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);  in isert_login_send_done()
    1791  struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe);  in isert_send_done()
/Linux-v5.4/drivers/nvme/host/
  rdma.c
    1103  op, wc->wr_cqe,  in nvme_rdma_wr_error()
    1117  container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);  in nvme_rdma_inv_rkey_done()
    1142  wr.wr_cqe = &req->reg_cqe;  in nvme_rdma_inv_rkey()
    1245  req->reg_wr.wr.wr_cqe = &req->reg_cqe;  in nvme_rdma_map_sg_fr()
    1326  container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);  in nvme_rdma_send_done()
    1352  wr.wr_cqe = &qe->cqe;  in nvme_rdma_post_send()
    1385  wr.wr_cqe = &qe->cqe;  in nvme_rdma_post_recv()
    1486  container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);  in nvme_rdma_recv_done()
/Linux-v5.4/drivers/infiniband/hw/mlx5/
  gsi.c
    100  container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe);  in handle_single_completion()
    439  wr->wr.wr_cqe = &gsi_wr->cqe;  in mlx5_ib_add_outstanding_wr()
  mr.c
    809  container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);  in mlx5_ib_umr_done()
    831  umrwr->wr.wr_cqe = &umr_context.cqe;  in mlx5_ib_post_send_wait()
  qp.c
    6378  struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe,  in mlx5_ib_drain_qp_done()
    6444  { .wr_cqe = &sdrain.cqe, },  in mlx5_ib_drain_sq()
    6487  rwr.wr_cqe = &rdrain.cqe;  in mlx5_ib_drain_rq()
/Linux-v5.4/include/trace/events/
  rpcrdma.h
    786  __entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
    1593  __entry->cqe = wc->wr_cqe;
    1630  __entry->cqe = wr->wr_cqe;
    1659  __entry->cqe = wr->wr_cqe;
    1683  __entry->cqe = wc->wr_cqe;
/Linux-v5.4/drivers/infiniband/ulp/srpt/
  ib_srpt.c
    833  wr.wr_cqe = &ioctx->ioctx.cqe;  in srpt_post_recv()
    858  { .wr_cqe = &ch->zw_cqe, },  in srpt_zerolength_write()
    1327  container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe);  in srpt_rdma_read_done()
    1664  container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe);  in srpt_recv_done()
    1725  container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe);  in srpt_send_done()
    2840  send_wr.wr_cqe = &ioctx->ioctx.cqe;  in srpt_queue_response()
/Linux-v5.4/drivers/infiniband/ulp/srp/
  ib_srp.c
    1237  wr.wr_cqe = &req->reg_cqe;  in srp_inv_rkey()
    1575  wr.wr.wr_cqe = &req->reg_cqe;  in srp_map_finish_fr()
    2052  struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);  in srp_send_done()
    2086  wr.wr_cqe = &iu->cqe;  in srp_post_send()
    2108  wr.wr_cqe = &iu->cqe;  in srp_post_recv()
    2250  struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe);  in srp_recv_done()
    2334  wc->wr_cqe);  in srp_handle_qp_err()
/Linux-v5.4/include/rdma/
  ib_verbs.h
    971  struct ib_cqe *wr_cqe;  member
    1328  struct ib_cqe *wr_cqe;  member
    1399  struct ib_cqe *wr_cqe;  member
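The three members above cover both halves of the mechanism: the completion side (struct ib_wc, where wr_cqe is unioned with the legacy wr_id cookie) and the two posting sides (struct ib_send_wr and struct ib_recv_wr). Abridged from this header, with unrelated fields elided:

struct ib_cqe {
        void (*done)(struct ib_cq *cq, struct ib_wc *wc);
};

struct ib_wc {
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;  /* __ib_process_cq() dispatches on this */
        };
        enum ib_wc_status       status;
        /* ... */
};

struct ib_send_wr {
        struct ib_send_wr       *next;
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        /* ... */
};

struct ib_recv_wr {
        struct ib_recv_wr       *next;
        union {
                u64             wr_id;
                struct ib_cqe   *wr_cqe;
        };
        /* ... */
};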
/Linux-v5.4/drivers/infiniband/hw/mlx4/
  qp.c
    4412  struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe,  in mlx4_ib_drain_qp_done()
    4478  { .wr_cqe = &sdrain.cqe, },  in mlx4_ib_drain_sq()
    4521  rwr.wr_cqe = &rdrain.cqe;  in mlx4_ib_drain_rq()