Searched refs:wr_cqe (Results 1 – 25 of 25) sorted by relevance
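
Every hit below is the same completion pattern, introduced with the kernel's ib_cqe CQ abstraction: the submitter embeds a struct ib_cqe in its per-request context, points the work request's wr_cqe field at it before posting, and the completion handler recovers the context from wc->wr_cqe with container_of(). A minimal sketch of the pattern, assuming the hypothetical names my_request, my_send_done() and my_post_send(), which are not taken from any file below:

        #include <rdma/ib_verbs.h>

        /* Hypothetical per-request context; the embedded ib_cqe is what
         * wr_cqe points at, so no lookup table keyed by wr_id is needed. */
        struct my_request {
                struct ib_cqe cqe;
                /* ... driver-private state ... */
        };

        /* Invoked by the CQ dispatch loop (see cq.c below) as
         * wc->wr_cqe->done(cq, wc). */
        static void my_send_done(struct ib_cq *cq, struct ib_wc *wc)
        {
                struct my_request *req =
                        container_of(wc->wr_cqe, struct my_request, cqe);

                if (wc->status != IB_WC_SUCCESS)
                        pr_err("send failed: %s\n",
                               ib_wc_status_msg(wc->status));
                /* ... complete and release req ... */
        }

        static int my_post_send(struct ib_qp *qp, struct my_request *req,
                                struct ib_sge *sge)
        {
                struct ib_send_wr wr = {};

                req->cqe.done = my_send_done;
                wr.wr_cqe = &req->cqe;  /* instead of the legacy wr.wr_id */
                wr.sg_list = sge;
                wr.num_sge = 1;
                wr.opcode = IB_WR_SEND;
                wr.send_flags = IB_SEND_SIGNALED;

                /* The QP's CQs must come from ib_alloc_cq() so that
                 * __ib_process_cq() dispatches the done() callback. */
                return ib_post_send(qp, &wr, NULL);
        }
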

/Linux-v4.19/net/sunrpc/xprtrdma/
frwr_ops.c:312 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_fastreg()
333 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_localinv()
355 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_localinv_wake()
478 frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe; in frwr_op_send()
544 last->wr_cqe = &frwr->fr_cqe; in frwr_op_unmap_sync()
verbs.c:138 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_send()
161 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_receive()
847 sc->sc_wr.wr_cqe = &sc->sc_cqe; in rpcrdma_sendctx_create()
1117 rep->rr_recv_wr.wr_cqe = &rep->rr_cqe; in rpcrdma_create_rep()
1552 trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe); in rpcrdma_post_recvs()
svc_rdma_recvfrom.c:139 ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe; in svc_rdma_recv_ctxt_alloc()
295 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_wc_receive()
svc_rdma_sendto.c:149 ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe; in svc_rdma_send_ctxt_alloc()
261 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_wc_send()
svc_rdma_rw.c:204 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_write_done()
267 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_wc_read_done()
/Linux-v4.19/drivers/infiniband/core/
cq.c:44 if (wc->wr_cqe) in __ib_process_cq()
45 wc->wr_cqe->done(cq, wc); in __ib_process_cq()
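
These two cq.c lines are the dispatch point for every other hit on this page. A sketch of the surrounding poll loop, abridged and not line-for-line (the real __ib_process_cq() also enforces a polling budget):

        #include <rdma/ib_verbs.h>

        /* Each polled work completion either carries a wr_cqe, whose
         * done() callback is invoked directly, or it belongs to a legacy
         * wr_id consumer and had better be a flush error, hence the WARN
         * on success. */
        static int process_cq_sketch(struct ib_cq *cq, struct ib_wc *wcs,
                                     int batch)
        {
                int i, n, completed = 0;

                while ((n = ib_poll_cq(cq, batch, wcs)) > 0) {
                        for (i = 0; i < n; i++) {
                                struct ib_wc *wc = &wcs[i];

                                if (wc->wr_cqe)
                                        wc->wr_cqe->done(cq, wc);
                                else
                                        WARN_ON_ONCE(wc->status ==
                                                     IB_WC_SUCCESS);
                        }
                        completed += n;
                }

                return completed;
        }
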
mad.c:738 wc->wr_cqe = cqe; in build_smp_wc()
874 send_wr->wr.wr_cqe, drslid, in handle_outgoing_dr_smp()
1080 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; in ib_create_send_mad()
1193 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; in ib_send_mad()
2225 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); in ib_mad_recv_done()
2474 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); in ib_mad_send_done()
2558 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); in ib_mad_send_error()
2740 local->mad_send_wr->send_wr.wr.wr_cqe, in local_completions()
2927 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; in ib_mad_post_receive_mads()
rw.c:416 ctx->sig->sig_wr.wr.wr_cqe = NULL; in rdma_rw_ctx_signature_init()
542 last_wr->wr_cqe = cqe; in rdma_rw_ctx_wrs()
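
Note the detail at rw.c line 542: rdma_rw_ctx_wrs() builds a chain of RDMA READ/WRITE work requests but attaches the caller's cqe only to the last one, so a single completion signals the whole chain. A hedged sketch of that idea, using the hypothetical helper post_chain_last_signaled():

        #include <rdma/ib_verbs.h>

        /* Hypothetical helper: first..last is an already-linked WR chain
         * (via wr->next). Only the tail is signaled and only the tail
         * carries the cqe, so one completion covers the whole transfer. */
        static int post_chain_last_signaled(struct ib_qp *qp,
                                            struct ib_send_wr *first,
                                            struct ib_send_wr *last,
                                            struct ib_cqe *cqe)
        {
                last->wr_cqe = cqe;
                last->send_flags |= IB_SEND_SIGNALED;

                return ib_post_send(qp, first, NULL);
        }
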
verbs.c:2467 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe, in ib_drain_qp_done()
2484 { .wr_cqe = &sdrain.cqe, }, in __ib_drain_sq()
2529 rwr.wr_cqe = &rdrain.cqe; in __ib_drain_rq()
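
The verbs.c hits implement the generic QP drain: __ib_drain_sq() and __ib_drain_rq() post a marker WR whose embedded ib_cqe fires a struct completion once everything ahead of it has flushed; the mlx4 and mlx5 qp.c hits at the bottom of this page are per-driver variants of the same trick. A simplified receive-side sketch (drain_rq_sketch() is a hypothetical name; the real helper also special-cases directly polled CQs):

        #include <linux/completion.h>
        #include <rdma/ib_verbs.h>

        struct ib_drain_cqe {
                struct ib_cqe cqe;
                struct completion done;
        };

        static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
        {
                struct ib_drain_cqe *cqe =
                        container_of(wc->wr_cqe, struct ib_drain_cqe, cqe);

                complete(&cqe->done);
        }

        /* Simplified: move the QP to the error state so every posted WR,
         * including the marker below, flushes back through the CQ;
         * waiting on the marker therefore waits for everything ahead
         * of it. */
        static void drain_rq_sketch(struct ib_qp *qp)
        {
                struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
                struct ib_drain_cqe rdrain;
                struct ib_recv_wr rwr = {};

                if (ib_modify_qp(qp, &attr, IB_QP_STATE))
                        return;

                rdrain.cqe.done = ib_drain_qp_done;
                init_completion(&rdrain.done);
                rwr.wr_cqe = &rdrain.cqe;

                if (ib_post_recv(qp, &rwr, NULL))
                        return;

                wait_for_completion(&rdrain.done);
        }
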
/Linux-v4.19/net/9p/
trans_rdma.c:311 container_of(wc->wr_cqe, struct p9_rdma_context, cqe); in recv_done()
361 container_of(wc->wr_cqe, struct p9_rdma_context, cqe); in send_done()
416 wr.wr_cqe = &c->cqe; in post_recv()
508 wr.wr_cqe = &c->cqe; in rdma_request()
/Linux-v4.19/drivers/nvme/target/
rdma.c:295 c->wr.wr_cqe = &c->cqe; in nvmet_rdma_alloc_cmd()
376 r->send_wr.wr_cqe = &r->send_cqe; in nvmet_rdma_alloc_rsp()
531 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe); in nvmet_rdma_send_done()
538 wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); in nvmet_rdma_send_done()
578 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe); in nvmet_rdma_read_data_done()
593 wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); in nvmet_rdma_read_data_done()
771 container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe); in nvmet_rdma_recv_done()
778 wc->wr_cqe, ib_wc_status_msg(wc->status), in nvmet_rdma_recv_done()
/Linux-v4.19/drivers/infiniband/ulp/iser/
iser_initiator.c:562 struct iser_login_desc *desc = iser_login(wc->wr_cqe); in iser_login_rsp()
647 struct iser_rx_desc *desc = iser_rx(wc->wr_cqe); in iser_task_rsp()
703 struct iser_tx_desc *desc = iser_tx(wc->wr_cqe); in iser_ctrl_comp()
719 struct iser_tx_desc *desc = iser_tx(wc->wr_cqe); in iser_dataout_comp()
iser_memory.c:376 inv_wr->wr_cqe = cqe; in iser_inv_rkey()
411 wr->wr.wr_cqe = cqe; in iser_reg_sig_mr()
463 wr->wr.wr_cqe = cqe; in iser_fast_reg_mr()
iser_verbs.c:1033 wr.wr_cqe = &desc->cqe; in iser_post_recvl()
1059 wr->wr_cqe = &rx_desc->cqe; in iser_post_recvm()
1097 wr->wr_cqe = &tx_desc->cqe; in iser_post_send()
/Linux-v4.19/drivers/nvme/host/
rdma.c:1066 op, wc->wr_cqe, in nvme_rdma_wr_error()
1080 container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe); in nvme_rdma_inv_rkey_done()
1105 wr.wr_cqe = &req->reg_cqe; in nvme_rdma_inv_rkey()
1210 req->reg_wr.wr.wr_cqe = &req->reg_cqe; in nvme_rdma_map_sg_fr()
1292 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); in nvme_rdma_send_done()
1318 wr.wr_cqe = &qe->cqe; in nvme_rdma_post_send()
1351 wr.wr_cqe = &qe->cqe; in nvme_rdma_post_recv()
1456 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); in __nvme_rdma_recv_done()
1751 struct ib_cqe *cqe = wc.wr_cqe; in nvme_rdma_poll()
/Linux-v4.19/fs/cifs/
smbdirect.c:366 container_of(wc->wr_cqe, struct smbd_request, cqe); in send_done()
582 container_of(wc->wr_cqe, struct smbd_response, cqe); in recv_done()
845 send_wr.wr_cqe = &request->cqe; in smbd_post_send_negotiate_req()
1044 send_wr.wr_cqe = &request->cqe; in smbd_post_send()
1201 recv_wr.wr_cqe = &response->cqe; in smbd_post_recv()
2275 cqe = wc->wr_cqe; in register_mr_done()
2542 reg_wr->wr.wr_cqe = &smbdirect_mr->cqe; in smbd_register_mr()
2583 cqe = wc->wr_cqe; in local_inv_done()
2610 wr->wr_cqe = &smbdirect_mr->cqe; in smbd_deregister_mr()
/Linux-v4.19/drivers/infiniband/ulp/isert/
ib_isert.c:820 rx_wr->wr_cqe = &rx_desc->rx_cqe; in isert_post_recvm()
851 rx_wr.wr_cqe = &rx_desc->rx_cqe; in isert_post_recv()
876 send_wr.wr_cqe = &tx_desc->tx_cqe; in isert_login_post_send()
953 send_wr->wr_cqe = &tx_desc->tx_cqe; in isert_init_send_wr()
985 rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe; in isert_login_post_recv()
1408 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); in isert_recv_done()
1665 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); in isert_rdma_write_done()
1707 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); in isert_rdma_read_done()
1783 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); in isert_login_send_done()
1799 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); in isert_send_done()
/Linux-v4.19/include/trace/events/
rpcrdma.h:632 __entry->cqe = wc->wr_cqe;
1274 __entry->cqe = wc->wr_cqe;
1311 __entry->cqe = wr->wr_cqe;
1340 __entry->cqe = wr->wr_cqe;
1364 __entry->cqe = wc->wr_cqe;
/Linux-v4.19/drivers/infiniband/hw/mlx5/
gsi.c:100 container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe); in handle_single_completion()
439 wr->wr.wr_cqe = &gsi_wr->cqe; in mlx5_ib_add_outstanding_wr()
mr.c:887 container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe); in mlx5_ib_umr_done()
909 umrwr->wr.wr_cqe = &umr_context.cqe; in mlx5_ib_post_send_wait()
qp.c:5707 struct mlx5_ib_drain_cqe *cqe = container_of(wc->wr_cqe, in mlx5_ib_drain_qp_done()
5773 { .wr_cqe = &sdrain.cqe, }, in mlx5_ib_drain_sq()
5816 rwr.wr_cqe = &rdrain.cqe; in mlx5_ib_drain_rq()
/Linux-v4.19/drivers/infiniband/ulp/srpt/
ib_srpt.c:827 wr.wr_cqe = &ioctx->ioctx.cqe; in srpt_post_recv()
852 { .wr_cqe = &ch->zw_cqe, }, in srpt_zerolength_write()
1287 container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe); in srpt_rdma_read_done()
1625 container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe); in srpt_recv_done()
1685 container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe); in srpt_send_done()
2755 send_wr.wr_cqe = &ioctx->ioctx.cqe; in srpt_queue_response()
/Linux-v4.19/drivers/infiniband/ulp/srp/
ib_srp.c:1220 wr.wr_cqe = &req->reg_cqe; in srp_inv_rkey()
1551 wr.wr.wr_cqe = &req->reg_cqe; in srp_map_finish_fr()
2000 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); in srp_send_done()
2026 wr.wr_cqe = &iu->cqe; in srp_post_send()
2048 wr.wr_cqe = &iu->cqe; in srp_post_recv()
2189 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); in srp_recv_done()
2273 wc->wr_cqe); in srp_handle_qp_err()
/Linux-v4.19/include/rdma/
ib_verbs.h:988 struct ib_cqe *wr_cqe; member
1337 struct ib_cqe *wr_cqe; member
1422 struct ib_cqe *wr_cqe; member
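
These three declarations are what the rest of the page hangs off: wr_cqe is a member of struct ib_wc, struct ib_send_wr and struct ib_recv_wr, in each case sharing a union with the legacy u64 wr_id cookie. Abridged from the v4.19 header, with unrelated fields trimmed:

        struct ib_cqe {
                void (*done)(struct ib_cq *cq, struct ib_wc *wc);
        };

        /* The same union appears in struct ib_wc (line 988 above) and
         * struct ib_recv_wr (line 1422). */
        struct ib_send_wr {
                struct ib_send_wr *next;
                union {
                        u64 wr_id;              /* legacy cookie */
                        struct ib_cqe *wr_cqe;  /* per-WR completion entry */
                };
                struct ib_sge *sg_list;
                int num_sge;
                enum ib_wr_opcode opcode;
                int send_flags;
                /* ... */
        };

Because __ib_process_cq() (cq.c above) warns on any successful completion that lacks a wr_cqe, CQs allocated with ib_alloc_cq() commit their users entirely to the ib_cqe scheme rather than wr_id cookies.
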
/Linux-v4.19/drivers/infiniband/hw/mlx4/
qp.c:4336 struct mlx4_ib_drain_cqe *cqe = container_of(wc->wr_cqe, in mlx4_ib_drain_qp_done()
4402 { .wr_cqe = &sdrain.cqe, }, in mlx4_ib_drain_sq()
4445 rwr.wr_cqe = &rdrain.cqe; in mlx4_ib_drain_rq()