Home
last modified time | relevance | path

Searched refs:wr_cqe (Results 1 – 25 of 29) sorted by relevance

12

/Linux-v5.15/net/sunrpc/xprtrdma/
frwr_ops.c:368 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_fastreg()
406 mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe; in frwr_send()
463 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_localinv()
482 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_localinv_wake()
525 last->wr_cqe = &mr->mr_cqe; in frwr_unmap_sync()
532 last->wr_cqe->done = frwr_wc_localinv; in frwr_unmap_sync()
543 last->wr_cqe->done = frwr_wc_localinv_wake; in frwr_unmap_sync()
578 struct ib_cqe *cqe = wc->wr_cqe; in frwr_wc_localinv_done()
627 last->wr_cqe = &mr->mr_cqe; in frwr_unmap_async()
634 last->wr_cqe->done = frwr_wc_localinv; in frwr_unmap_async()
[all …]
svc_rdma_recvfrom.c:150 ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe; in svc_rdma_recv_ctxt_alloc()
325 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_wc_receive()
svc_rdma_sendto.c:148 ctxt->sc_send_wr.wr_cqe = &ctxt->sc_cqe; in svc_rdma_send_ctxt_alloc()
279 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_wc_send()
verbs.c:154 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_send()
173 struct ib_cqe *cqe = wc->wr_cqe; in rpcrdma_wc_receive()
973 rep->rr_recv_wr.wr_cqe = &rep->rr_cqe; in rpcrdma_rep_create()
svc_rdma_rw.c:263 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_write_done()
320 struct ib_cqe *cqe = wc->wr_cqe; in svc_rdma_wc_read_done()
/Linux-v5.15/drivers/infiniband/ulp/rtrs/
rtrs.c:86 .wr_cqe = &iu->cqe, in rtrs_iu_post_recv()
100 .wr_cqe = cqe, in rtrs_post_recv_empty()
141 .wr_cqe = &iu->cqe, in rtrs_iu_post_send()
163 .wr.wr_cqe = &iu->cqe, in rtrs_iu_post_rdma_write_imm()
199 .wr.wr_cqe = cqe, in rtrs_post_rdma_write_imm_empty()
rtrs-srv.c:250 wr->wr.wr_cqe = &io_comp_cqe; in rdma_write_sg()
278 inv_wr.wr_cqe = &io_comp_cqe; in rdma_write_sg()
289 rwr.wr.wr_cqe = &local_reg_cqe; in rdma_write_sg()
319 imm_wr.wr.wr_cqe = &io_comp_cqe; in rdma_write_sg()
365 inv_wr.wr_cqe = &io_comp_cqe; in send_io_resp_imm()
408 rwr.wr.wr_cqe = &local_reg_cqe; in send_io_resp_imm()
435 imm_wr.wr.wr_cqe = &io_comp_cqe; in send_io_resp_imm()
709 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); in rtrs_srv_info_rsp_done()
842 rwr[mri].wr.wr_cqe = &local_reg_cqe; in process_info_req()
895 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); in rtrs_srv_info_req_done()
[all …]
rtrs-clt.c:349 container_of(wc->wr_cqe, typeof(*req), inv_cqe); in rtrs_clt_inv_rkey_done()
370 .wr_cqe = &req->inv_cqe, in rtrs_inv_rkey()
513 iu = container_of(wc->wr_cqe, struct rtrs_iu, in rtrs_clt_recv_done()
534 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); in rtrs_clt_rkey_rsp_done()
591 wr->wr_cqe = cqe; in rtrs_post_recv_empty_x2()
624 if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done)) in rtrs_clt_rdma_done()
672 WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done); in rtrs_clt_rdma_done()
1128 .wr_cqe = &req->inv_cqe, in rtrs_clt_write_req()
1135 .wr.wr_cqe = &fast_reg_cqe, in rtrs_clt_write_req()
1217 .wr.wr_cqe = &fast_reg_cqe, in rtrs_clt_read_req()
[all …]
/Linux-v5.15/drivers/infiniband/ulp/iser/
iser_memory.c:225 inv_wr->wr_cqe = cqe; in iser_inv_rkey()
269 wr->wr.wr_cqe = cqe; in iser_reg_sig_mr()
316 wr->wr.wr_cqe = cqe; in iser_fast_reg_mr()
iser_initiator.c:566 struct iser_login_desc *desc = iser_login(wc->wr_cqe); in iser_login_rsp()
658 struct iser_rx_desc *desc = iser_rx(wc->wr_cqe); in iser_task_rsp()
714 struct iser_tx_desc *desc = iser_tx(wc->wr_cqe); in iser_ctrl_comp()
730 struct iser_tx_desc *desc = iser_tx(wc->wr_cqe); in iser_dataout_comp()
iser_verbs.c:839 wr.wr_cqe = &desc->cqe; in iser_post_recvl()
865 wr->wr_cqe = &rx_desc->cqe; in iser_post_recvm()
907 wr->wr_cqe = &tx_desc->cqe; in iser_post_send()
/Linux-v5.15/net/9p/
trans_rdma.c:298 container_of(wc->wr_cqe, struct p9_rdma_context, cqe); in recv_done()
349 container_of(wc->wr_cqe, struct p9_rdma_context, cqe); in send_done()
405 wr.wr_cqe = &c->cqe; in post_recv()
497 wr.wr_cqe = &c->cqe; in rdma_request()
/Linux-v5.15/drivers/nvme/target/
rdma.c:341 c->wr.wr_cqe = &c->cqe; in nvmet_rdma_alloc_cmd()
424 r->send_wr.wr_cqe = &r->send_cqe; in nvmet_rdma_alloc_rsp()
702 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe); in nvmet_rdma_send_done()
710 wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); in nvmet_rdma_send_done()
755 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe); in nvmet_rdma_read_data_done()
769 wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); in nvmet_rdma_read_data_done()
788 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe); in nvmet_rdma_write_data_done()
1010 container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe); in nvmet_rdma_recv_done()
1017 wc->wr_cqe, ib_wc_status_msg(wc->status), in nvmet_rdma_recv_done()
/Linux-v5.15/drivers/infiniband/ulp/isert/
ib_isert.c:734 rx_wr->wr_cqe = &rx_desc->rx_cqe; in isert_post_recvm()
765 rx_wr.wr_cqe = &rx_desc->rx_cqe; in isert_post_recv()
790 send_wr.wr_cqe = &tx_desc->tx_cqe; in isert_login_post_send()
867 send_wr->wr_cqe = &tx_desc->tx_cqe; in isert_init_send_wr()
900 rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe; in isert_login_post_recv()
1319 struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe); in isert_recv_done()
1576 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); in isert_rdma_write_done()
1618 struct iser_tx_desc *desc = cqe_to_tx_desc(wc->wr_cqe); in isert_rdma_read_done()
1694 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); in isert_login_send_done()
1710 struct iser_tx_desc *tx_desc = cqe_to_tx_desc(wc->wr_cqe); in isert_send_done()
/Linux-v5.15/drivers/infiniband/core/
cq.c:110 if (wc->wr_cqe) in __ib_process_cq()
111 wc->wr_cqe->done(cq, wc); in __ib_process_cq()
mad.c:554 wc->wr_cqe = cqe; in build_smp_wc()
694 send_wr->wr.wr_cqe, drslid, in handle_outgoing_dr_smp()
898 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; in ib_create_send_mad()
1011 mad_send_wr->send_wr.wr.wr_cqe = &mad_send_wr->mad_list.cqe; in ib_send_mad()
2026 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); in ib_mad_recv_done()
2275 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); in ib_mad_send_done()
2360 container_of(wc->wr_cqe, struct ib_mad_list_head, cqe); in ib_mad_send_error()
2538 local->mad_send_wr->send_wr.wr.wr_cqe, in local_completions()
2718 recv_wr.wr_cqe = &mad_priv->header.mad_list.cqe; in ib_mad_post_receive_mads()
rw.c:437 ctx->reg->reg_wr.wr.wr_cqe = NULL; in rdma_rw_ctx_signature_init()
543 last_wr->wr_cqe = cqe; in rdma_rw_ctx_wrs()
verbs.c:2738 struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe, in ib_drain_qp_done()
2755 { .wr_cqe = &sdrain.cqe, }, in __ib_drain_sq()
2800 rwr.wr_cqe = &rdrain.cqe; in __ib_drain_rq()
/Linux-v5.15/fs/cifs/
smbdirect.c:271 container_of(wc->wr_cqe, struct smbd_request, cqe); in send_done()
447 container_of(wc->wr_cqe, struct smbd_response, cqe); in recv_done()
720 send_wr.wr_cqe = &request->cqe; in smbd_post_send_negotiate_req()
807 send_wr.wr_cqe = &request->cqe; in smbd_post_send()
1058 recv_wr.wr_cqe = &response->cqe; in smbd_post_recv()
2156 cqe = wc->wr_cqe; in register_mr_done()
2415 reg_wr->wr.wr_cqe = &smbdirect_mr->cqe; in smbd_register_mr()
2456 cqe = wc->wr_cqe; in local_inv_done()
2483 wr->wr_cqe = &smbdirect_mr->cqe; in smbd_deregister_mr()
/Linux-v5.15/fs/ksmbd/
transport_rdma.c:529 recvmsg = container_of(wc->wr_cqe, struct smb_direct_recvmsg, cqe); in recv_done()
642 wr.wr_cqe = &recvmsg->cqe; in smb_direct_post_recv()
847 sendmsg = container_of(wc->wr_cqe, struct smb_direct_sendmsg, cqe); in send_done()
948 last->wr.wr_cqe = &last->cqe; in smb_direct_flush_send_list()
1129 msg->wr.wr_cqe = NULL; in post_sendmsg()
1144 msg->wr.wr_cqe = &msg->cqe; in post_sendmsg()
1309 struct smb_direct_rdma_rw_msg *msg = container_of(wc->wr_cqe, in read_write_done()
/Linux-v5.15/drivers/infiniband/hw/mlx5/
gsi.c:72 container_of(wc->wr_cqe, struct mlx5_ib_gsi_wr, cqe); in handle_single_completion()
385 wr->wr.wr_cqe = &gsi_wr->cqe; in mlx5_ib_add_outstanding_wr()
/Linux-v5.15/drivers/nvme/host/
rdma.c:1236 op, wc->wr_cqe, in nvme_rdma_wr_error()
1250 container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe); in nvme_rdma_inv_rkey_done()
1270 wr.wr_cqe = &req->reg_cqe; in nvme_rdma_inv_rkey()
1385 req->reg_wr.wr.wr_cqe = &req->reg_cqe; in nvme_rdma_map_sg_fr()
1490 wr->wr.wr_cqe = &req->reg_cqe; in nvme_rdma_map_sg_pi()
1618 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); in nvme_rdma_send_done()
1640 wr.wr_cqe = &qe->cqe; in nvme_rdma_post_send()
1673 wr.wr_cqe = &qe->cqe; in nvme_rdma_post_recv()
1774 container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe); in nvme_rdma_recv_done()
/Linux-v5.15/drivers/infiniband/ulp/srpt/
ib_srpt.c:834 wr.wr_cqe = &ioctx->ioctx.cqe; in srpt_post_recv()
859 { .wr_cqe = &ch->zw_cqe, }, in srpt_zerolength_write()
1328 container_of(wc->wr_cqe, struct srpt_send_ioctx, rdma_cqe); in srpt_rdma_read_done()
1693 container_of(wc->wr_cqe, struct srpt_recv_ioctx, ioctx.cqe); in srpt_recv_done()
1754 container_of(wc->wr_cqe, struct srpt_send_ioctx, ioctx.cqe); in srpt_send_done()
2873 send_wr.wr_cqe = &ioctx->ioctx.cqe; in srpt_queue_response()
/Linux-v5.15/drivers/infiniband/ulp/srp/
ib_srp.c:1169 wr.wr_cqe = &req->reg_cqe; in srp_inv_rkey()
1474 wr.wr.wr_cqe = &req->reg_cqe; in srp_map_finish_fr()
1869 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); in srp_send_done()
1903 wr.wr_cqe = &iu->cqe; in srp_post_send()
1925 wr.wr_cqe = &iu->cqe; in srp_post_recv()
2063 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); in srp_recv_done()
2147 wc->wr_cqe); in srp_handle_qp_err()
/Linux-v5.15/include/rdma/
ib_verbs.h:994 struct ib_cqe *wr_cqe; member
1359 struct ib_cqe *wr_cqe; member
1430 struct ib_cqe *wr_cqe; member

12