
Searched refs:cm_id (Results 1 – 25 of 43) sorted by relevance

/Linux-v5.4/net/rds/
rdma_transport.c
49 static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id, in rds_rdma_cm_event_handler_cmn() argument
54 struct rds_connection *conn = cm_id->context; in rds_rdma_cm_event_handler_cmn()
60 rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id, in rds_rdma_cm_event_handler_cmn()
63 if (cm_id->device->node_type == RDMA_NODE_IB_CA) in rds_rdma_cm_event_handler_cmn()
85 ret = trans->cm_handle_connect(cm_id, event, isv6); in rds_rdma_cm_event_handler_cmn()
89 rdma_set_service_type(cm_id, conn->c_tos); in rds_rdma_cm_event_handler_cmn()
91 ret = rdma_resolve_route(cm_id, in rds_rdma_cm_event_handler_cmn()
103 if (ibic && ibic->i_cm_id == cm_id) { in rds_rdma_cm_event_handler_cmn()
104 cm_id->route.path_rec[0].sl = in rds_rdma_cm_event_handler_cmn()
106 ret = trans->cm_initiate_connect(cm_id, isv6); in rds_rdma_cm_event_handler_cmn()
[all …]
ib.c
406 struct rdma_cm_id *cm_id; in rds_ib_laddr_check() local
418 cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler, in rds_ib_laddr_check()
420 if (IS_ERR(cm_id)) in rds_ib_laddr_check()
421 return PTR_ERR(cm_id); in rds_ib_laddr_check()
471 ret = rdma_bind_addr(cm_id, sa); in rds_ib_laddr_check()
474 if (ret || !cm_id->device || in rds_ib_laddr_check()
475 cm_id->device->node_type != RDMA_NODE_IB_CA) in rds_ib_laddr_check()
480 cm_id->device ? cm_id->device->node_type : -1); in rds_ib_laddr_check()
483 rdma_destroy_id(cm_id); in rds_ib_laddr_check()
rdma_transport.h
21 int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
23 int rds6_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
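
The rds_ib_laddr_check() excerpt above shows a common probe idiom: create a throwaway RDMA CM ID, bind it to the candidate local address so the core resolves the owning device, inspect that device, then destroy the ID. A minimal sketch of the same pattern (probe_laddr() and its handler are illustrative names, not the RDS code itself):

    #include <linux/err.h>
    #include <net/net_namespace.h>      /* init_net */
    #include <rdma/rdma_cm.h>

    /* The probe ID never connects, so the handler has nothing to do. */
    static int probe_event_handler(struct rdma_cm_id *cm_id,
                                   struct rdma_cm_event *event)
    {
            return 0;
    }

    /* Return 0 if @sa is a local address backed by an IB device. */
    static int probe_laddr(struct sockaddr *sa)
    {
            struct rdma_cm_id *cm_id;
            int ret;

            cm_id = rdma_create_id(&init_net, probe_event_handler, NULL,
                                   RDMA_PS_TCP, IB_QPT_RC);
            if (IS_ERR(cm_id))
                    return PTR_ERR(cm_id);

            /* rdma_bind_addr() fills cm_id->device for a local address. */
            ret = rdma_bind_addr(cm_id, sa);
            if (!ret && (!cm_id->device ||
                         cm_id->device->node_type != RDMA_NODE_IB_CA))
                    ret = -EADDRNOTAVAIL;

            rdma_destroy_id(cm_id);
            return ret;
    }
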
/Linux-v5.4/drivers/infiniband/core/
iwcm.c
97 struct iwcm_id_private *cm_id; member
155 list_add(&work->free_list, &work->cm_id->work_free_list); in put_work()
177 work->cm_id = cm_id_priv; in alloc_work_entries()
222 static void add_ref(struct iw_cm_id *cm_id) in add_ref() argument
225 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); in add_ref()
229 static void rem_ref(struct iw_cm_id *cm_id) in rem_ref() argument
233 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); in rem_ref()
238 static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
305 int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt) in iw_cm_disconnect() argument
312 cm_id_priv = container_of(cm_id, struct iwcm_id_private, id); in iw_cm_disconnect()
[all …]
ucma.c
93 struct rdma_cm_id *cm_id; member
122 struct rdma_cm_id *cm_id; member
140 else if (ctx->file != file || !ctx->cm_id) in _ucma_find_context()
177 if (!ctx->cm_id->device) { in ucma_get_ctx_dev()
188 rdma_destroy_id(uevent_close->cm_id); in ucma_close_event_id()
203 rdma_destroy_id(ctx->cm_id); in ucma_close_id()
301 static void ucma_removal_event_handler(struct rdma_cm_id *cm_id) in ucma_removal_event_handler() argument
303 struct ucma_context *ctx = cm_id->context; in ucma_removal_event_handler()
316 if (ctx->cm_id == cm_id) { in ucma_removal_event_handler()
325 if (con_req_eve->cm_id == cm_id && in ucma_removal_event_handler()
[all …]
cm.c
1013 static void cm_destroy_id(struct ib_cm_id *cm_id, int err) in cm_destroy_id() argument
1018 cm_id_priv = container_of(cm_id, struct cm_id_private, id); in cm_destroy_id()
1021 switch (cm_id->state) { in cm_destroy_id()
1036 cm_id->state = IB_CM_IDLE; in cm_destroy_id()
1053 ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT, in cm_destroy_id()
1065 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, in cm_destroy_id()
1077 ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, in cm_destroy_id()
1084 ib_send_cm_dreq(cm_id, NULL, 0); in cm_destroy_id()
1093 ib_send_cm_drep(cm_id, NULL, 0); in cm_destroy_id()
1109 cm_free_id(cm_id->local_id); in cm_destroy_id()
[all …]
cma.c
159 return id_priv->cm_id.iw; in rdma_iw_cm_id()
1117 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) in rdma_init_qp_attr()
1120 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, in rdma_init_qp_attr()
1126 if (!id_priv->cm_id.iw) { in rdma_init_qp_attr()
1130 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, in rdma_init_qp_attr()
1627 const struct ib_cm_id *cm_id, in cma_find_listener() argument
1639 if (id_priv->id.device == cm_id->device && in cma_find_listener()
1645 if (id_priv_dev->id.device == cm_id->device && in cma_find_listener()
1657 cma_ib_id_from_event(struct ib_cm_id *cm_id, in cma_ib_id_from_event() argument
1718 id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev); in cma_ib_id_from_event()
[all …]
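
add_ref()/rem_ref() in iwcm.c recover the core's private state from the public handle with container_of(), because the iw_cm_id is embedded inside a larger refcounted object. A hedged sketch of that layout (my_id_private is an illustrative stand-in for iwcm_id_private, which actually uses its own completion-based refcounting):

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <rdma/iw_cm.h>

    struct my_id_private {
            struct kref ref;
            struct iw_cm_id id;     /* public handle embedded in the wrapper */
    };

    static void my_release(struct kref *ref)
    {
            kfree(container_of(ref, struct my_id_private, ref));
    }

    static void my_add_ref(struct iw_cm_id *cm_id)
    {
            kref_get(&container_of(cm_id, struct my_id_private, id)->ref);
    }

    static void my_rem_ref(struct iw_cm_id *cm_id)
    {
            kref_put(&container_of(cm_id, struct my_id_private, id)->ref,
                     my_release);
    }
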
/Linux-v5.4/drivers/infiniband/hw/qedr/
qedr_iw_cm.c
113 listener->cm_id->event_handler(listener->cm_id, &event); in qedr_iw_mpa_request()
135 if (ep->cm_id) in qedr_iw_issue_event()
136 ep->cm_id->event_handler(ep->cm_id, &event); in qedr_iw_issue_event()
144 if (ep->cm_id) { in qedr_iw_close_event()
147 ep->cm_id->rem_ref(ep->cm_id); in qedr_iw_close_event()
148 ep->cm_id = NULL; in qedr_iw_close_event()
209 if (ep->cm_id) in qedr_iw_disconnect_worker()
210 ep->cm_id->event_handler(ep->cm_id, &event); in qedr_iw_disconnect_worker()
303 ep->cm_id->rem_ref(ep->cm_id); in qedr_iw_event_handler()
304 ep->cm_id = NULL; in qedr_iw_event_handler()
[all …]
qedr_iw_cm.h
34 int qedr_iw_connect(struct iw_cm_id *cm_id,
37 int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog);
39 int qedr_iw_destroy_listen(struct iw_cm_id *cm_id);
41 int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
43 int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
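
qedr_iw_close_event() and qedr_iw_event_handler() above both follow the same teardown discipline: deliver a final event through the cm_id's event_handler, drop the reference with rem_ref(), and clear the pointer so no further upcalls can race. Sketched against a hypothetical endpoint struct:

    #include <rdma/iw_cm.h>

    struct my_ep {                  /* illustrative, cf. the driver's ep */
            struct iw_cm_id *cm_id;
    };

    static void my_close_upcall(struct my_ep *ep)
    {
            struct iw_cm_event event = {
                    .event  = IW_CM_EVENT_CLOSE,
                    .status = 0,
            };

            if (ep->cm_id) {
                    ep->cm_id->event_handler(ep->cm_id, &event);
                    ep->cm_id->rem_ref(ep->cm_id);  /* drop CM's reference */
                    ep->cm_id = NULL;               /* no more upcalls */
            }
    }
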
/Linux-v5.4/include/rdma/
iw_cm.h
68 typedef int (*iw_cm_handler)(struct iw_cm_id *cm_id,
79 typedef int (*iw_event_handler)(struct iw_cm_id *cm_id,
140 void iw_destroy_cm_id(struct iw_cm_id *cm_id);
153 void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
174 int iw_cm_listen(struct iw_cm_id *cm_id, int backlog);
190 int iw_cm_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param);
204 int iw_cm_reject(struct iw_cm_id *cm_id, const void *private_data,
219 int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param);
231 int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt);
242 int iw_cm_init_qp_attr(struct iw_cm_id *cm_id, struct ib_qp_attr *qp_attr,
ib_cm.h
313 typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id,
350 void ib_destroy_cm_id(struct ib_cm_id *cm_id);
372 int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
408 int ib_send_cm_req(struct ib_cm_id *cm_id,
432 int ib_send_cm_rep(struct ib_cm_id *cm_id,
443 int ib_send_cm_rtu(struct ib_cm_id *cm_id,
456 int ib_send_cm_dreq(struct ib_cm_id *cm_id,
471 int ib_send_cm_drep(struct ib_cm_id *cm_id,
489 int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event);
503 int ib_send_cm_rej(struct ib_cm_id *cm_id,
[all …]
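
The iw_cm.h prototypes above form the iWARP connection state machine as seen by a consumer. A hedged sketch of the passive side, assuming a valid ib_device and a real QP number in place of the placeholder (all my_* names are illustrative):

    #include <linux/err.h>
    #include <rdma/iw_cm.h>

    static int my_conn_handler(struct iw_cm_id *cm_id,
                               struct iw_cm_event *event)
    {
            if (event->event == IW_CM_EVENT_CONNECT_REQUEST) {
                    struct iw_cm_conn_param param = {
                            .qpn = 42,      /* placeholder QP number */
                            .ord = 1,
                            .ird = 1,
                    };

                    /* Accept, or call iw_cm_reject() to refuse. */
                    return iw_cm_accept(cm_id, &param);
            }
            return 0;
    }

    static int my_listen(struct ib_device *dev)
    {
            struct iw_cm_id *cm_id;

            cm_id = iw_create_cm_id(dev, my_conn_handler, NULL);
            if (IS_ERR(cm_id))
                    return PTR_ERR(cm_id);

            /* A real caller fills cm_id->local_addr before listening. */
            return iw_cm_listen(cm_id, 8 /* backlog */);
    }
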
/Linux-v5.4/drivers/nvme/target/
rdma.c
77 struct rdma_cm_id *cm_id; member
467 ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL); in nvmet_rdma_post_recv()
506 rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, in nvmet_rdma_release_rsp()
507 queue->cm_id->port_num, rsp->req.sg, in nvmet_rdma_release_rsp()
554 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_queue_response() local
565 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp, in nvmet_rdma_queue_response()
566 cm_id->port_num, NULL, &rsp->send_wr); in nvmet_rdma_queue_response()
576 if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) { in nvmet_rdma_queue_response()
590 rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp, in nvmet_rdma_read_data_done()
591 queue->cm_id->port_num, rsp->req.sg, in nvmet_rdma_read_data_done()
[all …]
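
nvmet_rdma_post_recv() posts receive work requests straight onto the QP that the RDMA CM attached to queue->cm_id. A minimal sketch of posting one receive for an already-mapped buffer (names illustrative; registration and completion handling elided):

    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    static int my_post_recv(struct rdma_cm_id *cm_id, u64 dma_addr,
                            u32 length, u32 lkey)
    {
            struct ib_sge sge = {
                    .addr   = dma_addr,
                    .length = length,
                    .lkey   = lkey,
            };
            struct ib_recv_wr wr = {
                    .sg_list = &sge,
                    .num_sge = 1,
            };

            /* NULL bad_wr: only the return code matters here. */
            return ib_post_recv(cm_id->qp, &wr, NULL);
    }
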
/Linux-v5.4/drivers/infiniband/hw/cxgb3/
iwch_cm.h
155 struct iw_cm_id *cm_id; member
196 static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id) in to_ep() argument
198 return cm_id->provider_data; in to_ep()
201 static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id) in to_listen_ep() argument
203 return cm_id->provider_data; in to_listen_ep()
217 int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
218 int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
219 int iwch_destroy_listen(struct iw_cm_id *cm_id);
220 int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
221 int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
iwch_cm.c
671 if (ep->com.cm_id) { in close_complete_upcall()
673 ep, ep->com.cm_id, ep->hwtid); in close_complete_upcall()
674 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in close_complete_upcall()
675 ep->com.cm_id->rem_ref(ep->com.cm_id); in close_complete_upcall()
676 ep->com.cm_id = NULL; in close_complete_upcall()
688 if (ep->com.cm_id) { in peer_close_upcall()
690 ep, ep->com.cm_id, ep->hwtid); in peer_close_upcall()
691 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in peer_close_upcall()
703 if (ep->com.cm_id) { in peer_abort_upcall()
705 ep->com.cm_id, ep->hwtid); in peer_abort_upcall()
[all …]
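
iwch_cm.h's to_ep()/to_listen_ep() show the standard way an iWARP driver ties its endpoint to the CM handle: stash it in cm_id->provider_data at connect/accept time and read it back in upcalls. The same idiom, sketched against a hypothetical endpoint type:

    #include <rdma/iw_cm.h>

    struct my_ep;   /* hypothetical driver endpoint */

    static inline struct my_ep *my_to_ep(struct iw_cm_id *cm_id)
    {
            return cm_id->provider_data;    /* set at connect/accept */
    }

    static inline void my_set_ep(struct iw_cm_id *cm_id, struct my_ep *ep)
    {
            cm_id->provider_data = ep;
    }
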
/Linux-v5.4/drivers/infiniband/hw/i40iw/
i40iw_cm.c
233 struct iw_cm_id *cm_id, in i40iw_get_cmevent_info() argument
236 memcpy(&event->local_addr, &cm_id->m_local_addr, in i40iw_get_cmevent_info()
238 memcpy(&event->remote_addr, &cm_id->m_remote_addr, in i40iw_get_cmevent_info()
256 struct iw_cm_id *cm_id, in i40iw_send_cm_event() argument
277 i40iw_get_cmevent_info(cm_node, cm_id, &event); in i40iw_send_cm_event()
291 return cm_id->event_handler(cm_id, &event); in i40iw_send_cm_event()
304 if (!cm_node->cm_id) in i40iw_create_event()
318 event->cm_info.cm_id = cm_node->cm_id; in i40iw_create_event()
610 struct iw_cm_id *cm_id; in i40iw_event_connect_error() local
612 cm_id = event->cm_node->cm_id; in i40iw_event_connect_error()
[all …]
/Linux-v5.4/net/9p/
trans_rdma.c
79 struct rdma_cm_id *cm_id; member
280 rdma_disconnect(rdma->cm_id); in p9_cm_event_handler()
301 ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize, in recv_done()
348 ib_dma_unmap_single(rdma->cm_id->device, in send_done()
376 if (rdma->cm_id && !IS_ERR(rdma->cm_id)) in rdma_destroy_trans()
377 rdma_destroy_id(rdma->cm_id); in rdma_destroy_trans()
389 c->busa = ib_dma_map_single(rdma->cm_id->device, in post_recv()
392 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) in post_recv()
479 c->busa = ib_dma_map_single(rdma->cm_id->device, in rdma_request()
482 if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) { in rdma_request()
[all …]
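
trans_rdma.c maps every buffer against rdma->cm_id->device and checks the mapping before posting. The map/check/unmap trio, sketched for a receive buffer (assumes a connected cm_id whose device pointer is valid):

    #include <linux/dma-mapping.h>
    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    static int my_map_buf(struct rdma_cm_id *cm_id, void *buf, size_t len,
                          u64 *dma_addr)
    {
            *dma_addr = ib_dma_map_single(cm_id->device, buf, len,
                                          DMA_FROM_DEVICE);
            if (ib_dma_mapping_error(cm_id->device, *dma_addr))
                    return -EIO;
            return 0;
    }

    /* Pair each successful map with an unmap of the same size/direction. */
    static void my_unmap_buf(struct rdma_cm_id *cm_id, u64 dma_addr,
                             size_t len)
    {
            ib_dma_unmap_single(cm_id->device, dma_addr, len,
                                DMA_FROM_DEVICE);
    }
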
/Linux-v5.4/drivers/infiniband/hw/cxgb4/
cm.c
152 epc->cm_id->rem_ref(epc->cm_id); in deref_cm_id()
153 epc->cm_id = NULL; in deref_cm_id()
160 epc->cm_id->add_ref(epc->cm_id); in ref_cm_id()
1275 if (ep->com.cm_id) { in close_complete_upcall()
1277 ep, ep->com.cm_id, ep->hwtid); in close_complete_upcall()
1278 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in close_complete_upcall()
1291 if (ep->com.cm_id) { in peer_close_upcall()
1293 ep, ep->com.cm_id, ep->hwtid); in peer_close_upcall()
1294 ep->com.cm_id->event_handler(ep->com.cm_id, &event); in peer_close_upcall()
1307 if (ep->com.cm_id) { in peer_abort_upcall()
[all …]
iw_cxgb4.h
854 struct iw_cm_id *cm_id; member
918 static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) in to_ep() argument
920 return cm_id->provider_data; in to_ep()
923 static inline struct c4iw_listen_ep *to_listen_ep(struct iw_cm_id *cm_id) in to_listen_ep() argument
925 return cm_id->provider_data; in to_listen_ep()
975 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
976 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog);
977 int c4iw_destroy_listen(struct iw_cm_id *cm_id);
978 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
979 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
/Linux-v5.4/drivers/infiniband/ulp/ipoib/
ipoib_cm.c
80 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
274 struct ib_cm_id *cm_id, struct ib_qp *qp, in ipoib_cm_modify_rx_qp() argument
282 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); in ipoib_cm_modify_rx_qp()
293 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); in ipoib_cm_modify_rx_qp()
314 ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask); in ipoib_cm_modify_rx_qp()
347 static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id, in ipoib_cm_nonsrq_init_rx() argument
375 ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0); in ipoib_cm_nonsrq_init_rx()
420 static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id, in ipoib_cm_send_rep() argument
439 return ib_send_cm_rep(cm_id, &rep); in ipoib_cm_send_rep()
442 static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, in ipoib_cm_req_handler() argument
[all …]
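
ipoib_cm_modify_rx_qp() calls ib_cm_init_qp_attr() once per target state; the CM fills in the attributes and mask for the transition, and the caller applies them with ib_modify_qp(). Sketched as a loop over INIT → RTR → RTS (assumes a connected ib_cm_id and its QP):

    #include <linux/kernel.h>       /* ARRAY_SIZE */
    #include <rdma/ib_cm.h>
    #include <rdma/ib_verbs.h>

    static int my_bring_up_qp(struct ib_cm_id *cm_id, struct ib_qp *qp)
    {
            static const enum ib_qp_state states[] = {
                    IB_QPS_INIT, IB_QPS_RTR, IB_QPS_RTS,
            };
            int i, ret;

            for (i = 0; i < ARRAY_SIZE(states); i++) {
                    /* qp_state is the input; the CM derives the rest. */
                    struct ib_qp_attr attr = { .qp_state = states[i] };
                    int mask;

                    ret = ib_cm_init_qp_attr(cm_id, &attr, &mask);
                    if (ret)
                            return ret;
                    ret = ib_modify_qp(qp, &attr, mask);
                    if (ret)
                            return ret;
            }
            return 0;
    }
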
/Linux-v5.4/drivers/infiniband/sw/siw/
siw_cm.c
321 id = cep->listen_cep->cm_id; in siw_cm_upcall()
323 id = cep->cm_id; in siw_cm_upcall()
396 if (cep->cm_id) { in siw_qp_cm_drop()
416 cep->cm_id->rem_ref(cep->cm_id); in siw_qp_cm_drop()
417 cep->cm_id = NULL; in siw_qp_cm_drop()
1069 if (cep->cm_id) in siw_cm_work_handler()
1076 if (cep->cm_id) { in siw_cm_work_handler()
1125 if (cep->cm_id) in siw_cm_work_handler()
1176 if (cep->cm_id) { in siw_cm_work_handler()
1177 cep->cm_id->rem_ref(cep->cm_id); in siw_cm_work_handler()
[all …]
/Linux-v5.4/drivers/nvme/host/
rdma.c
85 struct rdma_cm_id *cm_id; member
138 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
273 ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr); in nvme_rdma_create_qp()
275 queue->qp = queue->cm_id->qp; in nvme_rdma_create_qp()
354 nvme_rdma_find_get_device(struct rdma_cm_id *cm_id) in nvme_rdma_find_get_device() argument
360 if (ndev->dev->node_guid == cm_id->device->node_guid && in nvme_rdma_find_get_device()
369 ndev->dev = cm_id->device; in nvme_rdma_find_get_device()
442 queue->device = nvme_rdma_find_get_device(queue->cm_id); in nvme_rdma_create_queue_ib()
444 dev_err(queue->cm_id->device->dev.parent, in nvme_rdma_create_queue_ib()
507 rdma_destroy_qp(queue->cm_id); in nvme_rdma_create_queue_ib()
[all …]
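
nvme_rdma_create_qp() lets the CM own the queue pair: rdma_create_qp() attaches a fresh QP to the cm_id, and the driver simply caches cm_id->qp. A sketch with placeholder sizing (the PD and CQ are assumed to exist already):

    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    static int my_create_qp(struct rdma_cm_id *cm_id, struct ib_pd *pd,
                            struct ib_cq *cq, struct ib_qp **qp_out)
    {
            struct ib_qp_init_attr init_attr = {
                    .qp_type = IB_QPT_RC,
                    .send_cq = cq,
                    .recv_cq = cq,
                    .cap = {
                            .max_send_wr  = 32,     /* placeholder sizing */
                            .max_recv_wr  = 32,
                            .max_send_sge = 1,
                            .max_recv_sge = 1,
                    },
            };
            int ret;

            ret = rdma_create_qp(cm_id, pd, &init_attr);
            if (ret)
                    return ret;

            *qp_out = cm_id->qp;    /* the CM owns it; keep a pointer */
            return 0;
    }
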
/Linux-v5.4/drivers/infiniband/ulp/srpt/
ib_srpt.c
226 rdma_notify(ch->rdma_cm.cm_id, event->event); in srpt_qp_event()
228 ib_cm_notify(ch->ib_cm.cm_id, event->event); in srpt_qp_event()
1159 ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask); in srpt_ch_qp_rtr()
1189 ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask); in srpt_ch_qp_rts()
1809 ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init); in srpt_create_ch_ib()
1810 ch->qp = ch->rdma_cm.cm_id->qp; in srpt_create_ch_ib()
1921 ret = rdma_disconnect(ch->rdma_cm.cm_id); in srpt_disconnect_ch()
1923 ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0); in srpt_disconnect_ch()
1925 ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0); in srpt_disconnect_ch()
2086 rdma_destroy_id(ch->rdma_cm.cm_id); in srpt_release_channel_work()
[all …]
ib_srpt.h
299 struct ib_cm_id *cm_id; member
302 struct rdma_cm_id *cm_id; member
427 struct ib_cm_id *cm_id; member
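
ib_srpt.h keeps both flavors of CM ID per channel, and srpt_disconnect_ch() branches on which CM set the connection up. A sketch of that dispatch (my_ch and the using_rdma_cm flag are illustrative; srpt tracks this through its own per-channel state):

    #include <rdma/ib_cm.h>
    #include <rdma/rdma_cm.h>

    struct my_ch {
            bool using_rdma_cm;
            union {
                    struct ib_cm_id   *ib_cm_id;
                    struct rdma_cm_id *rdma_cm_id;
            };
    };

    static int my_disconnect(struct my_ch *ch)
    {
            if (ch->using_rdma_cm)
                    return rdma_disconnect(ch->rdma_cm_id);

            /* Legacy IB CM: send DREQ; the DREP completes teardown. */
            return ib_send_cm_dreq(ch->ib_cm_id, NULL, 0);
    }
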
/Linux-v5.4/drivers/infiniband/ulp/isert/
ib_isert.c
74 rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST); in isert_qp_event_callback()
517 isert_conn->cm_id = cma_id; in isert_connect_request()
570 if (isert_conn->cm_id && in isert_connect_release()
572 rdma_destroy_id(isert_conn->cm_id); in isert_connect_release()
634 struct isert_np *isert_np = isert_conn->cm_id->context; in isert_handle_unbound_conn()
672 err = rdma_disconnect(isert_conn->cm_id); in isert_conn_terminate()
687 isert_np->cm_id = NULL; in isert_np_cma_handler()
690 isert_np->cm_id = isert_setup_id(isert_np); in isert_np_cma_handler()
691 if (IS_ERR(isert_np->cm_id)) { in isert_np_cma_handler()
693 isert_np, PTR_ERR(isert_np->cm_id)); in isert_np_cma_handler()
[all …]
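
isert_qp_event_callback() feeds the IB_EVENT_COMM_EST QP event back into the CM with rdma_notify(), letting the CM finish connection establishment even if the RTU was lost. A sketch, assuming the cm_id was stored as the QP's context (an assumption of this sketch, not necessarily isert's layout):

    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    static void my_qp_event_handler(struct ib_event *event, void *context)
    {
            struct rdma_cm_id *cm_id = context;  /* assumed qp_context */

            if (event->event == IB_EVENT_COMM_EST)
                    rdma_notify(cm_id, IB_EVENT_COMM_EST);
    }
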
/Linux-v5.4/drivers/infiniband/hw/mlx4/
cm.c
75 static void set_local_comm_id(struct ib_mad *mad, u32 cm_id) in set_local_comm_id() argument
80 msg->request_id = cpu_to_be32(cm_id); in set_local_comm_id()
86 msg->local_comm_id = cpu_to_be32(cm_id); in set_local_comm_id()
105 static void set_remote_comm_id(struct ib_mad *mad, u32 cm_id) in set_remote_comm_id() argument
110 msg->request_id = cpu_to_be32(cm_id); in set_remote_comm_id()
116 msg->remote_comm_id = cpu_to_be32(cm_id); in set_remote_comm_id()
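
mlx4's CM shim writes the communication ID straight into the MAD payload, which is big-endian on the wire; hence cpu_to_be32() on every store. The conversion, sketched over a hypothetical message layout:

    #include <linux/types.h>
    #include <asm/byteorder.h>

    struct my_cm_msg {              /* hypothetical wire layout */
            __be32 local_comm_id;
            __be32 remote_comm_id;
    };

    static void my_set_local_comm_id(struct my_cm_msg *msg, u32 cm_id)
    {
            /* CM MADs are big-endian on the wire. */
            msg->local_comm_id = cpu_to_be32(cm_id);
    }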
