/Linux-v4.19/drivers/infiniband/core/ |
D | iwcm.c |
    96   struct iwcm_id_private *cm_id;  member
    154  list_add(&work->free_list, &work->cm_id->work_free_list);  in put_work()
    176  work->cm_id = cm_id_priv;  in alloc_work_entries()
    221  static void add_ref(struct iw_cm_id *cm_id)  in add_ref() argument
    224  cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);  in add_ref()
    228  static void rem_ref(struct iw_cm_id *cm_id)  in rem_ref() argument
    232  cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);  in rem_ref()
    237  static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);
    304  int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)  in iw_cm_disconnect() argument
    311  cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);  in iw_cm_disconnect()
    [all …]
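The add_ref()/rem_ref() hits above show this file's core idiom: the public struct iw_cm_id is embedded in a private iwcm_id_private and recovered with container_of(). A minimal sketch of that idiom, assuming an illustrative private layout (only the embedded id mirrors the real struct):

    #include <linux/atomic.h>
    #include <linux/kernel.h>
    #include <rdma/iw_cm.h>

    /* illustrative stand-in for iwcm_id_private */
    struct iwcm_id_private_sketch {
            struct iw_cm_id id;     /* embedded public handle */
            atomic_t refcount;      /* illustrative private state */
    };

    static void add_ref_sketch(struct iw_cm_id *cm_id)
    {
            /* recover the enclosing private struct from the embedded member */
            struct iwcm_id_private_sketch *priv =
                    container_of(cm_id, struct iwcm_id_private_sketch, id);

            atomic_inc(&priv->refcount);
    }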
|
D | ucma.c |
    91   struct rdma_cm_id *cm_id;  member
    120  struct rdma_cm_id *cm_id;  member
    139  else if (ctx->file != file || !ctx->cm_id)  in _ucma_find_context()
    176  if (!ctx->cm_id->device) {  in ucma_get_ctx_dev()
    187  rdma_destroy_id(uevent_close->cm_id);  in ucma_close_event_id()
    202  rdma_destroy_id(ctx->cm_id);  in ucma_close_id()
    306  static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)  in ucma_removal_event_handler() argument
    308  struct ucma_context *ctx = cm_id->context;  in ucma_removal_event_handler()
    321  if (ctx->cm_id == cm_id) {  in ucma_removal_event_handler()
    330  if (con_req_eve->cm_id == cm_id &&  in ucma_removal_event_handler()
    [all …]
|
D | cm.c |
    1016  static void cm_destroy_id(struct ib_cm_id *cm_id, int err)  in cm_destroy_id() argument
    1021  cm_id_priv = container_of(cm_id, struct cm_id_private, id);  in cm_destroy_id()
    1024  switch (cm_id->state) {  in cm_destroy_id()
    1039  cm_id->state = IB_CM_IDLE;  in cm_destroy_id()
    1056  ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,  in cm_destroy_id()
    1068  ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,  in cm_destroy_id()
    1080  ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,  in cm_destroy_id()
    1087  ib_send_cm_dreq(cm_id, NULL, 0);  in cm_destroy_id()
    1096  ib_send_cm_drep(cm_id, NULL, 0);  in cm_destroy_id()
    1112  cm_free_id(cm_id->local_id);  in cm_destroy_id()
    [all …]
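The excerpt shows cm_destroy_id() switching on cm_id->state and answering with a REJ, DREQ, or DREP before the ID is freed. A condensed sketch of that teardown (a subset of the real states; locking, MAD cleanup, and error handling elided):

    #include <rdma/ib_cm.h>

    static void cm_destroy_id_sketch(struct ib_cm_id *cm_id)
    {
            switch (cm_id->state) {
            case IB_CM_REQ_RCVD:
                    /* connection was never accepted: reject it */
                    ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                   NULL, 0, NULL, 0);
                    break;
            case IB_CM_ESTABLISHED:
                    /* live connection: start the disconnect handshake */
                    ib_send_cm_dreq(cm_id, NULL, 0);
                    break;
            case IB_CM_DREQ_RCVD:
                    /* peer already asked to disconnect: reply */
                    ib_send_cm_drep(cm_id, NULL, 0);
                    break;
            default:
                    break;
            }
    }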
|
D | ucm.c |
    88   struct ib_cm_id *cm_id;  member
    100  struct ib_cm_id *cm_id;  member
    176  ib_destroy_cm_id(uevent->cm_id);  in ib_ucm_cleanup_events()
    355  static int ib_ucm_event_handler(struct ib_cm_id *cm_id,  in ib_ucm_event_handler() argument
    362  ctx = cm_id->context;  in ib_ucm_event_handler()
    369  uevent->cm_id = cm_id;  in ib_ucm_event_handler()
    430  ctx->cm_id = uevent->cm_id;  in ib_ucm_event()
    431  ctx->cm_id->context = ctx;  in ib_ucm_event()
    499  ctx->cm_id = ib_create_cm_id(file->device->ib_dev,  in ib_ucm_create_id()
    501  if (IS_ERR(ctx->cm_id)) {  in ib_ucm_create_id()
    [all …]
|
D | cma.c |
    159   return id_priv->cm_id.iw;  in rdma_iw_cm_id()
    1021  if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))  in rdma_init_qp_attr()
    1024  ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,  in rdma_init_qp_attr()
    1030  if (!id_priv->cm_id.iw) {  in rdma_init_qp_attr()
    1034  ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,  in rdma_init_qp_attr()
    1490  const struct ib_cm_id *cm_id,  in cma_find_listener() argument
    1502  if (id_priv->id.device == cm_id->device &&  in cma_find_listener()
    1508  if (id_priv_dev->id.device == cm_id->device &&  in cma_find_listener()
    1519  cma_ib_id_from_event(struct ib_cm_id *cm_id,  in cma_ib_id_from_event() argument
    1580  id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);  in cma_ib_id_from_event()
    [all …]
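rdma_init_qp_attr() illustrates how the rdma_cm keeps a union of transport-specific IDs (id_priv->cm_id.ib or id_priv->cm_id.iw) and forwards QP-attribute queries to whichever CM backs the connection. A simplified sketch of that dispatch (the is_iwarp flag stands in for the real per-port transport check):

    #include <rdma/ib_cm.h>
    #include <rdma/iw_cm.h>

    static int init_qp_attr_sketch(struct ib_cm_id *ib_id, struct iw_cm_id *iw_id,
                                   bool is_iwarp, struct ib_qp_attr *qp_attr,
                                   int *qp_attr_mask)
    {
            /* forward to whichever CM owns this connection */
            if (is_iwarp)
                    return iw_cm_init_qp_attr(iw_id, qp_attr, qp_attr_mask);
            return ib_cm_init_qp_attr(ib_id, qp_attr, qp_attr_mask);
    }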
|
D | nldev.c |
    432  struct rdma_cm_id *cm_id = &id_priv->id;  in fill_res_cm_id_entry() local
    435  if (port && port != cm_id->port_num)  in fill_res_cm_id_entry()
    442  if (cm_id->port_num &&  in fill_res_cm_id_entry()
    443  nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))  in fill_res_cm_id_entry()
    449  if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))  in fill_res_cm_id_entry()
    453  if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))  in fill_res_cm_id_entry()
    459  if (cm_id->route.addr.src_addr.ss_family &&  in fill_res_cm_id_entry()
    461  sizeof(cm_id->route.addr.src_addr),  in fill_res_cm_id_entry()
    462  &cm_id->route.addr.src_addr))  in fill_res_cm_id_entry()
    464  if (cm_id->route.addr.dst_addr.ss_family &&  in fill_res_cm_id_entry()
    [all …]
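fill_res_cm_id_entry() follows the usual netlink fill pattern: each cm_id attribute is emitted with nla_put_*() and the first failure aborts the message. A trimmed sketch using the same attributes as the excerpt:

    #include <linux/errno.h>
    #include <net/netlink.h>
    #include <rdma/rdma_cm.h>
    #include <rdma/rdma_netlink.h>

    static int fill_cm_id_attrs_sketch(struct sk_buff *msg,
                                       struct rdma_cm_id *cm_id)
    {
            if (cm_id->port_num &&
                nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, cm_id->port_num))
                    return -EMSGSIZE;
            if (nla_put_u8(msg, RDMA_NLDEV_ATTR_RES_TYPE, cm_id->qp_type))
                    return -EMSGSIZE;
            if (nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_PS, cm_id->ps))
                    return -EMSGSIZE;
            return 0;
    }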
|
/Linux-v4.19/net/rds/ |
D | rdma_transport.c |
    46   static int rds_rdma_cm_event_handler_cmn(struct rdma_cm_id *cm_id,  in rds_rdma_cm_event_handler_cmn() argument
    51   struct rds_connection *conn = cm_id->context;  in rds_rdma_cm_event_handler_cmn()
    55   rdsdebug("conn %p id %p handling event %u (%s)\n", conn, cm_id,  in rds_rdma_cm_event_handler_cmn()
    58   if (cm_id->device->node_type == RDMA_NODE_IB_CA)  in rds_rdma_cm_event_handler_cmn()
    80   ret = trans->cm_handle_connect(cm_id, event, isv6);  in rds_rdma_cm_event_handler_cmn()
    85   ret = rdma_resolve_route(cm_id,  in rds_rdma_cm_event_handler_cmn()
    97   if (ibic && ibic->i_cm_id == cm_id)  in rds_rdma_cm_event_handler_cmn()
    98   ret = trans->cm_initiate_connect(cm_id, isv6);  in rds_rdma_cm_event_handler_cmn()
    110  rdma_reject_msg(cm_id, event->status));  in rds_rdma_cm_event_handler_cmn()
    148  rdsdebug("id %p event %u (%s) handling ret %d\n", cm_id, event->event,  in rds_rdma_cm_event_handler_cmn()
    [all …]
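rds_rdma_cm_event_handler_cmn() is a typical rdma_cm event handler: it recovers its connection from cm_id->context, dispatches on event->event, and continues setup with rdma_resolve_route(). A stripped-down sketch (handler name and timeout are illustrative; returning non-zero from an rdma_cm handler destroys the id):

    #include <linux/printk.h>
    #include <rdma/rdma_cm.h>

    static int rds_like_handler_sketch(struct rdma_cm_id *cm_id,
                                       struct rdma_cm_event *event)
    {
            switch (event->event) {
            case RDMA_CM_EVENT_ADDR_RESOLVED:
                    /* next step of connection setup: resolve the route */
                    return rdma_resolve_route(cm_id, 5000 /* ms, illustrative */);
            case RDMA_CM_EVENT_REJECTED:
                    pr_debug("rejected: %s\n",
                             rdma_reject_msg(cm_id, event->status));
                    return 0;
            default:
                    return 0;
            }
    }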
|
D | ib.c |
    399  struct rdma_cm_id *cm_id;  in rds_ib_laddr_check() local
    411  cm_id = rdma_create_id(&init_net, rds_rdma_cm_event_handler,  in rds_ib_laddr_check()
    413  if (IS_ERR(cm_id))  in rds_ib_laddr_check()
    414  return PTR_ERR(cm_id);  in rds_ib_laddr_check()
    464  ret = rdma_bind_addr(cm_id, sa);  in rds_ib_laddr_check()
    467  if (ret || !cm_id->device ||  in rds_ib_laddr_check()
    468  cm_id->device->node_type != RDMA_NODE_IB_CA)  in rds_ib_laddr_check()
    473  cm_id->device ? cm_id->device->node_type : -1);  in rds_ib_laddr_check()
    476  rdma_destroy_id(cm_id);  in rds_ib_laddr_check()
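rds_ib_laddr_check() demonstrates a common probe: bind a throwaway cm_id to a candidate local address, inspect the device the bind resolved to, then destroy the id. A sketch under the same assumptions (dummy handler, IPv4/IPv6 handling elided):

    #include <linux/err.h>
    #include <linux/errno.h>
    #include <net/net_namespace.h>
    #include <rdma/rdma_cm.h>

    static int probe_handler_sketch(struct rdma_cm_id *cm_id,
                                    struct rdma_cm_event *event)
    {
            return 0;       /* the probe never progresses past bind */
    }

    static int laddr_is_ib_sketch(struct sockaddr *sa)
    {
            struct rdma_cm_id *cm_id;
            int ret;

            cm_id = rdma_create_id(&init_net, probe_handler_sketch, NULL,
                                   RDMA_PS_TCP, IB_QPT_RC);
            if (IS_ERR(cm_id))
                    return PTR_ERR(cm_id);

            ret = rdma_bind_addr(cm_id, sa);
            /* the address is usable only if it bound to an IB CA port */
            if (!ret && (!cm_id->device ||
                         cm_id->device->node_type != RDMA_NODE_IB_CA))
                    ret = -EADDRNOTAVAIL;

            rdma_destroy_id(cm_id);
            return ret;
    }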
|
D | rdma_transport.h |
    15  int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
    17  int rds6_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
|
/Linux-v4.19/include/rdma/ |
D | iw_cm.h |
    68   typedef int (*iw_cm_handler)(struct iw_cm_id *cm_id,
    79   typedef int (*iw_event_handler)(struct iw_cm_id *cm_id,
    116  int (*connect)(struct iw_cm_id *cm_id,
    119  int (*accept)(struct iw_cm_id *cm_id,
    122  int (*reject)(struct iw_cm_id *cm_id,
    125  int (*create_listen)(struct iw_cm_id *cm_id,
    128  int (*destroy_listen)(struct iw_cm_id *cm_id);
    151  void iw_destroy_cm_id(struct iw_cm_id *cm_id);
    164  void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
    185  int iw_cm_listen(struct iw_cm_id *cm_id, int backlog);
    [all …]
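Taken together, these declarations form the consumer-facing half of the iWARP CM. A hedged sketch of a passive-side consumer, assuming a placeholder handler:

    #include <linux/err.h>
    #include <rdma/iw_cm.h>

    static int listen_handler_sketch(struct iw_cm_id *cm_id,
                                     struct iw_cm_event *event)
    {
            switch (event->event) {
            case IW_CM_EVENT_CONNECT_REQUEST:
                    /* a real consumer calls iw_cm_accept() or iw_cm_reject() */
                    return 0;
            default:
                    return 0;
            }
    }

    static int start_listen_sketch(struct ib_device *dev)
    {
            struct iw_cm_id *cm_id = iw_create_cm_id(dev, listen_handler_sketch,
                                                     NULL);

            if (IS_ERR(cm_id))
                    return PTR_ERR(cm_id);
            return iw_cm_listen(cm_id, 8 /* backlog, illustrative */);
    }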
|
D | ib_cm.h |
    313  typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id,
    350  void ib_destroy_cm_id(struct ib_cm_id *cm_id);
    372  int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id,
    408  int ib_send_cm_req(struct ib_cm_id *cm_id,
    432  int ib_send_cm_rep(struct ib_cm_id *cm_id,
    443  int ib_send_cm_rtu(struct ib_cm_id *cm_id,
    456  int ib_send_cm_dreq(struct ib_cm_id *cm_id,
    471  int ib_send_cm_drep(struct ib_cm_id *cm_id,
    489  int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event);
    503  int ib_send_cm_rej(struct ib_cm_id *cm_id,
    [all …]
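The same shape applies to the InfiniBand CM declared here: create an ID with a handler, then listen on a service ID. A minimal sketch (the handler is a placeholder; returning non-zero from it makes the CM destroy the ID):

    #include <linux/err.h>
    #include <rdma/ib_cm.h>

    static int cm_handler_sketch(struct ib_cm_id *cm_id,
                                 struct ib_cm_event *event)
    {
            return 0;       /* placeholder consumer logic */
    }

    static int listen_sketch(struct ib_device *dev, __be64 service_id)
    {
            struct ib_cm_id *cm_id = ib_create_cm_id(dev, cm_handler_sketch,
                                                     NULL);

            if (IS_ERR(cm_id))
                    return PTR_ERR(cm_id);
            /* a service_mask of 0 means all bits of service_id are significant */
            return ib_cm_listen(cm_id, service_id, 0);
    }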
|
/Linux-v4.19/drivers/infiniband/hw/qedr/ |
D | qedr_iw_cm.c |
    113  listener->cm_id->event_handler(listener->cm_id, &event);  in qedr_iw_mpa_request()
    135  if (ep->cm_id)  in qedr_iw_issue_event()
    136  ep->cm_id->event_handler(ep->cm_id, &event);  in qedr_iw_issue_event()
    144  if (ep->cm_id) {  in qedr_iw_close_event()
    147  ep->cm_id->rem_ref(ep->cm_id);  in qedr_iw_close_event()
    148  ep->cm_id = NULL;  in qedr_iw_close_event()
    209  if (ep->cm_id)  in qedr_iw_disconnect_worker()
    210  ep->cm_id->event_handler(ep->cm_id, &event);  in qedr_iw_disconnect_worker()
    303  ep->cm_id->rem_ref(ep->cm_id);  in qedr_iw_event_handler()
    304  ep->cm_id = NULL;  in qedr_iw_event_handler()
    [all …]
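qedr_iw_close_event() shows the standard iWARP upcall-and-release sequence: deliver the event through the consumer's event_handler, then drop the driver's reference and clear the pointer. A hedged sketch (the ep type is illustrative):

    #include <rdma/iw_cm.h>

    struct qedr_ep_sketch {
            struct iw_cm_id *cm_id;
    };

    static void issue_close_sketch(struct qedr_ep_sketch *ep,
                                   struct iw_cm_event *event)
    {
            if (ep->cm_id) {
                    ep->cm_id->event_handler(ep->cm_id, event);
                    ep->cm_id->rem_ref(ep->cm_id); /* drop driver reference */
                    ep->cm_id = NULL;
            }
    }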
|
D | qedr_iw_cm.h |
    34  int qedr_iw_connect(struct iw_cm_id *cm_id,
    37  int qedr_iw_create_listen(struct iw_cm_id *cm_id, int backlog);
    39  int qedr_iw_destroy_listen(struct iw_cm_id *cm_id);
    41  int qedr_iw_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
    43  int qedr_iw_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
|
/Linux-v4.19/drivers/infiniband/hw/nes/ |
D | nes_cm.c |
    179  if (!cm_node->cm_id)  in create_event()
    194  event->cm_info.cm_id = cm_node->cm_id;  in create_event()
    778  struct iw_cm_id *cm_id = cm_node->cm_id;  in nes_retrans_expired() local
    789  if (cm_node->cm_id)  in nes_retrans_expired()
    790  cm_id->rem_ref(cm_id);  in nes_retrans_expired()
    803  struct iw_cm_id *cm_id = cm_node->cm_id;  in handle_recv_entry() local
    812  if (nesqp->cm_id) {  in handle_recv_entry()
    816  "to do!!!\n", nesqp->hwqp.qp_id, cm_id,  in handle_recv_entry()
    828  "to do!!!\n", nesqp->hwqp.qp_id, cm_id,  in handle_recv_entry()
    835  if (cm_node->cm_id)  in handle_recv_entry()
    [all …]
|
/Linux-v4.19/drivers/nvme/target/ |
D | rdma.c |
    85   struct rdma_cm_id *cm_id;  member
    461  ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);  in nvmet_rdma_post_recv()
    500  rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,  in nvmet_rdma_release_rsp()
    501  queue->cm_id->port_num, rsp->req.sg,  in nvmet_rdma_release_rsp()
    547  struct rdma_cm_id *cm_id = rsp->queue->cm_id;  in nvmet_rdma_queue_response() local
    558  first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp,  in nvmet_rdma_queue_response()
    559  cm_id->port_num, NULL, &rsp->send_wr);  in nvmet_rdma_queue_response()
    569  if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) {  in nvmet_rdma_queue_response()
    583  rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,  in nvmet_rdma_read_data_done()
    584  queue->cm_id->port_num, rsp->req.sg,  in nvmet_rdma_read_data_done()
    [all …]
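nvmet_rdma_queue_response() shows how the target drives I/O through the QP owned by the cm_id: any RDMA READ/WRITE work requests built by the rw context are chained ahead of the response send WR, and the whole chain is posted at once. A condensed sketch (completion handling and error paths elided):

    #include <rdma/rdma_cm.h>
    #include <rdma/rw.h>

    static int post_response_sketch(struct rdma_cm_id *cm_id,
                                    struct rdma_rw_ctx *rw,
                                    struct ib_send_wr *send_wr)
    {
            struct ib_send_wr *first_wr;

            /* chain rdma-rw WRs (if any) in front of the response send WR */
            first_wr = rdma_rw_ctx_wrs(rw, cm_id->qp, cm_id->port_num,
                                       NULL, send_wr);
            return ib_post_send(cm_id->qp, first_wr, NULL);
    }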
|
/Linux-v4.19/drivers/infiniband/hw/cxgb3/ |
D | iwch_cm.h |
    155  struct iw_cm_id *cm_id;  member
    196  static inline struct iwch_ep *to_ep(struct iw_cm_id *cm_id)  in to_ep() argument
    198  return cm_id->provider_data;  in to_ep()
    201  static inline struct iwch_listen_ep *to_listen_ep(struct iw_cm_id *cm_id)  in to_listen_ep() argument
    203  return cm_id->provider_data;  in to_listen_ep()
    217  int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
    218  int iwch_create_listen(struct iw_cm_id *cm_id, int backlog);
    219  int iwch_destroy_listen(struct iw_cm_id *cm_id);
    220  int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
    221  int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
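to_ep()/to_listen_ep() above recover the driver endpoint from cm_id->provider_data. The sketch below shows the other half of that contract, inferred from the upcall and rem_ref lines in iwch_cm.c: the driver stashes its endpoint and takes a reference when it binds a cm_id (the ep type is illustrative):

    #include <rdma/iw_cm.h>

    struct iwch_ep_sketch {
            struct iw_cm_id *cm_id;         /* illustrative endpoint state */
    };

    static void bind_ep_sketch(struct iw_cm_id *cm_id, struct iwch_ep_sketch *ep)
    {
            ep->cm_id = cm_id;
            cm_id->provider_data = ep;      /* what to_ep() later reads back */
            cm_id->add_ref(cm_id);          /* paired with rem_ref() at teardown */
    }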
|
D | iwch_cm.c |
    671  if (ep->com.cm_id) {  in close_complete_upcall()
    673  ep, ep->com.cm_id, ep->hwtid);  in close_complete_upcall()
    674  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  in close_complete_upcall()
    675  ep->com.cm_id->rem_ref(ep->com.cm_id);  in close_complete_upcall()
    676  ep->com.cm_id = NULL;  in close_complete_upcall()
    688  if (ep->com.cm_id) {  in peer_close_upcall()
    690  ep, ep->com.cm_id, ep->hwtid);  in peer_close_upcall()
    691  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  in peer_close_upcall()
    703  if (ep->com.cm_id) {  in peer_abort_upcall()
    705  ep->com.cm_id, ep->hwtid);  in peer_abort_upcall()
    [all …]
|
/Linux-v4.19/drivers/infiniband/hw/i40iw/ |
D | i40iw_cm.c |
    233  struct iw_cm_id *cm_id,  in i40iw_get_cmevent_info() argument
    236  memcpy(&event->local_addr, &cm_id->m_local_addr,  in i40iw_get_cmevent_info()
    238  memcpy(&event->remote_addr, &cm_id->m_remote_addr,  in i40iw_get_cmevent_info()
    256  struct iw_cm_id *cm_id,  in i40iw_send_cm_event() argument
    277  i40iw_get_cmevent_info(cm_node, cm_id, &event);  in i40iw_send_cm_event()
    291  return cm_id->event_handler(cm_id, &event);  in i40iw_send_cm_event()
    304  if (!cm_node->cm_id)  in i40iw_create_event()
    318  event->cm_info.cm_id = cm_node->cm_id;  in i40iw_create_event()
    610  struct iw_cm_id *cm_id;  in i40iw_event_connect_error() local
    612  cm_id = event->cm_node->cm_id;  in i40iw_event_connect_error()
    [all …]
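i40iw_get_cmevent_info() and i40iw_send_cm_event() populate an iw_cm_event from the cm_id's port-mapped addresses and hand it to the consumer's handler. A reduced sketch of that path:

    #include <linux/string.h>
    #include <rdma/iw_cm.h>

    static int send_cm_event_sketch(struct iw_cm_id *cm_id,
                                    enum iw_cm_event_type type)
    {
            struct iw_cm_event event = {};

            event.event = type;
            /* m_local_addr/m_remote_addr are the port-mapped addresses */
            memcpy(&event.local_addr, &cm_id->m_local_addr,
                   sizeof(event.local_addr));
            memcpy(&event.remote_addr, &cm_id->m_remote_addr,
                   sizeof(event.remote_addr));
            return cm_id->event_handler(cm_id, &event);
    }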
|
/Linux-v4.19/net/9p/ |
D | trans_rdma.c |
    94   struct rdma_cm_id *cm_id;  member
    296  rdma_disconnect(rdma->cm_id);  in p9_cm_event_handler()
    317  ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,  in recv_done()
    363  ib_dma_unmap_single(rdma->cm_id->device,  in send_done()
    390  if (rdma->cm_id && !IS_ERR(rdma->cm_id))  in rdma_destroy_trans()
    391  rdma_destroy_id(rdma->cm_id);  in rdma_destroy_trans()
    403  c->busa = ib_dma_map_single(rdma->cm_id->device,  in post_recv()
    406  if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))  in post_recv()
    493  c->busa = ib_dma_map_single(rdma->cm_id->device,  in rdma_request()
    496  if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {  in rdma_request()
    [all …]
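post_recv() and rdma_request() both map buffers against the device the cm_id resolved to, and check the mapping before posting. A minimal sketch of that pattern, assuming a caller-supplied buffer:

    #include <linux/errno.h>
    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    static int map_buffer_sketch(struct rdma_cm_id *cm_id, void *buf,
                                 size_t size, u64 *dma_addr)
    {
            *dma_addr = ib_dma_map_single(cm_id->device, buf, size,
                                          DMA_FROM_DEVICE);
            if (ib_dma_mapping_error(cm_id->device, *dma_addr))
                    return -EIO;    /* never post an unchecked mapping */
            return 0;
    }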
|
/Linux-v4.19/drivers/infiniband/ulp/ipoib/ |
D | ipoib_cm.c |
    80   static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
    274  struct ib_cm_id *cm_id, struct ib_qp *qp,  in ipoib_cm_modify_rx_qp() argument
    282  ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);  in ipoib_cm_modify_rx_qp()
    293  ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);  in ipoib_cm_modify_rx_qp()
    314  ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);  in ipoib_cm_modify_rx_qp()
    347  static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,  in ipoib_cm_nonsrq_init_rx() argument
    375  ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);  in ipoib_cm_nonsrq_init_rx()
    420  static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,  in ipoib_cm_send_rep() argument
    439  return ib_send_cm_rep(cm_id, &rep);  in ipoib_cm_send_rep()
    442  static int ipoib_cm_req_handler(struct ib_cm_id *cm_id,  in ipoib_cm_req_handler() argument
    [all …]
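ipoib_cm_modify_rx_qp() walks the RX QP through INIT, RTR, and RTS by asking the CM for the attributes of each transition and then applying them, which is why ib_cm_init_qp_attr() appears three times above. One step of that walk, sketched:

    #include <rdma/ib_cm.h>
    #include <rdma/ib_verbs.h>

    static int modify_to_state_sketch(struct ib_cm_id *cm_id, struct ib_qp *qp,
                                      enum ib_qp_state state)
    {
            struct ib_qp_attr qp_attr;
            int qp_attr_mask, ret;

            qp_attr.qp_state = state;
            /* the CM fills in the attributes needed for this transition */
            ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
            if (ret)
                    return ret;
            return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
    }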
|
/Linux-v4.19/drivers/infiniband/hw/cxgb4/ |
D | cm.c |
    152   epc->cm_id->rem_ref(epc->cm_id);  in deref_cm_id()
    153   epc->cm_id = NULL;  in deref_cm_id()
    160   epc->cm_id->add_ref(epc->cm_id);  in ref_cm_id()
    1233  if (ep->com.cm_id) {  in close_complete_upcall()
    1235  ep, ep->com.cm_id, ep->hwtid);  in close_complete_upcall()
    1236  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  in close_complete_upcall()
    1249  if (ep->com.cm_id) {  in peer_close_upcall()
    1251  ep, ep->com.cm_id, ep->hwtid);  in peer_close_upcall()
    1252  ep->com.cm_id->event_handler(ep->com.cm_id, &event);  in peer_close_upcall()
    1265  if (ep->com.cm_id) {  in peer_abort_upcall()
    [all …]
|
/Linux-v4.19/drivers/nvme/host/ |
D | rdma.c |
    93   struct rdma_cm_id *cm_id;  member
    145  static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
    260  ret = rdma_create_qp(queue->cm_id, dev->pd, &init_attr);  in nvme_rdma_create_qp()
    262  queue->qp = queue->cm_id->qp;  in nvme_rdma_create_qp()
    350  nvme_rdma_find_get_device(struct rdma_cm_id *cm_id)  in nvme_rdma_find_get_device() argument
    356  if (ndev->dev->node_guid == cm_id->device->node_guid &&  in nvme_rdma_find_get_device()
    365  ndev->dev = cm_id->device;  in nvme_rdma_find_get_device()
    437  queue->device = nvme_rdma_find_get_device(queue->cm_id);  in nvme_rdma_create_queue_ib()
    439  dev_err(queue->cm_id->device->dev.parent,  in nvme_rdma_create_queue_ib()
    490  rdma_destroy_qp(queue->cm_id);  in nvme_rdma_create_queue_ib()
    [all …]
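nvme_rdma_create_qp() creates the queue pair through the cm_id, which then owns it as cm_id->qp. A trimmed sketch with illustrative sizing (the real code derives queue depths from controller parameters):

    #include <rdma/rdma_cm.h>

    static int create_qp_sketch(struct rdma_cm_id *cm_id, struct ib_pd *pd,
                                struct ib_cq *cq)
    {
            struct ib_qp_init_attr init_attr = {};

            init_attr.qp_type = IB_QPT_RC;
            init_attr.send_cq = cq;
            init_attr.recv_cq = cq;
            init_attr.cap.max_send_wr = 32;     /* illustrative */
            init_attr.cap.max_recv_wr = 32;     /* illustrative */
            init_attr.cap.max_send_sge = 1;
            init_attr.cap.max_recv_sge = 1;
            init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;

            /* on success the QP is reachable as cm_id->qp */
            return rdma_create_qp(cm_id, pd, &init_attr);
    }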
|
/Linux-v4.19/drivers/infiniband/ulp/srpt/ |
D | ib_srpt.h |
    283  struct ib_cm_id *cm_id;  member
    286  struct rdma_cm_id *cm_id;  member
    408  struct ib_cm_id *cm_id;  member
|
D | ib_srpt.c |
    230   rdma_notify(ch->rdma_cm.cm_id, event->event);  in srpt_qp_event()
    232   ib_cm_notify(ch->ib_cm.cm_id, event->event);  in srpt_qp_event()
    1115  ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);  in srpt_ch_qp_rtr()
    1145  ret = ib_cm_init_qp_attr(ch->ib_cm.cm_id, &qp_attr, &attr_mask);  in srpt_ch_qp_rts()
    1767  ret = rdma_create_qp(ch->rdma_cm.cm_id, sdev->pd, qp_init);  in srpt_create_ch_ib()
    1768  ch->qp = ch->rdma_cm.cm_id->qp;  in srpt_create_ch_ib()
    1879  ret = rdma_disconnect(ch->rdma_cm.cm_id);  in srpt_disconnect_ch()
    1881  ret = ib_send_cm_dreq(ch->ib_cm.cm_id, NULL, 0);  in srpt_disconnect_ch()
    1883  ret = ib_send_cm_drep(ch->ib_cm.cm_id, NULL, 0);  in srpt_disconnect_ch()
    2035  rdma_destroy_id(ch->rdma_cm.cm_id);  in srpt_release_channel_work()
    [all …]
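srpt keeps one channel abstraction over two connection managers: srpt_disconnect_ch() above calls rdma_disconnect() when the channel was established via the rdma_cm and ib_send_cm_dreq() when it came in via the raw ib_cm. Sketched, with an illustrative channel struct standing in for srpt_rdma_ch:

    #include <rdma/ib_cm.h>
    #include <rdma/rdma_cm.h>

    struct srpt_ch_sketch {
            bool using_rdma_cm;             /* which CM established the channel */
            struct rdma_cm_id *rdma_cm_id;
            struct ib_cm_id *ib_cm_id;
    };

    static int disconnect_ch_sketch(struct srpt_ch_sketch *ch)
    {
            if (ch->using_rdma_cm)
                    return rdma_disconnect(ch->rdma_cm_id);
            return ib_send_cm_dreq(ch->ib_cm_id, NULL, 0);
    }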
|
/Linux-v4.19/drivers/infiniband/ulp/isert/ |
D | ib_isert.c |
    82   rdma_notify(isert_conn->cm_id, IB_EVENT_COMM_EST);  in isert_qp_event_callback()
    525  isert_conn->cm_id = cma_id;  in isert_connect_request()
    578  if (isert_conn->cm_id &&  in isert_connect_release()
    580  rdma_destroy_id(isert_conn->cm_id);  in isert_connect_release()
    642  struct isert_np *isert_np = isert_conn->cm_id->context;  in isert_handle_unbound_conn()
    680  err = rdma_disconnect(isert_conn->cm_id);  in isert_conn_terminate()
    695  isert_np->cm_id = NULL;  in isert_np_cma_handler()
    698  isert_np->cm_id = isert_setup_id(isert_np);  in isert_np_cma_handler()
    699  if (IS_ERR(isert_np->cm_id)) {  in isert_np_cma_handler()
    701  isert_np, PTR_ERR(isert_np->cm_id));  in isert_np_cma_handler()
    [all …]
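isert_qp_event_callback() relays a COMM_EST QP event back to the CM via rdma_notify() so the CM can complete connection establishment. A sketch of that relay, assuming the QP's event-handler context was set to the cm_id (isert actually stores its connection struct there):

    #include <rdma/ib_verbs.h>
    #include <rdma/rdma_cm.h>

    static void qp_event_sketch(struct ib_event *e, void *context)
    {
            struct rdma_cm_id *cm_id = context; /* illustrative context choice */

            if (e->event == IB_EVENT_COMM_EST)
                    rdma_notify(cm_id, IB_EVENT_COMM_EST);
    }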
|