Lines Matching +full:device +full:- +full:addr
1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
4 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
5 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
6 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
63 [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal",
70 static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
85 if (rdma_ib_or_roce(id->device, id->port_num)) in rdma_reject_msg()
88 if (rdma_protocol_iwarp(id->device, id->port_num)) in rdma_reject_msg()
97 * rdma_is_consumer_reject - return true if the consumer rejected the connect
104 if (rdma_ib_or_roce(id->device, id->port_num)) in rdma_is_consumer_reject()
107 if (rdma_protocol_iwarp(id->device, id->port_num)) in rdma_is_consumer_reject()
108 return reason == -ECONNREFUSED; in rdma_is_consumer_reject()
119 if (rdma_is_consumer_reject(id, ev->status)) { in rdma_consumer_reject_data()
120 *data_len = ev->param.conn.private_data_len; in rdma_consumer_reject_data()
121 p = ev->param.conn.private_data; in rdma_consumer_reject_data()
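A consumer-side sketch of how these three helpers fit together on a REJECTED event; my_cm_handler and the payload handling are hypothetical placeholders, not part of cma.c:

	static int my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *ev)
	{
		if (ev->event == RDMA_CM_EVENT_REJECTED) {
			u8 len;
			const void *data = rdma_consumer_reject_data(id, ev, &len);

			pr_info("rejected: %s (%u bytes of private data)\n",
				rdma_reject_msg(id, ev->status), len);
			if (data)
				; /* peer-supplied ULP reject payload, if any */
		}
		return 0;
	}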
131 * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
139 if (id->device->node_type == RDMA_NODE_RNIC) in rdma_iw_cm_id()
140 return id_priv->cm_id.iw; in rdma_iw_cm_id()
146 * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
154 return &id_priv->id; in rdma_res_to_id()
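Both accessors are exported for in-kernel users. A hedged sketch, assuming cm_id is an already-bound id (rdma_iw_cm_id() returns NULL unless the underlying device is an iWARP RNIC):

	struct iw_cm_id *iw_id = rdma_iw_cm_id(cm_id);

	if (iw_id)
		pr_debug("cm_id is backed by iw_cm_id %p\n", iw_id);

rdma_res_to_id() goes the other direction for restrack consumers, mapping a struct rdma_restrack_entry back to its owning rdma_cm_id.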
158 static int cma_add_one(struct ib_device *device);
159 static void cma_remove_one(struct ib_device *device, void *client_data);
193 return &pernet->tcp_ps; in cma_pernet_xa()
195 return &pernet->udp_ps; in cma_pernet_xa()
197 return &pernet->ipoib_ps; in cma_pernet_xa()
199 return &pernet->ib_ps; in cma_pernet_xa()
207 struct ib_device *device; member
251 refcount_inc(&cma_dev->refcount); in cma_dev_get()
256 if (refcount_dec_and_test(&cma_dev->refcount)) in cma_dev_put()
257 complete(&cma_dev->comp); in cma_dev_put()
269 if (filter(cma_dev->device, cookie)) { in cma_enum_devices_by_ibdev()
283 if (!rdma_is_port_valid(cma_dev->device, port)) in cma_get_default_gid_type()
284 return -EINVAL; in cma_get_default_gid_type()
286 return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)]; in cma_get_default_gid_type()
295 if (!rdma_is_port_valid(cma_dev->device, port)) in cma_set_default_gid_type()
296 return -EINVAL; in cma_set_default_gid_type()
299 rdma_protocol_roce_eth_encap(cma_dev->device, port)) in cma_set_default_gid_type()
302 supported_gids = roce_gid_type_mask_support(cma_dev->device, port); in cma_set_default_gid_type()
305 return -EINVAL; in cma_set_default_gid_type()
307 cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] = in cma_set_default_gid_type()
315 if (!rdma_is_port_valid(cma_dev->device, port)) in cma_get_default_roce_tos()
316 return -EINVAL; in cma_get_default_roce_tos()
318 return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)]; in cma_get_default_roce_tos()
324 if (!rdma_is_port_valid(cma_dev->device, port)) in cma_set_default_roce_tos()
325 return -EINVAL; in cma_set_default_roce_tos()
327 cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] = in cma_set_default_roce_tos()
334 return cma_dev->device; in cma_get_ib_dev()
338 * Device removal can occur at any time, so we need extra handling to
339 * serialize notifying the user of device removal with other callbacks.
355 struct sockaddr_storage addr; member
371 __be32 addr; member
388 struct ib_device *device; member
409 lockdep_assert_held(&id_priv->handler_mutex); in cma_comp_exch()
411 spin_lock_irqsave(&id_priv->lock, flags); in cma_comp_exch()
412 if ((ret = (id_priv->state == comp))) in cma_comp_exch()
413 id_priv->state = exch; in cma_comp_exch()
414 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_comp_exch()
420 return hdr->ip_version >> 4; in cma_get_ip_ver()
425 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF); in cma_set_ip_ver()
438 *(__be32 *)(mgid->raw + 12)); in cma_igmp_send()
441 *(__be32 *)(mgid->raw + 12)); in cma_igmp_send()
445 return (in_dev) ? 0 : -ENODEV; in cma_igmp_send()
452 id_priv->cma_dev = cma_dev; in _cma_attach_to_dev()
453 id_priv->id.device = cma_dev->device; in _cma_attach_to_dev()
454 id_priv->id.route.addr.dev_addr.transport = in _cma_attach_to_dev()
455 rdma_node_get_transport(cma_dev->device->node_type); in _cma_attach_to_dev()
456 list_add_tail(&id_priv->list, &cma_dev->id_list); in _cma_attach_to_dev()
458 trace_cm_id_attach(id_priv, cma_dev->device); in _cma_attach_to_dev()
465 id_priv->gid_type = in cma_attach_to_dev()
466 cma_dev->default_gid_type[id_priv->id.port_num - in cma_attach_to_dev()
467 rdma_start_port(cma_dev->device)]; in cma_attach_to_dev()
473 list_del(&id_priv->list); in cma_release_dev()
474 cma_dev_put(id_priv->cma_dev); in cma_release_dev()
475 id_priv->cma_dev = NULL; in cma_release_dev()
476 id_priv->id.device = NULL; in cma_release_dev()
477 if (id_priv->id.route.addr.dev_addr.sgid_attr) { in cma_release_dev()
478 rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); in cma_release_dev()
479 id_priv->id.route.addr.dev_addr.sgid_attr = NULL; in cma_release_dev()
486 return (struct sockaddr *) &id_priv->id.route.addr.src_addr; in cma_src_addr()
491 return (struct sockaddr *) &id_priv->id.route.addr.dst_addr; in cma_dst_addr()
496 return id_priv->id.route.addr.src_addr.ss_family; in cma_family()
504 if (id_priv->qkey) { in cma_set_qkey()
505 if (qkey && id_priv->qkey != qkey) in cma_set_qkey()
506 return -EINVAL; in cma_set_qkey()
511 id_priv->qkey = qkey; in cma_set_qkey()
515 switch (id_priv->id.ps) { in cma_set_qkey()
518 id_priv->qkey = RDMA_UDP_QKEY; in cma_set_qkey()
521 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); in cma_set_qkey()
522 ret = ib_sa_get_mcmember_rec(id_priv->id.device, in cma_set_qkey()
523 id_priv->id.port_num, &rec.mgid, in cma_set_qkey()
526 id_priv->qkey = be32_to_cpu(rec.qkey); in cma_set_qkey()
536 dev_addr->dev_type = ARPHRD_INFINIBAND; in cma_translate_ib()
537 rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr); in cma_translate_ib()
538 ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey)); in cma_translate_ib()
541 static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr) in cma_translate_addr() argument
545 if (addr->sa_family != AF_IB) { in cma_translate_addr()
546 ret = rdma_translate_ip(addr, dev_addr); in cma_translate_addr()
548 cma_translate_ib((struct sockaddr_ib *) addr, dev_addr); in cma_translate_addr()
556 cma_validate_port(struct ib_device *device, u32 port, in cma_validate_port() argument
561 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_validate_port()
562 int bound_if_index = dev_addr->bound_dev_if; in cma_validate_port()
564 int dev_type = dev_addr->dev_type; in cma_validate_port()
567 if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net)) in cma_validate_port()
568 return ERR_PTR(-ENODEV); in cma_validate_port()
570 if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port)) in cma_validate_port()
571 return ERR_PTR(-ENODEV); in cma_validate_port()
573 if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) in cma_validate_port()
574 return ERR_PTR(-ENODEV); in cma_validate_port()
576 if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) { in cma_validate_port()
577 ndev = dev_get_by_index(dev_addr->net, bound_if_index); in cma_validate_port()
579 return ERR_PTR(-ENODEV); in cma_validate_port()
584 sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev); in cma_validate_port()
593 WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr); in cma_bind_sgid_attr()
594 id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr; in cma_bind_sgid_attr()
598 * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute
600 * @id_priv: cm_id which should be bound to cma device
602 * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
608 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_acquire_dev_by_src_ip()
613 int ret = -ENODEV; in cma_acquire_dev_by_src_ip()
616 if (dev_addr->dev_type != ARPHRD_INFINIBAND && in cma_acquire_dev_by_src_ip()
617 id_priv->id.ps == RDMA_PS_IPOIB) in cma_acquire_dev_by_src_ip()
618 return -EINVAL; in cma_acquire_dev_by_src_ip()
620 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_acquire_dev_by_src_ip()
623 memcpy(&gid, dev_addr->src_dev_addr + in cma_acquire_dev_by_src_ip()
628 rdma_for_each_port (cma_dev->device, port) { in cma_acquire_dev_by_src_ip()
629 gidp = rdma_protocol_roce(cma_dev->device, port) ? in cma_acquire_dev_by_src_ip()
631 gid_type = cma_dev->default_gid_type[port - 1]; in cma_acquire_dev_by_src_ip()
632 sgid_attr = cma_validate_port(cma_dev->device, port, in cma_acquire_dev_by_src_ip()
635 id_priv->id.port_num = port; in cma_acquire_dev_by_src_ip()
649 * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
650 * @id_priv: cm id to bind to cma device
654 * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when
655 * rdma device of listen_id matches the incoming request. It also verifies in cma_ib_acquire_dev()
663 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_ib_acquire_dev()
668 if (dev_addr->dev_type != ARPHRD_INFINIBAND && in cma_ib_acquire_dev()
669 id_priv->id.ps == RDMA_PS_IPOIB) in cma_ib_acquire_dev()
670 return -EINVAL; in cma_ib_acquire_dev()
672 if (rdma_protocol_roce(req->device, req->port)) in cma_ib_acquire_dev()
673 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_ib_acquire_dev()
676 memcpy(&gid, dev_addr->src_dev_addr + in cma_ib_acquire_dev()
679 gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1]; in cma_ib_acquire_dev()
680 sgid_attr = cma_validate_port(req->device, req->port, in cma_ib_acquire_dev()
685 id_priv->id.port_num = req->port; in cma_ib_acquire_dev()
688 * of cma_dev->id_list such as cma_netdev_callback() and in cma_ib_acquire_dev()
692 cma_attach_to_dev(id_priv, listen_id_priv->cma_dev); in cma_ib_acquire_dev()
694 rdma_restrack_add(&id_priv->res); in cma_ib_acquire_dev()
701 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_iw_acquire_dev()
705 int ret = -ENODEV; in cma_iw_acquire_dev()
709 if (dev_addr->dev_type != ARPHRD_INFINIBAND && in cma_iw_acquire_dev()
710 id_priv->id.ps == RDMA_PS_IPOIB) in cma_iw_acquire_dev()
711 return -EINVAL; in cma_iw_acquire_dev()
713 memcpy(&gid, dev_addr->src_dev_addr + in cma_iw_acquire_dev()
718 cma_dev = listen_id_priv->cma_dev; in cma_iw_acquire_dev()
719 port = listen_id_priv->id.port_num; in cma_iw_acquire_dev()
720 gid_type = listen_id_priv->gid_type; in cma_iw_acquire_dev()
721 sgid_attr = cma_validate_port(cma_dev->device, port, in cma_iw_acquire_dev()
724 id_priv->id.port_num = port; in cma_iw_acquire_dev()
731 rdma_for_each_port (cma_dev->device, port) { in cma_iw_acquire_dev()
732 if (listen_id_priv->cma_dev == cma_dev && in cma_iw_acquire_dev()
733 listen_id_priv->id.port_num == port) in cma_iw_acquire_dev()
736 gid_type = cma_dev->default_gid_type[port - 1]; in cma_iw_acquire_dev()
737 sgid_attr = cma_validate_port(cma_dev->device, port, in cma_iw_acquire_dev()
740 id_priv->id.port_num = port; in cma_iw_acquire_dev()
751 rdma_restrack_add(&id_priv->res); in cma_iw_acquire_dev()
759 * Select the source IB device and address to reach the destination IB address.
764 struct sockaddr_ib *addr; in cma_resolve_ib_dev() local
772 addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); in cma_resolve_ib_dev()
773 dgid = (union ib_gid *) &addr->sib_addr; in cma_resolve_ib_dev()
774 pkey = ntohs(addr->sib_pkey); in cma_resolve_ib_dev()
778 rdma_for_each_port (cur_dev->device, p) { in cma_resolve_ib_dev()
779 if (!rdma_cap_af_ib(cur_dev->device, p)) in cma_resolve_ib_dev()
782 if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index)) in cma_resolve_ib_dev()
785 if (ib_get_cached_port_state(cur_dev->device, p, &port_state)) in cma_resolve_ib_dev()
787 for (i = 0; !rdma_query_gid(cur_dev->device, in cma_resolve_ib_dev()
793 id_priv->id.port_num = p; in cma_resolve_ib_dev()
798 dgid->global.subnet_prefix) && in cma_resolve_ib_dev()
802 id_priv->id.port_num = p; in cma_resolve_ib_dev()
809 return -ENODEV; in cma_resolve_ib_dev()
813 rdma_restrack_add(&id_priv->res); in cma_resolve_ib_dev()
815 addr = (struct sockaddr_ib *)cma_src_addr(id_priv); in cma_resolve_ib_dev()
816 memcpy(&addr->sib_addr, &sgid, sizeof(sgid)); in cma_resolve_ib_dev()
817 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); in cma_resolve_ib_dev()
823 refcount_inc(&id_priv->refcount); in cma_id_get()
828 if (refcount_dec_and_test(&id_priv->refcount)) in cma_id_put()
829 complete(&id_priv->comp); in cma_id_put()
841 return ERR_PTR(-ENOMEM); in __rdma_create_id()
843 id_priv->state = RDMA_CM_IDLE; in __rdma_create_id()
844 id_priv->id.context = context; in __rdma_create_id()
845 id_priv->id.event_handler = event_handler; in __rdma_create_id()
846 id_priv->id.ps = ps; in __rdma_create_id()
847 id_priv->id.qp_type = qp_type; in __rdma_create_id()
848 id_priv->tos_set = false; in __rdma_create_id()
849 id_priv->timeout_set = false; in __rdma_create_id()
850 id_priv->min_rnr_timer_set = false; in __rdma_create_id()
851 id_priv->gid_type = IB_GID_TYPE_IB; in __rdma_create_id()
852 spin_lock_init(&id_priv->lock); in __rdma_create_id()
853 mutex_init(&id_priv->qp_mutex); in __rdma_create_id()
854 init_completion(&id_priv->comp); in __rdma_create_id()
855 refcount_set(&id_priv->refcount, 1); in __rdma_create_id()
856 mutex_init(&id_priv->handler_mutex); in __rdma_create_id()
857 INIT_LIST_HEAD(&id_priv->listen_list); in __rdma_create_id()
858 INIT_LIST_HEAD(&id_priv->mc_list); in __rdma_create_id()
859 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); in __rdma_create_id()
860 id_priv->id.route.addr.dev_addr.net = get_net(net); in __rdma_create_id()
861 id_priv->seq_num &= 0x00ffffff; in __rdma_create_id()
863 rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID); in __rdma_create_id()
865 rdma_restrack_parent_name(&id_priv->res, &parent->res); in __rdma_create_id()
881 rdma_restrack_set_name(&ret->res, caller); in __rdma_create_kernel_id()
882 return &ret->id; in __rdma_create_kernel_id()
893 ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context, in rdma_create_user_id()
898 rdma_restrack_set_name(&ret->res, NULL); in rdma_create_user_id()
899 return &ret->id; in rdma_create_user_id()
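Kernel ULPs reach __rdma_create_kernel_id() through the rdma_create_id() wrapper macro, which supplies KBUILD_MODNAME as the restrack owner. A minimal sketch, with my_cm_handler and my_ctx as placeholders:

	#include <rdma/rdma_cm.h>

	struct rdma_cm_id *id;

	id = rdma_create_id(&init_net, my_cm_handler, my_ctx,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);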
909 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_init_ud_qp()
935 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_init_conn_qp()
950 if (id->device != pd->device) { in rdma_create_qp()
951 ret = -EINVAL; in rdma_create_qp()
955 qp_init_attr->port_num = id->port_num; in rdma_create_qp()
962 if (id->qp_type == IB_QPT_UD) in rdma_create_qp()
969 id->qp = qp; in rdma_create_qp()
970 id_priv->qp_num = qp->qp_num; in rdma_create_qp()
971 id_priv->srq = (qp->srq != NULL); in rdma_create_qp()
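A hedged call-site sketch for rdma_create_qp(); per the check above, the PD must belong to id->device, and port_num is filled in from the id. cq and pd are caller-owned, hypothetical objects:

	struct ib_qp_init_attr attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.cap = { .max_send_wr = 16, .max_recv_wr = 16,
			 .max_send_sge = 1, .max_recv_sge = 1 },
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int ret = rdma_create_qp(id, pd, &attr);

On success the QP is attached to the id (id->qp) and its state transitions are driven by the CM.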
988 mutex_lock(&id_priv->qp_mutex); in rdma_destroy_qp()
989 ib_destroy_qp(id_priv->id.qp); in rdma_destroy_qp()
990 id_priv->id.qp = NULL; in rdma_destroy_qp()
991 mutex_unlock(&id_priv->qp_mutex); in rdma_destroy_qp()
1001 mutex_lock(&id_priv->qp_mutex); in cma_modify_qp_rtr()
1002 if (!id_priv->id.qp) { in cma_modify_qp_rtr()
1009 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_modify_qp_rtr()
1013 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); in cma_modify_qp_rtr()
1018 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_modify_qp_rtr()
1022 BUG_ON(id_priv->cma_dev->device != id_priv->id.device); in cma_modify_qp_rtr()
1025 qp_attr.max_dest_rd_atomic = conn_param->responder_resources; in cma_modify_qp_rtr()
1026 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); in cma_modify_qp_rtr()
1028 mutex_unlock(&id_priv->qp_mutex); in cma_modify_qp_rtr()
1038 mutex_lock(&id_priv->qp_mutex); in cma_modify_qp_rts()
1039 if (!id_priv->id.qp) { in cma_modify_qp_rts()
1045 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_modify_qp_rts()
1050 qp_attr.max_rd_atomic = conn_param->initiator_depth; in cma_modify_qp_rts()
1051 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); in cma_modify_qp_rts()
1053 mutex_unlock(&id_priv->qp_mutex); in cma_modify_qp_rts()
1062 mutex_lock(&id_priv->qp_mutex); in cma_modify_qp_err()
1063 if (!id_priv->id.qp) { in cma_modify_qp_err()
1069 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); in cma_modify_qp_err()
1071 mutex_unlock(&id_priv->qp_mutex); in cma_modify_qp_err()
1078 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_ib_init_qp_attr()
1082 if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) in cma_ib_init_qp_attr()
1087 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, in cma_ib_init_qp_attr()
1088 pkey, &qp_attr->pkey_index); in cma_ib_init_qp_attr()
1092 qp_attr->port_num = id_priv->id.port_num; in cma_ib_init_qp_attr()
1095 if (id_priv->id.qp_type == IB_QPT_UD) { in cma_ib_init_qp_attr()
1100 qp_attr->qkey = id_priv->qkey; in cma_ib_init_qp_attr()
1103 qp_attr->qp_access_flags = 0; in cma_ib_init_qp_attr()
1116 if (rdma_cap_ib_cm(id->device, id->port_num)) { in rdma_init_qp_attr()
1117 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) in rdma_init_qp_attr()
1120 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, in rdma_init_qp_attr()
1123 if (qp_attr->qp_state == IB_QPS_RTR) in rdma_init_qp_attr()
1124 qp_attr->rq_psn = id_priv->seq_num; in rdma_init_qp_attr()
1125 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { in rdma_init_qp_attr()
1126 if (!id_priv->cm_id.iw) { in rdma_init_qp_attr()
1127 qp_attr->qp_access_flags = 0; in rdma_init_qp_attr()
1130 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, in rdma_init_qp_attr()
1132 qp_attr->port_num = id_priv->id.port_num; in rdma_init_qp_attr()
1135 ret = -ENOSYS; in rdma_init_qp_attr()
1138 if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set) in rdma_init_qp_attr()
1139 qp_attr->timeout = id_priv->timeout; in rdma_init_qp_attr()
1141 if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set) in rdma_init_qp_attr()
1142 qp_attr->min_rnr_timer = id_priv->min_rnr_timer; in rdma_init_qp_attr()
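For ULPs that own their QP instead of using rdma_create_qp(), the usual pattern is to let rdma_init_qp_attr() compute the transition attributes and apply them manually; a sketch where my_qp is hypothetical and qp_state is an input to the call:

	struct ib_qp_attr qp_attr = { .qp_state = IB_QPS_RTR };
	int qp_attr_mask, ret;

	ret = rdma_init_qp_attr(id, &qp_attr, &qp_attr_mask);
	if (!ret)
		ret = ib_modify_qp(my_qp, &qp_attr, qp_attr_mask);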
1148 static inline bool cma_zero_addr(const struct sockaddr *addr) in cma_zero_addr() argument
1150 switch (addr->sa_family) { in cma_zero_addr()
1152 return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr); in cma_zero_addr()
1154 return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr); in cma_zero_addr()
1156 return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr); in cma_zero_addr()
1162 static inline bool cma_loopback_addr(const struct sockaddr *addr) in cma_loopback_addr() argument
1164 switch (addr->sa_family) { in cma_loopback_addr()
1167 ((struct sockaddr_in *)addr)->sin_addr.s_addr); in cma_loopback_addr()
1170 &((struct sockaddr_in6 *)addr)->sin6_addr); in cma_loopback_addr()
1173 &((struct sockaddr_ib *)addr)->sib_addr); in cma_loopback_addr()
1179 static inline bool cma_any_addr(const struct sockaddr *addr) in cma_any_addr() argument
1181 return cma_zero_addr(addr) || cma_loopback_addr(addr); in cma_any_addr()
1186 if (src->sa_family != dst->sa_family) in cma_addr_cmp()
1187 return -1; in cma_addr_cmp()
1189 switch (src->sa_family) { in cma_addr_cmp()
1191 return ((struct sockaddr_in *)src)->sin_addr.s_addr != in cma_addr_cmp()
1192 ((struct sockaddr_in *)dst)->sin_addr.s_addr; in cma_addr_cmp()
1198 if (ipv6_addr_cmp(&src_addr6->sin6_addr, in cma_addr_cmp()
1199 &dst_addr6->sin6_addr)) in cma_addr_cmp()
1201 link_local = ipv6_addr_type(&dst_addr6->sin6_addr) & in cma_addr_cmp()
1204 return link_local ? (src_addr6->sin6_scope_id != in cma_addr_cmp()
1205 dst_addr6->sin6_scope_id) : in cma_addr_cmp()
1210 return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr, in cma_addr_cmp()
1211 &((struct sockaddr_ib *) dst)->sib_addr); in cma_addr_cmp()
1215 static __be16 cma_port(const struct sockaddr *addr) in cma_port() argument
1219 switch (addr->sa_family) { in cma_port()
1221 return ((struct sockaddr_in *) addr)->sin_port; in cma_port()
1223 return ((struct sockaddr_in6 *) addr)->sin6_port; in cma_port()
1225 sib = (struct sockaddr_ib *) addr; in cma_port()
1226 return htons((u16) (be64_to_cpu(sib->sib_sid) & in cma_port()
1227 be64_to_cpu(sib->sib_sid_mask))); in cma_port()
1233 static inline int cma_any_port(const struct sockaddr *addr) in cma_any_port() argument
1235 return !cma_port(addr); in cma_any_port()
1245 listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr; in cma_save_ib_info()
1248 ib->sib_family = AF_IB; in cma_save_ib_info()
1250 ib->sib_pkey = path->pkey; in cma_save_ib_info()
1251 ib->sib_flowinfo = path->flow_label; in cma_save_ib_info()
1252 memcpy(&ib->sib_addr, &path->sgid, 16); in cma_save_ib_info()
1253 ib->sib_sid = path->service_id; in cma_save_ib_info()
1254 ib->sib_scope_id = 0; in cma_save_ib_info()
1256 ib->sib_pkey = listen_ib->sib_pkey; in cma_save_ib_info()
1257 ib->sib_flowinfo = listen_ib->sib_flowinfo; in cma_save_ib_info()
1258 ib->sib_addr = listen_ib->sib_addr; in cma_save_ib_info()
1259 ib->sib_sid = listen_ib->sib_sid; in cma_save_ib_info()
1260 ib->sib_scope_id = listen_ib->sib_scope_id; in cma_save_ib_info()
1262 ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL); in cma_save_ib_info()
1266 ib->sib_family = AF_IB; in cma_save_ib_info()
1268 ib->sib_pkey = path->pkey; in cma_save_ib_info()
1269 ib->sib_flowinfo = path->flow_label; in cma_save_ib_info()
1270 memcpy(&ib->sib_addr, &path->dgid, 16); in cma_save_ib_info()
1283 .sin_addr.s_addr = hdr->dst_addr.ip4.addr, in cma_save_ip4_info()
1291 .sin_addr.s_addr = hdr->src_addr.ip4.addr, in cma_save_ip4_info()
1292 .sin_port = hdr->port, in cma_save_ip4_info()
1305 .sin6_addr = hdr->dst_addr.ip6, in cma_save_ip6_info()
1313 .sin6_addr = hdr->src_addr.ip6, in cma_save_ip6_info()
1314 .sin6_port = hdr->port, in cma_save_ip6_info()
1332 hdr = ib_event->private_data; in cma_save_ip_info()
1333 if (hdr->cma_version != CMA_VERSION) in cma_save_ip_info()
1334 return -EINVAL; in cma_save_ip_info()
1348 return -EAFNOSUPPORT; in cma_save_ip_info()
1361 if (ib_event->event == IB_CM_REQ_RECEIVED) in cma_save_net_info()
1363 ib_event->param.req_rcvd.primary_path); in cma_save_net_info()
1364 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) in cma_save_net_info()
1376 &ib_event->param.req_rcvd; in cma_save_req_info()
1378 &ib_event->param.sidr_req_rcvd; in cma_save_req_info()
1380 switch (ib_event->event) { in cma_save_req_info()
1382 req->device = req_param->listen_id->device; in cma_save_req_info()
1383 req->port = req_param->port; in cma_save_req_info()
1384 memcpy(&req->local_gid, &req_param->primary_path->sgid, in cma_save_req_info()
1385 sizeof(req->local_gid)); in cma_save_req_info()
1386 req->has_gid = true; in cma_save_req_info()
1387 req->service_id = req_param->primary_path->service_id; in cma_save_req_info()
1388 req->pkey = be16_to_cpu(req_param->primary_path->pkey); in cma_save_req_info()
1389 if (req->pkey != req_param->bth_pkey) in cma_save_req_info()
1392 req_param->bth_pkey, req->pkey); in cma_save_req_info()
1395 req->device = sidr_param->listen_id->device; in cma_save_req_info()
1396 req->port = sidr_param->port; in cma_save_req_info()
1397 req->has_gid = false; in cma_save_req_info()
1398 req->service_id = sidr_param->service_id; in cma_save_req_info()
1399 req->pkey = sidr_param->pkey; in cma_save_req_info()
1400 if (req->pkey != sidr_param->bth_pkey) in cma_save_req_info()
1403 sidr_param->bth_pkey, req->pkey); in cma_save_req_info()
1406 return -EINVAL; in cma_save_req_info()
1416 __be32 daddr = dst_addr->sin_addr.s_addr, in validate_ipv4_net_dev()
1417 saddr = src_addr->sin_addr.s_addr; in validate_ipv4_net_dev()
1430 fl4.flowi4_iif = net_dev->ifindex; in validate_ipv4_net_dev()
1447 const int strict = ipv6_addr_type(&dst_addr->sin6_addr) & in validate_ipv6_net_dev()
1449 struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr, in validate_ipv6_net_dev()
1450 &src_addr->sin6_addr, net_dev->ifindex, in validate_ipv6_net_dev()
1457 ret = rt->rt6i_idev->dev == net_dev; in validate_ipv6_net_dev()
1475 switch (daddr->sa_family) { in validate_net_dev()
1477 return saddr->sa_family == AF_INET && in validate_net_dev()
1481 return saddr->sa_family == AF_INET6 && in validate_net_dev()
1495 if (ib_event->event == IB_CM_REQ_RECEIVED) in roce_get_net_dev_by_cm_event()
1496 sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr; in roce_get_net_dev_by_cm_event()
1497 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) in roce_get_net_dev_by_cm_event()
1498 sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr; in roce_get_net_dev_by_cm_event()
1517 (struct sockaddr *)&req->listen_addr_storage; in cma_get_net_dev()
1518 struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage; in cma_get_net_dev()
1520 const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL; in cma_get_net_dev()
1524 req->service_id); in cma_get_net_dev()
1528 if (rdma_protocol_roce(req->device, req->port)) in cma_get_net_dev()
1531 net_dev = ib_get_net_dev_by_params(req->device, req->port, in cma_get_net_dev()
1532 req->pkey, in cma_get_net_dev()
1535 return ERR_PTR(-ENODEV); in cma_get_net_dev()
1548 struct sockaddr *addr = cma_src_addr(id_priv); in cma_match_private_data() local
1552 if (cma_any_addr(addr) && !id_priv->afonly) in cma_match_private_data()
1555 switch (addr->sa_family) { in cma_match_private_data()
1557 ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr; in cma_match_private_data()
1560 if (!cma_any_addr(addr) && in cma_match_private_data()
1561 hdr->dst_addr.ip4.addr != ip4_addr) in cma_match_private_data()
1565 ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr; in cma_match_private_data()
1568 if (!cma_any_addr(addr) && in cma_match_private_data()
1569 memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr))) in cma_match_private_data()
1583 struct ib_device *device = id->device; in cma_protocol_roce() local
1584 const u32 port_num = id->port_num ?: rdma_start_port(device); in cma_protocol_roce()
1586 return rdma_protocol_roce(device, port_num); in cma_protocol_roce()
1592 (const struct sockaddr *)&req->listen_addr_storage; in cma_is_req_ipv6_ll()
1596 return (daddr->sa_family == AF_INET6 && in cma_is_req_ipv6_ll()
1597 (ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)); in cma_is_req_ipv6_ll()
1604 const struct rdma_addr *addr = &id->route.addr; in cma_match_net_dev() local
1608 return (!id->port_num || id->port_num == req->port) && in cma_match_net_dev()
1609 (addr->src_addr.ss_family == AF_IB); in cma_match_net_dev()
1613 * request to any netdevice of the single- or multi-port rdma device. in cma_match_net_dev()
1621 if (net_eq(dev_net(net_dev), addr->dev_addr.net) && in cma_match_net_dev()
1622 (!!addr->dev_addr.bound_dev_if == in cma_match_net_dev()
1623 (addr->dev_addr.bound_dev_if == net_dev->ifindex))) in cma_match_net_dev()
1641 return ERR_PTR(-EINVAL); in cma_find_listener()
1643 hlist_for_each_entry(id_priv, &bind_list->owners, node) { in cma_find_listener()
1644 if (cma_match_private_data(id_priv, ib_event->private_data)) { in cma_find_listener()
1645 if (id_priv->id.device == cm_id->device && in cma_find_listener()
1646 cma_match_net_dev(&id_priv->id, net_dev, req)) in cma_find_listener()
1649 &id_priv->listen_list, in cma_find_listener()
1651 if (id_priv_dev->id.device == cm_id->device && in cma_find_listener()
1652 cma_match_net_dev(&id_priv_dev->id, in cma_find_listener()
1659 return ERR_PTR(-EINVAL); in cma_find_listener()
1678 if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) { in cma_ib_id_from_event()
1695 * If the device state is not IFF_UP, its properties such as ifindex in cma_ib_id_from_event()
1698 * ongoing operations on net device after device is closed using in cma_ib_id_from_event()
1709 if (((*net_dev)->flags & IFF_UP) == 0) { in cma_ib_id_from_event()
1710 id_priv = ERR_PTR(-EHOSTUNREACH); in cma_ib_id_from_event()
1715 (struct sockaddr *)&req->listen_addr_storage, in cma_ib_id_from_event()
1716 (struct sockaddr *)&req->src_addr_storage)) { in cma_ib_id_from_event()
1717 id_priv = ERR_PTR(-EHOSTUNREACH); in cma_ib_id_from_event()
1723 rdma_ps_from_service_id(req->service_id), in cma_ib_id_from_event()
1724 cma_port_from_service_id(req->service_id)); in cma_ib_id_from_event()
1743 if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { in cma_cancel_route()
1744 if (id_priv->query) in cma_cancel_route()
1745 ib_sa_cancel_query(id_priv->query_id, id_priv->query); in cma_cancel_route()
1759 list_del(&id_priv->list); in _cma_cancel_listens()
1761 while (!list_empty(&id_priv->listen_list)) { in _cma_cancel_listens()
1762 dev_id_priv = list_entry(id_priv->listen_list.next, in _cma_cancel_listens()
1764 /* sync with device removal to avoid duplicate destruction */ in _cma_cancel_listens()
1765 list_del_init(&dev_id_priv->list); in _cma_cancel_listens()
1766 list_del(&dev_id_priv->listen_list); in _cma_cancel_listens()
1769 rdma_destroy_id(&dev_id_priv->id); in _cma_cancel_listens()
1794 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); in cma_cancel_operation()
1800 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) in cma_cancel_operation()
1810 struct rdma_bind_list *bind_list = id_priv->bind_list; in cma_release_port()
1811 struct net *net = id_priv->id.route.addr.dev_addr.net; in cma_release_port()
1817 hlist_del(&id_priv->node); in cma_release_port()
1818 if (hlist_empty(&bind_list->owners)) { in cma_release_port()
1819 cma_ps_remove(net, bind_list->ps, bind_list->port); in cma_release_port()
1828 bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); in destroy_mc()
1830 if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num)) in destroy_mc()
1831 ib_sa_free_multicast(mc->sa_mc); in destroy_mc()
1833 if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) { in destroy_mc()
1835 &id_priv->id.route.addr.dev_addr; in destroy_mc()
1838 if (dev_addr->bound_dev_if) in destroy_mc()
1839 ndev = dev_get_by_index(dev_addr->net, in destroy_mc()
1840 dev_addr->bound_dev_if); in destroy_mc()
1844 cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr, in destroy_mc()
1853 cancel_work_sync(&mc->iboe_join.work); in destroy_mc()
1862 while (!list_empty(&id_priv->mc_list)) { in cma_leave_mc_groups()
1863 mc = list_first_entry(&id_priv->mc_list, struct cma_multicast, in cma_leave_mc_groups()
1865 list_del(&mc->list); in cma_leave_mc_groups()
1875 rdma_restrack_del(&id_priv->res); in _destroy_id()
1876 if (id_priv->cma_dev) { in _destroy_id()
1877 if (rdma_cap_ib_cm(id_priv->id.device, 1)) { in _destroy_id()
1878 if (id_priv->cm_id.ib) in _destroy_id()
1879 ib_destroy_cm_id(id_priv->cm_id.ib); in _destroy_id()
1880 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { in _destroy_id()
1881 if (id_priv->cm_id.iw) in _destroy_id()
1882 iw_destroy_cm_id(id_priv->cm_id.iw); in _destroy_id()
1890 wait_for_completion(&id_priv->comp); in _destroy_id()
1892 if (id_priv->internal_id) in _destroy_id()
1893 cma_id_put(id_priv->id.context); in _destroy_id()
1895 kfree(id_priv->id.route.path_rec); in _destroy_id()
1897 put_net(id_priv->id.route.addr.dev_addr.net); in _destroy_id()
1906 __releases(&idprv->handler_mutex) in destroy_id_handler_unlock()
1919 lockdep_assert_held(&id_priv->handler_mutex); in destroy_id_handler_unlock()
1920 spin_lock_irqsave(&id_priv->lock, flags); in destroy_id_handler_unlock()
1921 state = id_priv->state; in destroy_id_handler_unlock()
1922 id_priv->state = RDMA_CM_DESTROYING; in destroy_id_handler_unlock()
1923 spin_unlock_irqrestore(&id_priv->lock, flags); in destroy_id_handler_unlock()
1924 mutex_unlock(&id_priv->handler_mutex); in destroy_id_handler_unlock()
1933 mutex_lock(&id_priv->handler_mutex); in rdma_destroy_id()
1951 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); in cma_rep_recv()
1960 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, in cma_rep_recv()
1969 event->param.conn.private_data = private_data; in cma_set_rep_event_data()
1970 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE; in cma_set_rep_event_data()
1971 event->param.conn.responder_resources = rep_data->responder_resources; in cma_set_rep_event_data()
1972 event->param.conn.initiator_depth = rep_data->initiator_depth; in cma_set_rep_event_data()
1973 event->param.conn.flow_control = rep_data->flow_control; in cma_set_rep_event_data()
1974 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count; in cma_set_rep_event_data()
1975 event->param.conn.srq = rep_data->srq; in cma_set_rep_event_data()
1976 event->param.conn.qp_num = rep_data->remote_qpn; in cma_set_rep_event_data()
1978 event->ece.vendor_id = rep_data->ece.vendor_id; in cma_set_rep_event_data()
1979 event->ece.attr_mod = rep_data->ece.attr_mod; in cma_set_rep_event_data()
1987 lockdep_assert_held(&id_priv->handler_mutex); in cma_cm_event_handler()
1990 ret = id_priv->id.event_handler(&id_priv->id, event); in cma_cm_event_handler()
1998 struct rdma_id_private *id_priv = cm_id->context; in cma_ib_handler()
2003 mutex_lock(&id_priv->handler_mutex); in cma_ib_handler()
2004 state = READ_ONCE(id_priv->state); in cma_ib_handler()
2005 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && in cma_ib_handler()
2007 (ib_event->event == IB_CM_TIMEWAIT_EXIT && in cma_ib_handler()
2011 switch (ib_event->event) { in cma_ib_handler()
2015 event.status = -ETIMEDOUT; in cma_ib_handler()
2019 (id_priv->id.qp_type != IB_QPT_UD)) { in cma_ib_handler()
2023 if (id_priv->id.qp) { in cma_ib_handler()
2030 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd, in cma_ib_handler()
2031 ib_event->private_data); in cma_ib_handler()
2038 event.status = -ETIMEDOUT; in cma_ib_handler()
2054 pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id, in cma_ib_handler()
2055 ib_event->param.rej_rcvd.reason)); in cma_ib_handler()
2057 event.status = ib_event->param.rej_rcvd.reason; in cma_ib_handler()
2059 event.param.conn.private_data = ib_event->private_data; in cma_ib_handler()
2064 ib_event->event); in cma_ib_handler()
2070 /* Destroy the CM ID by returning a non-zero value. */ in cma_ib_handler()
2071 id_priv->cm_id.ib = NULL; in cma_ib_handler()
2076 mutex_unlock(&id_priv->handler_mutex); in cma_ib_handler()
2089 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; in cma_ib_new_conn_id()
2090 struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path; in cma_ib_new_conn_id()
2092 ib_event->param.req_rcvd.primary_path->service_id; in cma_ib_new_conn_id()
2096 id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net, in cma_ib_new_conn_id()
2097 listen_id->event_handler, listen_id->context, in cma_ib_new_conn_id()
2098 listen_id->ps, in cma_ib_new_conn_id()
2099 ib_event->param.req_rcvd.qp_type, in cma_ib_new_conn_id()
2104 id = &id_priv->id; in cma_ib_new_conn_id()
2105 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, in cma_ib_new_conn_id()
2106 (struct sockaddr *)&id->route.addr.dst_addr, in cma_ib_new_conn_id()
2110 rt = &id->route; in cma_ib_new_conn_id()
2111 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1; in cma_ib_new_conn_id()
2112 rt->path_rec = kmalloc_array(rt->num_paths, sizeof(*rt->path_rec), in cma_ib_new_conn_id()
2114 if (!rt->path_rec) in cma_ib_new_conn_id()
2117 rt->path_rec[0] = *path; in cma_ib_new_conn_id()
2118 if (rt->num_paths == 2) in cma_ib_new_conn_id()
2119 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path; in cma_ib_new_conn_id()
2122 rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev); in cma_ib_new_conn_id()
2126 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND; in cma_ib_new_conn_id()
2127 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid); in cma_ib_new_conn_id()
2128 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey)); in cma_ib_new_conn_id()
2130 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); in cma_ib_new_conn_id()
2135 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); in cma_ib_new_conn_id()
2137 id_priv->state = RDMA_CM_CONNECT; in cma_ib_new_conn_id()
2153 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; in cma_ib_new_udp_id()
2154 struct net *net = listen_id->route.addr.dev_addr.net; in cma_ib_new_udp_id()
2158 id_priv = __rdma_create_id(net, listen_id->event_handler, in cma_ib_new_udp_id()
2159 listen_id->context, listen_id->ps, IB_QPT_UD, in cma_ib_new_udp_id()
2164 id = &id_priv->id; in cma_ib_new_udp_id()
2165 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, in cma_ib_new_udp_id()
2166 (struct sockaddr *)&id->route.addr.dst_addr, in cma_ib_new_udp_id()
2168 ib_event->param.sidr_req_rcvd.service_id)) in cma_ib_new_udp_id()
2172 rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev); in cma_ib_new_udp_id()
2176 &id->route.addr.dev_addr); in cma_ib_new_udp_id()
2182 id_priv->state = RDMA_CM_CONNECT; in cma_ib_new_udp_id()
2193 event->param.conn.private_data = private_data + offset; in cma_set_req_event_data()
2194 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset; in cma_set_req_event_data()
2195 event->param.conn.responder_resources = req_data->responder_resources; in cma_set_req_event_data()
2196 event->param.conn.initiator_depth = req_data->initiator_depth; in cma_set_req_event_data()
2197 event->param.conn.flow_control = req_data->flow_control; in cma_set_req_event_data()
2198 event->param.conn.retry_count = req_data->retry_count; in cma_set_req_event_data()
2199 event->param.conn.rnr_retry_count = req_data->rnr_retry_count; in cma_set_req_event_data()
2200 event->param.conn.srq = req_data->srq; in cma_set_req_event_data()
2201 event->param.conn.qp_num = req_data->remote_qpn; in cma_set_req_event_data()
2203 event->ece.vendor_id = req_data->ece.vendor_id; in cma_set_req_event_data()
2204 event->ece.attr_mod = req_data->ece.attr_mod; in cma_set_req_event_data()
2210 return (((ib_event->event == IB_CM_REQ_RECEIVED) && in cma_ib_check_req_qp_type()
2211 (ib_event->param.req_rcvd.qp_type == id->qp_type)) || in cma_ib_check_req_qp_type()
2212 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && in cma_ib_check_req_qp_type()
2213 (id->qp_type == IB_QPT_UD)) || in cma_ib_check_req_qp_type()
2214 (!id->qp_type)); in cma_ib_check_req_qp_type()
2231 trace_cm_req_handler(listen_id, ib_event->event); in cma_ib_req_handler()
2232 if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) { in cma_ib_req_handler()
2233 ret = -EINVAL; in cma_ib_req_handler()
2237 mutex_lock(&listen_id->handler_mutex); in cma_ib_req_handler()
2238 if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) { in cma_ib_req_handler()
2239 ret = -ECONNABORTED; in cma_ib_req_handler()
2245 if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) { in cma_ib_req_handler()
2246 conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev); in cma_ib_req_handler()
2247 event.param.ud.private_data = ib_event->private_data + offset; in cma_ib_req_handler()
2249 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset; in cma_ib_req_handler()
2251 conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev); in cma_ib_req_handler()
2252 cma_set_req_event_data(&event, &ib_event->param.req_rcvd, in cma_ib_req_handler()
2253 ib_event->private_data, offset); in cma_ib_req_handler()
2256 ret = -ENOMEM; in cma_ib_req_handler()
2260 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); in cma_ib_req_handler()
2267 conn_id->cm_id.ib = cm_id; in cma_ib_req_handler()
2268 cm_id->context = conn_id; in cma_ib_req_handler()
2269 cm_id->cm_handler = cma_ib_handler; in cma_ib_req_handler()
2273 /* Destroy the CM ID by returning a non-zero value. */ in cma_ib_req_handler()
2274 conn_id->cm_id.ib = NULL; in cma_ib_req_handler()
2275 mutex_unlock(&listen_id->handler_mutex); in cma_ib_req_handler()
2280 if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT && in cma_ib_req_handler()
2281 conn_id->id.qp_type != IB_QPT_UD) { in cma_ib_req_handler()
2282 trace_cm_send_mra(cm_id->context); in cma_ib_req_handler()
2285 mutex_unlock(&conn_id->handler_mutex); in cma_ib_req_handler()
2288 mutex_unlock(&listen_id->handler_mutex); in cma_ib_req_handler()
2297 __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr) in rdma_get_service_id() argument
2299 if (addr->sa_family == AF_IB) in rdma_get_service_id()
2300 return ((struct sockaddr_ib *) addr)->sib_sid; in rdma_get_service_id()
2302 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr))); in rdma_get_service_id()
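Worked example of the non-AF_IB branch: for RDMA_PS_TCP (0x0106) and TCP port 5000 (0x1388), the service ID is ((u64)0x0106 << 16) + 0x1388 = 0x01061388, returned in big-endian form; for an AF_IB address the caller-supplied sib_sid is used verbatim.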
2309 struct rdma_addr *addr = &cm_id->route.addr; in rdma_read_gids() local
2311 if (!cm_id->device) { in rdma_read_gids()
2319 if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) { in rdma_read_gids()
2321 rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid); in rdma_read_gids()
2323 rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid); in rdma_read_gids()
2326 rdma_addr_get_sgid(&addr->dev_addr, sgid); in rdma_read_gids()
2328 rdma_addr_get_dgid(&addr->dev_addr, dgid); in rdma_read_gids()
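Usage sketch: either output pointer may be NULL, and on RoCE the GIDs are derived from the IP addresses rather than read from dev_addr:

	union ib_gid sgid, dgid;

	rdma_read_gids(cm_id, &sgid, &dgid);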
2335 struct rdma_id_private *id_priv = iw_id->context; in cma_iw_handler()
2338 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; in cma_iw_handler()
2339 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; in cma_iw_handler()
2341 mutex_lock(&id_priv->handler_mutex); in cma_iw_handler()
2342 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) in cma_iw_handler()
2345 switch (iw_event->event) { in cma_iw_handler()
2354 switch (iw_event->status) { in cma_iw_handler()
2357 event.param.conn.initiator_depth = iw_event->ird; in cma_iw_handler()
2358 event.param.conn.responder_resources = iw_event->ord; in cma_iw_handler()
2360 case -ECONNRESET: in cma_iw_handler()
2361 case -ECONNREFUSED: in cma_iw_handler()
2364 case -ETIMEDOUT: in cma_iw_handler()
2374 event.param.conn.initiator_depth = iw_event->ird; in cma_iw_handler()
2375 event.param.conn.responder_resources = iw_event->ord; in cma_iw_handler()
2381 event.status = iw_event->status; in cma_iw_handler()
2382 event.param.conn.private_data = iw_event->private_data; in cma_iw_handler()
2383 event.param.conn.private_data_len = iw_event->private_data_len; in cma_iw_handler()
2386 /* Destroy the CM ID by returning a non-zero value. */ in cma_iw_handler()
2387 id_priv->cm_id.iw = NULL; in cma_iw_handler()
2393 mutex_unlock(&id_priv->handler_mutex); in cma_iw_handler()
2402 int ret = -ECONNABORTED; in iw_conn_req_handler()
2403 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr; in iw_conn_req_handler()
2404 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr; in iw_conn_req_handler()
2407 event.param.conn.private_data = iw_event->private_data; in iw_conn_req_handler()
2408 event.param.conn.private_data_len = iw_event->private_data_len; in iw_conn_req_handler()
2409 event.param.conn.initiator_depth = iw_event->ird; in iw_conn_req_handler()
2410 event.param.conn.responder_resources = iw_event->ord; in iw_conn_req_handler()
2412 listen_id = cm_id->context; in iw_conn_req_handler()
2414 mutex_lock(&listen_id->handler_mutex); in iw_conn_req_handler()
2415 if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) in iw_conn_req_handler()
2419 conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net, in iw_conn_req_handler()
2420 listen_id->id.event_handler, in iw_conn_req_handler()
2421 listen_id->id.context, RDMA_PS_TCP, in iw_conn_req_handler()
2424 ret = -ENOMEM; in iw_conn_req_handler()
2427 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); in iw_conn_req_handler()
2428 conn_id->state = RDMA_CM_CONNECT; in iw_conn_req_handler()
2430 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr); in iw_conn_req_handler()
2432 mutex_unlock(&listen_id->handler_mutex); in iw_conn_req_handler()
2439 mutex_unlock(&listen_id->handler_mutex); in iw_conn_req_handler()
2444 conn_id->cm_id.iw = cm_id; in iw_conn_req_handler()
2445 cm_id->context = conn_id; in iw_conn_req_handler()
2446 cm_id->cm_handler = cma_iw_handler; in iw_conn_req_handler()
2454 conn_id->cm_id.iw = NULL; in iw_conn_req_handler()
2455 mutex_unlock(&listen_id->handler_mutex); in iw_conn_req_handler()
2460 mutex_unlock(&conn_id->handler_mutex); in iw_conn_req_handler()
2463 mutex_unlock(&listen_id->handler_mutex); in iw_conn_req_handler()
2469 struct sockaddr *addr; in cma_ib_listen() local
2473 addr = cma_src_addr(id_priv); in cma_ib_listen()
2474 svc_id = rdma_get_service_id(&id_priv->id, addr); in cma_ib_listen()
2475 id = ib_cm_insert_listen(id_priv->id.device, in cma_ib_listen()
2479 id_priv->cm_id.ib = id; in cma_ib_listen()
2489 id = iw_create_cm_id(id_priv->id.device, in cma_iw_listen()
2495 mutex_lock(&id_priv->qp_mutex); in cma_iw_listen()
2496 id->tos = id_priv->tos; in cma_iw_listen()
2497 id->tos_set = id_priv->tos_set; in cma_iw_listen()
2498 mutex_unlock(&id_priv->qp_mutex); in cma_iw_listen()
2499 id->afonly = id_priv->afonly; in cma_iw_listen()
2500 id_priv->cm_id.iw = id; in cma_iw_listen()
2502 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), in cma_iw_listen()
2505 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); in cma_iw_listen()
2508 iw_destroy_cm_id(id_priv->cm_id.iw); in cma_iw_listen()
2509 id_priv->cm_id.iw = NULL; in cma_iw_listen()
2518 struct rdma_id_private *id_priv = id->context; in cma_listen_handler()
2521 if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL) in cma_listen_handler()
2522 return -1; in cma_listen_handler()
2524 id->context = id_priv->id.context; in cma_listen_handler()
2525 id->event_handler = id_priv->id.event_handler; in cma_listen_handler()
2527 return id_priv->id.event_handler(id, event); in cma_listen_handler()
2535 struct net *net = id_priv->id.route.addr.dev_addr.net; in cma_listen_on_dev()
2541 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) in cma_listen_on_dev()
2546 id_priv->id.ps, id_priv->id.qp_type, id_priv); in cma_listen_on_dev()
2550 dev_id_priv->state = RDMA_CM_ADDR_BOUND; in cma_listen_on_dev()
2555 rdma_restrack_add(&dev_id_priv->res); in cma_listen_on_dev()
2557 dev_id_priv->internal_id = 1; in cma_listen_on_dev()
2558 dev_id_priv->afonly = id_priv->afonly; in cma_listen_on_dev()
2559 mutex_lock(&id_priv->qp_mutex); in cma_listen_on_dev()
2560 dev_id_priv->tos_set = id_priv->tos_set; in cma_listen_on_dev()
2561 dev_id_priv->tos = id_priv->tos; in cma_listen_on_dev()
2562 mutex_unlock(&id_priv->qp_mutex); in cma_listen_on_dev()
2564 ret = rdma_listen(&dev_id_priv->id, id_priv->backlog); in cma_listen_on_dev()
2567 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); in cma_listen_on_dev()
2572 dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret); in cma_listen_on_dev()
2583 list_add_tail(&id_priv->list, &listen_any_list); in cma_listen_on_all()
2589 list_del_init(&to_destroy->list); in cma_listen_on_all()
2600 rdma_destroy_id(&to_destroy->id); in cma_listen_on_all()
2609 mutex_lock(&id_priv->qp_mutex); in rdma_set_service_type()
2610 id_priv->tos = (u8) tos; in rdma_set_service_type()
2611 id_priv->tos_set = true; in rdma_set_service_type()
2612 mutex_unlock(&id_priv->qp_mutex); in rdma_set_service_type()
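A one-line usage sketch; the ToS must be set before rdma_resolve_route(), since it feeds qos_class/traffic_class in the path record (see cma_query_ib_route() and cma_resolve_iboe_route() below):

	rdma_set_service_type(id, 0x10);	/* ULP-chosen IP ToS value */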
2617 * rdma_set_ack_timeout() - Set the ack timeout of QP associated
2635 if (id->qp_type != IB_QPT_RC) in rdma_set_ack_timeout()
2636 return -EINVAL; in rdma_set_ack_timeout()
2639 mutex_lock(&id_priv->qp_mutex); in rdma_set_ack_timeout()
2640 id_priv->timeout = timeout; in rdma_set_ack_timeout()
2641 id_priv->timeout_set = true; in rdma_set_ack_timeout()
2642 mutex_unlock(&id_priv->qp_mutex); in rdma_set_ack_timeout()
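Usage sketch; per the kernel-doc, timeout is the IBTA 5-bit exponent, so the local ACK timeout is 4.096 us * 2^timeout (e.g. 14 is roughly 67 ms), and per the check above the call is only valid for RC QPs:

	ret = rdma_set_ack_timeout(id, 14);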
2649 * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
2652 * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK
2670 /* It is a five-bit value */ in rdma_set_min_rnr_timer()
2672 return -EINVAL; in rdma_set_min_rnr_timer()
2674 if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT)) in rdma_set_min_rnr_timer()
2675 return -EINVAL; in rdma_set_min_rnr_timer()
2678 mutex_lock(&id_priv->qp_mutex); in rdma_set_min_rnr_timer()
2679 id_priv->min_rnr_timer = min_rnr_timer; in rdma_set_min_rnr_timer()
2680 id_priv->min_rnr_timer_set = true; in rdma_set_min_rnr_timer()
2681 mutex_unlock(&id_priv->qp_mutex); in rdma_set_min_rnr_timer()
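Usage sketch; min_rnr_timer is the raw 5-bit IBTA Table 45 encoding rather than a time value (0 encodes 655.36 ms in that table), and per the WARN_ON above only RC and XRC target QPs accept it:

	ret = rdma_set_min_rnr_timer(id, 0);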
2693 route = &work->id->id.route; in cma_query_handler()
2696 route->num_paths = 1; in cma_query_handler()
2697 *route->path_rec = *path_rec; in cma_query_handler()
2699 work->old_state = RDMA_CM_ROUTE_QUERY; in cma_query_handler()
2700 work->new_state = RDMA_CM_ADDR_RESOLVED; in cma_query_handler()
2701 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; in cma_query_handler()
2702 work->event.status = status; in cma_query_handler()
2707 queue_work(cma_wq, &work->work); in cma_query_handler()
2713 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_query_ib_route()
2721 if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num)) in cma_query_ib_route()
2730 path_rec.service_id = rdma_get_service_id(&id_priv->id, in cma_query_ib_route()
2739 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); in cma_query_ib_route()
2744 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20); in cma_query_ib_route()
2749 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20); in cma_query_ib_route()
2754 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, in cma_query_ib_route()
2755 id_priv->id.port_num, &path_rec, in cma_query_ib_route()
2758 work, &id_priv->query); in cma_query_ib_route()
2760 return (id_priv->query_id < 0) ? id_priv->query_id : 0; in cma_query_ib_route()
2767 struct rdma_cm_event *event = &mc->iboe_join.event; in cma_iboe_join_work_handler()
2768 struct rdma_id_private *id_priv = mc->id_priv; in cma_iboe_join_work_handler()
2771 mutex_lock(&id_priv->handler_mutex); in cma_iboe_join_work_handler()
2772 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || in cma_iboe_join_work_handler()
2773 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) in cma_iboe_join_work_handler()
2780 mutex_unlock(&id_priv->handler_mutex); in cma_iboe_join_work_handler()
2781 if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN) in cma_iboe_join_work_handler()
2782 rdma_destroy_ah_attr(&event->param.ud.ah_attr); in cma_iboe_join_work_handler()
2788 struct rdma_id_private *id_priv = work->id; in cma_work_handler()
2790 mutex_lock(&id_priv->handler_mutex); in cma_work_handler()
2791 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING || in cma_work_handler()
2792 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL) in cma_work_handler()
2794 if (work->old_state != 0 || work->new_state != 0) { in cma_work_handler()
2795 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) in cma_work_handler()
2799 if (cma_cm_event_handler(id_priv, &work->event)) { in cma_work_handler()
2806 mutex_unlock(&id_priv->handler_mutex); in cma_work_handler()
2809 if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN) in cma_work_handler()
2810 rdma_destroy_ah_attr(&work->event.param.ud.ah_attr); in cma_work_handler()
2817 work->id = id_priv; in cma_init_resolve_route_work()
2818 INIT_WORK(&work->work, cma_work_handler); in cma_init_resolve_route_work()
2819 work->old_state = RDMA_CM_ROUTE_QUERY; in cma_init_resolve_route_work()
2820 work->new_state = RDMA_CM_ROUTE_RESOLVED; in cma_init_resolve_route_work()
2821 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; in cma_init_resolve_route_work()
2830 work->id = id_priv; in enqueue_resolve_addr_work()
2831 INIT_WORK(&work->work, cma_work_handler); in enqueue_resolve_addr_work()
2832 work->old_state = RDMA_CM_ADDR_QUERY; in enqueue_resolve_addr_work()
2833 work->new_state = RDMA_CM_ADDR_RESOLVED; in enqueue_resolve_addr_work()
2834 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; in enqueue_resolve_addr_work()
2836 queue_work(cma_wq, &work->work); in enqueue_resolve_addr_work()
2842 struct rdma_route *route = &id_priv->id.route; in cma_resolve_ib_route()
2848 return -ENOMEM; in cma_resolve_ib_route()
2852 if (!route->path_rec) in cma_resolve_ib_route()
2853 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); in cma_resolve_ib_route()
2854 if (!route->path_rec) { in cma_resolve_ib_route()
2855 ret = -ENOMEM; in cma_resolve_ib_route()
2865 kfree(route->path_rec); in cma_resolve_ib_route()
2866 route->path_rec = NULL; in cma_resolve_ib_route()
2894 struct rdma_route *route = &id_priv->id.route; in cma_iboe_set_path_rec_l2_fields()
2896 struct rdma_addr *addr = &route->addr; in cma_iboe_set_path_rec_l2_fields() local
2900 if (!addr->dev_addr.bound_dev_if) in cma_iboe_set_path_rec_l2_fields()
2903 ndev = dev_get_by_index(addr->dev_addr.net, in cma_iboe_set_path_rec_l2_fields()
2904 addr->dev_addr.bound_dev_if); in cma_iboe_set_path_rec_l2_fields()
2908 supported_gids = roce_gid_type_mask_support(id_priv->id.device, in cma_iboe_set_path_rec_l2_fields()
2909 id_priv->id.port_num); in cma_iboe_set_path_rec_l2_fields()
2910 gid_type = cma_route_gid_type(addr->dev_addr.network, in cma_iboe_set_path_rec_l2_fields()
2912 id_priv->gid_type); in cma_iboe_set_path_rec_l2_fields()
2914 if (gid_type < ib_network_to_gid_type(addr->dev_addr.network)) in cma_iboe_set_path_rec_l2_fields()
2915 gid_type = ib_network_to_gid_type(addr->dev_addr.network); in cma_iboe_set_path_rec_l2_fields()
2916 route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type); in cma_iboe_set_path_rec_l2_fields()
2918 route->path_rec->roce.route_resolved = true; in cma_iboe_set_path_rec_l2_fields()
2919 sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr); in cma_iboe_set_path_rec_l2_fields()
2933 return -EINVAL; in rdma_set_ib_path()
2935 id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec), in rdma_set_ib_path()
2937 if (!id->route.path_rec) { in rdma_set_ib_path()
2938 ret = -ENOMEM; in rdma_set_ib_path()
2942 if (rdma_protocol_roce(id->device, id->port_num)) { in rdma_set_ib_path()
2945 ret = -ENODEV; in rdma_set_ib_path()
2951 id->route.num_paths = 1; in rdma_set_ib_path()
2955 kfree(id->route.path_rec); in rdma_set_ib_path()
2956 id->route.path_rec = NULL; in rdma_set_ib_path()
2969 return -ENOMEM; in cma_resolve_iw_route()
2972 queue_work(cma_wq, &work->work); in cma_resolve_iw_route()
2981 if (dev->num_tc) in get_vlan_ndev_tc()
2997 struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data; in get_lower_vlan_dev_tc()
3000 map->output_tc = get_vlan_ndev_tc(dev, map->input_prio); in get_lower_vlan_dev_tc()
3001 else if (dev->num_tc) in get_lower_vlan_dev_tc()
3002 map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio); in get_lower_vlan_dev_tc()
3004 map->output_tc = 0; in get_lower_vlan_dev_tc()
3005 /* We are interested only in the first-level VLAN device, so always in get_lower_vlan_dev_tc()
3008 map->found = true; in get_lower_vlan_dev_tc()
3018 /* If VLAN device, get it directly from the VLAN netdev */ in iboe_tos_to_sl()
3029 /* If a map is found from the lower device, use it; otherwise in iboe_tos_to_sl()
3034 else if (ndev->num_tc) in iboe_tos_to_sl()
3047 fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK; in cma_get_roce_udp_flow_label()
3060 struct rdma_route *route = &id_priv->id.route; in cma_resolve_iboe_route()
3061 struct rdma_addr *addr = &route->addr; in cma_resolve_iboe_route() local
3066 u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num - in cma_resolve_iboe_route()
3067 rdma_start_port(id_priv->cma_dev->device)]; in cma_resolve_iboe_route()
3070 mutex_lock(&id_priv->qp_mutex); in cma_resolve_iboe_route()
3071 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos; in cma_resolve_iboe_route()
3072 mutex_unlock(&id_priv->qp_mutex); in cma_resolve_iboe_route()
3076 return -ENOMEM; in cma_resolve_iboe_route()
3078 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); in cma_resolve_iboe_route()
3079 if (!route->path_rec) { in cma_resolve_iboe_route()
3080 ret = -ENOMEM; in cma_resolve_iboe_route()
3084 route->num_paths = 1; in cma_resolve_iboe_route()
3088 ret = -ENODEV; in cma_resolve_iboe_route()
3092 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_resolve_iboe_route()
3093 &route->path_rec->sgid); in cma_resolve_iboe_route()
3094 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, in cma_resolve_iboe_route()
3095 &route->path_rec->dgid); in cma_resolve_iboe_route()
3097 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) in cma_resolve_iboe_route()
3098 /* TODO: get the hoplimit from the inet/inet6 device */ in cma_resolve_iboe_route()
3099 route->path_rec->hop_limit = addr->dev_addr.hoplimit; in cma_resolve_iboe_route()
3101 route->path_rec->hop_limit = 1; in cma_resolve_iboe_route()
3102 route->path_rec->reversible = 1; in cma_resolve_iboe_route()
3103 route->path_rec->pkey = cpu_to_be16(0xffff); in cma_resolve_iboe_route()
3104 route->path_rec->mtu_selector = IB_SA_EQ; in cma_resolve_iboe_route()
3105 route->path_rec->sl = iboe_tos_to_sl(ndev, tos); in cma_resolve_iboe_route()
3106 route->path_rec->traffic_class = tos; in cma_resolve_iboe_route()
3107 route->path_rec->mtu = iboe_get_mtu(ndev->mtu); in cma_resolve_iboe_route()
3108 route->path_rec->rate_selector = IB_SA_EQ; in cma_resolve_iboe_route()
3109 route->path_rec->rate = iboe_get_rate(ndev); in cma_resolve_iboe_route()
3111 route->path_rec->packet_life_time_selector = IB_SA_EQ; in cma_resolve_iboe_route()
3119 mutex_lock(&id_priv->qp_mutex); in cma_resolve_iboe_route()
3120 if (id_priv->timeout_set && id_priv->timeout) in cma_resolve_iboe_route()
3121 route->path_rec->packet_life_time = id_priv->timeout - 1; in cma_resolve_iboe_route()
3123 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; in cma_resolve_iboe_route()
3124 mutex_unlock(&id_priv->qp_mutex); in cma_resolve_iboe_route()
3126 if (!route->path_rec->mtu) { in cma_resolve_iboe_route()
3127 ret = -EINVAL; in cma_resolve_iboe_route()
3131 if (rdma_protocol_roce_udp_encap(id_priv->id.device, in cma_resolve_iboe_route()
3132 id_priv->id.port_num)) in cma_resolve_iboe_route()
3133 route->path_rec->flow_label = in cma_resolve_iboe_route()
3137 queue_work(cma_wq, &work->work); in cma_resolve_iboe_route()
3142 kfree(route->path_rec); in cma_resolve_iboe_route()
3143 route->path_rec = NULL; in cma_resolve_iboe_route()
3144 route->num_paths = 0; in cma_resolve_iboe_route()
3156 return -EINVAL; in rdma_resolve_route()
3160 return -EINVAL; in rdma_resolve_route()
3163 if (rdma_cap_ib_sa(id->device, id->port_num)) in rdma_resolve_route()
3165 else if (rdma_protocol_roce(id->device, id->port_num)) in rdma_resolve_route()
3167 else if (rdma_protocol_iwarp(id->device, id->port_num)) in rdma_resolve_route()
3170 ret = -ENOSYS; in rdma_resolve_route()
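/*
 * Hedged usage sketch (not from this file): a ULP typically calls
 * rdma_resolve_route() from its event handler once address resolution
 * completes, which lands in one of the transport branches above. The
 * 2000 ms timeout is an arbitrary example value; a nonzero return from
 * the handler tells the CM core to destroy the id.
 */
#include <rdma/rdma_cm.h>

static int ulp_addr_resolved_sketch(struct rdma_cm_id *id,
				    struct rdma_cm_event *event)
{
	if (event->event == RDMA_CM_EVENT_ADDR_RESOLVED)
		return rdma_resolve_route(id, 2000);
	return 0;
}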
3183 static void cma_set_loopback(struct sockaddr *addr) in cma_set_loopback() argument
3185 switch (addr->sa_family) { in cma_set_loopback()
3187 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK); in cma_set_loopback()
3190 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr, in cma_set_loopback()
3194 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr, in cma_set_loopback()
3213 !rdma_cap_ib_cm(cur_dev->device, 1)) in cma_bind_loopback()
3219 rdma_for_each_port (cur_dev->device, p) { in cma_bind_loopback()
3220 if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) && in cma_bind_loopback()
3229 ret = -ENODEV; in cma_bind_loopback()
3236 ret = rdma_query_gid(cma_dev->device, p, 0, &gid); in cma_bind_loopback()
3240 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey); in cma_bind_loopback()
3244 id_priv->id.route.addr.dev_addr.dev_type = in cma_bind_loopback()
3245 (rdma_protocol_ib(cma_dev->device, p)) ? in cma_bind_loopback()
3248 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_bind_loopback()
3249 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); in cma_bind_loopback()
3250 id_priv->id.port_num = p; in cma_bind_loopback()
3252 rdma_restrack_add(&id_priv->res); in cma_bind_loopback()
3264 struct sockaddr *addr; in addr_handler() local
3267 mutex_lock(&id_priv->handler_mutex); in addr_handler()
3274 * matching rdma device, the old address can be restored, which helps in addr_handler()
3277 addr = cma_src_addr(id_priv); in addr_handler()
3278 memcpy(&old_addr, addr, rdma_addr_size(addr)); in addr_handler()
3279 memcpy(addr, src_addr, rdma_addr_size(src_addr)); in addr_handler()
3280 if (!status && !id_priv->cma_dev) { in addr_handler()
3283 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n", in addr_handler()
3285 rdma_restrack_add(&id_priv->res); in addr_handler()
3291 memcpy(addr, &old_addr, in addr_handler()
3306 mutex_unlock(&id_priv->handler_mutex); in addr_handler()
3317 return -ENOMEM; in cma_resolve_loopback()
3319 if (!id_priv->cma_dev) { in cma_resolve_loopback()
3325 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_resolve_loopback()
3326 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_resolve_loopback()
3342 return -ENOMEM; in cma_resolve_ib_addr()
3344 if (!id_priv->cma_dev) { in cma_resolve_ib_addr()
3350 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) in cma_resolve_ib_addr()
3351 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); in cma_resolve_ib_addr()
3363 if (!src_addr || !src_addr->sa_family) { in cma_bind_addr()
3364 src_addr = (struct sockaddr *) &id->route.addr.src_addr; in cma_bind_addr()
3365 src_addr->sa_family = dst_addr->sa_family; in cma_bind_addr()
3367 dst_addr->sa_family == AF_INET6) { in cma_bind_addr()
3370 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id; in cma_bind_addr()
3371 if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL) in cma_bind_addr()
3372 id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; in cma_bind_addr()
3373 } else if (dst_addr->sa_family == AF_IB) { in cma_bind_addr()
3374 ((struct sockaddr_ib *) src_addr)->sib_pkey = in cma_bind_addr()
3375 ((struct sockaddr_ib *) dst_addr)->sib_pkey; in cma_bind_addr()
3384 * calls made by ULP, a previously bound ID will not be re-bound and src_addr is
3396 ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr); in resolve_prepare_src()
3401 ret = -EINVAL; in resolve_prepare_src()
3406 if (cma_family(id_priv) != dst_addr->sa_family) { in resolve_prepare_src()
3407 ret = -EINVAL; in resolve_prepare_src()
3433 if (dst_addr->sa_family == AF_IB) { in rdma_resolve_addr()
3447 if (id_priv->used_resolve_ip) in rdma_resolve_addr()
3448 rdma_addr_cancel(&id->route.addr.dev_addr); in rdma_resolve_addr()
3450 id_priv->used_resolve_ip = 1; in rdma_resolve_addr()
3452 &id->route.addr.dev_addr, in rdma_resolve_addr()
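/*
 * Hedged usage sketch for the active side: a ULP creates an id and starts
 * address resolution; rdma_resolve_addr() above binds a source (if none was
 * bound) and kicks off the L2/L3 lookup. The handler, context and timeout
 * are illustrative only.
 */
#include <rdma/rdma_cm.h>
#include <linux/in.h>
#include <linux/err.h>

static int ulp_start_resolve_sketch(struct net *net,
				    rdma_cm_event_handler handler,
				    struct sockaddr_in *dst)
{
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(net, handler, NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

	/* NULL src_addr lets the CM pick a source from the route to dst */
	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *)dst, 2000);
	if (ret)
		rdma_destroy_id(id);
	return ret;
}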
3474 spin_lock_irqsave(&id_priv->lock, flags); in rdma_set_reuseaddr()
3475 if ((reuse && id_priv->state != RDMA_CM_LISTEN) || in rdma_set_reuseaddr()
3476 id_priv->state == RDMA_CM_IDLE) { in rdma_set_reuseaddr()
3477 id_priv->reuseaddr = reuse; in rdma_set_reuseaddr()
3480 ret = -EINVAL; in rdma_set_reuseaddr()
3482 spin_unlock_irqrestore(&id_priv->lock, flags); in rdma_set_reuseaddr()
3494 spin_lock_irqsave(&id_priv->lock, flags); in rdma_set_afonly()
3495 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { in rdma_set_afonly()
3496 id_priv->options |= (1 << CMA_OPTION_AFONLY); in rdma_set_afonly()
3497 id_priv->afonly = afonly; in rdma_set_afonly()
3500 ret = -EINVAL; in rdma_set_afonly()
3502 spin_unlock_irqrestore(&id_priv->lock, flags); in rdma_set_afonly()
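/*
 * Hedged usage sketch: both knobs above only succeed in the states the
 * fragments check for (reuseaddr anytime except LISTEN, afonly only in
 * IDLE/ADDR_BOUND), so a ULP would set them right after rdma_create_id()
 * and before binding or listening.
 */
#include <rdma/rdma_cm.h>

static int ulp_set_opts_sketch(struct rdma_cm_id *id)
{
	int ret;

	ret = rdma_set_reuseaddr(id, 1);	/* must precede rdma_listen() */
	if (ret)
		return ret;
	return rdma_set_afonly(id, 1);		/* IDLE or ADDR_BOUND only */
}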
3510 struct sockaddr *addr; in cma_bind_port() local
3517 addr = cma_src_addr(id_priv); in cma_bind_port()
3518 port = htons(bind_list->port); in cma_bind_port()
3520 switch (addr->sa_family) { in cma_bind_port()
3522 ((struct sockaddr_in *) addr)->sin_port = port; in cma_bind_port()
3525 ((struct sockaddr_in6 *) addr)->sin6_port = port; in cma_bind_port()
3528 sib = (struct sockaddr_ib *) addr; in cma_bind_port()
3529 sid = be64_to_cpu(sib->sib_sid); in cma_bind_port()
3530 mask = be64_to_cpu(sib->sib_sid_mask); in cma_bind_port()
3531 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port)); in cma_bind_port()
3532 sib->sib_sid_mask = cpu_to_be64(~0ULL); in cma_bind_port()
3535 id_priv->bind_list = bind_list; in cma_bind_port()
3536 hlist_add_head(&id_priv->node, &bind_list->owners); in cma_bind_port()
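/*
 * Hedged sketch of the AF_IB case above: cma_bind_port() folds the allocated
 * port number into the low 16 bits of the service ID, keeping only the
 * caller-supplied SID bits selected by sib_sid_mask. Helper name is
 * illustrative.
 */
#include <linux/types.h>

static __be64 sid_with_port_sketch(__be64 sid, __be64 sid_mask, __be16 port)
{
	u64 masked = be64_to_cpu(sid) & be64_to_cpu(sid_mask);

	return cpu_to_be64(masked | (u64)be16_to_cpu(port));
}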
3549 return -ENOMEM; in cma_alloc_port()
3551 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, in cma_alloc_port()
3556 bind_list->ps = ps; in cma_alloc_port()
3557 bind_list->port = snum; in cma_alloc_port()
3562 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret; in cma_alloc_port()
3575 hlist_for_each_entry(cur_id, &bind_list->owners, node) { in cma_port_is_unique()
3583 /* different dest port -> unique */ in cma_port_is_unique()
3589 /* different src address -> unique */ in cma_port_is_unique()
3595 /* different dst address -> unique */ in cma_port_is_unique()
3601 return -EADDRNOTAVAIL; in cma_port_is_unique()
3612 struct net *net = id_priv->id.route.addr.dev_addr.net; in cma_alloc_any_port()
3617 remaining = (high - low) + 1; in cma_alloc_any_port()
3635 * re-using the same port immediately after it is closed. in cma_alloc_any_port()
3639 if (ret != -EADDRNOTAVAIL) in cma_alloc_any_port()
3642 if (--remaining) { in cma_alloc_any_port()
3648 return -EADDRNOTAVAIL; in cma_alloc_any_port()
3661 struct sockaddr *addr, *cur_addr; in cma_check_port() local
3665 addr = cma_src_addr(id_priv); in cma_check_port()
3666 hlist_for_each_entry(cur_id, &bind_list->owners, node) { in cma_check_port()
3670 if (reuseaddr && cur_id->reuseaddr) in cma_check_port()
3674 if (id_priv->afonly && cur_id->afonly && in cma_check_port()
3675 (addr->sa_family != cur_addr->sa_family)) in cma_check_port()
3678 if (cma_any_addr(addr) || cma_any_addr(cur_addr)) in cma_check_port()
3679 return -EADDRNOTAVAIL; in cma_check_port()
3681 if (!cma_addr_cmp(addr, cur_addr)) in cma_check_port()
3682 return -EADDRINUSE; in cma_check_port()
3698 return -EACCES; in cma_use_port()
3700 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); in cma_use_port()
3704 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); in cma_use_port()
3714 switch (id_priv->id.ps) { in cma_select_inet_ps()
3719 return id_priv->id.ps; in cma_select_inet_ps()
3734 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK; in cma_select_ib_ps()
3735 sid = be64_to_cpu(sib->sib_sid) & mask; in cma_select_ib_ps()
3737 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { in cma_select_ib_ps()
3740 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && in cma_select_ib_ps()
3744 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && in cma_select_ib_ps()
3751 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib))); in cma_select_ib_ps()
3752 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK | in cma_select_ib_ps()
3753 be64_to_cpu(sib->sib_sid_mask)); in cma_select_ib_ps()
3768 return -EPROTONOSUPPORT; in cma_get_port()
3781 struct sockaddr *addr) in cma_check_linklocal() argument
3786 if (addr->sa_family != AF_INET6) in cma_check_linklocal()
3789 sin6 = (struct sockaddr_in6 *) addr; in cma_check_linklocal()
3791 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)) in cma_check_linklocal()
3794 if (!sin6->sin6_scope_id) in cma_check_linklocal()
3795 return -EINVAL; in cma_check_linklocal()
3797 dev_addr->bound_dev_if = sin6->sin6_scope_id; in cma_check_linklocal()
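/*
 * Hedged usage sketch: as cma_check_linklocal() above enforces, binding to an
 * IPv6 link-local address requires a nonzero sin6_scope_id, which becomes the
 * bound device index. ifindex here is an illustrative egress interface.
 */
#include <rdma/rdma_cm.h>
#include <linux/in6.h>

static int ulp_bind_linklocal_sketch(struct rdma_cm_id *id,
				     const struct in6_addr *ll_addr,
				     int ifindex)
{
	struct sockaddr_in6 sin6 = {
		.sin6_family = AF_INET6,
		.sin6_addr = *ll_addr,		/* an fe80::/10 address */
		.sin6_scope_id = ifindex,	/* mandatory for link-local */
	};

	return rdma_bind_addr(id, (struct sockaddr *)&sin6);
}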
3820 return -EINVAL; in rdma_listen()
3827 if (id_priv->reuseaddr) { in rdma_listen()
3829 ret = cma_check_port(id_priv->bind_list, id_priv, 0); in rdma_listen()
3831 id_priv->reuseaddr = 0; in rdma_listen()
3837 id_priv->backlog = backlog; in rdma_listen()
3838 if (id_priv->cma_dev) { in rdma_listen()
3839 if (rdma_cap_ib_cm(id->device, 1)) { in rdma_listen()
3843 } else if (rdma_cap_iw_cm(id->device, 1)) { in rdma_listen()
3848 ret = -ENOSYS; in rdma_listen()
3859 id_priv->backlog = 0; in rdma_listen()
3869 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) in rdma_bind_addr() argument
3875 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 && in rdma_bind_addr()
3876 addr->sa_family != AF_IB) in rdma_bind_addr()
3877 return -EAFNOSUPPORT; in rdma_bind_addr()
3881 return -EINVAL; in rdma_bind_addr()
3883 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); in rdma_bind_addr()
3887 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); in rdma_bind_addr()
3888 if (!cma_any_addr(addr)) { in rdma_bind_addr()
3889 ret = cma_translate_addr(addr, &id->route.addr.dev_addr); in rdma_bind_addr()
3898 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { in rdma_bind_addr()
3899 if (addr->sa_family == AF_INET) in rdma_bind_addr()
3900 id_priv->afonly = 1; in rdma_bind_addr()
3902 else if (addr->sa_family == AF_INET6) { in rdma_bind_addr()
3903 struct net *net = id_priv->id.route.addr.dev_addr.net; in rdma_bind_addr()
3905 id_priv->afonly = net->ipv6.sysctl.bindv6only; in rdma_bind_addr()
3910 daddr->sa_family = addr->sa_family; in rdma_bind_addr()
3916 if (!cma_any_addr(addr)) in rdma_bind_addr()
3917 rdma_restrack_add(&id_priv->res); in rdma_bind_addr()
3920 if (id_priv->cma_dev) in rdma_bind_addr()
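/*
 * Hedged usage sketch for the passive side covered by rdma_bind_addr() and
 * rdma_listen() above: bind a wildcard IPv4 address, then listen. The port
 * and backlog are example values; connect requests then arrive as
 * RDMA_CM_EVENT_CONNECT_REQUEST events on new child ids.
 */
#include <rdma/rdma_cm.h>
#include <linux/in.h>

static int ulp_listen_sketch(struct rdma_cm_id *id)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_addr.s_addr = htonl(INADDR_ANY),
		.sin_port = htons(5000),
	};
	int ret;

	ret = rdma_bind_addr(id, (struct sockaddr *)&sin);
	if (ret)
		return ret;
	return rdma_listen(id, 16);
}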
3933 cma_hdr->cma_version = CMA_VERSION; in cma_format_hdr()
3941 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr; in cma_format_hdr()
3942 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr; in cma_format_hdr()
3943 cma_hdr->port = src4->sin_port; in cma_format_hdr()
3951 cma_hdr->src_addr.ip6 = src6->sin6_addr; in cma_format_hdr()
3952 cma_hdr->dst_addr.ip6 = dst6->sin6_addr; in cma_format_hdr()
3953 cma_hdr->port = src6->sin6_port; in cma_format_hdr()
3961 struct rdma_id_private *id_priv = cm_id->context; in cma_sidr_rep_handler()
3964 &ib_event->param.sidr_rep_rcvd; in cma_sidr_rep_handler()
3967 mutex_lock(&id_priv->handler_mutex); in cma_sidr_rep_handler()
3968 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) in cma_sidr_rep_handler()
3971 switch (ib_event->event) { in cma_sidr_rep_handler()
3974 event.status = -ETIMEDOUT; in cma_sidr_rep_handler()
3977 event.param.ud.private_data = ib_event->private_data; in cma_sidr_rep_handler()
3979 if (rep->status != IB_SIDR_SUCCESS) { in cma_sidr_rep_handler()
3981 event.status = ib_event->param.sidr_rep_rcvd.status; in cma_sidr_rep_handler()
3986 ret = cma_set_qkey(id_priv, rep->qkey); in cma_sidr_rep_handler()
3993 ib_init_ah_attr_from_path(id_priv->id.device, id_priv->id.port_num, id_priv->id.route.path_rec, &event.param.ud.ah_attr, rep->sgid_attr); in cma_sidr_rep_handler()
3998 event.param.ud.qp_num = rep->qpn; in cma_sidr_rep_handler()
3999 event.param.ud.qkey = rep->qkey; in cma_sidr_rep_handler()
4005 ib_event->event); in cma_sidr_rep_handler()
4013 /* Destroy the CM ID by returning a non-zero value. */ in cma_sidr_rep_handler()
4014 id_priv->cm_id.ib = NULL; in cma_sidr_rep_handler()
4019 mutex_unlock(&id_priv->handler_mutex); in cma_sidr_rep_handler()
4034 req.private_data_len = offset + conn_param->private_data_len; in cma_resolve_ib_udp()
4035 if (req.private_data_len < conn_param->private_data_len) in cma_resolve_ib_udp()
4036 return -EINVAL; in cma_resolve_ib_udp()
4041 return -ENOMEM; in cma_resolve_ib_udp()
4046 if (conn_param->private_data && conn_param->private_data_len) in cma_resolve_ib_udp()
4047 memcpy(private_data + offset, conn_param->private_data, in cma_resolve_ib_udp()
4048 conn_param->private_data_len); in cma_resolve_ib_udp()
4057 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, in cma_resolve_ib_udp()
4063 id_priv->cm_id.ib = id; in cma_resolve_ib_udp()
4065 req.path = id_priv->id.route.path_rec; in cma_resolve_ib_udp()
4066 req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; in cma_resolve_ib_udp()
4067 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); in cma_resolve_ib_udp()
4068 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8); in cma_resolve_ib_udp()
4072 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); in cma_resolve_ib_udp()
4074 ib_destroy_cm_id(id_priv->cm_id.ib); in cma_resolve_ib_udp()
4075 id_priv->cm_id.ib = NULL; in cma_resolve_ib_udp()
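/*
 * Hedged sketch of the timeout conversion above: IB CM response timeouts are
 * encoded as 4.096us << t, and 2^-8 ms (~3.9us) is close enough to 4.096us
 * that shifting by (t - 8) approximates milliseconds. Assuming this file's
 * CMA_CM_RESPONSE_TIMEOUT of 20, 1 << (20 - 8) == 4096 ms.
 */
#include <linux/types.h>

static unsigned long cm_timeout_enum_to_ms_sketch(u8 t)
{
	return t > 8 ? 1UL << (t - 8) : 1;
}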
4094 req.private_data_len = offset + conn_param->private_data_len; in cma_connect_ib()
4095 if (req.private_data_len < conn_param->private_data_len) in cma_connect_ib()
4096 return -EINVAL; in cma_connect_ib()
4101 return -ENOMEM; in cma_connect_ib()
4106 if (conn_param->private_data && conn_param->private_data_len) in cma_connect_ib()
4107 memcpy(private_data + offset, conn_param->private_data, in cma_connect_ib()
4108 conn_param->private_data_len); in cma_connect_ib()
4110 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); in cma_connect_ib()
4115 id_priv->cm_id.ib = id; in cma_connect_ib()
4117 route = &id_priv->id.route; in cma_connect_ib()
4125 req.primary_path = &route->path_rec[0]; in cma_connect_ib()
4126 if (route->num_paths == 2) in cma_connect_ib()
4127 req.alternate_path = &route->path_rec[1]; in cma_connect_ib()
4129 req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; in cma_connect_ib()
4131 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); in cma_connect_ib()
4132 req.qp_num = id_priv->qp_num; in cma_connect_ib()
4133 req.qp_type = id_priv->id.qp_type; in cma_connect_ib()
4134 req.starting_psn = id_priv->seq_num; in cma_connect_ib()
4135 req.responder_resources = conn_param->responder_resources; in cma_connect_ib()
4136 req.initiator_depth = conn_param->initiator_depth; in cma_connect_ib()
4137 req.flow_control = conn_param->flow_control; in cma_connect_ib()
4138 req.retry_count = min_t(u8, 7, conn_param->retry_count); in cma_connect_ib()
4139 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); in cma_connect_ib()
4143 req.srq = id_priv->srq ? 1 : 0; in cma_connect_ib()
4144 req.ece.vendor_id = id_priv->ece.vendor_id; in cma_connect_ib()
4145 req.ece.attr_mod = id_priv->ece.attr_mod; in cma_connect_ib()
4148 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); in cma_connect_ib()
4152 id_priv->cm_id.ib = NULL; in cma_connect_ib()
4166 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); in cma_connect_iw()
4170 mutex_lock(&id_priv->qp_mutex); in cma_connect_iw()
4171 cm_id->tos = id_priv->tos; in cma_connect_iw()
4172 cm_id->tos_set = id_priv->tos_set; in cma_connect_iw()
4173 mutex_unlock(&id_priv->qp_mutex); in cma_connect_iw()
4175 id_priv->cm_id.iw = cm_id; in cma_connect_iw()
4177 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), in cma_connect_iw()
4179 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), in cma_connect_iw()
4187 iw_param.ord = conn_param->initiator_depth; in cma_connect_iw()
4188 iw_param.ird = conn_param->responder_resources; in cma_connect_iw()
4189 iw_param.private_data = conn_param->private_data; in cma_connect_iw()
4190 iw_param.private_data_len = conn_param->private_data_len; in cma_connect_iw()
4191 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; in cma_connect_iw()
4194 iw_param.qpn = id_priv->qp_num; in cma_connect_iw()
4200 id_priv->cm_id.iw = NULL; in cma_connect_iw()
4206 * rdma_connect_locked - Initiate an active connection request.
4221 return -EINVAL; in rdma_connect_locked()
4223 if (!id->qp) { in rdma_connect_locked()
4224 id_priv->qp_num = conn_param->qp_num; in rdma_connect_locked()
4225 id_priv->srq = conn_param->srq; in rdma_connect_locked()
4228 if (rdma_cap_ib_cm(id->device, id->port_num)) { in rdma_connect_locked()
4229 if (id->qp_type == IB_QPT_UD) in rdma_connect_locked()
4233 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { in rdma_connect_locked()
4236 ret = -ENOSYS; in rdma_connect_locked()
4248 * rdma_connect - Initiate an active connection request.
4265 mutex_lock(&id_priv->handler_mutex); in rdma_connect()
4267 mutex_unlock(&id_priv->handler_mutex); in rdma_connect()
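/*
 * Hedged usage sketch: once the route is resolved (and a QP exists), the ULP
 * fills struct rdma_conn_param and calls rdma_connect(). The values here are
 * illustrative; retry counts above 7 are clamped by cma_connect_ib() anyway,
 * and private data is optional.
 */
#include <rdma/rdma_cm.h>

static int ulp_connect_sketch(struct rdma_cm_id *id)
{
	struct rdma_conn_param param = {
		.responder_resources = 1,
		.initiator_depth = 1,
		.retry_count = 7,
		.rnr_retry_count = 7,
	};

	return rdma_connect(id, &param);
}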
4273 * rdma_connect_ece - Initiate an active connection request with ECE data.
4286 id_priv->ece.vendor_id = ece->vendor_id; in rdma_connect_ece()
4287 id_priv->ece.attr_mod = ece->attr_mod; in rdma_connect_ece()
4308 rep.qp_num = id_priv->qp_num; in cma_accept_ib()
4309 rep.starting_psn = id_priv->seq_num; in cma_accept_ib()
4310 rep.private_data = conn_param->private_data; in cma_accept_ib()
4311 rep.private_data_len = conn_param->private_data_len; in cma_accept_ib()
4312 rep.responder_resources = conn_param->responder_resources; in cma_accept_ib()
4313 rep.initiator_depth = conn_param->initiator_depth; in cma_accept_ib()
4315 rep.flow_control = conn_param->flow_control; in cma_accept_ib()
4316 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count); in cma_accept_ib()
4317 rep.srq = id_priv->srq ? 1 : 0; in cma_accept_ib()
4318 rep.ece.vendor_id = id_priv->ece.vendor_id; in cma_accept_ib()
4319 rep.ece.attr_mod = id_priv->ece.attr_mod; in cma_accept_ib()
4322 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); in cma_accept_ib()
4334 return -EINVAL; in cma_accept_iw()
4340 iw_param.ord = conn_param->initiator_depth; in cma_accept_iw()
4341 iw_param.ird = conn_param->responder_resources; in cma_accept_iw()
4342 iw_param.private_data = conn_param->private_data; in cma_accept_iw()
4343 iw_param.private_data_len = conn_param->private_data_len; in cma_accept_iw()
4344 if (id_priv->id.qp) in cma_accept_iw()
4345 iw_param.qpn = id_priv->qp_num; in cma_accept_iw()
4347 iw_param.qpn = conn_param->qp_num; in cma_accept_iw()
4349 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); in cma_accept_iw()
4365 rep.qp_num = id_priv->qp_num; in cma_send_sidr_rep()
4366 rep.qkey = id_priv->qkey; in cma_send_sidr_rep()
4368 rep.ece.vendor_id = id_priv->ece.vendor_id; in cma_send_sidr_rep()
4369 rep.ece.attr_mod = id_priv->ece.attr_mod; in cma_send_sidr_rep()
4376 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); in cma_send_sidr_rep()
4380 * rdma_accept - Called to accept a connection request or response.
4403 lockdep_assert_held(&id_priv->handler_mutex); in rdma_accept()
4405 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT) in rdma_accept()
4406 return -EINVAL; in rdma_accept()
4408 if (!id->qp && conn_param) { in rdma_accept()
4409 id_priv->qp_num = conn_param->qp_num; in rdma_accept()
4410 id_priv->srq = conn_param->srq; in rdma_accept()
4413 if (rdma_cap_ib_cm(id->device, id->port_num)) { in rdma_accept()
4414 if (id->qp_type == IB_QPT_UD) { in rdma_accept()
4417 conn_param->qkey, in rdma_accept()
4418 conn_param->private_data, in rdma_accept()
4419 conn_param->private_data_len); in rdma_accept()
4429 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { in rdma_accept()
4432 ret = -ENOSYS; in rdma_accept()
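/*
 * Hedged usage sketch: per the lockdep assertion above, rdma_accept() must
 * run under the handler_mutex, which the CM core already holds while
 * delivering RDMA_CM_EVENT_CONNECT_REQUEST, so a kernel server typically
 * accepts directly from its event handler on the new child id. QP setup is
 * omitted here; a nonzero handler return destroys the child id.
 */
#include <rdma/rdma_cm.h>

static int ulp_server_handler_sketch(struct rdma_cm_id *id,
				     struct rdma_cm_event *event)
{
	struct rdma_conn_param param = {
		.responder_resources = 1,
		.initiator_depth = 1,
	};

	if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST)
		return rdma_accept(id, &param);
	return 0;
}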
4451 id_priv->ece.vendor_id = ece->vendor_id; in rdma_accept_ece()
4452 id_priv->ece.attr_mod = ece->attr_mod; in rdma_accept_ece()
4463 mutex_lock(&id_priv->handler_mutex); in rdma_lock_handler()
4472 mutex_unlock(&id_priv->handler_mutex); in rdma_unlock_handler()
4482 if (!id_priv->cm_id.ib) in rdma_notify()
4483 return -EINVAL; in rdma_notify()
4485 switch (id->device->node_type) { in rdma_notify()
4487 ret = ib_cm_notify(id_priv->cm_id.ib, event); in rdma_notify()
4504 if (!id_priv->cm_id.ib) in rdma_reject()
4505 return -EINVAL; in rdma_reject()
4507 if (rdma_cap_ib_cm(id->device, id->port_num)) { in rdma_reject()
4508 if (id->qp_type == IB_QPT_UD) { in rdma_reject()
4513 ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0, in rdma_reject()
4516 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { in rdma_reject()
4517 ret = iw_cm_reject(id_priv->cm_id.iw, in rdma_reject()
4520 ret = -ENOSYS; in rdma_reject()
4533 if (!id_priv->cm_id.ib) in rdma_disconnect()
4534 return -EINVAL; in rdma_disconnect()
4536 if (rdma_cap_ib_cm(id->device, id->port_num)) { in rdma_disconnect()
4542 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) { in rdma_disconnect()
4543 if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0)) in rdma_disconnect()
4548 } else if (rdma_cap_iw_cm(id->device, id->port_num)) { in rdma_disconnect()
4549 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); in rdma_disconnect()
4551 ret = -EINVAL; in rdma_disconnect()
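/*
 * Hedged usage sketch for the teardown paths above: a consumer reject can
 * carry private data plus a reason code on IB (mapped to -ECONNREFUSED on
 * iWarp), while rdma_disconnect() maps to DREQ/DREP on IB or
 * iw_cm_disconnect() on iWarp. Names and data are illustrative.
 */
#include <rdma/rdma_cm.h>
#include <rdma/ib_cm.h>		/* IB_CM_REJ_CONSUMER_DEFINED */

static int ulp_reject_sketch(struct rdma_cm_id *id)
{
	static const char reason_data[] = "busy";

	return rdma_reject(id, reason_data, sizeof(reason_data),
			   IB_CM_REJ_CONSUMER_DEFINED);
}

static void ulp_teardown_sketch(struct rdma_cm_id *id)
{
	rdma_disconnect(id);	/* ignore error: peer may already be gone */
	rdma_destroy_id(id);
}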
4568 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); in cma_make_mc_event()
4573 event->status = status; in cma_make_mc_event()
4574 event->param.ud.private_data = mc->context; in cma_make_mc_event()
4576 event->event = RDMA_CM_EVENT_MULTICAST_ERROR; in cma_make_mc_event()
4580 dev_addr = &id_priv->id.route.addr.dev_addr; in cma_make_mc_event()
4581 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); in cma_make_mc_event()
4583 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - rdma_start_port(id_priv->cma_dev->device)]; in cma_make_mc_event()
4588 event->event = RDMA_CM_EVENT_MULTICAST_JOIN; in cma_make_mc_event()
4589 if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num, in cma_make_mc_event()
4590 &multicast->rec, ndev, gid_type, in cma_make_mc_event()
4591 &event->param.ud.ah_attr)) { in cma_make_mc_event()
4592 event->event = RDMA_CM_EVENT_MULTICAST_ERROR; in cma_make_mc_event()
4596 event->param.ud.qp_num = 0xFFFFFF; in cma_make_mc_event()
4597 event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey); in cma_make_mc_event()
4606 struct cma_multicast *mc = multicast->context; in cma_ib_mc_handler()
4607 struct rdma_id_private *id_priv = mc->id_priv; in cma_ib_mc_handler()
4611 mutex_lock(&id_priv->handler_mutex); in cma_ib_mc_handler()
4612 if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL || in cma_ib_mc_handler()
4613 READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING) in cma_ib_mc_handler()
4622 mutex_unlock(&id_priv->handler_mutex); in cma_ib_mc_handler()
4627 struct sockaddr *addr, union ib_gid *mgid) in cma_set_mgid() argument
4630 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_set_mgid()
4631 struct sockaddr_in *sin = (struct sockaddr_in *) addr; in cma_set_mgid()
4632 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr; in cma_set_mgid()
4634 if (cma_any_addr(addr)) { in cma_set_mgid()
4636 } else if ((addr->sa_family == AF_INET6) && in cma_set_mgid()
4637 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) == in cma_set_mgid()
4640 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); in cma_set_mgid()
4641 } else if (addr->sa_family == AF_IB) { in cma_set_mgid()
4642 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid); in cma_set_mgid()
4643 } else if (addr->sa_family == AF_INET6) { in cma_set_mgid()
4644 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map); in cma_set_mgid()
4645 if (id_priv->id.ps == RDMA_PS_UDP) in cma_set_mgid()
4649 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map); in cma_set_mgid()
4650 if (id_priv->id.ps == RDMA_PS_UDP) in cma_set_mgid()
4660 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_join_ib_multicast()
4665 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, in cma_join_ib_multicast()
4674 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); in cma_join_ib_multicast()
4675 rec.qkey = cpu_to_be32(id_priv->qkey); in cma_join_ib_multicast()
4678 rec.join_state = mc->join_state; in cma_join_ib_multicast()
4686 if (id_priv->id.ps == RDMA_PS_IPOIB) in cma_join_ib_multicast()
4693 mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device, in cma_join_ib_multicast()
4694 id_priv->id.port_num, &rec, comp_mask, in cma_join_ib_multicast()
4696 return PTR_ERR_OR_ZERO(mc->sa_mc); in cma_join_ib_multicast()
4699 static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, in cma_iboe_set_mgid() argument
4702 struct sockaddr_in *sin = (struct sockaddr_in *)addr; in cma_iboe_set_mgid()
4703 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr; in cma_iboe_set_mgid()
4705 if (cma_any_addr(addr)) { in cma_iboe_set_mgid()
4707 } else if (addr->sa_family == AF_INET6) { in cma_iboe_set_mgid()
4708 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid); in cma_iboe_set_mgid()
4710 mgid->raw[0] = in cma_iboe_set_mgid()
4712 mgid->raw[1] = in cma_iboe_set_mgid()
4714 mgid->raw[2] = 0; in cma_iboe_set_mgid()
4715 mgid->raw[3] = 0; in cma_iboe_set_mgid()
4716 mgid->raw[4] = 0; in cma_iboe_set_mgid()
4717 mgid->raw[5] = 0; in cma_iboe_set_mgid()
4718 mgid->raw[6] = 0; in cma_iboe_set_mgid()
4719 mgid->raw[7] = 0; in cma_iboe_set_mgid()
4720 mgid->raw[8] = 0; in cma_iboe_set_mgid()
4721 mgid->raw[9] = 0; in cma_iboe_set_mgid()
4722 mgid->raw[10] = 0xff; in cma_iboe_set_mgid()
4723 mgid->raw[11] = 0xff; in cma_iboe_set_mgid()
4724 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr; in cma_iboe_set_mgid()
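/*
 * Hedged sketch of the IPv4 branch above: for RoCE v2 the multicast IP
 * becomes an IPv4-mapped GID (::ffff:a.b.c.d), while RoCE v1 uses the
 * ff0e:: multicast prefix in the first two raw bytes. Illustrative helper,
 * not the in-tree one (which also handles wildcard and IPv6 addresses).
 */
#include <rdma/ib_verbs.h>
#include <linux/string.h>

static void ipv4_mc_mgid_sketch(__be32 ip4, bool roce_v2, union ib_gid *mgid)
{
	memset(mgid, 0, sizeof(*mgid));
	mgid->raw[0] = roce_v2 ? 0x00 : 0xff;
	mgid->raw[1] = roce_v2 ? 0x00 : 0x0e;
	mgid->raw[10] = 0xff;
	mgid->raw[11] = 0xff;
	memcpy(&mgid->raw[12], &ip4, sizeof(ip4));
}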
4731 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_iboe_join_multicast()
4733 struct sockaddr *addr = (struct sockaddr *)&mc->addr; in cma_iboe_join_multicast() local
4739 send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN); in cma_iboe_join_multicast()
4741 if (cma_zero_addr(addr)) in cma_iboe_join_multicast()
4742 return -EINVAL; in cma_iboe_join_multicast()
4744 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - in cma_iboe_join_multicast()
4745 rdma_start_port(id_priv->cma_dev->device)]; in cma_iboe_join_multicast()
4746 cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type); in cma_iboe_join_multicast()
4749 if (id_priv->id.ps == RDMA_PS_UDP) in cma_iboe_join_multicast()
4752 if (dev_addr->bound_dev_if) in cma_iboe_join_multicast()
4753 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); in cma_iboe_join_multicast()
4755 return -ENODEV; in cma_iboe_join_multicast()
4759 ib.rec.mtu = iboe_get_mtu(ndev->mtu); in cma_iboe_join_multicast()
4761 if (addr->sa_family == AF_INET) { in cma_iboe_join_multicast()
4771 err = -ENOTSUPP; in cma_iboe_join_multicast()
4775 return err ?: -EINVAL; in cma_iboe_join_multicast()
4777 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_iboe_join_multicast()
4779 INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler); in cma_iboe_join_multicast()
4780 cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc); in cma_iboe_join_multicast()
4781 queue_work(cma_wq, &mc->iboe_join.work); in cma_iboe_join_multicast()
4785 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, in rdma_join_multicast() argument
4794 if (WARN_ON(id->qp)) in rdma_join_multicast()
4795 return -EINVAL; in rdma_join_multicast()
4798 if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND && in rdma_join_multicast()
4799 READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED)) in rdma_join_multicast()
4800 return -EINVAL; in rdma_join_multicast()
4804 return -ENOMEM; in rdma_join_multicast()
4806 memcpy(&mc->addr, addr, rdma_addr_size(addr)); in rdma_join_multicast()
4807 mc->context = context; in rdma_join_multicast()
4808 mc->id_priv = id_priv; in rdma_join_multicast()
4809 mc->join_state = join_state; in rdma_join_multicast()
4811 if (rdma_protocol_roce(id->device, id->port_num)) { in rdma_join_multicast()
4815 } else if (rdma_cap_ib_mcast(id->device, id->port_num)) { in rdma_join_multicast()
4820 ret = -ENOSYS; in rdma_join_multicast()
4824 spin_lock(&id_priv->lock); in rdma_join_multicast()
4825 list_add(&mc->list, &id_priv->mc_list); in rdma_join_multicast()
4826 spin_unlock(&id_priv->lock); in rdma_join_multicast()
4835 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) in rdma_leave_multicast() argument
4841 spin_lock_irq(&id_priv->lock); in rdma_leave_multicast()
4842 list_for_each_entry(mc, &id_priv->mc_list, list) { in rdma_leave_multicast()
4843 if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0) in rdma_leave_multicast()
4845 list_del(&mc->list); in rdma_leave_multicast()
4846 spin_unlock_irq(&id_priv->lock); in rdma_leave_multicast()
4848 WARN_ON(id_priv->cma_dev->device != id->device); in rdma_leave_multicast()
4852 spin_unlock_irq(&id_priv->lock); in rdma_leave_multicast()
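/*
 * Hedged usage sketch: a UD consumer joins with a full-member join state and
 * later leaves with the same sockaddr; rdma_leave_multicast() above matches
 * the entry by a rdma_addr_size() comparison. In real use the leave would
 * happen only after RDMA_CM_EVENT_MULTICAST_JOIN and the send traffic; the
 * back-to-back calls here just show the pairing.
 */
#include <rdma/rdma_cm.h>
#include <rdma/ib_sa.h>		/* FULLMEMBER_JOIN */
#include <linux/bits.h>

static int ulp_mcast_sketch(struct rdma_cm_id *id, struct sockaddr *maddr,
			    void *ctx)
{
	int ret;

	ret = rdma_join_multicast(id, maddr, BIT(FULLMEMBER_JOIN), ctx);
	if (ret)
		return ret;
	rdma_leave_multicast(id, maddr);
	return 0;
}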
4861 dev_addr = &id_priv->id.route.addr.dev_addr; in cma_netdev_change()
4863 if ((dev_addr->bound_dev_if == ndev->ifindex) && in cma_netdev_change()
4864 (net_eq(dev_net(ndev), dev_addr->net)) && in cma_netdev_change()
4865 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) { in cma_netdev_change()
4866 pr_info("RDMA CM addr change for ndev %s used by id %p\n", in cma_netdev_change()
4867 ndev->name, &id_priv->id); in cma_netdev_change()
4870 return -ENOMEM; in cma_netdev_change()
4872 INIT_WORK(&work->work, cma_work_handler); in cma_netdev_change()
4873 work->id = id_priv; in cma_netdev_change()
4874 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE; in cma_netdev_change()
4876 queue_work(cma_wq, &work->work); in cma_netdev_change()
4898 list_for_each_entry(id_priv, &cma_dev->id_list, list) { in cma_netdev_callback()
4919 mutex_lock(&id_priv->handler_mutex); in cma_send_device_removal_put()
4920 /* Record that we want to remove the device */ in cma_send_device_removal_put()
4921 spin_lock_irqsave(&id_priv->lock, flags); in cma_send_device_removal_put()
4922 state = id_priv->state; in cma_send_device_removal_put()
4924 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_send_device_removal_put()
4925 mutex_unlock(&id_priv->handler_mutex); in cma_send_device_removal_put()
4929 id_priv->state = RDMA_CM_DEVICE_REMOVAL; in cma_send_device_removal_put()
4930 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_send_device_removal_put()
4938 mutex_unlock(&id_priv->handler_mutex); in cma_send_device_removal_put()
4943 mutex_unlock(&id_priv->handler_mutex); in cma_send_device_removal_put()
4956 while (!list_empty(&cma_dev->id_list)) { in cma_process_remove()
4958 &cma_dev->id_list, struct rdma_id_private, list); in cma_process_remove()
4960 list_del(&id_priv->listen_list); in cma_process_remove()
4961 list_del_init(&id_priv->list); in cma_process_remove()
4972 wait_for_completion(&cma_dev->comp); in cma_process_remove()
4975 static bool cma_supported(struct ib_device *device) in cma_supported() argument
4979 rdma_for_each_port(device, i) { in cma_supported()
4980 if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i)) in cma_supported()
4986 static int cma_add_one(struct ib_device *device) in cma_add_one() argument
4995 if (!cma_supported(device)) in cma_add_one()
4996 return -EOPNOTSUPP; in cma_add_one()
5000 return -ENOMEM; in cma_add_one()
5002 cma_dev->device = device; in cma_add_one()
5003 cma_dev->default_gid_type = kcalloc(device->phys_port_cnt, in cma_add_one()
5004 sizeof(*cma_dev->default_gid_type), in cma_add_one()
5006 if (!cma_dev->default_gid_type) { in cma_add_one()
5007 ret = -ENOMEM; in cma_add_one()
5011 cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt, in cma_add_one()
5012 sizeof(*cma_dev->default_roce_tos), in cma_add_one()
5014 if (!cma_dev->default_roce_tos) { in cma_add_one()
5015 ret = -ENOMEM; in cma_add_one()
5019 rdma_for_each_port (device, i) { in cma_add_one()
5020 supported_gids = roce_gid_type_mask_support(device, i); in cma_add_one()
5023 cma_dev->default_gid_type[i - rdma_start_port(device)] = in cma_add_one()
5026 cma_dev->default_gid_type[i - rdma_start_port(device)] = in cma_add_one()
5028 cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0; in cma_add_one()
5031 init_completion(&cma_dev->comp); in cma_add_one()
5032 refcount_set(&cma_dev->refcount, 1); in cma_add_one()
5033 INIT_LIST_HEAD(&cma_dev->id_list); in cma_add_one()
5034 ib_set_client_data(device, &cma_client, cma_dev); in cma_add_one()
5037 list_add_tail(&cma_dev->list, &dev_list); in cma_add_one()
5045 trace_cm_add_one(device); in cma_add_one()
5049 list_del(&cma_dev->list); in cma_add_one()
5054 kfree(cma_dev->default_roce_tos); in cma_add_one()
5056 kfree(cma_dev->default_gid_type); in cma_add_one()
5063 static void cma_remove_one(struct ib_device *device, void *client_data) in cma_remove_one() argument
5067 trace_cm_remove_one(device); in cma_remove_one()
5070 list_del(&cma_dev->list); in cma_remove_one()
5074 kfree(cma_dev->default_roce_tos); in cma_remove_one()
5075 kfree(cma_dev->default_gid_type); in cma_remove_one()
5083 xa_init(&pernet->tcp_ps); in cma_init_net()
5084 xa_init(&pernet->udp_ps); in cma_init_net()
5085 xa_init(&pernet->ipoib_ps); in cma_init_net()
5086 xa_init(&pernet->ib_ps); in cma_init_net()
5095 WARN_ON(!xa_empty(&pernet->tcp_ps)); in cma_exit_net()
5096 WARN_ON(!xa_empty(&pernet->udp_ps)); in cma_exit_net()
5097 WARN_ON(!xa_empty(&pernet->ipoib_ps)); in cma_exit_net()
5098 WARN_ON(!xa_empty(&pernet->ib_ps)); in cma_exit_net()
5127 return -ENOMEM; in cma_init()