Lines Matching refs: id_priv

155 struct rdma_id_private *id_priv; in rdma_iw_cm_id() local
157 id_priv = container_of(id, struct rdma_id_private, id); in rdma_iw_cm_id()
159 return id_priv->cm_id.iw; in rdma_iw_cm_id()
170 struct rdma_id_private *id_priv = in rdma_res_to_id() local
173 return &id_priv->id; in rdma_res_to_id()
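
The hits above and everything that follows come from one translation unit, evidently the RDMA connection manager core (drivers/infiniband/core/cma.c in the Linux kernel). They open with the file's central idiom: the public struct rdma_cm_id handed to consumers is embedded inside the private struct rdma_id_private, so container_of() recovers the private state from any public id. A minimal sketch of that relationship, with the private fields elided and the helper name to_id_priv() invented for illustration:

	struct rdma_id_private {
		struct rdma_cm_id id;	/* public handle embedded in the private struct */
		/* ... state, lock, refcount, cm_id union, device binding ... */
	};

	static struct rdma_id_private *to_id_priv(struct rdma_cm_id *id)
	{
		return container_of(id, struct rdma_id_private, id);
	}
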
361 struct rdma_id_private *id_priv; member
421 static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp) in cma_comp() argument
426 spin_lock_irqsave(&id_priv->lock, flags); in cma_comp()
427 ret = (id_priv->state == comp); in cma_comp()
428 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_comp()
432 static int cma_comp_exch(struct rdma_id_private *id_priv, in cma_comp_exch() argument
438 spin_lock_irqsave(&id_priv->lock, flags); in cma_comp_exch()
439 if ((ret = (id_priv->state == comp))) in cma_comp_exch()
440 id_priv->state = exch; in cma_comp_exch()
441 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_comp_exch()
445 static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv, in cma_exch() argument
451 spin_lock_irqsave(&id_priv->lock, flags); in cma_exch()
452 old = id_priv->state; in cma_exch()
453 id_priv->state = exch; in cma_exch()
454 spin_unlock_irqrestore(&id_priv->lock, flags); in cma_exch()
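
The three helpers above implement the id's state machine: cma_comp() tests the current state, cma_exch() swaps it unconditionally, and cma_comp_exch() performs a guarded transition. Reassembled from the listed lines (the return statements fall between the hits):

	static int cma_comp_exch(struct rdma_id_private *id_priv,
				 enum rdma_cm_state comp, enum rdma_cm_state exch)
	{
		unsigned long flags;
		int ret;

		spin_lock_irqsave(&id_priv->lock, flags);
		if ((ret = (id_priv->state == comp)))
			id_priv->state = exch;
		spin_unlock_irqrestore(&id_priv->lock, flags);
		return ret;
	}

Most entry points below (rdma_resolve_route, rdma_connect, rdma_listen, ...) start with a cma_comp_exch() call and undo the transition on failure.
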
488 static void _cma_attach_to_dev(struct rdma_id_private *id_priv, in _cma_attach_to_dev() argument
492 id_priv->cma_dev = cma_dev; in _cma_attach_to_dev()
493 id_priv->id.device = cma_dev->device; in _cma_attach_to_dev()
494 id_priv->id.route.addr.dev_addr.transport = in _cma_attach_to_dev()
496 list_add_tail(&id_priv->list, &cma_dev->id_list); in _cma_attach_to_dev()
497 rdma_restrack_add(&id_priv->res); in _cma_attach_to_dev()
500 static void cma_attach_to_dev(struct rdma_id_private *id_priv, in cma_attach_to_dev() argument
503 _cma_attach_to_dev(id_priv, cma_dev); in cma_attach_to_dev()
504 id_priv->gid_type = in cma_attach_to_dev()
505 cma_dev->default_gid_type[id_priv->id.port_num - in cma_attach_to_dev()
523 static void cma_release_dev(struct rdma_id_private *id_priv) in cma_release_dev() argument
526 list_del(&id_priv->list); in cma_release_dev()
527 cma_deref_dev(id_priv->cma_dev); in cma_release_dev()
528 id_priv->cma_dev = NULL; in cma_release_dev()
532 static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv) in cma_src_addr() argument
534 return (struct sockaddr *) &id_priv->id.route.addr.src_addr; in cma_src_addr()
537 static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv) in cma_dst_addr() argument
539 return (struct sockaddr *) &id_priv->id.route.addr.dst_addr; in cma_dst_addr()
542 static inline unsigned short cma_family(struct rdma_id_private *id_priv) in cma_family() argument
544 return id_priv->id.route.addr.src_addr.ss_family; in cma_family()
547 static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey) in cma_set_qkey() argument
552 if (id_priv->qkey) { in cma_set_qkey()
553 if (qkey && id_priv->qkey != qkey) in cma_set_qkey()
559 id_priv->qkey = qkey; in cma_set_qkey()
563 switch (id_priv->id.ps) { in cma_set_qkey()
566 id_priv->qkey = RDMA_UDP_QKEY; in cma_set_qkey()
569 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); in cma_set_qkey()
570 ret = ib_sa_get_mcmember_rec(id_priv->id.device, in cma_set_qkey()
571 id_priv->id.port_num, &rec.mgid, in cma_set_qkey()
574 id_priv->qkey = be32_to_cpu(rec.qkey); in cma_set_qkey()
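
Pieced together, cma_set_qkey() lets the qkey be set once: an explicit value wins, RDMA_PS_UDP ids default to the well-known RDMA_UDP_QKEY, and RDMA_PS_IPOIB ids inherit the qkey of the broadcast group's mcmember record. A hedged reconstruction; the -EINVAL return and the treatment of other port spaces are assumptions beyond the listed lines:

	static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
	{
		struct ib_sa_mcmember_rec rec;
		int ret = 0;

		if (id_priv->qkey) {
			if (qkey && id_priv->qkey != qkey)
				return -EINVAL;	/* assumption: mismatch rejected */
			return 0;
		}

		if (qkey) {
			id_priv->qkey = qkey;
			return 0;
		}

		switch (id_priv->id.ps) {
		case RDMA_PS_UDP:
			id_priv->qkey = RDMA_UDP_QKEY;
			break;
		case RDMA_PS_IPOIB:
			ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
			ret = ib_sa_get_mcmember_rec(id_priv->id.device,
						     id_priv->id.port_num, &rec.mgid,
						     &rec);
			if (!ret)
				id_priv->qkey = be32_to_cpu(rec.qkey);
			break;
		default:
			break;
		}
		return ret;
	}
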
607 struct rdma_id_private *id_priv) in cma_validate_port() argument
609 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_validate_port()
635 static void cma_bind_sgid_attr(struct rdma_id_private *id_priv, in cma_bind_sgid_attr() argument
638 WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr); in cma_bind_sgid_attr()
639 id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr; in cma_bind_sgid_attr()
642 static int cma_acquire_dev(struct rdma_id_private *id_priv, in cma_acquire_dev() argument
645 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_acquire_dev()
654 id_priv->id.ps == RDMA_PS_IPOIB) in cma_acquire_dev()
658 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_acquire_dev()
671 gid_type, gidp, id_priv); in cma_acquire_dev()
673 id_priv->id.port_num = port; in cma_acquire_dev()
674 cma_bind_sgid_attr(id_priv, sgid_attr); in cma_acquire_dev()
691 gid_type, gidp, id_priv); in cma_acquire_dev()
693 id_priv->id.port_num = port; in cma_acquire_dev()
694 cma_bind_sgid_attr(id_priv, sgid_attr); in cma_acquire_dev()
703 cma_attach_to_dev(id_priv, cma_dev); in cma_acquire_dev()
712 static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) in cma_resolve_ib_dev() argument
723 addr = (struct sockaddr_ib *) cma_dst_addr(id_priv); in cma_resolve_ib_dev()
744 id_priv->id.port_num = p; in cma_resolve_ib_dev()
753 id_priv->id.port_num = p; in cma_resolve_ib_dev()
763 cma_attach_to_dev(id_priv, cma_dev); in cma_resolve_ib_dev()
765 addr = (struct sockaddr_ib *)cma_src_addr(id_priv); in cma_resolve_ib_dev()
767 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); in cma_resolve_ib_dev()
771 static void cma_deref_id(struct rdma_id_private *id_priv) in cma_deref_id() argument
773 if (atomic_dec_and_test(&id_priv->refcount)) in cma_deref_id()
774 complete(&id_priv->comp); in cma_deref_id()
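
cma_deref_id() pairs with the initial reference taken at creation (line 803 below): the last dropper completes id_priv->comp, which rdma_destroy_id() (lines 1727-1728) waits on before freeing. Reconstructed from the two listed lines:

	static void cma_deref_id(struct rdma_id_private *id_priv)
	{
		if (atomic_dec_and_test(&id_priv->refcount))
			complete(&id_priv->comp);
	}

Every asynchronous path below (address queries, route queries, netdev work) takes a reference before dispatch and drops it in the callback, so destruction cannot outrun a handler.
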
782 struct rdma_id_private *id_priv; in __rdma_create_id() local
784 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL); in __rdma_create_id()
785 if (!id_priv) in __rdma_create_id()
789 id_priv->res.kern_name = caller; in __rdma_create_id()
791 rdma_restrack_set_task(&id_priv->res, current); in __rdma_create_id()
792 id_priv->res.type = RDMA_RESTRACK_CM_ID; in __rdma_create_id()
793 id_priv->state = RDMA_CM_IDLE; in __rdma_create_id()
794 id_priv->id.context = context; in __rdma_create_id()
795 id_priv->id.event_handler = event_handler; in __rdma_create_id()
796 id_priv->id.ps = ps; in __rdma_create_id()
797 id_priv->id.qp_type = qp_type; in __rdma_create_id()
798 id_priv->tos_set = false; in __rdma_create_id()
799 id_priv->gid_type = IB_GID_TYPE_IB; in __rdma_create_id()
800 spin_lock_init(&id_priv->lock); in __rdma_create_id()
801 mutex_init(&id_priv->qp_mutex); in __rdma_create_id()
802 init_completion(&id_priv->comp); in __rdma_create_id()
803 atomic_set(&id_priv->refcount, 1); in __rdma_create_id()
804 mutex_init(&id_priv->handler_mutex); in __rdma_create_id()
805 INIT_LIST_HEAD(&id_priv->listen_list); in __rdma_create_id()
806 INIT_LIST_HEAD(&id_priv->mc_list); in __rdma_create_id()
807 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); in __rdma_create_id()
808 id_priv->id.route.addr.dev_addr.net = get_net(net); in __rdma_create_id()
809 id_priv->seq_num &= 0x00ffffff; in __rdma_create_id()
811 return &id_priv->id; in __rdma_create_id()
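
Consumers reach this initializer through the exported rdma_create_id() wrapper, which supplies the caller name recorded in res.kern_name (line 789). A hypothetical caller on a kernel of this vintage (my_event_handler and my_ctx are placeholders):

	struct rdma_cm_id *id;

	id = rdma_create_id(&init_net, my_event_handler, my_ctx,
			    RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);

The id is born in RDMA_CM_IDLE with one reference and no device; a device is bound only once an address is bound or resolved.
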
815 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) in cma_init_ud_qp() argument
821 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_init_ud_qp()
841 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp) in cma_init_conn_qp() argument
847 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_init_conn_qp()
857 struct rdma_id_private *id_priv; in rdma_create_qp() local
861 id_priv = container_of(id, struct rdma_id_private, id); in rdma_create_qp()
871 ret = cma_init_ud_qp(id_priv, qp); in rdma_create_qp()
873 ret = cma_init_conn_qp(id_priv, qp); in rdma_create_qp()
878 id_priv->qp_num = qp->qp_num; in rdma_create_qp()
879 id_priv->srq = (qp->srq != NULL); in rdma_create_qp()
889 struct rdma_id_private *id_priv; in rdma_destroy_qp() local
891 id_priv = container_of(id, struct rdma_id_private, id); in rdma_destroy_qp()
892 mutex_lock(&id_priv->qp_mutex); in rdma_destroy_qp()
893 ib_destroy_qp(id_priv->id.qp); in rdma_destroy_qp()
894 id_priv->id.qp = NULL; in rdma_destroy_qp()
895 mutex_unlock(&id_priv->qp_mutex); in rdma_destroy_qp()
899 static int cma_modify_qp_rtr(struct rdma_id_private *id_priv, in cma_modify_qp_rtr() argument
905 mutex_lock(&id_priv->qp_mutex); in cma_modify_qp_rtr()
906 if (!id_priv->id.qp) { in cma_modify_qp_rtr()
913 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_modify_qp_rtr()
917 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); in cma_modify_qp_rtr()
922 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_modify_qp_rtr()
926 BUG_ON(id_priv->cma_dev->device != id_priv->id.device); in cma_modify_qp_rtr()
930 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); in cma_modify_qp_rtr()
932 mutex_unlock(&id_priv->qp_mutex); in cma_modify_qp_rtr()
936 static int cma_modify_qp_rts(struct rdma_id_private *id_priv, in cma_modify_qp_rts() argument
942 mutex_lock(&id_priv->qp_mutex); in cma_modify_qp_rts()
943 if (!id_priv->id.qp) { in cma_modify_qp_rts()
949 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask); in cma_modify_qp_rts()
955 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask); in cma_modify_qp_rts()
957 mutex_unlock(&id_priv->qp_mutex); in cma_modify_qp_rts()
961 static int cma_modify_qp_err(struct rdma_id_private *id_priv) in cma_modify_qp_err() argument
966 mutex_lock(&id_priv->qp_mutex); in cma_modify_qp_err()
967 if (!id_priv->id.qp) { in cma_modify_qp_err()
973 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE); in cma_modify_qp_err()
975 mutex_unlock(&id_priv->qp_mutex); in cma_modify_qp_err()
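
All three QP transition helpers share one shape: take qp_mutex, return quietly if the consumer never attached a QP through rdma_create_qp(), otherwise build the attribute set and call ib_modify_qp(). The error transition is the simplest; reconstructed from lines 961-975:

	static int cma_modify_qp_err(struct rdma_id_private *id_priv)
	{
		struct ib_qp_attr qp_attr;
		int ret = 0;

		mutex_lock(&id_priv->qp_mutex);
		if (!id_priv->id.qp)
			goto out;	/* consumer owns its QP transitions */

		qp_attr.qp_state = IB_QPS_ERR;
		ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
	out:
		mutex_unlock(&id_priv->qp_mutex);
		return ret;
	}
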
979 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, in cma_ib_init_qp_attr() argument
982 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_ib_init_qp_attr()
986 if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) in cma_ib_init_qp_attr()
991 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num, in cma_ib_init_qp_attr()
996 qp_attr->port_num = id_priv->id.port_num; in cma_ib_init_qp_attr()
999 if (id_priv->id.qp_type == IB_QPT_UD) { in cma_ib_init_qp_attr()
1000 ret = cma_set_qkey(id_priv, 0); in cma_ib_init_qp_attr()
1004 qp_attr->qkey = id_priv->qkey; in cma_ib_init_qp_attr()
1016 struct rdma_id_private *id_priv; in rdma_init_qp_attr() local
1019 id_priv = container_of(id, struct rdma_id_private, id); in rdma_init_qp_attr()
1021 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) in rdma_init_qp_attr()
1022 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); in rdma_init_qp_attr()
1024 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, in rdma_init_qp_attr()
1028 qp_attr->rq_psn = id_priv->seq_num; in rdma_init_qp_attr()
1030 if (!id_priv->cm_id.iw) { in rdma_init_qp_attr()
1034 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, in rdma_init_qp_attr()
1036 qp_attr->port_num = id_priv->id.port_num; in rdma_init_qp_attr()
1421 static bool cma_match_private_data(struct rdma_id_private *id_priv, in cma_match_private_data() argument
1424 struct sockaddr *addr = cma_src_addr(id_priv); in cma_match_private_data()
1428 if (cma_any_addr(addr) && !id_priv->afonly) in cma_match_private_data()
1495 struct rdma_id_private *id_priv, *id_priv_dev; in cma_find_listener() local
1500 hlist_for_each_entry(id_priv, &bind_list->owners, node) { in cma_find_listener()
1501 if (cma_match_private_data(id_priv, ib_event->private_data)) { in cma_find_listener()
1502 if (id_priv->id.device == cm_id->device && in cma_find_listener()
1503 cma_match_net_dev(&id_priv->id, net_dev, req->port)) in cma_find_listener()
1504 return id_priv; in cma_find_listener()
1506 &id_priv->listen_list, in cma_find_listener()
1525 struct rdma_id_private *id_priv; in cma_ib_id_from_event() local
1565 id_priv = ERR_PTR(-EHOSTUNREACH); in cma_ib_id_from_event()
1572 id_priv = ERR_PTR(-EHOSTUNREACH); in cma_ib_id_from_event()
1580 id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev); in cma_ib_id_from_event()
1583 if (IS_ERR(id_priv) && *net_dev) { in cma_ib_id_from_event()
1587 return id_priv; in cma_ib_id_from_event()
1590 static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv) in cma_user_data_offset() argument
1592 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr); in cma_user_data_offset()
1595 static void cma_cancel_route(struct rdma_id_private *id_priv) in cma_cancel_route() argument
1597 if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { in cma_cancel_route()
1598 if (id_priv->query) in cma_cancel_route()
1599 ib_sa_cancel_query(id_priv->query_id, id_priv->query); in cma_cancel_route()
1603 static void cma_cancel_listens(struct rdma_id_private *id_priv) in cma_cancel_listens() argument
1612 list_del(&id_priv->list); in cma_cancel_listens()
1614 while (!list_empty(&id_priv->listen_list)) { in cma_cancel_listens()
1615 dev_id_priv = list_entry(id_priv->listen_list.next, in cma_cancel_listens()
1628 static void cma_cancel_operation(struct rdma_id_private *id_priv, in cma_cancel_operation() argument
1633 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); in cma_cancel_operation()
1636 cma_cancel_route(id_priv); in cma_cancel_operation()
1639 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev) in cma_cancel_operation()
1640 cma_cancel_listens(id_priv); in cma_cancel_operation()
1647 static void cma_release_port(struct rdma_id_private *id_priv) in cma_release_port() argument
1649 struct rdma_bind_list *bind_list = id_priv->bind_list; in cma_release_port()
1650 struct net *net = id_priv->id.route.addr.dev_addr.net; in cma_release_port()
1656 hlist_del(&id_priv->node); in cma_release_port()
1664 static void cma_leave_roce_mc_group(struct rdma_id_private *id_priv, in cma_leave_roce_mc_group() argument
1667 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_leave_roce_mc_group()
1679 static void cma_leave_mc_groups(struct rdma_id_private *id_priv) in cma_leave_mc_groups() argument
1683 while (!list_empty(&id_priv->mc_list)) { in cma_leave_mc_groups()
1684 mc = container_of(id_priv->mc_list.next, in cma_leave_mc_groups()
1687 if (rdma_cap_ib_mcast(id_priv->cma_dev->device, in cma_leave_mc_groups()
1688 id_priv->id.port_num)) { in cma_leave_mc_groups()
1692 cma_leave_roce_mc_group(id_priv, mc); in cma_leave_mc_groups()
1699 struct rdma_id_private *id_priv; in rdma_destroy_id() local
1702 id_priv = container_of(id, struct rdma_id_private, id); in rdma_destroy_id()
1703 state = cma_exch(id_priv, RDMA_CM_DESTROYING); in rdma_destroy_id()
1704 cma_cancel_operation(id_priv, state); in rdma_destroy_id()
1710 mutex_lock(&id_priv->handler_mutex); in rdma_destroy_id()
1711 mutex_unlock(&id_priv->handler_mutex); in rdma_destroy_id()
1713 if (id_priv->cma_dev) { in rdma_destroy_id()
1714 rdma_restrack_del(&id_priv->res); in rdma_destroy_id()
1715 if (rdma_cap_ib_cm(id_priv->id.device, 1)) { in rdma_destroy_id()
1716 if (id_priv->cm_id.ib) in rdma_destroy_id()
1717 ib_destroy_cm_id(id_priv->cm_id.ib); in rdma_destroy_id()
1718 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { in rdma_destroy_id()
1719 if (id_priv->cm_id.iw) in rdma_destroy_id()
1720 iw_destroy_cm_id(id_priv->cm_id.iw); in rdma_destroy_id()
1722 cma_leave_mc_groups(id_priv); in rdma_destroy_id()
1723 cma_release_dev(id_priv); in rdma_destroy_id()
1726 cma_release_port(id_priv); in rdma_destroy_id()
1727 cma_deref_id(id_priv); in rdma_destroy_id()
1728 wait_for_completion(&id_priv->comp); in rdma_destroy_id()
1730 if (id_priv->internal_id) in rdma_destroy_id()
1731 cma_deref_id(id_priv->id.context); in rdma_destroy_id()
1733 kfree(id_priv->id.route.path_rec); in rdma_destroy_id()
1735 if (id_priv->id.route.addr.dev_addr.sgid_attr) in rdma_destroy_id()
1736 rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); in rdma_destroy_id()
1738 put_net(id_priv->id.route.addr.dev_addr.net); in rdma_destroy_id()
1739 kfree(id_priv); in rdma_destroy_id()
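
rdma_destroy_id() strings the pieces together; every step appears in the hits above:

	/*
	 * Teardown order in rdma_destroy_id(), per lines 1699-1739:
	 *  1. cma_exch(id_priv, RDMA_CM_DESTROYING)   claim the id
	 *  2. cma_cancel_operation(id_priv, state)    stop state-specific async work
	 *  3. lock + unlock handler_mutex             wait out a running event handler
	 *  4. if device-bound: drop the restrack entry, destroy the ib/iw cm_id,
	 *     leave multicast groups, cma_release_dev()
	 *  5. cma_release_port(); cma_deref_id(); wait_for_completion(&id_priv->comp)
	 *  6. free path_rec, drop the sgid_attr and netns references, kfree(id_priv)
	 */
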
1743 static int cma_rep_recv(struct rdma_id_private *id_priv) in cma_rep_recv() argument
1747 ret = cma_modify_qp_rtr(id_priv, NULL); in cma_rep_recv()
1751 ret = cma_modify_qp_rts(id_priv, NULL); in cma_rep_recv()
1755 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0); in cma_rep_recv()
1762 cma_modify_qp_err(id_priv); in cma_rep_recv()
1763 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, in cma_rep_recv()
1785 struct rdma_id_private *id_priv = cm_id->context; in cma_ib_handler() local
1789 mutex_lock(&id_priv->handler_mutex); in cma_ib_handler()
1791 id_priv->state != RDMA_CM_CONNECT) || in cma_ib_handler()
1793 id_priv->state != RDMA_CM_DISCONNECT)) in cma_ib_handler()
1803 if (cma_comp(id_priv, RDMA_CM_CONNECT) && in cma_ib_handler()
1804 (id_priv->id.qp_type != IB_QPT_UD)) in cma_ib_handler()
1806 if (id_priv->id.qp) { in cma_ib_handler()
1807 event.status = cma_rep_recv(id_priv); in cma_ib_handler()
1824 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, in cma_ib_handler()
1836 pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id, in cma_ib_handler()
1838 cma_modify_qp_err(id_priv); in cma_ib_handler()
1850 ret = id_priv->id.event_handler(&id_priv->id, &event); in cma_ib_handler()
1853 id_priv->cm_id.ib = NULL; in cma_ib_handler()
1854 cma_exch(id_priv, RDMA_CM_DESTROYING); in cma_ib_handler()
1855 mutex_unlock(&id_priv->handler_mutex); in cma_ib_handler()
1856 rdma_destroy_id(&id_priv->id); in cma_ib_handler()
1860 mutex_unlock(&id_priv->handler_mutex); in cma_ib_handler()
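
cma_ib_handler() ends with a pattern every handler in this file repeats (compare lines 2177-2183, 2882-2886, and 4017-4023): deliver the event under handler_mutex and, if the consumer returns nonzero, destroy the id on its behalf, clearing cm_id first so rdma_destroy_id() does not tear it down a second time:

	ret = id_priv->id.event_handler(&id_priv->id, &event);
	if (ret) {
		/* the CM core is already destroying this cm_id; don't free it twice */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);
		return ret;
	}
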
1870 struct rdma_id_private *id_priv; in cma_ib_new_conn_id() local
1887 id_priv = container_of(id, struct rdma_id_private, id); in cma_ib_new_conn_id()
1908 cma_any_addr(cma_src_addr(id_priv))) { in cma_ib_new_conn_id()
1912 } else if (!cma_any_addr(cma_src_addr(id_priv))) { in cma_ib_new_conn_id()
1913 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr); in cma_ib_new_conn_id()
1920 id_priv->state = RDMA_CM_CONNECT; in cma_ib_new_conn_id()
1921 return id_priv; in cma_ib_new_conn_id()
1934 struct rdma_id_private *id_priv; in cma_ib_new_udp_id() local
1947 id_priv = container_of(id, struct rdma_id_private, id); in cma_ib_new_udp_id()
1957 if (!cma_any_addr(cma_src_addr(id_priv))) { in cma_ib_new_udp_id()
1958 ret = cma_translate_addr(cma_src_addr(id_priv), in cma_ib_new_udp_id()
1965 id_priv->state = RDMA_CM_CONNECT; in cma_ib_new_udp_id()
1966 return id_priv; in cma_ib_new_udp_id()
2128 struct rdma_id_private *id_priv = iw_id->context; in cma_iw_handler() local
2134 mutex_lock(&id_priv->handler_mutex); in cma_iw_handler()
2135 if (id_priv->state != RDMA_CM_CONNECT) in cma_iw_handler()
2143 memcpy(cma_src_addr(id_priv), laddr, in cma_iw_handler()
2145 memcpy(cma_dst_addr(id_priv), raddr, in cma_iw_handler()
2177 ret = id_priv->id.event_handler(&id_priv->id, &event); in cma_iw_handler()
2180 id_priv->cm_id.iw = NULL; in cma_iw_handler()
2181 cma_exch(id_priv, RDMA_CM_DESTROYING); in cma_iw_handler()
2182 mutex_unlock(&id_priv->handler_mutex); in cma_iw_handler()
2183 rdma_destroy_id(&id_priv->id); in cma_iw_handler()
2188 mutex_unlock(&id_priv->handler_mutex); in cma_iw_handler()
2273 static int cma_ib_listen(struct rdma_id_private *id_priv) in cma_ib_listen() argument
2279 addr = cma_src_addr(id_priv); in cma_ib_listen()
2280 svc_id = rdma_get_service_id(&id_priv->id, addr); in cma_ib_listen()
2281 id = ib_cm_insert_listen(id_priv->id.device, in cma_ib_listen()
2285 id_priv->cm_id.ib = id; in cma_ib_listen()
2290 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog) in cma_iw_listen() argument
2295 id = iw_create_cm_id(id_priv->id.device, in cma_iw_listen()
2297 id_priv); in cma_iw_listen()
2301 id->tos = id_priv->tos; in cma_iw_listen()
2302 id_priv->cm_id.iw = id; in cma_iw_listen()
2304 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv), in cma_iw_listen()
2305 rdma_addr_size(cma_src_addr(id_priv))); in cma_iw_listen()
2307 ret = iw_cm_listen(id_priv->cm_id.iw, backlog); in cma_iw_listen()
2310 iw_destroy_cm_id(id_priv->cm_id.iw); in cma_iw_listen()
2311 id_priv->cm_id.iw = NULL; in cma_iw_listen()
2320 struct rdma_id_private *id_priv = id->context; in cma_listen_handler() local
2322 id->context = id_priv->id.context; in cma_listen_handler()
2323 id->event_handler = id_priv->id.event_handler; in cma_listen_handler()
2324 return id_priv->id.event_handler(id, event); in cma_listen_handler()
2327 static void cma_listen_on_dev(struct rdma_id_private *id_priv, in cma_listen_on_dev() argument
2332 struct net *net = id_priv->id.route.addr.dev_addr.net; in cma_listen_on_dev()
2335 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) in cma_listen_on_dev()
2338 id = __rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps, in cma_listen_on_dev()
2339 id_priv->id.qp_type, id_priv->res.kern_name); in cma_listen_on_dev()
2346 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv), in cma_listen_on_dev()
2347 rdma_addr_size(cma_src_addr(id_priv))); in cma_listen_on_dev()
2350 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); in cma_listen_on_dev()
2351 atomic_inc(&id_priv->refcount); in cma_listen_on_dev()
2353 dev_id_priv->afonly = id_priv->afonly; in cma_listen_on_dev()
2355 ret = rdma_listen(id, id_priv->backlog); in cma_listen_on_dev()
2361 static void cma_listen_on_all(struct rdma_id_private *id_priv) in cma_listen_on_all() argument
2366 list_add_tail(&id_priv->list, &listen_any_list); in cma_listen_on_all()
2368 cma_listen_on_dev(id_priv, cma_dev); in cma_listen_on_all()
2374 struct rdma_id_private *id_priv; in rdma_set_service_type() local
2376 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_service_type()
2377 id_priv->tos = (u8) tos; in rdma_set_service_type()
2378 id_priv->tos_set = true; in rdma_set_service_type()
2405 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms, in cma_query_ib_route() argument
2408 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_query_ib_route()
2416 if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num)) in cma_query_ib_route()
2425 path_rec.service_id = rdma_get_service_id(&id_priv->id, in cma_query_ib_route()
2426 cma_dst_addr(id_priv)); in cma_query_ib_route()
2432 switch (cma_family(id_priv)) { in cma_query_ib_route()
2434 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos); in cma_query_ib_route()
2438 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); in cma_query_ib_route()
2443 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); in cma_query_ib_route()
2449 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device, in cma_query_ib_route()
2450 id_priv->id.port_num, &path_rec, in cma_query_ib_route()
2453 work, &id_priv->query); in cma_query_ib_route()
2455 return (id_priv->query_id < 0) ? id_priv->query_id : 0; in cma_query_ib_route()
2461 struct rdma_id_private *id_priv = work->id; in cma_work_handler() local
2464 mutex_lock(&id_priv->handler_mutex); in cma_work_handler()
2465 if (!cma_comp_exch(id_priv, work->old_state, work->new_state)) in cma_work_handler()
2468 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { in cma_work_handler()
2469 cma_exch(id_priv, RDMA_CM_DESTROYING); in cma_work_handler()
2473 mutex_unlock(&id_priv->handler_mutex); in cma_work_handler()
2474 cma_deref_id(id_priv); in cma_work_handler()
2476 rdma_destroy_id(&id_priv->id); in cma_work_handler()
2483 struct rdma_id_private *id_priv = work->id; in cma_ndev_work_handler() local
2486 mutex_lock(&id_priv->handler_mutex); in cma_ndev_work_handler()
2487 if (id_priv->state == RDMA_CM_DESTROYING || in cma_ndev_work_handler()
2488 id_priv->state == RDMA_CM_DEVICE_REMOVAL) in cma_ndev_work_handler()
2491 if (id_priv->id.event_handler(&id_priv->id, &work->event)) { in cma_ndev_work_handler()
2492 cma_exch(id_priv, RDMA_CM_DESTROYING); in cma_ndev_work_handler()
2497 mutex_unlock(&id_priv->handler_mutex); in cma_ndev_work_handler()
2498 cma_deref_id(id_priv); in cma_ndev_work_handler()
2500 rdma_destroy_id(&id_priv->id); in cma_ndev_work_handler()
2505 struct rdma_id_private *id_priv) in cma_init_resolve_route_work() argument
2507 work->id = id_priv; in cma_init_resolve_route_work()
2515 struct rdma_id_private *id_priv) in cma_init_resolve_addr_work() argument
2517 work->id = id_priv; in cma_init_resolve_addr_work()
2524 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) in cma_resolve_ib_route() argument
2526 struct rdma_route *route = &id_priv->id.route; in cma_resolve_ib_route()
2534 cma_init_resolve_route_work(work, id_priv); in cma_resolve_ib_route()
2542 ret = cma_query_ib_route(id_priv, timeout_ms, work); in cma_resolve_ib_route()
2575 cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv) in cma_iboe_set_path_rec_l2_fields() argument
2577 struct rdma_route *route = &id_priv->id.route; in cma_iboe_set_path_rec_l2_fields()
2591 supported_gids = roce_gid_type_mask_support(id_priv->id.device, in cma_iboe_set_path_rec_l2_fields()
2592 id_priv->id.port_num); in cma_iboe_set_path_rec_l2_fields()
2595 id_priv->gid_type); in cma_iboe_set_path_rec_l2_fields()
2609 struct rdma_id_private *id_priv; in rdma_set_ib_path() local
2613 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_ib_path()
2614 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, in rdma_set_ib_path()
2626 ndev = cma_iboe_set_path_rec_l2_fields(id_priv); in rdma_set_ib_path()
2641 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); in rdma_set_ib_path()
2646 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) in cma_resolve_iw_route() argument
2654 cma_init_resolve_route_work(work, id_priv); in cma_resolve_iw_route()
2677 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) in cma_resolve_iboe_route() argument
2679 struct rdma_route *route = &id_priv->id.route; in cma_resolve_iboe_route()
2685 u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num - in cma_resolve_iboe_route()
2686 rdma_start_port(id_priv->cma_dev->device)]; in cma_resolve_iboe_route()
2687 u8 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos; in cma_resolve_iboe_route()
2702 ndev = cma_iboe_set_path_rec_l2_fields(id_priv); in cma_resolve_iboe_route()
2708 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_resolve_iboe_route()
2710 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, in cma_resolve_iboe_route()
2713 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) in cma_resolve_iboe_route()
2734 cma_init_resolve_route_work(work, id_priv); in cma_resolve_iboe_route()
2749 struct rdma_id_private *id_priv; in rdma_resolve_route() local
2752 id_priv = container_of(id, struct rdma_id_private, id); in rdma_resolve_route()
2753 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) in rdma_resolve_route()
2756 atomic_inc(&id_priv->refcount); in rdma_resolve_route()
2758 ret = cma_resolve_ib_route(id_priv, timeout_ms); in rdma_resolve_route()
2760 ret = cma_resolve_iboe_route(id_priv); in rdma_resolve_route()
2762 ret = cma_resolve_iw_route(id_priv, timeout_ms); in rdma_resolve_route()
2771 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); in rdma_resolve_route()
2772 cma_deref_id(id_priv); in rdma_resolve_route()
2794 static int cma_bind_loopback(struct rdma_id_private *id_priv) in cma_bind_loopback() argument
2806 if (cma_family(id_priv) == AF_IB && in cma_bind_loopback()
2838 id_priv->id.route.addr.dev_addr.dev_type = in cma_bind_loopback()
2842 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_bind_loopback()
2843 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); in cma_bind_loopback()
2844 id_priv->id.port_num = p; in cma_bind_loopback()
2845 cma_attach_to_dev(id_priv, cma_dev); in cma_bind_loopback()
2846 cma_set_loopback(cma_src_addr(id_priv)); in cma_bind_loopback()
2855 struct rdma_id_private *id_priv = context; in addr_handler() local
2858 mutex_lock(&id_priv->handler_mutex); in addr_handler()
2859 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, in addr_handler()
2863 memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr)); in addr_handler()
2864 if (!status && !id_priv->cma_dev) { in addr_handler()
2865 status = cma_acquire_dev(id_priv, NULL); in addr_handler()
2874 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, in addr_handler()
2882 if (id_priv->id.event_handler(&id_priv->id, &event)) { in addr_handler()
2883 cma_exch(id_priv, RDMA_CM_DESTROYING); in addr_handler()
2884 mutex_unlock(&id_priv->handler_mutex); in addr_handler()
2885 cma_deref_id(id_priv); in addr_handler()
2886 rdma_destroy_id(&id_priv->id); in addr_handler()
2890 mutex_unlock(&id_priv->handler_mutex); in addr_handler()
2891 cma_deref_id(id_priv); in addr_handler()
2894 static int cma_resolve_loopback(struct rdma_id_private *id_priv) in cma_resolve_loopback() argument
2904 if (!id_priv->cma_dev) { in cma_resolve_loopback()
2905 ret = cma_bind_loopback(id_priv); in cma_resolve_loopback()
2910 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_resolve_loopback()
2911 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); in cma_resolve_loopback()
2913 cma_init_resolve_addr_work(work, id_priv); in cma_resolve_loopback()
2921 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv) in cma_resolve_ib_addr() argument
2930 if (!id_priv->cma_dev) { in cma_resolve_ib_addr()
2931 ret = cma_resolve_ib_dev(id_priv); in cma_resolve_ib_addr()
2936 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) in cma_resolve_ib_addr()
2937 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); in cma_resolve_ib_addr()
2939 cma_init_resolve_addr_work(work, id_priv); in cma_resolve_ib_addr()
2971 struct rdma_id_private *id_priv; in rdma_resolve_addr() local
2974 id_priv = container_of(id, struct rdma_id_private, id); in rdma_resolve_addr()
2975 if (id_priv->state == RDMA_CM_IDLE) { in rdma_resolve_addr()
2981 if (cma_family(id_priv) != dst_addr->sa_family) in rdma_resolve_addr()
2984 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) in rdma_resolve_addr()
2987 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr)); in rdma_resolve_addr()
2988 atomic_inc(&id_priv->refcount); in rdma_resolve_addr()
2990 ret = cma_resolve_loopback(id_priv); in rdma_resolve_addr()
2993 ret = cma_resolve_ib_addr(id_priv); in rdma_resolve_addr()
2995 ret = rdma_resolve_ip(cma_src_addr(id_priv), in rdma_resolve_addr()
2997 timeout_ms, addr_handler, id_priv); in rdma_resolve_addr()
3005 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); in rdma_resolve_addr()
3006 cma_deref_id(id_priv); in rdma_resolve_addr()
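
Together with rdma_resolve_route() above, this gives the canonical active-side sequence: resolve the address, resolve the route from the ADDR_RESOLVED event, then connect. A hypothetical consumer flow (the timeouts, dst, and handler name are illustrative):

	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *)&dst, 2000);

	/* later, in my_event_handler(): */
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		ret = rdma_resolve_route(id, 2000);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		/* create the QP, then call rdma_connect() */
		break;
	default:
		break;
	}
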
3013 struct rdma_id_private *id_priv; in rdma_set_reuseaddr() local
3017 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_reuseaddr()
3018 spin_lock_irqsave(&id_priv->lock, flags); in rdma_set_reuseaddr()
3019 if (reuse || id_priv->state == RDMA_CM_IDLE) { in rdma_set_reuseaddr()
3020 id_priv->reuseaddr = reuse; in rdma_set_reuseaddr()
3025 spin_unlock_irqrestore(&id_priv->lock, flags); in rdma_set_reuseaddr()
3032 struct rdma_id_private *id_priv; in rdma_set_afonly() local
3036 id_priv = container_of(id, struct rdma_id_private, id); in rdma_set_afonly()
3037 spin_lock_irqsave(&id_priv->lock, flags); in rdma_set_afonly()
3038 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) { in rdma_set_afonly()
3039 id_priv->options |= (1 << CMA_OPTION_AFONLY); in rdma_set_afonly()
3040 id_priv->afonly = afonly; in rdma_set_afonly()
3045 spin_unlock_irqrestore(&id_priv->lock, flags); in rdma_set_afonly()
3051 struct rdma_id_private *id_priv) in cma_bind_port() argument
3058 addr = cma_src_addr(id_priv); in cma_bind_port()
3076 id_priv->bind_list = bind_list; in cma_bind_port()
3077 hlist_add_head(&id_priv->node, &bind_list->owners); in cma_bind_port()
3081 struct rdma_id_private *id_priv, unsigned short snum) in cma_alloc_port() argument
3090 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, in cma_alloc_port()
3097 cma_bind_port(bind_list, id_priv); in cma_alloc_port()
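
cma_bind_port() and cma_alloc_port() hang the id off a bind list keyed by (port space, port number). A hedged sketch of that structure; the field types beyond the 'owners' hlist visible above are assumptions:

	struct rdma_bind_list {
		enum rdma_ucm_port_space ps;	/* assumed: owning port space */
		struct hlist_head	 owners;	/* every id sharing the port (line 3077) */
		unsigned short		 port;
	};
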
3105 struct rdma_id_private *id_priv) in cma_port_is_unique() argument
3108 struct sockaddr *daddr = cma_dst_addr(id_priv); in cma_port_is_unique()
3109 struct sockaddr *saddr = cma_src_addr(id_priv); in cma_port_is_unique()
3117 if (id_priv == cur_id) in cma_port_is_unique()
3144 struct rdma_id_private *id_priv) in cma_alloc_any_port() argument
3149 struct net *net = id_priv->id.route.addr.dev_addr.net; in cma_alloc_any_port()
3162 ret = cma_alloc_port(ps, id_priv, rover); in cma_alloc_any_port()
3164 ret = cma_port_is_unique(bind_list, id_priv); in cma_alloc_any_port()
3166 cma_bind_port(bind_list, id_priv); in cma_alloc_any_port()
3193 struct rdma_id_private *id_priv, uint8_t reuseaddr) in cma_check_port() argument
3198 addr = cma_src_addr(id_priv); in cma_check_port()
3200 if (id_priv == cur_id) in cma_check_port()
3208 if (id_priv->afonly && cur_id->afonly && in cma_check_port()
3222 struct rdma_id_private *id_priv) in cma_use_port() argument
3228 snum = ntohs(cma_port(cma_src_addr(id_priv))); in cma_use_port()
3232 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); in cma_use_port()
3234 ret = cma_alloc_port(ps, id_priv, snum); in cma_use_port()
3236 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr); in cma_use_port()
3238 cma_bind_port(bind_list, id_priv); in cma_use_port()
3243 static int cma_bind_listen(struct rdma_id_private *id_priv) in cma_bind_listen() argument
3245 struct rdma_bind_list *bind_list = id_priv->bind_list; in cma_bind_listen()
3250 ret = cma_check_port(bind_list, id_priv, 0); in cma_bind_listen()
3256 cma_select_inet_ps(struct rdma_id_private *id_priv) in cma_select_inet_ps() argument
3258 switch (id_priv->id.ps) { in cma_select_inet_ps()
3263 return id_priv->id.ps; in cma_select_inet_ps()
3271 cma_select_ib_ps(struct rdma_id_private *id_priv) in cma_select_ib_ps() argument
3277 sib = (struct sockaddr_ib *) cma_src_addr(id_priv); in cma_select_ib_ps()
3281 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) { in cma_select_ib_ps()
3284 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) && in cma_select_ib_ps()
3288 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) && in cma_select_ib_ps()
3302 static int cma_get_port(struct rdma_id_private *id_priv) in cma_get_port() argument
3307 if (cma_family(id_priv) != AF_IB) in cma_get_port()
3308 ps = cma_select_inet_ps(id_priv); in cma_get_port()
3310 ps = cma_select_ib_ps(id_priv); in cma_get_port()
3315 if (cma_any_port(cma_src_addr(id_priv))) in cma_get_port()
3316 ret = cma_alloc_any_port(ps, id_priv); in cma_get_port()
3318 ret = cma_use_port(ps, id_priv); in cma_get_port()
3348 struct rdma_id_private *id_priv; in rdma_listen() local
3351 id_priv = container_of(id, struct rdma_id_private, id); in rdma_listen()
3352 if (id_priv->state == RDMA_CM_IDLE) { in rdma_listen()
3354 ret = rdma_bind_addr(id, cma_src_addr(id_priv)); in rdma_listen()
3359 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) in rdma_listen()
3362 if (id_priv->reuseaddr) { in rdma_listen()
3363 ret = cma_bind_listen(id_priv); in rdma_listen()
3368 id_priv->backlog = backlog; in rdma_listen()
3371 ret = cma_ib_listen(id_priv); in rdma_listen()
3375 ret = cma_iw_listen(id_priv, backlog); in rdma_listen()
3383 cma_listen_on_all(id_priv); in rdma_listen()
3387 id_priv->backlog = 0; in rdma_listen()
3388 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); in rdma_listen()
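
A hypothetical passive-side caller; binding a wildcard address leaves the id without a device, so rdma_listen() falls through to cma_listen_on_all() (line 3383), which fans the listen out across every RDMA device:

	struct sockaddr_in sin = {
		.sin_family = AF_INET,
		.sin_port   = htons(7471),	/* illustrative port */
	};	/* sin_addr left zeroed: INADDR_ANY */

	ret = rdma_bind_addr(id, (struct sockaddr *)&sin);
	if (!ret)
		ret = rdma_listen(id, 16);	/* backlog, illustrative */
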
3395 struct rdma_id_private *id_priv; in rdma_bind_addr() local
3403 id_priv = container_of(id, struct rdma_id_private, id); in rdma_bind_addr()
3404 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) in rdma_bind_addr()
3411 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr)); in rdma_bind_addr()
3417 ret = cma_acquire_dev(id_priv, NULL); in rdma_bind_addr()
3422 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) { in rdma_bind_addr()
3424 id_priv->afonly = 1; in rdma_bind_addr()
3427 struct net *net = id_priv->id.route.addr.dev_addr.net; in rdma_bind_addr()
3429 id_priv->afonly = net->ipv6.sysctl.bindv6only; in rdma_bind_addr()
3433 daddr = cma_dst_addr(id_priv); in rdma_bind_addr()
3436 ret = cma_get_port(id_priv); in rdma_bind_addr()
3442 if (id_priv->cma_dev) { in rdma_bind_addr()
3443 rdma_restrack_del(&id_priv->res); in rdma_bind_addr()
3444 cma_release_dev(id_priv); in rdma_bind_addr()
3447 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); in rdma_bind_addr()
3452 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv) in cma_format_hdr() argument
3458 if (cma_family(id_priv) == AF_INET) { in cma_format_hdr()
3461 src4 = (struct sockaddr_in *) cma_src_addr(id_priv); in cma_format_hdr()
3462 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv); in cma_format_hdr()
3468 } else if (cma_family(id_priv) == AF_INET6) { in cma_format_hdr()
3471 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv); in cma_format_hdr()
3472 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv); in cma_format_hdr()
3485 struct rdma_id_private *id_priv = cm_id->context; in cma_sidr_rep_handler() local
3491 mutex_lock(&id_priv->handler_mutex); in cma_sidr_rep_handler()
3492 if (id_priv->state != RDMA_CM_CONNECT) in cma_sidr_rep_handler()
3510 ret = cma_set_qkey(id_priv, rep->qkey); in cma_sidr_rep_handler()
3517 ib_init_ah_attr_from_path(id_priv->id.device, in cma_sidr_rep_handler()
3518 id_priv->id.port_num, in cma_sidr_rep_handler()
3519 id_priv->id.route.path_rec, in cma_sidr_rep_handler()
3533 ret = id_priv->id.event_handler(&id_priv->id, &event); in cma_sidr_rep_handler()
3538 id_priv->cm_id.ib = NULL; in cma_sidr_rep_handler()
3539 cma_exch(id_priv, RDMA_CM_DESTROYING); in cma_sidr_rep_handler()
3540 mutex_unlock(&id_priv->handler_mutex); in cma_sidr_rep_handler()
3541 rdma_destroy_id(&id_priv->id); in cma_sidr_rep_handler()
3545 mutex_unlock(&id_priv->handler_mutex); in cma_sidr_rep_handler()
3549 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, in cma_resolve_ib_udp() argument
3559 offset = cma_user_data_offset(id_priv); in cma_resolve_ib_udp()
3577 ret = cma_format_hdr(private_data, id_priv); in cma_resolve_ib_udp()
3583 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler, in cma_resolve_ib_udp()
3584 id_priv); in cma_resolve_ib_udp()
3589 id_priv->cm_id.ib = id; in cma_resolve_ib_udp()
3591 req.path = id_priv->id.route.path_rec; in cma_resolve_ib_udp()
3592 req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; in cma_resolve_ib_udp()
3593 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); in cma_resolve_ib_udp()
3597 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req); in cma_resolve_ib_udp()
3599 ib_destroy_cm_id(id_priv->cm_id.ib); in cma_resolve_ib_udp()
3600 id_priv->cm_id.ib = NULL; in cma_resolve_ib_udp()
3607 static int cma_connect_ib(struct rdma_id_private *id_priv, in cma_connect_ib() argument
3618 offset = cma_user_data_offset(id_priv); in cma_connect_ib()
3635 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv); in cma_connect_ib()
3640 id_priv->cm_id.ib = id; in cma_connect_ib()
3642 route = &id_priv->id.route; in cma_connect_ib()
3644 ret = cma_format_hdr(private_data, id_priv); in cma_connect_ib()
3654 req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; in cma_connect_ib()
3656 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv)); in cma_connect_ib()
3657 req.qp_num = id_priv->qp_num; in cma_connect_ib()
3658 req.qp_type = id_priv->id.qp_type; in cma_connect_ib()
3659 req.starting_psn = id_priv->seq_num; in cma_connect_ib()
3668 req.srq = id_priv->srq ? 1 : 0; in cma_connect_ib()
3670 ret = ib_send_cm_req(id_priv->cm_id.ib, &req); in cma_connect_ib()
3674 id_priv->cm_id.ib = NULL; in cma_connect_ib()
3681 static int cma_connect_iw(struct rdma_id_private *id_priv, in cma_connect_iw() argument
3688 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv); in cma_connect_iw()
3692 cm_id->tos = id_priv->tos; in cma_connect_iw()
3693 id_priv->cm_id.iw = cm_id; in cma_connect_iw()
3695 memcpy(&cm_id->local_addr, cma_src_addr(id_priv), in cma_connect_iw()
3696 rdma_addr_size(cma_src_addr(id_priv))); in cma_connect_iw()
3697 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv), in cma_connect_iw()
3698 rdma_addr_size(cma_dst_addr(id_priv))); in cma_connect_iw()
3700 ret = cma_modify_qp_rtr(id_priv, conn_param); in cma_connect_iw()
3709 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num; in cma_connect_iw()
3712 iw_param.qpn = id_priv->qp_num; in cma_connect_iw()
3718 id_priv->cm_id.iw = NULL; in cma_connect_iw()
3725 struct rdma_id_private *id_priv; in rdma_connect() local
3728 id_priv = container_of(id, struct rdma_id_private, id); in rdma_connect()
3729 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) in rdma_connect()
3733 id_priv->qp_num = conn_param->qp_num; in rdma_connect()
3734 id_priv->srq = conn_param->srq; in rdma_connect()
3739 ret = cma_resolve_ib_udp(id_priv, conn_param); in rdma_connect()
3741 ret = cma_connect_ib(id_priv, conn_param); in rdma_connect()
3743 ret = cma_connect_iw(id_priv, conn_param); in rdma_connect()
3751 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); in rdma_connect()
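
The consumer-side call, with illustrative connection parameters; rdma_connect() is legal only in ROUTE_RESOLVED, as the cma_comp_exch() on line 3729 enforces:

	struct rdma_conn_param param = {
		.responder_resources = 1,	/* illustrative values */
		.initiator_depth     = 1,
		.retry_count         = 7,
	};

	ret = rdma_connect(id, &param);
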
3756 static int cma_accept_ib(struct rdma_id_private *id_priv, in cma_accept_ib() argument
3762 ret = cma_modify_qp_rtr(id_priv, conn_param); in cma_accept_ib()
3766 ret = cma_modify_qp_rts(id_priv, conn_param); in cma_accept_ib()
3771 rep.qp_num = id_priv->qp_num; in cma_accept_ib()
3772 rep.starting_psn = id_priv->seq_num; in cma_accept_ib()
3780 rep.srq = id_priv->srq ? 1 : 0; in cma_accept_ib()
3782 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep); in cma_accept_ib()
3787 static int cma_accept_iw(struct rdma_id_private *id_priv, in cma_accept_iw() argument
3796 ret = cma_modify_qp_rtr(id_priv, conn_param); in cma_accept_iw()
3804 if (id_priv->id.qp) { in cma_accept_iw()
3805 iw_param.qpn = id_priv->qp_num; in cma_accept_iw()
3809 return iw_cm_accept(id_priv->cm_id.iw, &iw_param); in cma_accept_iw()
3812 static int cma_send_sidr_rep(struct rdma_id_private *id_priv, in cma_send_sidr_rep() argument
3822 ret = cma_set_qkey(id_priv, qkey); in cma_send_sidr_rep()
3825 rep.qp_num = id_priv->qp_num; in cma_send_sidr_rep()
3826 rep.qkey = id_priv->qkey; in cma_send_sidr_rep()
3831 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep); in cma_send_sidr_rep()
3837 struct rdma_id_private *id_priv; in __rdma_accept() local
3840 id_priv = container_of(id, struct rdma_id_private, id); in __rdma_accept()
3843 id_priv->res.kern_name = caller; in __rdma_accept()
3845 rdma_restrack_set_task(&id_priv->res, current); in __rdma_accept()
3847 if (!cma_comp(id_priv, RDMA_CM_CONNECT)) in __rdma_accept()
3851 id_priv->qp_num = conn_param->qp_num; in __rdma_accept()
3852 id_priv->srq = conn_param->srq; in __rdma_accept()
3858 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, in __rdma_accept()
3863 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, in __rdma_accept()
3867 ret = cma_accept_ib(id_priv, conn_param); in __rdma_accept()
3869 ret = cma_rep_recv(id_priv); in __rdma_accept()
3872 ret = cma_accept_iw(id_priv, conn_param); in __rdma_accept()
3881 cma_modify_qp_err(id_priv); in __rdma_accept()
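
The passive-side mirror is rdma_accept() (a wrapper around __rdma_accept() above), legal only while the id is in RDMA_CM_CONNECT (line 3847). A hypothetical CONNECT_REQUEST handler; child_id stands for the new id the handler receives with the event:

	struct rdma_conn_param param = {
		.responder_resources = 1,	/* illustrative */
		.initiator_depth     = 1,
	};

	ret = rdma_accept(child_id, &param);
	if (ret)
		rdma_reject(child_id, NULL, 0);	/* or just destroy the child id */
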
3889 struct rdma_id_private *id_priv; in rdma_notify() local
3892 id_priv = container_of(id, struct rdma_id_private, id); in rdma_notify()
3893 if (!id_priv->cm_id.ib) in rdma_notify()
3898 ret = ib_cm_notify(id_priv->cm_id.ib, event); in rdma_notify()
3911 struct rdma_id_private *id_priv; in rdma_reject() local
3914 id_priv = container_of(id, struct rdma_id_private, id); in rdma_reject()
3915 if (!id_priv->cm_id.ib) in rdma_reject()
3920 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, in rdma_reject()
3923 ret = ib_send_cm_rej(id_priv->cm_id.ib, in rdma_reject()
3927 ret = iw_cm_reject(id_priv->cm_id.iw, in rdma_reject()
3938 struct rdma_id_private *id_priv; in rdma_disconnect() local
3941 id_priv = container_of(id, struct rdma_id_private, id); in rdma_disconnect()
3942 if (!id_priv->cm_id.ib) in rdma_disconnect()
3946 ret = cma_modify_qp_err(id_priv); in rdma_disconnect()
3950 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) in rdma_disconnect()
3951 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); in rdma_disconnect()
3953 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); in rdma_disconnect()
3964 struct rdma_id_private *id_priv; in cma_ib_mc_handler() local
3969 id_priv = mc->id_priv; in cma_ib_mc_handler()
3970 mutex_lock(&id_priv->handler_mutex); in cma_ib_mc_handler()
3971 if (id_priv->state != RDMA_CM_ADDR_BOUND && in cma_ib_mc_handler()
3972 id_priv->state != RDMA_CM_ADDR_RESOLVED) in cma_ib_mc_handler()
3976 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey)); in cma_ib_mc_handler()
3980 mutex_lock(&id_priv->qp_mutex); in cma_ib_mc_handler()
3981 if (!status && id_priv->id.qp) { in cma_ib_mc_handler()
3982 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid, in cma_ib_mc_handler()
3988 mutex_unlock(&id_priv->qp_mutex); in cma_ib_mc_handler()
3994 &id_priv->id.route.addr.dev_addr; in cma_ib_mc_handler()
3998 id_priv->cma_dev->default_gid_type[id_priv->id.port_num - in cma_ib_mc_handler()
3999 rdma_start_port(id_priv->cma_dev->device)]; in cma_ib_mc_handler()
4002 ret = ib_init_ah_from_mcmember(id_priv->id.device, in cma_ib_mc_handler()
4003 id_priv->id.port_num, in cma_ib_mc_handler()
4017 ret = id_priv->id.event_handler(&id_priv->id, &event); in cma_ib_mc_handler()
4021 cma_exch(id_priv, RDMA_CM_DESTROYING); in cma_ib_mc_handler()
4022 mutex_unlock(&id_priv->handler_mutex); in cma_ib_mc_handler()
4023 rdma_destroy_id(&id_priv->id); in cma_ib_mc_handler()
4028 mutex_unlock(&id_priv->handler_mutex); in cma_ib_mc_handler()
4032 static void cma_set_mgid(struct rdma_id_private *id_priv, in cma_set_mgid() argument
4036 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_set_mgid()
4051 if (id_priv->id.ps == RDMA_PS_UDP) in cma_set_mgid()
4056 if (id_priv->id.ps == RDMA_PS_UDP) in cma_set_mgid()
4062 static int cma_join_ib_multicast(struct rdma_id_private *id_priv, in cma_join_ib_multicast() argument
4066 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_join_ib_multicast()
4071 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num, in cma_join_ib_multicast()
4076 ret = cma_set_qkey(id_priv, 0); in cma_join_ib_multicast()
4080 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid); in cma_join_ib_multicast()
4081 rec.qkey = cpu_to_be32(id_priv->qkey); in cma_join_ib_multicast()
4088 id_priv->id.device, in cma_join_ib_multicast()
4089 id_priv->id.port_num))) { in cma_join_ib_multicast()
4092 id_priv->id.device->name, id_priv->id.port_num); in cma_join_ib_multicast()
4102 if (id_priv->id.ps == RDMA_PS_IPOIB) in cma_join_ib_multicast()
4109 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device, in cma_join_ib_multicast()
4110 id_priv->id.port_num, &rec, in cma_join_ib_multicast()
4157 static int cma_iboe_join_multicast(struct rdma_id_private *id_priv, in cma_iboe_join_multicast() argument
4161 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; in cma_iboe_join_multicast()
4183 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num - in cma_iboe_join_multicast()
4184 rdma_start_port(id_priv->cma_dev->device)]; in cma_iboe_join_multicast()
4188 if (id_priv->id.ps == RDMA_PS_UDP) in cma_iboe_join_multicast()
4219 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, in cma_iboe_join_multicast()
4221 work->id = id_priv; in cma_iboe_join_multicast()
4239 struct rdma_id_private *id_priv; in rdma_join_multicast() local
4246 id_priv = container_of(id, struct rdma_id_private, id); in rdma_join_multicast()
4247 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && in rdma_join_multicast()
4248 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) in rdma_join_multicast()
4257 mc->id_priv = id_priv; in rdma_join_multicast()
4262 ret = cma_iboe_join_multicast(id_priv, mc); in rdma_join_multicast()
4266 ret = cma_join_ib_multicast(id_priv, mc); in rdma_join_multicast()
4274 spin_lock(&id_priv->lock); in rdma_join_multicast()
4275 list_add(&mc->list, &id_priv->mc_list); in rdma_join_multicast()
4276 spin_unlock(&id_priv->lock); in rdma_join_multicast()
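
Joining is likewise gated on ADDR_BOUND or ADDR_RESOLVED (lines 4247-4248). A hypothetical caller; mc_addr and my_mc_ctx are placeholders, and the join-state constant is assumed from include/rdma/ib_sa.h:

	ret = rdma_join_multicast(id, (struct sockaddr *)&mc_addr,
				  BIT(FULLMEMBER_JOIN), my_mc_ctx);
	/* ... traffic ... */
	rdma_leave_multicast(id, (struct sockaddr *)&mc_addr);
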
4287 struct rdma_id_private *id_priv; in rdma_leave_multicast() local
4290 id_priv = container_of(id, struct rdma_id_private, id); in rdma_leave_multicast()
4291 spin_lock_irq(&id_priv->lock); in rdma_leave_multicast()
4292 list_for_each_entry(mc, &id_priv->mc_list, list) { in rdma_leave_multicast()
4295 spin_unlock_irq(&id_priv->lock); in rdma_leave_multicast()
4302 BUG_ON(id_priv->cma_dev->device != id->device); in rdma_leave_multicast()
4308 cma_leave_roce_mc_group(id_priv, mc); in rdma_leave_multicast()
4313 spin_unlock_irq(&id_priv->lock); in rdma_leave_multicast()
4317 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv) in cma_netdev_change() argument
4322 dev_addr = &id_priv->id.route.addr.dev_addr; in cma_netdev_change()
4328 ndev->name, &id_priv->id); in cma_netdev_change()
4334 work->id = id_priv; in cma_netdev_change()
4336 atomic_inc(&id_priv->refcount); in cma_netdev_change()
4348 struct rdma_id_private *id_priv; in cma_netdev_callback() local
4359 list_for_each_entry(id_priv, &cma_dev->id_list, list) { in cma_netdev_callback()
4360 ret = cma_netdev_change(ndev, id_priv); in cma_netdev_callback()
4377 struct rdma_id_private *id_priv; in cma_add_one() local
4417 list_for_each_entry(id_priv, &listen_any_list, list) in cma_add_one()
4418 cma_listen_on_dev(id_priv, cma_dev); in cma_add_one()
4432 static int cma_remove_id_dev(struct rdma_id_private *id_priv) in cma_remove_id_dev() argument
4439 state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL); in cma_remove_id_dev()
4443 cma_cancel_operation(id_priv, state); in cma_remove_id_dev()
4444 mutex_lock(&id_priv->handler_mutex); in cma_remove_id_dev()
4447 if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL)) in cma_remove_id_dev()
4451 ret = id_priv->id.event_handler(&id_priv->id, &event); in cma_remove_id_dev()
4453 mutex_unlock(&id_priv->handler_mutex); in cma_remove_id_dev()
4459 struct rdma_id_private *id_priv; in cma_process_remove() local
4464 id_priv = list_entry(cma_dev->id_list.next, in cma_process_remove()
4467 list_del(&id_priv->listen_list); in cma_process_remove()
4468 list_del_init(&id_priv->list); in cma_process_remove()
4469 atomic_inc(&id_priv->refcount); in cma_process_remove()
4472 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv); in cma_process_remove()
4473 cma_deref_id(id_priv); in cma_process_remove()
4475 rdma_destroy_id(&id_priv->id); in cma_process_remove()
4506 struct rdma_id_private *id_priv; in cma_get_id_stats() local
4524 list_for_each_entry(id_priv, &cma_dev->id_list, list) { in cma_get_id_stats()
4538 id = &id_priv->id; in cma_get_id_stats()
4545 rdma_addr_size(cma_src_addr(id_priv)), in cma_get_id_stats()
4546 cma_src_addr(id_priv), in cma_get_id_stats()
4550 rdma_addr_size(cma_dst_addr(id_priv)), in cma_get_id_stats()
4551 cma_dst_addr(id_priv), in cma_get_id_stats()
4555 id_stats->pid = task_pid_vnr(id_priv->res.task); in cma_get_id_stats()
4557 id_stats->cm_state = id_priv->state; in cma_get_id_stats()
4558 id_stats->qp_num = id_priv->qp_num; in cma_get_id_stats()