1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /*
3 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
4 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
5 * Copyright (c) 1999-2019, Mellanox Technologies, Inc. All rights reserved.
6 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
7 */
8
9 #include <linux/completion.h>
10 #include <linux/in.h>
11 #include <linux/in6.h>
12 #include <linux/mutex.h>
13 #include <linux/random.h>
14 #include <linux/igmp.h>
15 #include <linux/xarray.h>
16 #include <linux/inetdevice.h>
17 #include <linux/slab.h>
18 #include <linux/module.h>
19 #include <net/route.h>
20
21 #include <net/net_namespace.h>
22 #include <net/netns/generic.h>
23 #include <net/tcp.h>
24 #include <net/ipv6.h>
25 #include <net/ip_fib.h>
26 #include <net/ip6_route.h>
27
28 #include <rdma/rdma_cm.h>
29 #include <rdma/rdma_cm_ib.h>
30 #include <rdma/rdma_netlink.h>
31 #include <rdma/ib.h>
32 #include <rdma/ib_cache.h>
33 #include <rdma/ib_cm.h>
34 #include <rdma/ib_sa.h>
35 #include <rdma/iw_cm.h>
36
37 #include "core_priv.h"
38 #include "cma_priv.h"
39 #include "cma_trace.h"
40
41 MODULE_AUTHOR("Sean Hefty");
42 MODULE_DESCRIPTION("Generic RDMA CM Agent");
43 MODULE_LICENSE("Dual BSD/GPL");
44
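/*
 * Note: CMA_CM_RESPONSE_TIMEOUT and CMA_IBOE_PACKET_LIFETIME below are IB
 * "time exponent" encodings; a value of n corresponds to roughly
 * 4.096 usec * 2^n.
 */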
45 #define CMA_CM_RESPONSE_TIMEOUT 20
46 #define CMA_MAX_CM_RETRIES 15
47 #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
48 #define CMA_IBOE_PACKET_LIFETIME 18
49 #define CMA_PREFERRED_ROCE_GID_TYPE IB_GID_TYPE_ROCE_UDP_ENCAP
50
51 static const char * const cma_events[] = {
52 [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
53 [RDMA_CM_EVENT_ADDR_ERROR] = "address error",
54 [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ",
55 [RDMA_CM_EVENT_ROUTE_ERROR] = "route error",
56 [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request",
57 [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
58 [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error",
59 [RDMA_CM_EVENT_UNREACHABLE] = "unreachable",
60 [RDMA_CM_EVENT_REJECTED] = "rejected",
61 [RDMA_CM_EVENT_ESTABLISHED] = "established",
62 [RDMA_CM_EVENT_DISCONNECTED] = "disconnected",
63 [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal",
64 [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join",
65 [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error",
66 [RDMA_CM_EVENT_ADDR_CHANGE] = "address change",
67 [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",
68 };
69
70 static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr,
71 union ib_gid *mgid);
72
73 const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
74 {
75 size_t index = event;
76
77 return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
78 cma_events[index] : "unrecognized event";
79 }
80 EXPORT_SYMBOL(rdma_event_msg);
81
82 const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
83 int reason)
84 {
85 if (rdma_ib_or_roce(id->device, id->port_num))
86 return ibcm_reject_msg(reason);
87
88 if (rdma_protocol_iwarp(id->device, id->port_num))
89 return iwcm_reject_msg(reason);
90
91 WARN_ON_ONCE(1);
92 return "unrecognized transport";
93 }
94 EXPORT_SYMBOL(rdma_reject_msg);
95
96 /**
97 * rdma_is_consumer_reject - return true if the consumer rejected the connect
98 * request.
99 * @id: Communication identifier that received the REJECT event.
100 * @reason: Value returned in the REJECT event status field.
101 */
102 static bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason)
103 {
104 if (rdma_ib_or_roce(id->device, id->port_num))
105 return reason == IB_CM_REJ_CONSUMER_DEFINED;
106
107 if (rdma_protocol_iwarp(id->device, id->port_num))
108 return reason == -ECONNREFUSED;
109
110 WARN_ON_ONCE(1);
111 return false;
112 }
113
114 const void *rdma_consumer_reject_data(struct rdma_cm_id *id,
115 struct rdma_cm_event *ev, u8 *data_len)
116 {
117 const void *p;
118
119 if (rdma_is_consumer_reject(id, ev->status)) {
120 *data_len = ev->param.conn.private_data_len;
121 p = ev->param.conn.private_data;
122 } else {
123 *data_len = 0;
124 p = NULL;
125 }
126 return p;
127 }
128 EXPORT_SYMBOL(rdma_consumer_reject_data);
129
130 /**
131 * rdma_iw_cm_id() - return the iw_cm_id pointer for this cm_id.
132 * @id: Communication Identifier
133 */
134 struct iw_cm_id *rdma_iw_cm_id(struct rdma_cm_id *id)
135 {
136 struct rdma_id_private *id_priv;
137
138 id_priv = container_of(id, struct rdma_id_private, id);
139 if (id->device->node_type == RDMA_NODE_RNIC)
140 return id_priv->cm_id.iw;
141 return NULL;
142 }
143 EXPORT_SYMBOL(rdma_iw_cm_id);
144
145 /**
146 * rdma_res_to_id() - return the rdma_cm_id pointer for this restrack.
147 * @res: rdma resource tracking entry pointer
148 */
149 struct rdma_cm_id *rdma_res_to_id(struct rdma_restrack_entry *res)
150 {
151 struct rdma_id_private *id_priv =
152 container_of(res, struct rdma_id_private, res);
153
154 return &id_priv->id;
155 }
156 EXPORT_SYMBOL(rdma_res_to_id);
157
158 static int cma_add_one(struct ib_device *device);
159 static void cma_remove_one(struct ib_device *device, void *client_data);
160
161 static struct ib_client cma_client = {
162 .name = "cma",
163 .add = cma_add_one,
164 .remove = cma_remove_one
165 };
166
167 static struct ib_sa_client sa_client;
168 static LIST_HEAD(dev_list);
169 static LIST_HEAD(listen_any_list);
170 static DEFINE_MUTEX(lock);
171 static struct workqueue_struct *cma_wq;
172 static unsigned int cma_pernet_id;
173
174 struct cma_pernet {
175 struct xarray tcp_ps;
176 struct xarray udp_ps;
177 struct xarray ipoib_ps;
178 struct xarray ib_ps;
179 };
180
181 static struct cma_pernet *cma_pernet(struct net *net)
182 {
183 return net_generic(net, cma_pernet_id);
184 }
185
186 static
187 struct xarray *cma_pernet_xa(struct net *net, enum rdma_ucm_port_space ps)
188 {
189 struct cma_pernet *pernet = cma_pernet(net);
190
191 switch (ps) {
192 case RDMA_PS_TCP:
193 return &pernet->tcp_ps;
194 case RDMA_PS_UDP:
195 return &pernet->udp_ps;
196 case RDMA_PS_IPOIB:
197 return &pernet->ipoib_ps;
198 case RDMA_PS_IB:
199 return &pernet->ib_ps;
200 default:
201 return NULL;
202 }
203 }
204
205 struct cma_device {
206 struct list_head list;
207 struct ib_device *device;
208 struct completion comp;
209 refcount_t refcount;
210 struct list_head id_list;
211 enum ib_gid_type *default_gid_type;
212 u8 *default_roce_tos;
213 };
214
215 struct rdma_bind_list {
216 enum rdma_ucm_port_space ps;
217 struct hlist_head owners;
218 unsigned short port;
219 };
220
221 static int cma_ps_alloc(struct net *net, enum rdma_ucm_port_space ps,
222 struct rdma_bind_list *bind_list, int snum)
223 {
224 struct xarray *xa = cma_pernet_xa(net, ps);
225
226 return xa_insert(xa, snum, bind_list, GFP_KERNEL);
227 }
228
229 static struct rdma_bind_list *cma_ps_find(struct net *net,
230 enum rdma_ucm_port_space ps, int snum)
231 {
232 struct xarray *xa = cma_pernet_xa(net, ps);
233
234 return xa_load(xa, snum);
235 }
236
237 static void cma_ps_remove(struct net *net, enum rdma_ucm_port_space ps,
238 int snum)
239 {
240 struct xarray *xa = cma_pernet_xa(net, ps);
241
242 xa_erase(xa, snum);
243 }
244
245 enum {
246 CMA_OPTION_AFONLY,
247 };
248
249 void cma_dev_get(struct cma_device *cma_dev)
250 {
251 refcount_inc(&cma_dev->refcount);
252 }
253
254 void cma_dev_put(struct cma_device *cma_dev)
255 {
256 if (refcount_dec_and_test(&cma_dev->refcount))
257 complete(&cma_dev->comp);
258 }
259
260 struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
261 void *cookie)
262 {
263 struct cma_device *cma_dev;
264 struct cma_device *found_cma_dev = NULL;
265
266 mutex_lock(&lock);
267
268 list_for_each_entry(cma_dev, &dev_list, list)
269 if (filter(cma_dev->device, cookie)) {
270 found_cma_dev = cma_dev;
271 break;
272 }
273
274 if (found_cma_dev)
275 cma_dev_get(found_cma_dev);
276 mutex_unlock(&lock);
277 return found_cma_dev;
278 }
279
280 int cma_get_default_gid_type(struct cma_device *cma_dev,
281 u32 port)
282 {
283 if (!rdma_is_port_valid(cma_dev->device, port))
284 return -EINVAL;
285
286 return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];
287 }
288
289 int cma_set_default_gid_type(struct cma_device *cma_dev,
290 u32 port,
291 enum ib_gid_type default_gid_type)
292 {
293 unsigned long supported_gids;
294
295 if (!rdma_is_port_valid(cma_dev->device, port))
296 return -EINVAL;
297
298 if (default_gid_type == IB_GID_TYPE_IB &&
299 rdma_protocol_roce_eth_encap(cma_dev->device, port))
300 default_gid_type = IB_GID_TYPE_ROCE;
301
302 supported_gids = roce_gid_type_mask_support(cma_dev->device, port);
303
304 if (!(supported_gids & 1 << default_gid_type))
305 return -EINVAL;
306
307 cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =
308 default_gid_type;
309
310 return 0;
311 }
312
313 int cma_get_default_roce_tos(struct cma_device *cma_dev, u32 port)
314 {
315 if (!rdma_is_port_valid(cma_dev->device, port))
316 return -EINVAL;
317
318 return cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)];
319 }
320
321 int cma_set_default_roce_tos(struct cma_device *cma_dev, u32 port,
322 u8 default_roce_tos)
323 {
324 if (!rdma_is_port_valid(cma_dev->device, port))
325 return -EINVAL;
326
327 cma_dev->default_roce_tos[port - rdma_start_port(cma_dev->device)] =
328 default_roce_tos;
329
330 return 0;
331 }
332 struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
333 {
334 return cma_dev->device;
335 }
336
337 /*
338 * Device removal can occur at any time, so we need extra handling to
339 * serialize notifying the user of device removal with other callbacks.
340 * We do this by disabling removal notification while a callback is in process,
341 * and reporting it after the callback completes.
342 */
343
344 struct cma_multicast {
345 struct rdma_id_private *id_priv;
346 union {
347 struct ib_sa_multicast *sa_mc;
348 struct {
349 struct work_struct work;
350 struct rdma_cm_event event;
351 } iboe_join;
352 };
353 struct list_head list;
354 void *context;
355 struct sockaddr_storage addr;
356 u8 join_state;
357 };
358
359 struct cma_work {
360 struct work_struct work;
361 struct rdma_id_private *id;
362 enum rdma_cm_state old_state;
363 enum rdma_cm_state new_state;
364 struct rdma_cm_event event;
365 };
366
367 union cma_ip_addr {
368 struct in6_addr ip6;
369 struct {
370 __be32 pad[3];
371 __be32 addr;
372 } ip4;
373 };
374
375 struct cma_hdr {
376 u8 cma_version;
377 u8 ip_version; /* IP version: 7:4 */
378 __be16 port;
379 union cma_ip_addr src_addr;
380 union cma_ip_addr dst_addr;
381 };
382
383 #define CMA_VERSION 0x00
384
385 struct cma_req_info {
386 struct sockaddr_storage listen_addr_storage;
387 struct sockaddr_storage src_addr_storage;
388 struct ib_device *device;
389 union ib_gid local_gid;
390 __be64 service_id;
391 int port;
392 bool has_gid;
393 u16 pkey;
394 };
395
396 static int cma_comp_exch(struct rdma_id_private *id_priv,
397 enum rdma_cm_state comp, enum rdma_cm_state exch)
398 {
399 unsigned long flags;
400 int ret;
401
402 /*
403 * The FSM uses a funny double locking where state is protected by both
404 * the handler_mutex and the spinlock. State is not allowed to change
405 * to/from a handler_mutex protected value without also holding
406 * handler_mutex.
407 */
408 if (comp == RDMA_CM_CONNECT || exch == RDMA_CM_CONNECT)
409 lockdep_assert_held(&id_priv->handler_mutex);
410
411 spin_lock_irqsave(&id_priv->lock, flags);
412 if ((ret = (id_priv->state == comp)))
413 id_priv->state = exch;
414 spin_unlock_irqrestore(&id_priv->lock, flags);
415 return ret;
416 }
417
418 static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
419 {
420 return hdr->ip_version >> 4;
421 }
422
423 static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
424 {
425 hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);
426 }
427
428 static int cma_igmp_send(struct net_device *ndev, union ib_gid *mgid, bool join)
429 {
430 struct in_device *in_dev = NULL;
431
432 if (ndev) {
433 rtnl_lock();
434 in_dev = __in_dev_get_rtnl(ndev);
435 if (in_dev) {
436 if (join)
437 ip_mc_inc_group(in_dev,
438 *(__be32 *)(mgid->raw + 12));
439 else
440 ip_mc_dec_group(in_dev,
441 *(__be32 *)(mgid->raw + 12));
442 }
443 rtnl_unlock();
444 }
445 return (in_dev) ? 0 : -ENODEV;
446 }
447
448 static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
449 struct cma_device *cma_dev)
450 {
451 cma_dev_get(cma_dev);
452 id_priv->cma_dev = cma_dev;
453 id_priv->id.device = cma_dev->device;
454 id_priv->id.route.addr.dev_addr.transport =
455 rdma_node_get_transport(cma_dev->device->node_type);
456 list_add_tail(&id_priv->list, &cma_dev->id_list);
457
458 trace_cm_id_attach(id_priv, cma_dev->device);
459 }
460
461 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
462 struct cma_device *cma_dev)
463 {
464 _cma_attach_to_dev(id_priv, cma_dev);
465 id_priv->gid_type =
466 cma_dev->default_gid_type[id_priv->id.port_num -
467 rdma_start_port(cma_dev->device)];
468 }
469
470 static void cma_release_dev(struct rdma_id_private *id_priv)
471 {
472 mutex_lock(&lock);
473 list_del(&id_priv->list);
474 cma_dev_put(id_priv->cma_dev);
475 id_priv->cma_dev = NULL;
476 id_priv->id.device = NULL;
477 if (id_priv->id.route.addr.dev_addr.sgid_attr) {
478 rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr);
479 id_priv->id.route.addr.dev_addr.sgid_attr = NULL;
480 }
481 mutex_unlock(&lock);
482 }
483
484 static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
485 {
486 return (struct sockaddr *) &id_priv->id.route.addr.src_addr;
487 }
488
489 static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
490 {
491 return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
492 }
493
494 static inline unsigned short cma_family(struct rdma_id_private *id_priv)
495 {
496 return id_priv->id.route.addr.src_addr.ss_family;
497 }
498
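/*
 * Set the QKEY used by UD-style IDs. Passing qkey == 0 selects a default:
 * RDMA_UDP_QKEY for the UDP and IB port spaces, or the multicast member
 * record's qkey for IPoIB. A nonzero qkey that conflicts with one already
 * set on the id fails with -EINVAL.
 */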
499 static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
500 {
501 struct ib_sa_mcmember_rec rec;
502 int ret = 0;
503
504 if (id_priv->qkey) {
505 if (qkey && id_priv->qkey != qkey)
506 return -EINVAL;
507 return 0;
508 }
509
510 if (qkey) {
511 id_priv->qkey = qkey;
512 return 0;
513 }
514
515 switch (id_priv->id.ps) {
516 case RDMA_PS_UDP:
517 case RDMA_PS_IB:
518 id_priv->qkey = RDMA_UDP_QKEY;
519 break;
520 case RDMA_PS_IPOIB:
521 ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
522 ret = ib_sa_get_mcmember_rec(id_priv->id.device,
523 id_priv->id.port_num, &rec.mgid,
524 &rec);
525 if (!ret)
526 id_priv->qkey = be32_to_cpu(rec.qkey);
527 break;
528 default:
529 break;
530 }
531 return ret;
532 }
533
534 static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
535 {
536 dev_addr->dev_type = ARPHRD_INFINIBAND;
537 rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
538 ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));
539 }
540
541 static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
542 {
543 int ret;
544
545 if (addr->sa_family != AF_IB) {
546 ret = rdma_translate_ip(addr, dev_addr);
547 } else {
548 cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);
549 ret = 0;
550 }
551
552 return ret;
553 }
554
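/*
 * Return the GID table entry (sgid_attr) for @gid of @gid_type on @port,
 * after checking that the port is reachable from the id's network namespace
 * and that its protocol matches the bound device type. For RoCE ports the
 * lookup is restricted to the netdevice the id is bound to.
 */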
555 static const struct ib_gid_attr *
556 cma_validate_port(struct ib_device *device, u32 port,
557 enum ib_gid_type gid_type,
558 union ib_gid *gid,
559 struct rdma_id_private *id_priv)
560 {
561 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
562 int bound_if_index = dev_addr->bound_dev_if;
563 const struct ib_gid_attr *sgid_attr;
564 int dev_type = dev_addr->dev_type;
565 struct net_device *ndev = NULL;
566
567 if (!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net))
568 return ERR_PTR(-ENODEV);
569
570 if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))
571 return ERR_PTR(-ENODEV);
572
573 if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))
574 return ERR_PTR(-ENODEV);
575
576 if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
577 ndev = dev_get_by_index(dev_addr->net, bound_if_index);
578 if (!ndev)
579 return ERR_PTR(-ENODEV);
580 } else {
581 gid_type = IB_GID_TYPE_IB;
582 }
583
584 sgid_attr = rdma_find_gid_by_port(device, gid, gid_type, port, ndev);
585 if (ndev)
586 dev_put(ndev);
587 return sgid_attr;
588 }
589
590 static void cma_bind_sgid_attr(struct rdma_id_private *id_priv,
591 const struct ib_gid_attr *sgid_attr)
592 {
593 WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr);
594 id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr;
595 }
596
597 /**
598 * cma_acquire_dev_by_src_ip - Acquire cma device, port, gid attribute
599 * based on source ip address.
600 * @id_priv: cm_id which should be bound to cma device
601 *
602 * cma_acquire_dev_by_src_ip() binds cm id to cma device, port and GID attribute
603 * based on source IP address. It returns 0 on success or error code otherwise.
604 * It is applicable to active and passive side cm_id.
605 */
606 static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
607 {
608 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
609 const struct ib_gid_attr *sgid_attr;
610 union ib_gid gid, iboe_gid, *gidp;
611 struct cma_device *cma_dev;
612 enum ib_gid_type gid_type;
613 int ret = -ENODEV;
614 u32 port;
615
616 if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
617 id_priv->id.ps == RDMA_PS_IPOIB)
618 return -EINVAL;
619
620 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
621 &iboe_gid);
622
623 memcpy(&gid, dev_addr->src_dev_addr +
624 rdma_addr_gid_offset(dev_addr), sizeof(gid));
625
626 mutex_lock(&lock);
627 list_for_each_entry(cma_dev, &dev_list, list) {
628 rdma_for_each_port (cma_dev->device, port) {
629 gidp = rdma_protocol_roce(cma_dev->device, port) ?
630 &iboe_gid : &gid;
631 gid_type = cma_dev->default_gid_type[port - 1];
632 sgid_attr = cma_validate_port(cma_dev->device, port,
633 gid_type, gidp, id_priv);
634 if (!IS_ERR(sgid_attr)) {
635 id_priv->id.port_num = port;
636 cma_bind_sgid_attr(id_priv, sgid_attr);
637 cma_attach_to_dev(id_priv, cma_dev);
638 ret = 0;
639 goto out;
640 }
641 }
642 }
643 out:
644 mutex_unlock(&lock);
645 return ret;
646 }
647
648 /**
649 * cma_ib_acquire_dev - Acquire cma device, port and SGID attribute
650 * @id_priv: cm id to bind to cma device
651 * @listen_id_priv: listener cm id to match against
652 * @req: Pointer to req structure containing incoming
653 * request information
654 * cma_ib_acquire_dev() acquires cma device, port and SGID attribute when
655 * rdma device matches for listen_id and incoming request. It also verifies
656 * that a GID table entry is present for the source address.
657 * Returns 0 on success, or returns error code otherwise.
658 */
659 static int cma_ib_acquire_dev(struct rdma_id_private *id_priv,
660 const struct rdma_id_private *listen_id_priv,
661 struct cma_req_info *req)
662 {
663 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
664 const struct ib_gid_attr *sgid_attr;
665 enum ib_gid_type gid_type;
666 union ib_gid gid;
667
668 if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
669 id_priv->id.ps == RDMA_PS_IPOIB)
670 return -EINVAL;
671
672 if (rdma_protocol_roce(req->device, req->port))
673 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
674 &gid);
675 else
676 memcpy(&gid, dev_addr->src_dev_addr +
677 rdma_addr_gid_offset(dev_addr), sizeof(gid));
678
679 gid_type = listen_id_priv->cma_dev->default_gid_type[req->port - 1];
680 sgid_attr = cma_validate_port(req->device, req->port,
681 gid_type, &gid, id_priv);
682 if (IS_ERR(sgid_attr))
683 return PTR_ERR(sgid_attr);
684
685 id_priv->id.port_num = req->port;
686 cma_bind_sgid_attr(id_priv, sgid_attr);
687 /* Need to acquire lock to protect against reader
688 * of cma_dev->id_list such as cma_netdev_callback() and
689 * cma_process_remove().
690 */
691 mutex_lock(&lock);
692 cma_attach_to_dev(id_priv, listen_id_priv->cma_dev);
693 mutex_unlock(&lock);
694 rdma_restrack_add(&id_priv->res);
695 return 0;
696 }
697
698 static int cma_iw_acquire_dev(struct rdma_id_private *id_priv,
699 const struct rdma_id_private *listen_id_priv)
700 {
701 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
702 const struct ib_gid_attr *sgid_attr;
703 struct cma_device *cma_dev;
704 enum ib_gid_type gid_type;
705 int ret = -ENODEV;
706 union ib_gid gid;
707 u32 port;
708
709 if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
710 id_priv->id.ps == RDMA_PS_IPOIB)
711 return -EINVAL;
712
713 memcpy(&gid, dev_addr->src_dev_addr +
714 rdma_addr_gid_offset(dev_addr), sizeof(gid));
715
716 mutex_lock(&lock);
717
718 cma_dev = listen_id_priv->cma_dev;
719 port = listen_id_priv->id.port_num;
720 gid_type = listen_id_priv->gid_type;
721 sgid_attr = cma_validate_port(cma_dev->device, port,
722 gid_type, &gid, id_priv);
723 if (!IS_ERR(sgid_attr)) {
724 id_priv->id.port_num = port;
725 cma_bind_sgid_attr(id_priv, sgid_attr);
726 ret = 0;
727 goto out;
728 }
729
730 list_for_each_entry(cma_dev, &dev_list, list) {
731 rdma_for_each_port (cma_dev->device, port) {
732 if (listen_id_priv->cma_dev == cma_dev &&
733 listen_id_priv->id.port_num == port)
734 continue;
735
736 gid_type = cma_dev->default_gid_type[port - 1];
737 sgid_attr = cma_validate_port(cma_dev->device, port,
738 gid_type, &gid, id_priv);
739 if (!IS_ERR(sgid_attr)) {
740 id_priv->id.port_num = port;
741 cma_bind_sgid_attr(id_priv, sgid_attr);
742 ret = 0;
743 goto out;
744 }
745 }
746 }
747
748 out:
749 if (!ret) {
750 cma_attach_to_dev(id_priv, cma_dev);
751 rdma_restrack_add(&id_priv->res);
752 }
753
754 mutex_unlock(&lock);
755 return ret;
756 }
757
758 /*
759 * Select the source IB device and address to reach the destination IB address.
760 */
761 static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
762 {
763 struct cma_device *cma_dev, *cur_dev;
764 struct sockaddr_ib *addr;
765 union ib_gid gid, sgid, *dgid;
766 unsigned int p;
767 u16 pkey, index;
768 enum ib_port_state port_state;
769 int i;
770
771 cma_dev = NULL;
772 addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
773 dgid = (union ib_gid *) &addr->sib_addr;
774 pkey = ntohs(addr->sib_pkey);
775
776 mutex_lock(&lock);
777 list_for_each_entry(cur_dev, &dev_list, list) {
778 rdma_for_each_port (cur_dev->device, p) {
779 if (!rdma_cap_af_ib(cur_dev->device, p))
780 continue;
781
782 if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))
783 continue;
784
785 if (ib_get_cached_port_state(cur_dev->device, p, &port_state))
786 continue;
787 for (i = 0; !rdma_query_gid(cur_dev->device,
788 p, i, &gid);
789 i++) {
790 if (!memcmp(&gid, dgid, sizeof(gid))) {
791 cma_dev = cur_dev;
792 sgid = gid;
793 id_priv->id.port_num = p;
794 goto found;
795 }
796
797 if (!cma_dev && (gid.global.subnet_prefix ==
798 dgid->global.subnet_prefix) &&
799 port_state == IB_PORT_ACTIVE) {
800 cma_dev = cur_dev;
801 sgid = gid;
802 id_priv->id.port_num = p;
803 goto found;
804 }
805 }
806 }
807 }
808 mutex_unlock(&lock);
809 return -ENODEV;
810
811 found:
812 cma_attach_to_dev(id_priv, cma_dev);
813 rdma_restrack_add(&id_priv->res);
814 mutex_unlock(&lock);
815 addr = (struct sockaddr_ib *)cma_src_addr(id_priv);
816 memcpy(&addr->sib_addr, &sgid, sizeof(sgid));
817 cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);
818 return 0;
819 }
820
821 static void cma_id_get(struct rdma_id_private *id_priv)
822 {
823 refcount_inc(&id_priv->refcount);
824 }
825
826 static void cma_id_put(struct rdma_id_private *id_priv)
827 {
828 if (refcount_dec_and_test(&id_priv->refcount))
829 complete(&id_priv->comp);
830 }
831
832 static struct rdma_id_private *
833 __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
834 void *context, enum rdma_ucm_port_space ps,
835 enum ib_qp_type qp_type, const struct rdma_id_private *parent)
836 {
837 struct rdma_id_private *id_priv;
838
839 id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
840 if (!id_priv)
841 return ERR_PTR(-ENOMEM);
842
843 id_priv->state = RDMA_CM_IDLE;
844 id_priv->id.context = context;
845 id_priv->id.event_handler = event_handler;
846 id_priv->id.ps = ps;
847 id_priv->id.qp_type = qp_type;
848 id_priv->tos_set = false;
849 id_priv->timeout_set = false;
850 id_priv->min_rnr_timer_set = false;
851 id_priv->gid_type = IB_GID_TYPE_IB;
852 spin_lock_init(&id_priv->lock);
853 mutex_init(&id_priv->qp_mutex);
854 init_completion(&id_priv->comp);
855 refcount_set(&id_priv->refcount, 1);
856 mutex_init(&id_priv->handler_mutex);
857 INIT_LIST_HEAD(&id_priv->listen_list);
858 INIT_LIST_HEAD(&id_priv->mc_list);
859 get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
860 id_priv->id.route.addr.dev_addr.net = get_net(net);
861 id_priv->seq_num &= 0x00ffffff;
862
863 rdma_restrack_new(&id_priv->res, RDMA_RESTRACK_CM_ID);
864 if (parent)
865 rdma_restrack_parent_name(&id_priv->res, &parent->res);
866
867 return id_priv;
868 }
869
870 struct rdma_cm_id *
871 __rdma_create_kernel_id(struct net *net, rdma_cm_event_handler event_handler,
872 void *context, enum rdma_ucm_port_space ps,
873 enum ib_qp_type qp_type, const char *caller)
874 {
875 struct rdma_id_private *ret;
876
877 ret = __rdma_create_id(net, event_handler, context, ps, qp_type, NULL);
878 if (IS_ERR(ret))
879 return ERR_CAST(ret);
880
881 rdma_restrack_set_name(&ret->res, caller);
882 return &ret->id;
883 }
884 EXPORT_SYMBOL(__rdma_create_kernel_id);
885
886 struct rdma_cm_id *rdma_create_user_id(rdma_cm_event_handler event_handler,
887 void *context,
888 enum rdma_ucm_port_space ps,
889 enum ib_qp_type qp_type)
890 {
891 struct rdma_id_private *ret;
892
893 ret = __rdma_create_id(current->nsproxy->net_ns, event_handler, context,
894 ps, qp_type, NULL);
895 if (IS_ERR(ret))
896 return ERR_CAST(ret);
897
898 rdma_restrack_set_name(&ret->res, NULL);
899 return &ret->id;
900 }
901 EXPORT_SYMBOL(rdma_create_user_id);
902
903 static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
904 {
905 struct ib_qp_attr qp_attr;
906 int qp_attr_mask, ret;
907
908 qp_attr.qp_state = IB_QPS_INIT;
909 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
910 if (ret)
911 return ret;
912
913 ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
914 if (ret)
915 return ret;
916
917 qp_attr.qp_state = IB_QPS_RTR;
918 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
919 if (ret)
920 return ret;
921
922 qp_attr.qp_state = IB_QPS_RTS;
923 qp_attr.sq_psn = 0;
924 ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);
925
926 return ret;
927 }
928
929 static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
930 {
931 struct ib_qp_attr qp_attr;
932 int qp_attr_mask, ret;
933
934 qp_attr.qp_state = IB_QPS_INIT;
935 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
936 if (ret)
937 return ret;
938
939 return ib_modify_qp(qp, &qp_attr, qp_attr_mask);
940 }
941
942 int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
943 struct ib_qp_init_attr *qp_init_attr)
944 {
945 struct rdma_id_private *id_priv;
946 struct ib_qp *qp;
947 int ret;
948
949 id_priv = container_of(id, struct rdma_id_private, id);
950 if (id->device != pd->device) {
951 ret = -EINVAL;
952 goto out_err;
953 }
954
955 qp_init_attr->port_num = id->port_num;
956 qp = ib_create_qp(pd, qp_init_attr);
957 if (IS_ERR(qp)) {
958 ret = PTR_ERR(qp);
959 goto out_err;
960 }
961
962 if (id->qp_type == IB_QPT_UD)
963 ret = cma_init_ud_qp(id_priv, qp);
964 else
965 ret = cma_init_conn_qp(id_priv, qp);
966 if (ret)
967 goto out_destroy;
968
969 id->qp = qp;
970 id_priv->qp_num = qp->qp_num;
971 id_priv->srq = (qp->srq != NULL);
972 trace_cm_qp_create(id_priv, pd, qp_init_attr, 0);
973 return 0;
974 out_destroy:
975 ib_destroy_qp(qp);
976 out_err:
977 trace_cm_qp_create(id_priv, pd, qp_init_attr, ret);
978 return ret;
979 }
980 EXPORT_SYMBOL(rdma_create_qp);
981
982 void rdma_destroy_qp(struct rdma_cm_id *id)
983 {
984 struct rdma_id_private *id_priv;
985
986 id_priv = container_of(id, struct rdma_id_private, id);
987 trace_cm_qp_destroy(id_priv);
988 mutex_lock(&id_priv->qp_mutex);
989 ib_destroy_qp(id_priv->id.qp);
990 id_priv->id.qp = NULL;
991 mutex_unlock(&id_priv->qp_mutex);
992 }
993 EXPORT_SYMBOL(rdma_destroy_qp);
994
995 static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
996 struct rdma_conn_param *conn_param)
997 {
998 struct ib_qp_attr qp_attr;
999 int qp_attr_mask, ret;
1000
1001 mutex_lock(&id_priv->qp_mutex);
1002 if (!id_priv->id.qp) {
1003 ret = 0;
1004 goto out;
1005 }
1006
1007 /* Need to update QP attributes from default values. */
1008 qp_attr.qp_state = IB_QPS_INIT;
1009 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1010 if (ret)
1011 goto out;
1012
1013 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1014 if (ret)
1015 goto out;
1016
1017 qp_attr.qp_state = IB_QPS_RTR;
1018 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1019 if (ret)
1020 goto out;
1021
1022 BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
1023
1024 if (conn_param)
1025 qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
1026 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1027 out:
1028 mutex_unlock(&id_priv->qp_mutex);
1029 return ret;
1030 }
1031
1032 static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
1033 struct rdma_conn_param *conn_param)
1034 {
1035 struct ib_qp_attr qp_attr;
1036 int qp_attr_mask, ret;
1037
1038 mutex_lock(&id_priv->qp_mutex);
1039 if (!id_priv->id.qp) {
1040 ret = 0;
1041 goto out;
1042 }
1043
1044 qp_attr.qp_state = IB_QPS_RTS;
1045 ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);
1046 if (ret)
1047 goto out;
1048
1049 if (conn_param)
1050 qp_attr.max_rd_atomic = conn_param->initiator_depth;
1051 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
1052 out:
1053 mutex_unlock(&id_priv->qp_mutex);
1054 return ret;
1055 }
1056
1057 static int cma_modify_qp_err(struct rdma_id_private *id_priv)
1058 {
1059 struct ib_qp_attr qp_attr;
1060 int ret;
1061
1062 mutex_lock(&id_priv->qp_mutex);
1063 if (!id_priv->id.qp) {
1064 ret = 0;
1065 goto out;
1066 }
1067
1068 qp_attr.qp_state = IB_QPS_ERR;
1069 ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
1070 out:
1071 mutex_unlock(&id_priv->qp_mutex);
1072 return ret;
1073 }
1074
1075 static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
1076 struct ib_qp_attr *qp_attr, int *qp_attr_mask)
1077 {
1078 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
1079 int ret;
1080 u16 pkey;
1081
1082 if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
1083 pkey = 0xffff;
1084 else
1085 pkey = ib_addr_get_pkey(dev_addr);
1086
1087 ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
1088 pkey, &qp_attr->pkey_index);
1089 if (ret)
1090 return ret;
1091
1092 qp_attr->port_num = id_priv->id.port_num;
1093 *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
1094
1095 if (id_priv->id.qp_type == IB_QPT_UD) {
1096 ret = cma_set_qkey(id_priv, 0);
1097 if (ret)
1098 return ret;
1099
1100 qp_attr->qkey = id_priv->qkey;
1101 *qp_attr_mask |= IB_QP_QKEY;
1102 } else {
1103 qp_attr->qp_access_flags = 0;
1104 *qp_attr_mask |= IB_QP_ACCESS_FLAGS;
1105 }
1106 return 0;
1107 }
1108
1109 int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
1110 int *qp_attr_mask)
1111 {
1112 struct rdma_id_private *id_priv;
1113 int ret = 0;
1114
1115 id_priv = container_of(id, struct rdma_id_private, id);
1116 if (rdma_cap_ib_cm(id->device, id->port_num)) {
1117 if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
1118 ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
1119 else
1120 ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
1121 qp_attr_mask);
1122
1123 if (qp_attr->qp_state == IB_QPS_RTR)
1124 qp_attr->rq_psn = id_priv->seq_num;
1125 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
1126 if (!id_priv->cm_id.iw) {
1127 qp_attr->qp_access_flags = 0;
1128 *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
1129 } else
1130 ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
1131 qp_attr_mask);
1132 qp_attr->port_num = id_priv->id.port_num;
1133 *qp_attr_mask |= IB_QP_PORT;
1134 } else {
1135 ret = -ENOSYS;
1136 }
1137
1138 if ((*qp_attr_mask & IB_QP_TIMEOUT) && id_priv->timeout_set)
1139 qp_attr->timeout = id_priv->timeout;
1140
1141 if ((*qp_attr_mask & IB_QP_MIN_RNR_TIMER) && id_priv->min_rnr_timer_set)
1142 qp_attr->min_rnr_timer = id_priv->min_rnr_timer;
1143
1144 return ret;
1145 }
1146 EXPORT_SYMBOL(rdma_init_qp_attr);
1147
1148 static inline bool cma_zero_addr(const struct sockaddr *addr)
1149 {
1150 switch (addr->sa_family) {
1151 case AF_INET:
1152 return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
1153 case AF_INET6:
1154 return ipv6_addr_any(&((struct sockaddr_in6 *)addr)->sin6_addr);
1155 case AF_IB:
1156 return ib_addr_any(&((struct sockaddr_ib *)addr)->sib_addr);
1157 default:
1158 return false;
1159 }
1160 }
1161
1162 static inline bool cma_loopback_addr(const struct sockaddr *addr)
1163 {
1164 switch (addr->sa_family) {
1165 case AF_INET:
1166 return ipv4_is_loopback(
1167 ((struct sockaddr_in *)addr)->sin_addr.s_addr);
1168 case AF_INET6:
1169 return ipv6_addr_loopback(
1170 &((struct sockaddr_in6 *)addr)->sin6_addr);
1171 case AF_IB:
1172 return ib_addr_loopback(
1173 &((struct sockaddr_ib *)addr)->sib_addr);
1174 default:
1175 return false;
1176 }
1177 }
1178
1179 static inline bool cma_any_addr(const struct sockaddr *addr)
1180 {
1181 return cma_zero_addr(addr) || cma_loopback_addr(addr);
1182 }
1183
1184 static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
1185 {
1186 if (src->sa_family != dst->sa_family)
1187 return -1;
1188
1189 switch (src->sa_family) {
1190 case AF_INET:
1191 return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
1192 ((struct sockaddr_in *)dst)->sin_addr.s_addr;
1193 case AF_INET6: {
1194 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
1195 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
1196 bool link_local;
1197
1198 if (ipv6_addr_cmp(&src_addr6->sin6_addr,
1199 &dst_addr6->sin6_addr))
1200 return 1;
1201 link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
1202 IPV6_ADDR_LINKLOCAL;
1203 /* Link-local addresses must have matching scope_ids */
1204 return link_local ? (src_addr6->sin6_scope_id !=
1205 dst_addr6->sin6_scope_id) :
1206 0;
1207 }
1208
1209 default:
1210 return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
1211 &((struct sockaddr_ib *) dst)->sib_addr);
1212 }
1213 }
1214
1215 static __be16 cma_port(const struct sockaddr *addr)
1216 {
1217 struct sockaddr_ib *sib;
1218
1219 switch (addr->sa_family) {
1220 case AF_INET:
1221 return ((struct sockaddr_in *) addr)->sin_port;
1222 case AF_INET6:
1223 return ((struct sockaddr_in6 *) addr)->sin6_port;
1224 case AF_IB:
1225 sib = (struct sockaddr_ib *) addr;
1226 return htons((u16) (be64_to_cpu(sib->sib_sid) &
1227 be64_to_cpu(sib->sib_sid_mask)));
1228 default:
1229 return 0;
1230 }
1231 }
1232
1233 static inline int cma_any_port(const struct sockaddr *addr)
1234 {
1235 return !cma_port(addr);
1236 }
1237
1238 static void cma_save_ib_info(struct sockaddr *src_addr,
1239 struct sockaddr *dst_addr,
1240 const struct rdma_cm_id *listen_id,
1241 const struct sa_path_rec *path)
1242 {
1243 struct sockaddr_ib *listen_ib, *ib;
1244
1245 listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
1246 if (src_addr) {
1247 ib = (struct sockaddr_ib *)src_addr;
1248 ib->sib_family = AF_IB;
1249 if (path) {
1250 ib->sib_pkey = path->pkey;
1251 ib->sib_flowinfo = path->flow_label;
1252 memcpy(&ib->sib_addr, &path->sgid, 16);
1253 ib->sib_sid = path->service_id;
1254 ib->sib_scope_id = 0;
1255 } else {
1256 ib->sib_pkey = listen_ib->sib_pkey;
1257 ib->sib_flowinfo = listen_ib->sib_flowinfo;
1258 ib->sib_addr = listen_ib->sib_addr;
1259 ib->sib_sid = listen_ib->sib_sid;
1260 ib->sib_scope_id = listen_ib->sib_scope_id;
1261 }
1262 ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
1263 }
1264 if (dst_addr) {
1265 ib = (struct sockaddr_ib *)dst_addr;
1266 ib->sib_family = AF_IB;
1267 if (path) {
1268 ib->sib_pkey = path->pkey;
1269 ib->sib_flowinfo = path->flow_label;
1270 memcpy(&ib->sib_addr, &path->dgid, 16);
1271 }
1272 }
1273 }
1274
1275 static void cma_save_ip4_info(struct sockaddr_in *src_addr,
1276 struct sockaddr_in *dst_addr,
1277 struct cma_hdr *hdr,
1278 __be16 local_port)
1279 {
1280 if (src_addr) {
1281 *src_addr = (struct sockaddr_in) {
1282 .sin_family = AF_INET,
1283 .sin_addr.s_addr = hdr->dst_addr.ip4.addr,
1284 .sin_port = local_port,
1285 };
1286 }
1287
1288 if (dst_addr) {
1289 *dst_addr = (struct sockaddr_in) {
1290 .sin_family = AF_INET,
1291 .sin_addr.s_addr = hdr->src_addr.ip4.addr,
1292 .sin_port = hdr->port,
1293 };
1294 }
1295 }
1296
1297 static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
1298 struct sockaddr_in6 *dst_addr,
1299 struct cma_hdr *hdr,
1300 __be16 local_port)
1301 {
1302 if (src_addr) {
1303 *src_addr = (struct sockaddr_in6) {
1304 .sin6_family = AF_INET6,
1305 .sin6_addr = hdr->dst_addr.ip6,
1306 .sin6_port = local_port,
1307 };
1308 }
1309
1310 if (dst_addr) {
1311 *dst_addr = (struct sockaddr_in6) {
1312 .sin6_family = AF_INET6,
1313 .sin6_addr = hdr->src_addr.ip6,
1314 .sin6_port = hdr->port,
1315 };
1316 }
1317 }
1318
1319 static u16 cma_port_from_service_id(__be64 service_id)
1320 {
1321 return (u16)be64_to_cpu(service_id);
1322 }
1323
1324 static int cma_save_ip_info(struct sockaddr *src_addr,
1325 struct sockaddr *dst_addr,
1326 const struct ib_cm_event *ib_event,
1327 __be64 service_id)
1328 {
1329 struct cma_hdr *hdr;
1330 __be16 port;
1331
1332 hdr = ib_event->private_data;
1333 if (hdr->cma_version != CMA_VERSION)
1334 return -EINVAL;
1335
1336 port = htons(cma_port_from_service_id(service_id));
1337
1338 switch (cma_get_ip_ver(hdr)) {
1339 case 4:
1340 cma_save_ip4_info((struct sockaddr_in *)src_addr,
1341 (struct sockaddr_in *)dst_addr, hdr, port);
1342 break;
1343 case 6:
1344 cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
1345 (struct sockaddr_in6 *)dst_addr, hdr, port);
1346 break;
1347 default:
1348 return -EAFNOSUPPORT;
1349 }
1350
1351 return 0;
1352 }
1353
1354 static int cma_save_net_info(struct sockaddr *src_addr,
1355 struct sockaddr *dst_addr,
1356 const struct rdma_cm_id *listen_id,
1357 const struct ib_cm_event *ib_event,
1358 sa_family_t sa_family, __be64 service_id)
1359 {
1360 if (sa_family == AF_IB) {
1361 if (ib_event->event == IB_CM_REQ_RECEIVED)
1362 cma_save_ib_info(src_addr, dst_addr, listen_id,
1363 ib_event->param.req_rcvd.primary_path);
1364 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
1365 cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);
1366 return 0;
1367 }
1368
1369 return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);
1370 }
1371
1372 static int cma_save_req_info(const struct ib_cm_event *ib_event,
1373 struct cma_req_info *req)
1374 {
1375 const struct ib_cm_req_event_param *req_param =
1376 &ib_event->param.req_rcvd;
1377 const struct ib_cm_sidr_req_event_param *sidr_param =
1378 &ib_event->param.sidr_req_rcvd;
1379
1380 switch (ib_event->event) {
1381 case IB_CM_REQ_RECEIVED:
1382 req->device = req_param->listen_id->device;
1383 req->port = req_param->port;
1384 memcpy(&req->local_gid, &req_param->primary_path->sgid,
1385 sizeof(req->local_gid));
1386 req->has_gid = true;
1387 req->service_id = req_param->primary_path->service_id;
1388 req->pkey = be16_to_cpu(req_param->primary_path->pkey);
1389 if (req->pkey != req_param->bth_pkey)
1390 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
1391 "RDMA CMA: in the future this may cause the request to be dropped\n",
1392 req_param->bth_pkey, req->pkey);
1393 break;
1394 case IB_CM_SIDR_REQ_RECEIVED:
1395 req->device = sidr_param->listen_id->device;
1396 req->port = sidr_param->port;
1397 req->has_gid = false;
1398 req->service_id = sidr_param->service_id;
1399 req->pkey = sidr_param->pkey;
1400 if (req->pkey != sidr_param->bth_pkey)
1401 pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
1402 "RDMA CMA: in the future this may cause the request to be dropped\n",
1403 sidr_param->bth_pkey, req->pkey);
1404 break;
1405 default:
1406 return -EINVAL;
1407 }
1408
1409 return 0;
1410 }
1411
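/*
 * Validate that an incoming IPv4 request with these source and destination
 * addresses could plausibly have arrived on @net_dev: reject clearly invalid
 * addresses and confirm that a fib lookup keyed on the device's ifindex
 * resolves back to @net_dev.
 */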
1412 static bool validate_ipv4_net_dev(struct net_device *net_dev,
1413 const struct sockaddr_in *dst_addr,
1414 const struct sockaddr_in *src_addr)
1415 {
1416 __be32 daddr = dst_addr->sin_addr.s_addr,
1417 saddr = src_addr->sin_addr.s_addr;
1418 struct fib_result res;
1419 struct flowi4 fl4;
1420 int err;
1421 bool ret;
1422
1423 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1424 ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
1425 ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
1426 ipv4_is_loopback(saddr))
1427 return false;
1428
1429 memset(&fl4, 0, sizeof(fl4));
1430 fl4.flowi4_iif = net_dev->ifindex;
1431 fl4.daddr = daddr;
1432 fl4.saddr = saddr;
1433
1434 rcu_read_lock();
1435 err = fib_lookup(dev_net(net_dev), &fl4, &res, 0);
1436 ret = err == 0 && FIB_RES_DEV(res) == net_dev;
1437 rcu_read_unlock();
1438
1439 return ret;
1440 }
1441
1442 static bool validate_ipv6_net_dev(struct net_device *net_dev,
1443 const struct sockaddr_in6 *dst_addr,
1444 const struct sockaddr_in6 *src_addr)
1445 {
1446 #if IS_ENABLED(CONFIG_IPV6)
1447 const int strict = ipv6_addr_type(&dst_addr->sin6_addr) &
1448 IPV6_ADDR_LINKLOCAL;
1449 struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
1450 &src_addr->sin6_addr, net_dev->ifindex,
1451 NULL, strict);
1452 bool ret;
1453
1454 if (!rt)
1455 return false;
1456
1457 ret = rt->rt6i_idev->dev == net_dev;
1458 ip6_rt_put(rt);
1459
1460 return ret;
1461 #else
1462 return false;
1463 #endif
1464 }
1465
1466 static bool validate_net_dev(struct net_device *net_dev,
1467 const struct sockaddr *daddr,
1468 const struct sockaddr *saddr)
1469 {
1470 const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
1471 const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
1472 const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
1473 const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;
1474
1475 switch (daddr->sa_family) {
1476 case AF_INET:
1477 return saddr->sa_family == AF_INET &&
1478 validate_ipv4_net_dev(net_dev, daddr4, saddr4);
1479
1480 case AF_INET6:
1481 return saddr->sa_family == AF_INET6 &&
1482 validate_ipv6_net_dev(net_dev, daddr6, saddr6);
1483
1484 default:
1485 return false;
1486 }
1487 }
1488
1489 static struct net_device *
1490 roce_get_net_dev_by_cm_event(const struct ib_cm_event *ib_event)
1491 {
1492 const struct ib_gid_attr *sgid_attr = NULL;
1493 struct net_device *ndev;
1494
1495 if (ib_event->event == IB_CM_REQ_RECEIVED)
1496 sgid_attr = ib_event->param.req_rcvd.ppath_sgid_attr;
1497 else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
1498 sgid_attr = ib_event->param.sidr_req_rcvd.sgid_attr;
1499
1500 if (!sgid_attr)
1501 return NULL;
1502
1503 rcu_read_lock();
1504 ndev = rdma_read_gid_attr_ndev_rcu(sgid_attr);
1505 if (IS_ERR(ndev))
1506 ndev = NULL;
1507 else
1508 dev_hold(ndev);
1509 rcu_read_unlock();
1510 return ndev;
1511 }
1512
1513 static struct net_device *cma_get_net_dev(const struct ib_cm_event *ib_event,
1514 struct cma_req_info *req)
1515 {
1516 struct sockaddr *listen_addr =
1517 (struct sockaddr *)&req->listen_addr_storage;
1518 struct sockaddr *src_addr = (struct sockaddr *)&req->src_addr_storage;
1519 struct net_device *net_dev;
1520 const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
1521 int err;
1522
1523 err = cma_save_ip_info(listen_addr, src_addr, ib_event,
1524 req->service_id);
1525 if (err)
1526 return ERR_PTR(err);
1527
1528 if (rdma_protocol_roce(req->device, req->port))
1529 net_dev = roce_get_net_dev_by_cm_event(ib_event);
1530 else
1531 net_dev = ib_get_net_dev_by_params(req->device, req->port,
1532 req->pkey,
1533 gid, listen_addr);
1534 if (!net_dev)
1535 return ERR_PTR(-ENODEV);
1536
1537 return net_dev;
1538 }
1539
1540 static enum rdma_ucm_port_space rdma_ps_from_service_id(__be64 service_id)
1541 {
1542 return (be64_to_cpu(service_id) >> 16) & 0xffff;
1543 }
1544
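/*
 * Check whether the destination address carried in the received CMA header
 * matches the address this listening id is bound to. A wildcard bind matches
 * anything, unless afonly is set, in which case the header's IP version must
 * still match the bound address family.
 */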
1545 static bool cma_match_private_data(struct rdma_id_private *id_priv,
1546 const struct cma_hdr *hdr)
1547 {
1548 struct sockaddr *addr = cma_src_addr(id_priv);
1549 __be32 ip4_addr;
1550 struct in6_addr ip6_addr;
1551
1552 if (cma_any_addr(addr) && !id_priv->afonly)
1553 return true;
1554
1555 switch (addr->sa_family) {
1556 case AF_INET:
1557 ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
1558 if (cma_get_ip_ver(hdr) != 4)
1559 return false;
1560 if (!cma_any_addr(addr) &&
1561 hdr->dst_addr.ip4.addr != ip4_addr)
1562 return false;
1563 break;
1564 case AF_INET6:
1565 ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
1566 if (cma_get_ip_ver(hdr) != 6)
1567 return false;
1568 if (!cma_any_addr(addr) &&
1569 memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))
1570 return false;
1571 break;
1572 case AF_IB:
1573 return true;
1574 default:
1575 return false;
1576 }
1577
1578 return true;
1579 }
1580
1581 static bool cma_protocol_roce(const struct rdma_cm_id *id)
1582 {
1583 struct ib_device *device = id->device;
1584 const u32 port_num = id->port_num ?: rdma_start_port(device);
1585
1586 return rdma_protocol_roce(device, port_num);
1587 }
1588
1589 static bool cma_is_req_ipv6_ll(const struct cma_req_info *req)
1590 {
1591 const struct sockaddr *daddr =
1592 (const struct sockaddr *)&req->listen_addr_storage;
1593 const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
1594
1595 /* Returns true if the req is for IPv6 link local */
1596 return (daddr->sa_family == AF_INET6 &&
1597 (ipv6_addr_type(&daddr6->sin6_addr) & IPV6_ADDR_LINKLOCAL));
1598 }
1599
1600 static bool cma_match_net_dev(const struct rdma_cm_id *id,
1601 const struct net_device *net_dev,
1602 const struct cma_req_info *req)
1603 {
1604 const struct rdma_addr *addr = &id->route.addr;
1605
1606 if (!net_dev)
1607 /* This request is an AF_IB request */
1608 return (!id->port_num || id->port_num == req->port) &&
1609 (addr->src_addr.ss_family == AF_IB);
1610
1611 /*
1612 * If the request is not for IPv6 link local, allow the request to
1613 * match any netdevice of the one- or multi-port rdma device.
1614 */
1615 if (!cma_is_req_ipv6_ll(req))
1616 return true;
1617 /*
1618 * Net namespaces must match, and if the listener is listening
1619 * on a specific netdevice then the netdevice must match as well.
1620 */
1621 if (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
1622 (!!addr->dev_addr.bound_dev_if ==
1623 (addr->dev_addr.bound_dev_if == net_dev->ifindex)))
1624 return true;
1625 else
1626 return false;
1627 }
1628
1629 static struct rdma_id_private *cma_find_listener(
1630 const struct rdma_bind_list *bind_list,
1631 const struct ib_cm_id *cm_id,
1632 const struct ib_cm_event *ib_event,
1633 const struct cma_req_info *req,
1634 const struct net_device *net_dev)
1635 {
1636 struct rdma_id_private *id_priv, *id_priv_dev;
1637
1638 lockdep_assert_held(&lock);
1639
1640 if (!bind_list)
1641 return ERR_PTR(-EINVAL);
1642
1643 hlist_for_each_entry(id_priv, &bind_list->owners, node) {
1644 if (cma_match_private_data(id_priv, ib_event->private_data)) {
1645 if (id_priv->id.device == cm_id->device &&
1646 cma_match_net_dev(&id_priv->id, net_dev, req))
1647 return id_priv;
1648 list_for_each_entry(id_priv_dev,
1649 &id_priv->listen_list,
1650 listen_list) {
1651 if (id_priv_dev->id.device == cm_id->device &&
1652 cma_match_net_dev(&id_priv_dev->id,
1653 net_dev, req))
1654 return id_priv_dev;
1655 }
1656 }
1657 }
1658
1659 return ERR_PTR(-EINVAL);
1660 }
1661
1662 static struct rdma_id_private *
1663 cma_ib_id_from_event(struct ib_cm_id *cm_id,
1664 const struct ib_cm_event *ib_event,
1665 struct cma_req_info *req,
1666 struct net_device **net_dev)
1667 {
1668 struct rdma_bind_list *bind_list;
1669 struct rdma_id_private *id_priv;
1670 int err;
1671
1672 err = cma_save_req_info(ib_event, req);
1673 if (err)
1674 return ERR_PTR(err);
1675
1676 *net_dev = cma_get_net_dev(ib_event, req);
1677 if (IS_ERR(*net_dev)) {
1678 if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
1679 /* Assuming the protocol is AF_IB */
1680 *net_dev = NULL;
1681 } else {
1682 return ERR_CAST(*net_dev);
1683 }
1684 }
1685
1686 mutex_lock(&lock);
1687 /*
1688 * The net namespace might be getting deleted while the route and
1689 * cm_id lookups are in progress. Therefore, perform the netdevice
1690 * validation and cm_id lookup under the rcu lock.
1691 * The rcu lock, along with the netdevice state check, synchronizes
1692 * with a netdevice migrating to a different net namespace and also
1693 * avoids the case where the net namespace gets deleted while the
1694 * lookup is in progress.
1695 * If the device state is not IFF_UP, its properties such as ifindex
1696 * and nd_net cannot be trusted to remain valid without rcu lock.
1697 * net/core/dev.c change_net_namespace() ensures to synchronize with
1698 * ongoing operations on net device after device is closed using
1699 * synchronize_net().
1700 */
1701 rcu_read_lock();
1702 if (*net_dev) {
1703 /*
1704 * If the netdevice is down, it is likely administratively down or
1705 * it might be migrating to a different namespace.
1706 * In that case avoid further processing, as the net namespace
1707 * or ifindex may change.
1708 */
1709 if (((*net_dev)->flags & IFF_UP) == 0) {
1710 id_priv = ERR_PTR(-EHOSTUNREACH);
1711 goto err;
1712 }
1713
1714 if (!validate_net_dev(*net_dev,
1715 (struct sockaddr *)&req->listen_addr_storage,
1716 (struct sockaddr *)&req->src_addr_storage)) {
1717 id_priv = ERR_PTR(-EHOSTUNREACH);
1718 goto err;
1719 }
1720 }
1721
1722 bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
1723 rdma_ps_from_service_id(req->service_id),
1724 cma_port_from_service_id(req->service_id));
1725 id_priv = cma_find_listener(bind_list, cm_id, ib_event, req, *net_dev);
1726 err:
1727 rcu_read_unlock();
1728 mutex_unlock(&lock);
1729 if (IS_ERR(id_priv) && *net_dev) {
1730 dev_put(*net_dev);
1731 *net_dev = NULL;
1732 }
1733 return id_priv;
1734 }
1735
1736 static inline u8 cma_user_data_offset(struct rdma_id_private *id_priv)
1737 {
1738 return cma_family(id_priv) == AF_IB ? 0 : sizeof(struct cma_hdr);
1739 }
1740
1741 static void cma_cancel_route(struct rdma_id_private *id_priv)
1742 {
1743 if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
1744 if (id_priv->query)
1745 ib_sa_cancel_query(id_priv->query_id, id_priv->query);
1746 }
1747 }
1748
1749 static void _cma_cancel_listens(struct rdma_id_private *id_priv)
1750 {
1751 struct rdma_id_private *dev_id_priv;
1752
1753 lockdep_assert_held(&lock);
1754
1755 /*
1756 * Remove from listen_any_list to prevent added devices from spawning
1757 * additional listen requests.
1758 */
1759 list_del(&id_priv->list);
1760
1761 while (!list_empty(&id_priv->listen_list)) {
1762 dev_id_priv = list_entry(id_priv->listen_list.next,
1763 struct rdma_id_private, listen_list);
1764 /* sync with device removal to avoid duplicate destruction */
1765 list_del_init(&dev_id_priv->list);
1766 list_del(&dev_id_priv->listen_list);
1767 mutex_unlock(&lock);
1768
1769 rdma_destroy_id(&dev_id_priv->id);
1770 mutex_lock(&lock);
1771 }
1772 }
1773
1774 static void cma_cancel_listens(struct rdma_id_private *id_priv)
1775 {
1776 mutex_lock(&lock);
1777 _cma_cancel_listens(id_priv);
1778 mutex_unlock(&lock);
1779 }
1780
1781 static void cma_cancel_operation(struct rdma_id_private *id_priv,
1782 enum rdma_cm_state state)
1783 {
1784 switch (state) {
1785 case RDMA_CM_ADDR_QUERY:
1786 /*
1787 * We can skip rdma_addr_cancel() based on the state; only
1788 * RDMA_CM_ADDR_QUERY has work that could still execute.
1789 * Notice that the addr_handler work could still be exiting
1790 * outside this state; however, due to the interaction with the
1791 * handler_mutex, the work is guaranteed not to touch id_priv
1792 * during exit.
1793 */
1794 rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
1795 break;
1796 case RDMA_CM_ROUTE_QUERY:
1797 cma_cancel_route(id_priv);
1798 break;
1799 case RDMA_CM_LISTEN:
1800 if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
1801 cma_cancel_listens(id_priv);
1802 break;
1803 default:
1804 break;
1805 }
1806 }
1807
1808 static void cma_release_port(struct rdma_id_private *id_priv)
1809 {
1810 struct rdma_bind_list *bind_list = id_priv->bind_list;
1811 struct net *net = id_priv->id.route.addr.dev_addr.net;
1812
1813 if (!bind_list)
1814 return;
1815
1816 mutex_lock(&lock);
1817 hlist_del(&id_priv->node);
1818 if (hlist_empty(&bind_list->owners)) {
1819 cma_ps_remove(net, bind_list->ps, bind_list->port);
1820 kfree(bind_list);
1821 }
1822 mutex_unlock(&lock);
1823 }
1824
1825 static void destroy_mc(struct rdma_id_private *id_priv,
1826 struct cma_multicast *mc)
1827 {
1828 bool send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
1829
1830 if (rdma_cap_ib_mcast(id_priv->id.device, id_priv->id.port_num))
1831 ib_sa_free_multicast(mc->sa_mc);
1832
1833 if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
1834 struct rdma_dev_addr *dev_addr =
1835 &id_priv->id.route.addr.dev_addr;
1836 struct net_device *ndev = NULL;
1837
1838 if (dev_addr->bound_dev_if)
1839 ndev = dev_get_by_index(dev_addr->net,
1840 dev_addr->bound_dev_if);
1841 if (ndev) {
1842 union ib_gid mgid;
1843
1844 cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr,
1845 &mgid);
1846
1847 if (!send_only)
1848 cma_igmp_send(ndev, &mgid, false);
1849
1850 dev_put(ndev);
1851 }
1852
1853 cancel_work_sync(&mc->iboe_join.work);
1854 }
1855 kfree(mc);
1856 }
1857
1858 static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
1859 {
1860 struct cma_multicast *mc;
1861
1862 while (!list_empty(&id_priv->mc_list)) {
1863 mc = list_first_entry(&id_priv->mc_list, struct cma_multicast,
1864 list);
1865 list_del(&mc->list);
1866 destroy_mc(id_priv, mc);
1867 }
1868 }
1869
1870 static void _destroy_id(struct rdma_id_private *id_priv,
1871 enum rdma_cm_state state)
1872 {
1873 cma_cancel_operation(id_priv, state);
1874
1875 rdma_restrack_del(&id_priv->res);
1876 if (id_priv->cma_dev) {
1877 if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
1878 if (id_priv->cm_id.ib)
1879 ib_destroy_cm_id(id_priv->cm_id.ib);
1880 } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
1881 if (id_priv->cm_id.iw)
1882 iw_destroy_cm_id(id_priv->cm_id.iw);
1883 }
1884 cma_leave_mc_groups(id_priv);
1885 cma_release_dev(id_priv);
1886 }
1887
1888 cma_release_port(id_priv);
1889 cma_id_put(id_priv);
1890 wait_for_completion(&id_priv->comp);
1891
1892 if (id_priv->internal_id)
1893 cma_id_put(id_priv->id.context);
1894
1895 kfree(id_priv->id.route.path_rec);
1896
1897 put_net(id_priv->id.route.addr.dev_addr.net);
1898 kfree(id_priv);
1899 }
1900
1901 /*
1902 * destroy an ID from within the handler_mutex. This ensures that no other
1903 * handlers can start running concurrently.
1904 */
1905 static void destroy_id_handler_unlock(struct rdma_id_private *id_priv)
1906 __releases(&id_priv->handler_mutex)
1907 {
1908 enum rdma_cm_state state;
1909 unsigned long flags;
1910
1911 trace_cm_id_destroy(id_priv);
1912
1913 /*
1914 * Setting the state to destroyed under the handler mutex provides a
1915 * fence against calling handler callbacks. If this is invoked due to
1916 * the failure of a handler callback then it guarantees that no future
1917 * handlers will be called.
1918 */
1919 lockdep_assert_held(&id_priv->handler_mutex);
1920 spin_lock_irqsave(&id_priv->lock, flags);
1921 state = id_priv->state;
1922 id_priv->state = RDMA_CM_DESTROYING;
1923 spin_unlock_irqrestore(&id_priv->lock, flags);
1924 mutex_unlock(&id_priv->handler_mutex);
1925 _destroy_id(id_priv, state);
1926 }
1927
1928 void rdma_destroy_id(struct rdma_cm_id *id)
1929 {
1930 struct rdma_id_private *id_priv =
1931 container_of(id, struct rdma_id_private, id);
1932
1933 mutex_lock(&id_priv->handler_mutex);
1934 destroy_id_handler_unlock(id_priv);
1935 }
1936 EXPORT_SYMBOL(rdma_destroy_id);
1937
1938 static int cma_rep_recv(struct rdma_id_private *id_priv)
1939 {
1940 int ret;
1941
1942 ret = cma_modify_qp_rtr(id_priv, NULL);
1943 if (ret)
1944 goto reject;
1945
1946 ret = cma_modify_qp_rts(id_priv, NULL);
1947 if (ret)
1948 goto reject;
1949
1950 trace_cm_send_rtu(id_priv);
1951 ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);
1952 if (ret)
1953 goto reject;
1954
1955 return 0;
1956 reject:
1957 pr_debug_ratelimited("RDMA CM: CONNECT_ERROR: failed to handle reply. status %d\n", ret);
1958 cma_modify_qp_err(id_priv);
1959 trace_cm_send_rej(id_priv);
1960 ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,
1961 NULL, 0, NULL, 0);
1962 return ret;
1963 }
1964
1965 static void cma_set_rep_event_data(struct rdma_cm_event *event,
1966 const struct ib_cm_rep_event_param *rep_data,
1967 void *private_data)
1968 {
1969 event->param.conn.private_data = private_data;
1970 event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
1971 event->param.conn.responder_resources = rep_data->responder_resources;
1972 event->param.conn.initiator_depth = rep_data->initiator_depth;
1973 event->param.conn.flow_control = rep_data->flow_control;
1974 event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
1975 event->param.conn.srq = rep_data->srq;
1976 event->param.conn.qp_num = rep_data->remote_qpn;
1977
1978 event->ece.vendor_id = rep_data->ece.vendor_id;
1979 event->ece.attr_mod = rep_data->ece.attr_mod;
1980 }
1981
1982 static int cma_cm_event_handler(struct rdma_id_private *id_priv,
1983 struct rdma_cm_event *event)
1984 {
1985 int ret;
1986
1987 lockdep_assert_held(&id_priv->handler_mutex);
1988
1989 trace_cm_event_handler(id_priv, event);
1990 ret = id_priv->id.event_handler(&id_priv->id, event);
1991 trace_cm_event_done(id_priv, event, ret);
1992 return ret;
1993 }
1994
1995 static int cma_ib_handler(struct ib_cm_id *cm_id,
1996 const struct ib_cm_event *ib_event)
1997 {
1998 struct rdma_id_private *id_priv = cm_id->context;
1999 struct rdma_cm_event event = {};
2000 enum rdma_cm_state state;
2001 int ret;
2002
2003 mutex_lock(&id_priv->handler_mutex);
2004 state = READ_ONCE(id_priv->state);
2005 if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
2006 state != RDMA_CM_CONNECT) ||
2007 (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
2008 state != RDMA_CM_DISCONNECT))
2009 goto out;
2010
2011 switch (ib_event->event) {
2012 case IB_CM_REQ_ERROR:
2013 case IB_CM_REP_ERROR:
2014 event.event = RDMA_CM_EVENT_UNREACHABLE;
2015 event.status = -ETIMEDOUT;
2016 break;
2017 case IB_CM_REP_RECEIVED:
2018 if (state == RDMA_CM_CONNECT &&
2019 (id_priv->id.qp_type != IB_QPT_UD)) {
2020 trace_cm_send_mra(id_priv);
2021 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
2022 }
2023 if (id_priv->id.qp) {
2024 event.status = cma_rep_recv(id_priv);
2025 event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
2026 RDMA_CM_EVENT_ESTABLISHED;
2027 } else {
2028 event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
2029 }
2030 cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
2031 ib_event->private_data);
2032 break;
2033 case IB_CM_RTU_RECEIVED:
2034 case IB_CM_USER_ESTABLISHED:
2035 event.event = RDMA_CM_EVENT_ESTABLISHED;
2036 break;
2037 case IB_CM_DREQ_ERROR:
2038 event.status = -ETIMEDOUT;
2039 fallthrough;
2040 case IB_CM_DREQ_RECEIVED:
2041 case IB_CM_DREP_RECEIVED:
2042 if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
2043 RDMA_CM_DISCONNECT))
2044 goto out;
2045 event.event = RDMA_CM_EVENT_DISCONNECTED;
2046 break;
2047 case IB_CM_TIMEWAIT_EXIT:
2048 event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
2049 break;
2050 case IB_CM_MRA_RECEIVED:
2051 /* ignore event */
2052 goto out;
2053 case IB_CM_REJ_RECEIVED:
2054 pr_debug_ratelimited("RDMA CM: REJECTED: %s\n", rdma_reject_msg(&id_priv->id,
2055 ib_event->param.rej_rcvd.reason));
2056 cma_modify_qp_err(id_priv);
2057 event.status = ib_event->param.rej_rcvd.reason;
2058 event.event = RDMA_CM_EVENT_REJECTED;
2059 event.param.conn.private_data = ib_event->private_data;
2060 event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
2061 break;
2062 default:
2063 pr_err("RDMA CMA: unexpected IB CM event: %d\n",
2064 ib_event->event);
2065 goto out;
2066 }
2067
2068 ret = cma_cm_event_handler(id_priv, &event);
2069 if (ret) {
2070 /* Destroy the CM ID by returning a non-zero value. */
2071 id_priv->cm_id.ib = NULL;
2072 destroy_id_handler_unlock(id_priv);
2073 return ret;
2074 }
2075 out:
2076 mutex_unlock(&id_priv->handler_mutex);
2077 return 0;
2078 }
2079
2080 static struct rdma_id_private *
2081 cma_ib_new_conn_id(const struct rdma_cm_id *listen_id,
2082 const struct ib_cm_event *ib_event,
2083 struct net_device *net_dev)
2084 {
2085 struct rdma_id_private *listen_id_priv;
2086 struct rdma_id_private *id_priv;
2087 struct rdma_cm_id *id;
2088 struct rdma_route *rt;
2089 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
2090 struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
2091 const __be64 service_id =
2092 ib_event->param.req_rcvd.primary_path->service_id;
2093 int ret;
2094
2095 listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
2096 id_priv = __rdma_create_id(listen_id->route.addr.dev_addr.net,
2097 listen_id->event_handler, listen_id->context,
2098 listen_id->ps,
2099 ib_event->param.req_rcvd.qp_type,
2100 listen_id_priv);
2101 if (IS_ERR(id_priv))
2102 return NULL;
2103
2104 id = &id_priv->id;
2105 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
2106 (struct sockaddr *)&id->route.addr.dst_addr,
2107 listen_id, ib_event, ss_family, service_id))
2108 goto err;
2109
2110 rt = &id->route;
2111 rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
2112 rt->path_rec = kmalloc_array(rt->num_paths, sizeof(*rt->path_rec),
2113 GFP_KERNEL);
2114 if (!rt->path_rec)
2115 goto err;
2116
2117 rt->path_rec[0] = *path;
2118 if (rt->num_paths == 2)
2119 rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;
2120
2121 if (net_dev) {
2122 rdma_copy_src_l2_addr(&rt->addr.dev_addr, net_dev);
2123 } else {
2124 if (!cma_protocol_roce(listen_id) &&
2125 cma_any_addr(cma_src_addr(id_priv))) {
2126 rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
2127 rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
2128 ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
2129 } else if (!cma_any_addr(cma_src_addr(id_priv))) {
2130 ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);
2131 if (ret)
2132 goto err;
2133 }
2134 }
2135 rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
2136
2137 id_priv->state = RDMA_CM_CONNECT;
2138 return id_priv;
2139
2140 err:
2141 rdma_destroy_id(id);
2142 return NULL;
2143 }
2144
2145 static struct rdma_id_private *
2146 cma_ib_new_udp_id(const struct rdma_cm_id *listen_id,
2147 const struct ib_cm_event *ib_event,
2148 struct net_device *net_dev)
2149 {
2150 const struct rdma_id_private *listen_id_priv;
2151 struct rdma_id_private *id_priv;
2152 struct rdma_cm_id *id;
2153 const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
2154 struct net *net = listen_id->route.addr.dev_addr.net;
2155 int ret;
2156
2157 listen_id_priv = container_of(listen_id, struct rdma_id_private, id);
2158 id_priv = __rdma_create_id(net, listen_id->event_handler,
2159 listen_id->context, listen_id->ps, IB_QPT_UD,
2160 listen_id_priv);
2161 if (IS_ERR(id_priv))
2162 return NULL;
2163
2164 id = &id_priv->id;
2165 if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
2166 (struct sockaddr *)&id->route.addr.dst_addr,
2167 listen_id, ib_event, ss_family,
2168 ib_event->param.sidr_req_rcvd.service_id))
2169 goto err;
2170
2171 if (net_dev) {
2172 rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev);
2173 } else {
2174 if (!cma_any_addr(cma_src_addr(id_priv))) {
2175 ret = cma_translate_addr(cma_src_addr(id_priv),
2176 &id->route.addr.dev_addr);
2177 if (ret)
2178 goto err;
2179 }
2180 }
2181
2182 id_priv->state = RDMA_CM_CONNECT;
2183 return id_priv;
2184 err:
2185 rdma_destroy_id(id);
2186 return NULL;
2187 }
2188
2189 static void cma_set_req_event_data(struct rdma_cm_event *event,
2190 const struct ib_cm_req_event_param *req_data,
2191 void *private_data, int offset)
2192 {
2193 event->param.conn.private_data = private_data + offset;
2194 event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
2195 event->param.conn.responder_resources = req_data->responder_resources;
2196 event->param.conn.initiator_depth = req_data->initiator_depth;
2197 event->param.conn.flow_control = req_data->flow_control;
2198 event->param.conn.retry_count = req_data->retry_count;
2199 event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
2200 event->param.conn.srq = req_data->srq;
2201 event->param.conn.qp_num = req_data->remote_qpn;
2202
2203 event->ece.vendor_id = req_data->ece.vendor_id;
2204 event->ece.attr_mod = req_data->ece.attr_mod;
2205 }
2206
2207 static int cma_ib_check_req_qp_type(const struct rdma_cm_id *id,
2208 const struct ib_cm_event *ib_event)
2209 {
2210 return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
2211 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
2212 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
2213 (id->qp_type == IB_QPT_UD)) ||
2214 (!id->qp_type));
2215 }
2216
2217 static int cma_ib_req_handler(struct ib_cm_id *cm_id,
2218 const struct ib_cm_event *ib_event)
2219 {
2220 struct rdma_id_private *listen_id, *conn_id = NULL;
2221 struct rdma_cm_event event = {};
2222 struct cma_req_info req = {};
2223 struct net_device *net_dev;
2224 u8 offset;
2225 int ret;
2226
2227 listen_id = cma_ib_id_from_event(cm_id, ib_event, &req, &net_dev);
2228 if (IS_ERR(listen_id))
2229 return PTR_ERR(listen_id);
2230
2231 trace_cm_req_handler(listen_id, ib_event->event);
2232 if (!cma_ib_check_req_qp_type(&listen_id->id, ib_event)) {
2233 ret = -EINVAL;
2234 goto net_dev_put;
2235 }
2236
2237 mutex_lock(&listen_id->handler_mutex);
2238 if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN) {
2239 ret = -ECONNABORTED;
2240 goto err_unlock;
2241 }
2242
2243 offset = cma_user_data_offset(listen_id);
2244 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
2245 if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
2246 conn_id = cma_ib_new_udp_id(&listen_id->id, ib_event, net_dev);
2247 event.param.ud.private_data = ib_event->private_data + offset;
2248 event.param.ud.private_data_len =
2249 IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
2250 } else {
2251 conn_id = cma_ib_new_conn_id(&listen_id->id, ib_event, net_dev);
2252 cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
2253 ib_event->private_data, offset);
2254 }
2255 if (!conn_id) {
2256 ret = -ENOMEM;
2257 goto err_unlock;
2258 }
2259
2260 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
2261 ret = cma_ib_acquire_dev(conn_id, listen_id, &req);
2262 if (ret) {
2263 destroy_id_handler_unlock(conn_id);
2264 goto err_unlock;
2265 }
2266
2267 conn_id->cm_id.ib = cm_id;
2268 cm_id->context = conn_id;
2269 cm_id->cm_handler = cma_ib_handler;
2270
2271 ret = cma_cm_event_handler(conn_id, &event);
2272 if (ret) {
2273 /* Destroy the CM ID by returning a non-zero value. */
2274 conn_id->cm_id.ib = NULL;
2275 mutex_unlock(&listen_id->handler_mutex);
2276 destroy_id_handler_unlock(conn_id);
2277 goto net_dev_put;
2278 }
2279
2280 if (READ_ONCE(conn_id->state) == RDMA_CM_CONNECT &&
2281 conn_id->id.qp_type != IB_QPT_UD) {
2282 trace_cm_send_mra(cm_id->context);
2283 ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
2284 }
2285 mutex_unlock(&conn_id->handler_mutex);
2286
2287 err_unlock:
2288 mutex_unlock(&listen_id->handler_mutex);
2289
2290 net_dev_put:
2291 if (net_dev)
2292 dev_put(net_dev);
2293
2294 return ret;
2295 }
2296
2297 __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
2298 {
2299 if (addr->sa_family == AF_IB)
2300 return ((struct sockaddr_ib *) addr)->sib_sid;
2301
2302 return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
2303 }
2304 EXPORT_SYMBOL(rdma_get_service_id);
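/*
 * Example: an IP-based ID in the RDMA_PS_TCP (0x0106) port space bound to
 * port 4791 (0x12b7) yields a service ID of
 *
 *	cpu_to_be64(((u64)0x0106 << 16) + 0x12b7) == cpu_to_be64(0x010612b7)
 *
 * while for AF_IB the caller-supplied sib_sid is returned unchanged.
 */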
2305
2306 void rdma_read_gids(struct rdma_cm_id *cm_id, union ib_gid *sgid,
2307 union ib_gid *dgid)
2308 {
2309 struct rdma_addr *addr = &cm_id->route.addr;
2310
2311 if (!cm_id->device) {
2312 if (sgid)
2313 memset(sgid, 0, sizeof(*sgid));
2314 if (dgid)
2315 memset(dgid, 0, sizeof(*dgid));
2316 return;
2317 }
2318
2319 if (rdma_protocol_roce(cm_id->device, cm_id->port_num)) {
2320 if (sgid)
2321 rdma_ip2gid((struct sockaddr *)&addr->src_addr, sgid);
2322 if (dgid)
2323 rdma_ip2gid((struct sockaddr *)&addr->dst_addr, dgid);
2324 } else {
2325 if (sgid)
2326 rdma_addr_get_sgid(&addr->dev_addr, sgid);
2327 if (dgid)
2328 rdma_addr_get_dgid(&addr->dev_addr, dgid);
2329 }
2330 }
2331 EXPORT_SYMBOL(rdma_read_gids);
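/*
 * Example: on a RoCE port the GIDs are derived from the IP addresses of the
 * connection (an IPv4 source of 192.168.1.10 reads back as the IPv4-mapped
 * sgid ::ffff:192.168.1.10), whereas on IB/OPA they come from the resolved
 * device address.
 */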
2332
2333 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
2334 {
2335 struct rdma_id_private *id_priv = iw_id->context;
2336 struct rdma_cm_event event = {};
2337 int ret = 0;
2338 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
2339 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
2340
2341 mutex_lock(&id_priv->handler_mutex);
2342 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
2343 goto out;
2344
2345 switch (iw_event->event) {
2346 case IW_CM_EVENT_CLOSE:
2347 event.event = RDMA_CM_EVENT_DISCONNECTED;
2348 break;
2349 case IW_CM_EVENT_CONNECT_REPLY:
2350 memcpy(cma_src_addr(id_priv), laddr,
2351 rdma_addr_size(laddr));
2352 memcpy(cma_dst_addr(id_priv), raddr,
2353 rdma_addr_size(raddr));
2354 switch (iw_event->status) {
2355 case 0:
2356 event.event = RDMA_CM_EVENT_ESTABLISHED;
2357 event.param.conn.initiator_depth = iw_event->ird;
2358 event.param.conn.responder_resources = iw_event->ord;
2359 break;
2360 case -ECONNRESET:
2361 case -ECONNREFUSED:
2362 event.event = RDMA_CM_EVENT_REJECTED;
2363 break;
2364 case -ETIMEDOUT:
2365 event.event = RDMA_CM_EVENT_UNREACHABLE;
2366 break;
2367 default:
2368 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
2369 break;
2370 }
2371 break;
2372 case IW_CM_EVENT_ESTABLISHED:
2373 event.event = RDMA_CM_EVENT_ESTABLISHED;
2374 event.param.conn.initiator_depth = iw_event->ird;
2375 event.param.conn.responder_resources = iw_event->ord;
2376 break;
2377 default:
2378 goto out;
2379 }
2380
2381 event.status = iw_event->status;
2382 event.param.conn.private_data = iw_event->private_data;
2383 event.param.conn.private_data_len = iw_event->private_data_len;
2384 ret = cma_cm_event_handler(id_priv, &event);
2385 if (ret) {
2386 /* Destroy the CM ID by returning a non-zero value. */
2387 id_priv->cm_id.iw = NULL;
2388 destroy_id_handler_unlock(id_priv);
2389 return ret;
2390 }
2391
2392 out:
2393 mutex_unlock(&id_priv->handler_mutex);
2394 return ret;
2395 }
2396
2397 static int iw_conn_req_handler(struct iw_cm_id *cm_id,
2398 struct iw_cm_event *iw_event)
2399 {
2400 struct rdma_id_private *listen_id, *conn_id;
2401 struct rdma_cm_event event = {};
2402 int ret = -ECONNABORTED;
2403 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
2404 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
2405
2406 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
2407 event.param.conn.private_data = iw_event->private_data;
2408 event.param.conn.private_data_len = iw_event->private_data_len;
2409 event.param.conn.initiator_depth = iw_event->ird;
2410 event.param.conn.responder_resources = iw_event->ord;
2411
2412 listen_id = cm_id->context;
2413
2414 mutex_lock(&listen_id->handler_mutex);
2415 if (READ_ONCE(listen_id->state) != RDMA_CM_LISTEN)
2416 goto out;
2417
2418 /* Create a new RDMA id for the new IW CM ID */
2419 conn_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net,
2420 listen_id->id.event_handler,
2421 listen_id->id.context, RDMA_PS_TCP,
2422 IB_QPT_RC, listen_id);
2423 if (IS_ERR(conn_id)) {
2424 ret = -ENOMEM;
2425 goto out;
2426 }
2427 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
2428 conn_id->state = RDMA_CM_CONNECT;
2429
2430 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
2431 if (ret) {
2432 mutex_unlock(&listen_id->handler_mutex);
2433 destroy_id_handler_unlock(conn_id);
2434 return ret;
2435 }
2436
2437 ret = cma_iw_acquire_dev(conn_id, listen_id);
2438 if (ret) {
2439 mutex_unlock(&listen_id->handler_mutex);
2440 destroy_id_handler_unlock(conn_id);
2441 return ret;
2442 }
2443
2444 conn_id->cm_id.iw = cm_id;
2445 cm_id->context = conn_id;
2446 cm_id->cm_handler = cma_iw_handler;
2447
2448 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
2449 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
2450
2451 ret = cma_cm_event_handler(conn_id, &event);
2452 if (ret) {
2453 /* User wants to destroy the CM ID */
2454 conn_id->cm_id.iw = NULL;
2455 mutex_unlock(&listen_id->handler_mutex);
2456 destroy_id_handler_unlock(conn_id);
2457 return ret;
2458 }
2459
2460 mutex_unlock(&conn_id->handler_mutex);
2461
2462 out:
2463 mutex_unlock(&listen_id->handler_mutex);
2464 return ret;
2465 }
2466
2467 static int cma_ib_listen(struct rdma_id_private *id_priv)
2468 {
2469 struct sockaddr *addr;
2470 struct ib_cm_id *id;
2471 __be64 svc_id;
2472
2473 addr = cma_src_addr(id_priv);
2474 svc_id = rdma_get_service_id(&id_priv->id, addr);
2475 id = ib_cm_insert_listen(id_priv->id.device,
2476 cma_ib_req_handler, svc_id);
2477 if (IS_ERR(id))
2478 return PTR_ERR(id);
2479 id_priv->cm_id.ib = id;
2480
2481 return 0;
2482 }
2483
2484 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
2485 {
2486 int ret;
2487 struct iw_cm_id *id;
2488
2489 id = iw_create_cm_id(id_priv->id.device,
2490 iw_conn_req_handler,
2491 id_priv);
2492 if (IS_ERR(id))
2493 return PTR_ERR(id);
2494
2495 mutex_lock(&id_priv->qp_mutex);
2496 id->tos = id_priv->tos;
2497 id->tos_set = id_priv->tos_set;
2498 mutex_unlock(&id_priv->qp_mutex);
2499 id->afonly = id_priv->afonly;
2500 id_priv->cm_id.iw = id;
2501
2502 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
2503 rdma_addr_size(cma_src_addr(id_priv)));
2504
2505 ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
2506
2507 if (ret) {
2508 iw_destroy_cm_id(id_priv->cm_id.iw);
2509 id_priv->cm_id.iw = NULL;
2510 }
2511
2512 return ret;
2513 }
2514
2515 static int cma_listen_handler(struct rdma_cm_id *id,
2516 struct rdma_cm_event *event)
2517 {
2518 struct rdma_id_private *id_priv = id->context;
2519
2520 /* Listening IDs are always destroyed on removal */
2521 if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
2522 return -1;
2523
2524 id->context = id_priv->id.context;
2525 id->event_handler = id_priv->id.event_handler;
2526 trace_cm_event_handler(id_priv, event);
2527 return id_priv->id.event_handler(id, event);
2528 }
2529
2530 static int cma_listen_on_dev(struct rdma_id_private *id_priv,
2531 struct cma_device *cma_dev,
2532 struct rdma_id_private **to_destroy)
2533 {
2534 struct rdma_id_private *dev_id_priv;
2535 struct net *net = id_priv->id.route.addr.dev_addr.net;
2536 int ret;
2537
2538 lockdep_assert_held(&lock);
2539
2540 *to_destroy = NULL;
2541 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
2542 return 0;
2543
2544 dev_id_priv =
2545 __rdma_create_id(net, cma_listen_handler, id_priv,
2546 id_priv->id.ps, id_priv->id.qp_type, id_priv);
2547 if (IS_ERR(dev_id_priv))
2548 return PTR_ERR(dev_id_priv);
2549
2550 dev_id_priv->state = RDMA_CM_ADDR_BOUND;
2551 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
2552 rdma_addr_size(cma_src_addr(id_priv)));
2553
2554 _cma_attach_to_dev(dev_id_priv, cma_dev);
2555 rdma_restrack_add(&dev_id_priv->res);
2556 cma_id_get(id_priv);
2557 dev_id_priv->internal_id = 1;
2558 dev_id_priv->afonly = id_priv->afonly;
2559 mutex_lock(&id_priv->qp_mutex);
2560 dev_id_priv->tos_set = id_priv->tos_set;
2561 dev_id_priv->tos = id_priv->tos;
2562 mutex_unlock(&id_priv->qp_mutex);
2563
2564 ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
2565 if (ret)
2566 goto err_listen;
2567 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
2568 return 0;
2569 err_listen:
2570 /* Caller must destroy this after releasing lock */
2571 *to_destroy = dev_id_priv;
2572 dev_warn(&cma_dev->device->dev, "RDMA CMA: %s, error %d\n", __func__, ret);
2573 return ret;
2574 }
2575
2576 static int cma_listen_on_all(struct rdma_id_private *id_priv)
2577 {
2578 struct rdma_id_private *to_destroy;
2579 struct cma_device *cma_dev;
2580 int ret;
2581
2582 mutex_lock(&lock);
2583 list_add_tail(&id_priv->list, &listen_any_list);
2584 list_for_each_entry(cma_dev, &dev_list, list) {
2585 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
2586 if (ret) {
2587 /* Prevent racing with cma_process_remove() */
2588 if (to_destroy)
2589 list_del_init(&to_destroy->list);
2590 goto err_listen;
2591 }
2592 }
2593 mutex_unlock(&lock);
2594 return 0;
2595
2596 err_listen:
2597 _cma_cancel_listens(id_priv);
2598 mutex_unlock(&lock);
2599 if (to_destroy)
2600 rdma_destroy_id(&to_destroy->id);
2601 return ret;
2602 }
2603
2604 void rdma_set_service_type(struct rdma_cm_id *id, int tos)
2605 {
2606 struct rdma_id_private *id_priv;
2607
2608 id_priv = container_of(id, struct rdma_id_private, id);
2609 mutex_lock(&id_priv->qp_mutex);
2610 id_priv->tos = (u8) tos;
2611 id_priv->tos_set = true;
2612 mutex_unlock(&id_priv->qp_mutex);
2613 }
2614 EXPORT_SYMBOL(rdma_set_service_type);
2615
2616 /**
2617 * rdma_set_ack_timeout() - Set the ack timeout of QP associated
2618 * with a connection identifier.
2619 * @id: Communication identifier associated with the QP.
2620 * @timeout: Ack timeout to set on the QP, expressed as 4.096 * 2^(timeout) usec.
2621 *
2622 * This function should be called before rdma_connect() on the active side,
2623 * and before rdma_accept() on the passive side. It applies to the primary
2624 * path only. The timeout affects the local side of the QP; it is not
2625 * negotiated with the remote side, and zero disables the timer. If it is
2626 * set before rdma_resolve_route, the value is also used to determine the
2627 * PacketLifeTime for RoCE.
2628 *
2629 * Return: 0 for success
2630 */
2631 int rdma_set_ack_timeout(struct rdma_cm_id *id, u8 timeout)
2632 {
2633 struct rdma_id_private *id_priv;
2634
2635 if (id->qp_type != IB_QPT_RC)
2636 return -EINVAL;
2637
2638 id_priv = container_of(id, struct rdma_id_private, id);
2639 mutex_lock(&id_priv->qp_mutex);
2640 id_priv->timeout = timeout;
2641 id_priv->timeout_set = true;
2642 mutex_unlock(&id_priv->qp_mutex);
2643
2644 return 0;
2645 }
2646 EXPORT_SYMBOL(rdma_set_ack_timeout);
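/*
 * Example (a minimal sketch; my_id and conn_param are hypothetical and
 * assume an already route-resolved ID): the timeout is a 5-bit exponent,
 * so 14 requests roughly 4.096 usec * 2^14 ~= 67 msec before a transport
 * retry:
 *
 *	ret = rdma_set_ack_timeout(my_id, 14);
 *	if (ret)
 *		return ret;
 *	ret = rdma_connect(my_id, &conn_param);
 */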
2647
2648 /**
2649 * rdma_set_min_rnr_timer() - Set the minimum RNR Retry timer of the
2650 * QP associated with a connection identifier.
2651 * @id: Communication identifier associated with the QP.
2652 * @min_rnr_timer: 5-bit value encoded as Table 45: "Encoding for RNR NAK
2653 * Timer Field" in the IBTA specification.
2654 *
2655 * This function should be called before rdma_connect() on the active
2656 * side, and before rdma_accept() on the passive side. The timer value
2657 * will be associated with the local QP. When the QP receives a send it
2658 * is not ready to handle, typically because the receive queue is empty,
2659 * an RNR Retry NAK is returned to the requester with the min_rnr_timer
2660 * encoded. The requester will then wait at least the time specified
2661 * in the NAK before retrying. The default is zero, which translates
2662 * to a minimum RNR Timer value of 655 ms.
2663 *
2664 * Return: 0 for success
2665 */
2666 int rdma_set_min_rnr_timer(struct rdma_cm_id *id, u8 min_rnr_timer)
2667 {
2668 struct rdma_id_private *id_priv;
2669
2670 /* It is a five-bit value */
2671 if (min_rnr_timer & 0xe0)
2672 return -EINVAL;
2673
2674 if (WARN_ON(id->qp_type != IB_QPT_RC && id->qp_type != IB_QPT_XRC_TGT))
2675 return -EINVAL;
2676
2677 id_priv = container_of(id, struct rdma_id_private, id);
2678 mutex_lock(&id_priv->qp_mutex);
2679 id_priv->min_rnr_timer = min_rnr_timer;
2680 id_priv->min_rnr_timer_set = true;
2681 mutex_unlock(&id_priv->qp_mutex);
2682
2683 return 0;
2684 }
2685 EXPORT_SYMBOL(rdma_set_min_rnr_timer);
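/*
 * Example (a minimal sketch; my_id is a hypothetical rdma_cm_id): the
 * IB_RNR_TIMER_* constants in ib_verbs.h provide the Table 45 encodings,
 * e.g. IB_RNR_TIMER_000_64 asks the remote requester to back off for at
 * least 0.64 msec after an RNR NAK:
 *
 *	ret = rdma_set_min_rnr_timer(my_id, IB_RNR_TIMER_000_64);
 *	if (ret)
 *		return ret;
 */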
2686
2687 static void cma_query_handler(int status, struct sa_path_rec *path_rec,
2688 void *context)
2689 {
2690 struct cma_work *work = context;
2691 struct rdma_route *route;
2692
2693 route = &work->id->id.route;
2694
2695 if (!status) {
2696 route->num_paths = 1;
2697 *route->path_rec = *path_rec;
2698 } else {
2699 work->old_state = RDMA_CM_ROUTE_QUERY;
2700 work->new_state = RDMA_CM_ADDR_RESOLVED;
2701 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
2702 work->event.status = status;
2703 pr_debug_ratelimited("RDMA CM: ROUTE_ERROR: failed to query path. status %d\n",
2704 status);
2705 }
2706
2707 queue_work(cma_wq, &work->work);
2708 }
2709
2710 static int cma_query_ib_route(struct rdma_id_private *id_priv,
2711 unsigned long timeout_ms, struct cma_work *work)
2712 {
2713 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2714 struct sa_path_rec path_rec;
2715 ib_sa_comp_mask comp_mask;
2716 struct sockaddr_in6 *sin6;
2717 struct sockaddr_ib *sib;
2718
2719 memset(&path_rec, 0, sizeof path_rec);
2720
2721 if (rdma_cap_opa_ah(id_priv->id.device, id_priv->id.port_num))
2722 path_rec.rec_type = SA_PATH_REC_TYPE_OPA;
2723 else
2724 path_rec.rec_type = SA_PATH_REC_TYPE_IB;
2725 rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
2726 rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
2727 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2728 path_rec.numb_path = 1;
2729 path_rec.reversible = 1;
2730 path_rec.service_id = rdma_get_service_id(&id_priv->id,
2731 cma_dst_addr(id_priv));
2732
2733 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
2734 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
2735 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
2736
2737 switch (cma_family(id_priv)) {
2738 case AF_INET:
2739 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
2740 comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
2741 break;
2742 case AF_INET6:
2743 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
2744 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
2745 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2746 break;
2747 case AF_IB:
2748 sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
2749 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
2750 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2751 break;
2752 }
2753
2754 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
2755 id_priv->id.port_num, &path_rec,
2756 comp_mask, timeout_ms,
2757 GFP_KERNEL, cma_query_handler,
2758 work, &id_priv->query);
2759
2760 return (id_priv->query_id < 0) ? id_priv->query_id : 0;
2761 }
2762
2763 static void cma_iboe_join_work_handler(struct work_struct *work)
2764 {
2765 struct cma_multicast *mc =
2766 container_of(work, struct cma_multicast, iboe_join.work);
2767 struct rdma_cm_event *event = &mc->iboe_join.event;
2768 struct rdma_id_private *id_priv = mc->id_priv;
2769 int ret;
2770
2771 mutex_lock(&id_priv->handler_mutex);
2772 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
2773 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
2774 goto out_unlock;
2775
2776 ret = cma_cm_event_handler(id_priv, event);
2777 WARN_ON(ret);
2778
2779 out_unlock:
2780 mutex_unlock(&id_priv->handler_mutex);
2781 if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
2782 rdma_destroy_ah_attr(&event->param.ud.ah_attr);
2783 }
2784
2785 static void cma_work_handler(struct work_struct *_work)
2786 {
2787 struct cma_work *work = container_of(_work, struct cma_work, work);
2788 struct rdma_id_private *id_priv = work->id;
2789
2790 mutex_lock(&id_priv->handler_mutex);
2791 if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
2792 READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
2793 goto out_unlock;
2794 if (work->old_state != 0 || work->new_state != 0) {
2795 if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
2796 goto out_unlock;
2797 }
2798
2799 if (cma_cm_event_handler(id_priv, &work->event)) {
2800 cma_id_put(id_priv);
2801 destroy_id_handler_unlock(id_priv);
2802 goto out_free;
2803 }
2804
2805 out_unlock:
2806 mutex_unlock(&id_priv->handler_mutex);
2807 cma_id_put(id_priv);
2808 out_free:
2809 if (work->event.event == RDMA_CM_EVENT_MULTICAST_JOIN)
2810 rdma_destroy_ah_attr(&work->event.param.ud.ah_attr);
2811 kfree(work);
2812 }
2813
2814 static void cma_init_resolve_route_work(struct cma_work *work,
2815 struct rdma_id_private *id_priv)
2816 {
2817 work->id = id_priv;
2818 INIT_WORK(&work->work, cma_work_handler);
2819 work->old_state = RDMA_CM_ROUTE_QUERY;
2820 work->new_state = RDMA_CM_ROUTE_RESOLVED;
2821 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
2822 }
2823
2824 static void enqueue_resolve_addr_work(struct cma_work *work,
2825 struct rdma_id_private *id_priv)
2826 {
2827 /* Balances with cma_id_put() in cma_work_handler */
2828 cma_id_get(id_priv);
2829
2830 work->id = id_priv;
2831 INIT_WORK(&work->work, cma_work_handler);
2832 work->old_state = RDMA_CM_ADDR_QUERY;
2833 work->new_state = RDMA_CM_ADDR_RESOLVED;
2834 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2835
2836 queue_work(cma_wq, &work->work);
2837 }
2838
2839 static int cma_resolve_ib_route(struct rdma_id_private *id_priv,
2840 unsigned long timeout_ms)
2841 {
2842 struct rdma_route *route = &id_priv->id.route;
2843 struct cma_work *work;
2844 int ret;
2845
2846 work = kzalloc(sizeof *work, GFP_KERNEL);
2847 if (!work)
2848 return -ENOMEM;
2849
2850 cma_init_resolve_route_work(work, id_priv);
2851
2852 if (!route->path_rec)
2853 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
2854 if (!route->path_rec) {
2855 ret = -ENOMEM;
2856 goto err1;
2857 }
2858
2859 ret = cma_query_ib_route(id_priv, timeout_ms, work);
2860 if (ret)
2861 goto err2;
2862
2863 return 0;
2864 err2:
2865 kfree(route->path_rec);
2866 route->path_rec = NULL;
2867 err1:
2868 kfree(work);
2869 return ret;
2870 }
2871
2872 static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
2873 unsigned long supported_gids,
2874 enum ib_gid_type default_gid)
2875 {
2876 if ((network_type == RDMA_NETWORK_IPV4 ||
2877 network_type == RDMA_NETWORK_IPV6) &&
2878 test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
2879 return IB_GID_TYPE_ROCE_UDP_ENCAP;
2880
2881 return default_gid;
2882 }
2883
2884 /*
2885 * cma_iboe_set_path_rec_l2_fields() is a helper that sets the path
2886 * record type based on the GID type.
2887 * It also sets up the other L2 fields of the path record, including the
2888 * destination MAC address and netdev ifindex.
2889 * It returns the netdev of the bound interface for this path record entry.
2890 */
2891 static struct net_device *
2892 cma_iboe_set_path_rec_l2_fields(struct rdma_id_private *id_priv)
2893 {
2894 struct rdma_route *route = &id_priv->id.route;
2895 enum ib_gid_type gid_type = IB_GID_TYPE_ROCE;
2896 struct rdma_addr *addr = &route->addr;
2897 unsigned long supported_gids;
2898 struct net_device *ndev;
2899
2900 if (!addr->dev_addr.bound_dev_if)
2901 return NULL;
2902
2903 ndev = dev_get_by_index(addr->dev_addr.net,
2904 addr->dev_addr.bound_dev_if);
2905 if (!ndev)
2906 return NULL;
2907
2908 supported_gids = roce_gid_type_mask_support(id_priv->id.device,
2909 id_priv->id.port_num);
2910 gid_type = cma_route_gid_type(addr->dev_addr.network,
2911 supported_gids,
2912 id_priv->gid_type);
2913 /* Use the hint from IP Stack to select GID Type */
2914 if (gid_type < ib_network_to_gid_type(addr->dev_addr.network))
2915 gid_type = ib_network_to_gid_type(addr->dev_addr.network);
2916 route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type);
2917
2918 route->path_rec->roce.route_resolved = true;
2919 sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr);
2920 return ndev;
2921 }
2922
2923 int rdma_set_ib_path(struct rdma_cm_id *id,
2924 struct sa_path_rec *path_rec)
2925 {
2926 struct rdma_id_private *id_priv;
2927 struct net_device *ndev;
2928 int ret;
2929
2930 id_priv = container_of(id, struct rdma_id_private, id);
2931 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
2932 RDMA_CM_ROUTE_RESOLVED))
2933 return -EINVAL;
2934
2935 id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec),
2936 GFP_KERNEL);
2937 if (!id->route.path_rec) {
2938 ret = -ENOMEM;
2939 goto err;
2940 }
2941
2942 if (rdma_protocol_roce(id->device, id->port_num)) {
2943 ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
2944 if (!ndev) {
2945 ret = -ENODEV;
2946 goto err_free;
2947 }
2948 dev_put(ndev);
2949 }
2950
2951 id->route.num_paths = 1;
2952 return 0;
2953
2954 err_free:
2955 kfree(id->route.path_rec);
2956 id->route.path_rec = NULL;
2957 err:
2958 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
2959 return ret;
2960 }
2961 EXPORT_SYMBOL(rdma_set_ib_path);
2962
2963 static int cma_resolve_iw_route(struct rdma_id_private *id_priv)
2964 {
2965 struct cma_work *work;
2966
2967 work = kzalloc(sizeof *work, GFP_KERNEL);
2968 if (!work)
2969 return -ENOMEM;
2970
2971 cma_init_resolve_route_work(work, id_priv);
2972 queue_work(cma_wq, &work->work);
2973 return 0;
2974 }
2975
2976 static int get_vlan_ndev_tc(struct net_device *vlan_ndev, int prio)
2977 {
2978 struct net_device *dev;
2979
2980 dev = vlan_dev_real_dev(vlan_ndev);
2981 if (dev->num_tc)
2982 return netdev_get_prio_tc_map(dev, prio);
2983
2984 return (vlan_dev_get_egress_qos_mask(vlan_ndev, prio) &
2985 VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
2986 }
2987
2988 struct iboe_prio_tc_map {
2989 int input_prio;
2990 int output_tc;
2991 bool found;
2992 };
2993
2994 static int get_lower_vlan_dev_tc(struct net_device *dev,
2995 struct netdev_nested_priv *priv)
2996 {
2997 struct iboe_prio_tc_map *map = (struct iboe_prio_tc_map *)priv->data;
2998
2999 if (is_vlan_dev(dev))
3000 map->output_tc = get_vlan_ndev_tc(dev, map->input_prio);
3001 else if (dev->num_tc)
3002 map->output_tc = netdev_get_prio_tc_map(dev, map->input_prio);
3003 else
3004 map->output_tc = 0;
3005 /* We are interested only in the first-level VLAN device, so always
3006 * return 1 to stop iterating over the next-level devices.
3007 */
3008 map->found = true;
3009 return 1;
3010 }
3011
3012 static int iboe_tos_to_sl(struct net_device *ndev, int tos)
3013 {
3014 struct iboe_prio_tc_map prio_tc_map = {};
3015 int prio = rt_tos2priority(tos);
3016 struct netdev_nested_priv priv;
3017
3018 /* If VLAN device, get it directly from the VLAN netdev */
3019 if (is_vlan_dev(ndev))
3020 return get_vlan_ndev_tc(ndev, prio);
3021
3022 prio_tc_map.input_prio = prio;
3023 priv.data = (void *)&prio_tc_map;
3024 rcu_read_lock();
3025 netdev_walk_all_lower_dev_rcu(ndev,
3026 get_lower_vlan_dev_tc,
3027 &priv);
3028 rcu_read_unlock();
3029 /* If a map is found on a lower device, use it; otherwise
3030 * fall back to the current netdevice's priority-to-tc map.
3031 */
3032 if (prio_tc_map.found)
3033 return prio_tc_map.output_tc;
3034 else if (ndev->num_tc)
3035 return netdev_get_prio_tc_map(ndev, prio);
3036 else
3037 return 0;
3038 }
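/*
 * Example: for a plain (non-VLAN) netdev with no traffic classes this
 * resolves to SL 0; with tc configured, the ToS is first converted to a
 * priority via rt_tos2priority() and then mapped through the device's
 * prio-to-tc table by netdev_get_prio_tc_map().
 */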
3039
3040 static __be32 cma_get_roce_udp_flow_label(struct rdma_id_private *id_priv)
3041 {
3042 struct sockaddr_in6 *addr6;
3043 u16 dport, sport;
3044 u32 hash, fl;
3045
3046 addr6 = (struct sockaddr_in6 *)cma_src_addr(id_priv);
3047 fl = be32_to_cpu(addr6->sin6_flowinfo) & IB_GRH_FLOWLABEL_MASK;
3048 if ((cma_family(id_priv) != AF_INET6) || !fl) {
3049 dport = be16_to_cpu(cma_port(cma_dst_addr(id_priv)));
3050 sport = be16_to_cpu(cma_port(cma_src_addr(id_priv)));
3051 hash = (u32)sport * 31 + dport;
3052 fl = hash & IB_GRH_FLOWLABEL_MASK;
3053 }
3054
3055 return cpu_to_be32(fl);
3056 }
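/*
 * Example: with no IPv6 flow label supplied, a source port of 100 and a
 * destination port of 200 hash to 100 * 31 + 200 = 3300, so the 20-bit
 * flow label becomes 3300 & IB_GRH_FLOWLABEL_MASK == 0xce4.
 */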
3057
3058 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
3059 {
3060 struct rdma_route *route = &id_priv->id.route;
3061 struct rdma_addr *addr = &route->addr;
3062 struct cma_work *work;
3063 int ret;
3064 struct net_device *ndev;
3065
3066 u8 default_roce_tos = id_priv->cma_dev->default_roce_tos[id_priv->id.port_num -
3067 rdma_start_port(id_priv->cma_dev->device)];
3068 u8 tos;
3069
3070 mutex_lock(&id_priv->qp_mutex);
3071 tos = id_priv->tos_set ? id_priv->tos : default_roce_tos;
3072 mutex_unlock(&id_priv->qp_mutex);
3073
3074 work = kzalloc(sizeof *work, GFP_KERNEL);
3075 if (!work)
3076 return -ENOMEM;
3077
3078 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
3079 if (!route->path_rec) {
3080 ret = -ENOMEM;
3081 goto err1;
3082 }
3083
3084 route->num_paths = 1;
3085
3086 ndev = cma_iboe_set_path_rec_l2_fields(id_priv);
3087 if (!ndev) {
3088 ret = -ENODEV;
3089 goto err2;
3090 }
3091
3092 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
3093 &route->path_rec->sgid);
3094 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
3095 &route->path_rec->dgid);
3096
3097 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
3098 /* TODO: get the hoplimit from the inet/inet6 device */
3099 route->path_rec->hop_limit = addr->dev_addr.hoplimit;
3100 else
3101 route->path_rec->hop_limit = 1;
3102 route->path_rec->reversible = 1;
3103 route->path_rec->pkey = cpu_to_be16(0xffff);
3104 route->path_rec->mtu_selector = IB_SA_EQ;
3105 route->path_rec->sl = iboe_tos_to_sl(ndev, tos);
3106 route->path_rec->traffic_class = tos;
3107 route->path_rec->mtu = iboe_get_mtu(ndev->mtu);
3108 route->path_rec->rate_selector = IB_SA_EQ;
3109 route->path_rec->rate = iboe_get_rate(ndev);
3110 dev_put(ndev);
3111 route->path_rec->packet_life_time_selector = IB_SA_EQ;
3112 /* In case ACK timeout is set, use this value to calculate
3113 * PacketLifeTime. As per IBTA 12.7.34,
3114 * local ACK timeout = (2 * PacketLifeTime + Local CA’s ACK delay).
3115 * Assuming a negligible local ACK delay, we can use
3116 * PacketLifeTime = local ACK timeout/2
3117 * as a reasonable approximation for RoCE networks.
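 * Both the ACK timeout and PacketLifeTime use the same exponential
 * encoding (4.096 usec * 2^value), so halving the time is equivalent to
 * subtracting one from the exponent, which is what the "timeout - 1"
 * assignment below implements.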
3118 */
3119 mutex_lock(&id_priv->qp_mutex);
3120 if (id_priv->timeout_set && id_priv->timeout)
3121 route->path_rec->packet_life_time = id_priv->timeout - 1;
3122 else
3123 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
3124 mutex_unlock(&id_priv->qp_mutex);
3125
3126 if (!route->path_rec->mtu) {
3127 ret = -EINVAL;
3128 goto err2;
3129 }
3130
3131 if (rdma_protocol_roce_udp_encap(id_priv->id.device,
3132 id_priv->id.port_num))
3133 route->path_rec->flow_label =
3134 cma_get_roce_udp_flow_label(id_priv);
3135
3136 cma_init_resolve_route_work(work, id_priv);
3137 queue_work(cma_wq, &work->work);
3138
3139 return 0;
3140
3141 err2:
3142 kfree(route->path_rec);
3143 route->path_rec = NULL;
3144 route->num_paths = 0;
3145 err1:
3146 kfree(work);
3147 return ret;
3148 }
3149
3150 int rdma_resolve_route(struct rdma_cm_id *id, unsigned long timeout_ms)
3151 {
3152 struct rdma_id_private *id_priv;
3153 int ret;
3154
3155 if (!timeout_ms)
3156 return -EINVAL;
3157
3158 id_priv = container_of(id, struct rdma_id_private, id);
3159 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
3160 return -EINVAL;
3161
3162 cma_id_get(id_priv);
3163 if (rdma_cap_ib_sa(id->device, id->port_num))
3164 ret = cma_resolve_ib_route(id_priv, timeout_ms);
3165 else if (rdma_protocol_roce(id->device, id->port_num))
3166 ret = cma_resolve_iboe_route(id_priv);
3167 else if (rdma_protocol_iwarp(id->device, id->port_num))
3168 ret = cma_resolve_iw_route(id_priv);
3169 else
3170 ret = -ENOSYS;
3171
3172 if (ret)
3173 goto err;
3174
3175 return 0;
3176 err:
3177 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
3178 cma_id_put(id_priv);
3179 return ret;
3180 }
3181 EXPORT_SYMBOL(rdma_resolve_route);
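/*
 * Example (a minimal sketch; my_handler and dst are hypothetical): a ULP
 * drives resolution asynchronously, calling rdma_resolve_route() once the
 * RDMA_CM_EVENT_ADDR_RESOLVED event is delivered and connecting after
 * RDMA_CM_EVENT_ROUTE_RESOLVED:
 *
 *	id = rdma_create_id(&init_net, my_handler, NULL, RDMA_PS_TCP,
 *			    IB_QPT_RC);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *)&dst, 2000);
 *
 * and then, from my_handler() on RDMA_CM_EVENT_ADDR_RESOLVED:
 *
 *	ret = rdma_resolve_route(id, 2000);
 */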
3182
3183 static void cma_set_loopback(struct sockaddr *addr)
3184 {
3185 switch (addr->sa_family) {
3186 case AF_INET:
3187 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
3188 break;
3189 case AF_INET6:
3190 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
3191 0, 0, 0, htonl(1));
3192 break;
3193 default:
3194 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
3195 0, 0, 0, htonl(1));
3196 break;
3197 }
3198 }
3199
3200 static int cma_bind_loopback(struct rdma_id_private *id_priv)
3201 {
3202 struct cma_device *cma_dev, *cur_dev;
3203 union ib_gid gid;
3204 enum ib_port_state port_state;
3205 unsigned int p;
3206 u16 pkey;
3207 int ret;
3208
3209 cma_dev = NULL;
3210 mutex_lock(&lock);
3211 list_for_each_entry(cur_dev, &dev_list, list) {
3212 if (cma_family(id_priv) == AF_IB &&
3213 !rdma_cap_ib_cm(cur_dev->device, 1))
3214 continue;
3215
3216 if (!cma_dev)
3217 cma_dev = cur_dev;
3218
3219 rdma_for_each_port (cur_dev->device, p) {
3220 if (!ib_get_cached_port_state(cur_dev->device, p, &port_state) &&
3221 port_state == IB_PORT_ACTIVE) {
3222 cma_dev = cur_dev;
3223 goto port_found;
3224 }
3225 }
3226 }
3227
3228 if (!cma_dev) {
3229 ret = -ENODEV;
3230 goto out;
3231 }
3232
3233 p = 1;
3234
3235 port_found:
3236 ret = rdma_query_gid(cma_dev->device, p, 0, &gid);
3237 if (ret)
3238 goto out;
3239
3240 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
3241 if (ret)
3242 goto out;
3243
3244 id_priv->id.route.addr.dev_addr.dev_type =
3245 (rdma_protocol_ib(cma_dev->device, p)) ?
3246 ARPHRD_INFINIBAND : ARPHRD_ETHER;
3247
3248 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
3249 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
3250 id_priv->id.port_num = p;
3251 cma_attach_to_dev(id_priv, cma_dev);
3252 rdma_restrack_add(&id_priv->res);
3253 cma_set_loopback(cma_src_addr(id_priv));
3254 out:
3255 mutex_unlock(&lock);
3256 return ret;
3257 }
3258
3259 static void addr_handler(int status, struct sockaddr *src_addr,
3260 struct rdma_dev_addr *dev_addr, void *context)
3261 {
3262 struct rdma_id_private *id_priv = context;
3263 struct rdma_cm_event event = {};
3264 struct sockaddr *addr;
3265 struct sockaddr_storage old_addr;
3266
3267 mutex_lock(&id_priv->handler_mutex);
3268 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
3269 RDMA_CM_ADDR_RESOLVED))
3270 goto out;
3271
3272 /*
3273 * Store the previous src address so that, if we fail to acquire a
3274 * matching rdma device, the old address can be restored, which allows
3275 * the cma listen operation to be canceled correctly.
3276 */
3277 addr = cma_src_addr(id_priv);
3278 memcpy(&old_addr, addr, rdma_addr_size(addr));
3279 memcpy(addr, src_addr, rdma_addr_size(src_addr));
3280 if (!status && !id_priv->cma_dev) {
3281 status = cma_acquire_dev_by_src_ip(id_priv);
3282 if (status)
3283 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to acquire device. status %d\n",
3284 status);
3285 rdma_restrack_add(&id_priv->res);
3286 } else if (status) {
3287 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to resolve IP. status %d\n", status);
3288 }
3289
3290 if (status) {
3291 memcpy(addr, &old_addr,
3292 rdma_addr_size((struct sockaddr *)&old_addr));
3293 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
3294 RDMA_CM_ADDR_BOUND))
3295 goto out;
3296 event.event = RDMA_CM_EVENT_ADDR_ERROR;
3297 event.status = status;
3298 } else
3299 event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
3300
3301 if (cma_cm_event_handler(id_priv, &event)) {
3302 destroy_id_handler_unlock(id_priv);
3303 return;
3304 }
3305 out:
3306 mutex_unlock(&id_priv->handler_mutex);
3307 }
3308
3309 static int cma_resolve_loopback(struct rdma_id_private *id_priv)
3310 {
3311 struct cma_work *work;
3312 union ib_gid gid;
3313 int ret;
3314
3315 work = kzalloc(sizeof *work, GFP_KERNEL);
3316 if (!work)
3317 return -ENOMEM;
3318
3319 if (!id_priv->cma_dev) {
3320 ret = cma_bind_loopback(id_priv);
3321 if (ret)
3322 goto err;
3323 }
3324
3325 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
3326 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
3327
3328 enqueue_resolve_addr_work(work, id_priv);
3329 return 0;
3330 err:
3331 kfree(work);
3332 return ret;
3333 }
3334
3335 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
3336 {
3337 struct cma_work *work;
3338 int ret;
3339
3340 work = kzalloc(sizeof *work, GFP_KERNEL);
3341 if (!work)
3342 return -ENOMEM;
3343
3344 if (!id_priv->cma_dev) {
3345 ret = cma_resolve_ib_dev(id_priv);
3346 if (ret)
3347 goto err;
3348 }
3349
3350 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
3351 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
3352
3353 enqueue_resolve_addr_work(work, id_priv);
3354 return 0;
3355 err:
3356 kfree(work);
3357 return ret;
3358 }
3359
3360 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
3361 const struct sockaddr *dst_addr)
3362 {
3363 if (!src_addr || !src_addr->sa_family) {
3364 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
3365 src_addr->sa_family = dst_addr->sa_family;
3366 if (IS_ENABLED(CONFIG_IPV6) &&
3367 dst_addr->sa_family == AF_INET6) {
3368 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
3369 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
3370 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
3371 if (ipv6_addr_type(&dst_addr6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
3372 id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
3373 } else if (dst_addr->sa_family == AF_IB) {
3374 ((struct sockaddr_ib *) src_addr)->sib_pkey =
3375 ((struct sockaddr_ib *) dst_addr)->sib_pkey;
3376 }
3377 }
3378 return rdma_bind_addr(id, src_addr);
3379 }
3380
3381 /*
3382 * If required, resolve the source address for bind and leave the id_priv in
3383 * state RDMA_CM_ADDR_BOUND. This oddly uses the state to determine the prior
3384 * calls made by the ULP; a previously bound ID will not be re-bound and
3385 * src_addr is ignored.
3386 */
3387 static int resolve_prepare_src(struct rdma_id_private *id_priv,
3388 struct sockaddr *src_addr,
3389 const struct sockaddr *dst_addr)
3390 {
3391 int ret;
3392
3393 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
3394 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
3395 /* For a well-behaved ULP the state will be RDMA_CM_IDLE */
3396 ret = cma_bind_addr(&id_priv->id, src_addr, dst_addr);
3397 if (ret)
3398 goto err_dst;
3399 if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
3400 RDMA_CM_ADDR_QUERY))) {
3401 ret = -EINVAL;
3402 goto err_dst;
3403 }
3404 }
3405
3406 if (cma_family(id_priv) != dst_addr->sa_family) {
3407 ret = -EINVAL;
3408 goto err_state;
3409 }
3410 return 0;
3411
3412 err_state:
3413 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
3414 err_dst:
3415 memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
3416 return ret;
3417 }
3418
3419 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
3420 const struct sockaddr *dst_addr, unsigned long timeout_ms)
3421 {
3422 struct rdma_id_private *id_priv =
3423 container_of(id, struct rdma_id_private, id);
3424 int ret;
3425
3426 ret = resolve_prepare_src(id_priv, src_addr, dst_addr);
3427 if (ret)
3428 return ret;
3429
3430 if (cma_any_addr(dst_addr)) {
3431 ret = cma_resolve_loopback(id_priv);
3432 } else {
3433 if (dst_addr->sa_family == AF_IB) {
3434 ret = cma_resolve_ib_addr(id_priv);
3435 } else {
3436 /*
3437 * The FSM can return to RDMA_CM_ADDR_BOUND after
3438 * rdma_resolve_ip() is called, e.g. through the error
3439 * path in addr_handler(). If this happens the existing
3440 * request must be canceled before issuing a new one.
3441 * Since canceling a request is a bit slow and this
3442 * oddball path is rare, just track whether a request
3443 * has ever been issued. The flag is effectively
3444 * permanent state, since this is the only cancel and it
3445 * sits immediately before rdma_resolve_ip().
3446 */
3447 if (id_priv->used_resolve_ip)
3448 rdma_addr_cancel(&id->route.addr.dev_addr);
3449 else
3450 id_priv->used_resolve_ip = 1;
3451 ret = rdma_resolve_ip(cma_src_addr(id_priv), dst_addr,
3452 &id->route.addr.dev_addr,
3453 timeout_ms, addr_handler,
3454 false, id_priv);
3455 }
3456 }
3457 if (ret)
3458 goto err;
3459
3460 return 0;
3461 err:
3462 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
3463 return ret;
3464 }
3465 EXPORT_SYMBOL(rdma_resolve_addr);
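
/*
 * Minimal usage sketch (not part of this file), assuming a hypothetical ULP
 * handler example_cm_handler(), context ctx and destination example_dst:
 * address resolution is started from process context and connection setup
 * continues from the handler once RDMA_CM_EVENT_ADDR_RESOLVED arrives.
 * Returning a non-zero value from the handler destroys the id.
 *
 *	static int example_cm_handler(struct rdma_cm_id *id,
 *				      struct rdma_cm_event *event)
 *	{
 *		switch (event->event) {
 *		case RDMA_CM_EVENT_ADDR_RESOLVED:
 *			return rdma_resolve_route(id, 2000);
 *		case RDMA_CM_EVENT_ADDR_ERROR:
 *			return -EHOSTUNREACH;
 *		default:
 *			return 0;
 *		}
 *	}
 *
 *	id = rdma_create_id(&init_net, example_cm_handler, ctx,
 *			    RDMA_PS_TCP, IB_QPT_RC);
 *	if (IS_ERR(id))
 *		return PTR_ERR(id);
 *	ret = rdma_resolve_addr(id, NULL, (struct sockaddr *)&example_dst,
 *				2000);
 */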
3466
3467 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
3468 {
3469 struct rdma_id_private *id_priv;
3470 unsigned long flags;
3471 int ret;
3472
3473 id_priv = container_of(id, struct rdma_id_private, id);
3474 spin_lock_irqsave(&id_priv->lock, flags);
3475 if ((reuse && id_priv->state != RDMA_CM_LISTEN) ||
3476 id_priv->state == RDMA_CM_IDLE) {
3477 id_priv->reuseaddr = reuse;
3478 ret = 0;
3479 } else {
3480 ret = -EINVAL;
3481 }
3482 spin_unlock_irqrestore(&id_priv->lock, flags);
3483 return ret;
3484 }
3485 EXPORT_SYMBOL(rdma_set_reuseaddr);
3486
3487 int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
3488 {
3489 struct rdma_id_private *id_priv;
3490 unsigned long flags;
3491 int ret;
3492
3493 id_priv = container_of(id, struct rdma_id_private, id);
3494 spin_lock_irqsave(&id_priv->lock, flags);
3495 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
3496 id_priv->options |= (1 << CMA_OPTION_AFONLY);
3497 id_priv->afonly = afonly;
3498 ret = 0;
3499 } else {
3500 ret = -EINVAL;
3501 }
3502 spin_unlock_irqrestore(&id_priv->lock, flags);
3503 return ret;
3504 }
3505 EXPORT_SYMBOL(rdma_set_afonly);
3506
3507 static void cma_bind_port(struct rdma_bind_list *bind_list,
3508 struct rdma_id_private *id_priv)
3509 {
3510 struct sockaddr *addr;
3511 struct sockaddr_ib *sib;
3512 u64 sid, mask;
3513 __be16 port;
3514
3515 lockdep_assert_held(&lock);
3516
3517 addr = cma_src_addr(id_priv);
3518 port = htons(bind_list->port);
3519
3520 switch (addr->sa_family) {
3521 case AF_INET:
3522 ((struct sockaddr_in *) addr)->sin_port = port;
3523 break;
3524 case AF_INET6:
3525 ((struct sockaddr_in6 *) addr)->sin6_port = port;
3526 break;
3527 case AF_IB:
3528 sib = (struct sockaddr_ib *) addr;
3529 sid = be64_to_cpu(sib->sib_sid);
3530 mask = be64_to_cpu(sib->sib_sid_mask);
3531 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
3532 sib->sib_sid_mask = cpu_to_be64(~0ULL);
3533 break;
3534 }
3535 id_priv->bind_list = bind_list;
3536 hlist_add_head(&id_priv->node, &bind_list->owners);
3537 }
3538
3539 static int cma_alloc_port(enum rdma_ucm_port_space ps,
3540 struct rdma_id_private *id_priv, unsigned short snum)
3541 {
3542 struct rdma_bind_list *bind_list;
3543 int ret;
3544
3545 lockdep_assert_held(&lock);
3546
3547 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
3548 if (!bind_list)
3549 return -ENOMEM;
3550
3551 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
3552 snum);
3553 if (ret < 0)
3554 goto err;
3555
3556 bind_list->ps = ps;
3557 bind_list->port = snum;
3558 cma_bind_port(bind_list, id_priv);
3559 return 0;
3560 err:
3561 kfree(bind_list);
3562 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
3563 }
3564
3565 static int cma_port_is_unique(struct rdma_bind_list *bind_list,
3566 struct rdma_id_private *id_priv)
3567 {
3568 struct rdma_id_private *cur_id;
3569 struct sockaddr *daddr = cma_dst_addr(id_priv);
3570 struct sockaddr *saddr = cma_src_addr(id_priv);
3571 __be16 dport = cma_port(daddr);
3572
3573 lockdep_assert_held(&lock);
3574
3575 hlist_for_each_entry(cur_id, &bind_list->owners, node) {
3576 struct sockaddr *cur_daddr = cma_dst_addr(cur_id);
3577 struct sockaddr *cur_saddr = cma_src_addr(cur_id);
3578 __be16 cur_dport = cma_port(cur_daddr);
3579
3580 if (id_priv == cur_id)
3581 continue;
3582
3583 /* different dest port -> unique */
3584 if (!cma_any_port(daddr) &&
3585 !cma_any_port(cur_daddr) &&
3586 (dport != cur_dport))
3587 continue;
3588
3589 /* different src address -> unique */
3590 if (!cma_any_addr(saddr) &&
3591 !cma_any_addr(cur_saddr) &&
3592 cma_addr_cmp(saddr, cur_saddr))
3593 continue;
3594
3595 /* different dst address -> unique */
3596 if (!cma_any_addr(daddr) &&
3597 !cma_any_addr(cur_daddr) &&
3598 cma_addr_cmp(daddr, cur_daddr))
3599 continue;
3600
3601 return -EADDRNOTAVAIL;
3602 }
3603 return 0;
3604 }
3605
3606 static int cma_alloc_any_port(enum rdma_ucm_port_space ps,
3607 struct rdma_id_private *id_priv)
3608 {
3609 static unsigned int last_used_port;
3610 int low, high, remaining;
3611 unsigned int rover;
3612 struct net *net = id_priv->id.route.addr.dev_addr.net;
3613
3614 lockdep_assert_held(&lock);
3615
3616 inet_get_local_port_range(net, &low, &high);
3617 remaining = (high - low) + 1;
3618 rover = prandom_u32() % remaining + low;
3619 retry:
3620 if (last_used_port != rover) {
3621 struct rdma_bind_list *bind_list;
3622 int ret;
3623
3624 bind_list = cma_ps_find(net, ps, (unsigned short)rover);
3625
3626 if (!bind_list) {
3627 ret = cma_alloc_port(ps, id_priv, rover);
3628 } else {
3629 ret = cma_port_is_unique(bind_list, id_priv);
3630 if (!ret)
3631 cma_bind_port(bind_list, id_priv);
3632 }
3633 /*
3634 * Remember the previously used port number in order to avoid
3635 * re-using the same port immediately after it is closed.
3636 */
3637 if (!ret)
3638 last_used_port = rover;
3639 if (ret != -EADDRNOTAVAIL)
3640 return ret;
3641 }
3642 if (--remaining) {
3643 rover++;
3644 if ((rover < low) || (rover > high))
3645 rover = low;
3646 goto retry;
3647 }
3648 return -EADDRNOTAVAIL;
3649 }
3650
3651 /*
3652 * Check that the requested port is available. This is called when trying to
3653 * bind to a specific port, or when trying to listen on a bound port. In
3654 * the latter case, the provided id_priv may already be on the bind_list, but
3655 * we still need to check that it's okay to start listening.
3656 */
3657 static int cma_check_port(struct rdma_bind_list *bind_list,
3658 struct rdma_id_private *id_priv, uint8_t reuseaddr)
3659 {
3660 struct rdma_id_private *cur_id;
3661 struct sockaddr *addr, *cur_addr;
3662
3663 lockdep_assert_held(&lock);
3664
3665 addr = cma_src_addr(id_priv);
3666 hlist_for_each_entry(cur_id, &bind_list->owners, node) {
3667 if (id_priv == cur_id)
3668 continue;
3669
3670 if (reuseaddr && cur_id->reuseaddr)
3671 continue;
3672
3673 cur_addr = cma_src_addr(cur_id);
3674 if (id_priv->afonly && cur_id->afonly &&
3675 (addr->sa_family != cur_addr->sa_family))
3676 continue;
3677
3678 if (cma_any_addr(addr) || cma_any_addr(cur_addr))
3679 return -EADDRNOTAVAIL;
3680
3681 if (!cma_addr_cmp(addr, cur_addr))
3682 return -EADDRINUSE;
3683 }
3684 return 0;
3685 }
3686
3687 static int cma_use_port(enum rdma_ucm_port_space ps,
3688 struct rdma_id_private *id_priv)
3689 {
3690 struct rdma_bind_list *bind_list;
3691 unsigned short snum;
3692 int ret;
3693
3694 lockdep_assert_held(&lock);
3695
3696 snum = ntohs(cma_port(cma_src_addr(id_priv)));
3697 if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
3698 return -EACCES;
3699
3700 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
3701 if (!bind_list) {
3702 ret = cma_alloc_port(ps, id_priv, snum);
3703 } else {
3704 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
3705 if (!ret)
3706 cma_bind_port(bind_list, id_priv);
3707 }
3708 return ret;
3709 }
3710
3711 static enum rdma_ucm_port_space
3712 cma_select_inet_ps(struct rdma_id_private *id_priv)
3713 {
3714 switch (id_priv->id.ps) {
3715 case RDMA_PS_TCP:
3716 case RDMA_PS_UDP:
3717 case RDMA_PS_IPOIB:
3718 case RDMA_PS_IB:
3719 return id_priv->id.ps;
3720 default:
3721
3722 return 0;
3723 }
3724 }
3725
3726 static enum rdma_ucm_port_space
3727 cma_select_ib_ps(struct rdma_id_private *id_priv)
3728 {
3729 enum rdma_ucm_port_space ps = 0;
3730 struct sockaddr_ib *sib;
3731 u64 sid_ps, mask, sid;
3732
3733 sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
3734 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
3735 sid = be64_to_cpu(sib->sib_sid) & mask;
3736
3737 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
3738 sid_ps = RDMA_IB_IP_PS_IB;
3739 ps = RDMA_PS_IB;
3740 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
3741 (sid == (RDMA_IB_IP_PS_TCP & mask))) {
3742 sid_ps = RDMA_IB_IP_PS_TCP;
3743 ps = RDMA_PS_TCP;
3744 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
3745 (sid == (RDMA_IB_IP_PS_UDP & mask))) {
3746 sid_ps = RDMA_IB_IP_PS_UDP;
3747 ps = RDMA_PS_UDP;
3748 }
3749
3750 if (ps) {
3751 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
3752 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
3753 be64_to_cpu(sib->sib_sid_mask));
3754 }
3755 return ps;
3756 }
3757
3758 static int cma_get_port(struct rdma_id_private *id_priv)
3759 {
3760 enum rdma_ucm_port_space ps;
3761 int ret;
3762
3763 if (cma_family(id_priv) != AF_IB)
3764 ps = cma_select_inet_ps(id_priv);
3765 else
3766 ps = cma_select_ib_ps(id_priv);
3767 if (!ps)
3768 return -EPROTONOSUPPORT;
3769
3770 mutex_lock(&lock);
3771 if (cma_any_port(cma_src_addr(id_priv)))
3772 ret = cma_alloc_any_port(ps, id_priv);
3773 else
3774 ret = cma_use_port(ps, id_priv);
3775 mutex_unlock(&lock);
3776
3777 return ret;
3778 }
3779
3780 static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
3781 struct sockaddr *addr)
3782 {
3783 #if IS_ENABLED(CONFIG_IPV6)
3784 struct sockaddr_in6 *sin6;
3785
3786 if (addr->sa_family != AF_INET6)
3787 return 0;
3788
3789 sin6 = (struct sockaddr_in6 *) addr;
3790
3791 if (!(ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL))
3792 return 0;
3793
3794 if (!sin6->sin6_scope_id)
3795 return -EINVAL;
3796
3797 dev_addr->bound_dev_if = sin6->sin6_scope_id;
3798 #endif
3799 return 0;
3800 }
3801
3802 int rdma_listen(struct rdma_cm_id *id, int backlog)
3803 {
3804 struct rdma_id_private *id_priv =
3805 container_of(id, struct rdma_id_private, id);
3806 int ret;
3807
3808 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) {
3809 struct sockaddr_in any_in = {
3810 .sin_family = AF_INET,
3811 .sin_addr.s_addr = htonl(INADDR_ANY),
3812 };
3813
3814 /* For a well-behaved ULP the state will be RDMA_CM_IDLE */
3815 ret = rdma_bind_addr(id, (struct sockaddr *)&any_in);
3816 if (ret)
3817 return ret;
3818 if (WARN_ON(!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND,
3819 RDMA_CM_LISTEN)))
3820 return -EINVAL;
3821 }
3822
3823 /*
3824 * Once the ID reaches RDMA_CM_LISTEN it is no longer allowed to be
3825 * reusable, and it has to be unique in the bind list.
3826 */
3827 if (id_priv->reuseaddr) {
3828 mutex_lock(&lock);
3829 ret = cma_check_port(id_priv->bind_list, id_priv, 0);
3830 if (!ret)
3831 id_priv->reuseaddr = 0;
3832 mutex_unlock(&lock);
3833 if (ret)
3834 goto err;
3835 }
3836
3837 id_priv->backlog = backlog;
3838 if (id_priv->cma_dev) {
3839 if (rdma_cap_ib_cm(id->device, 1)) {
3840 ret = cma_ib_listen(id_priv);
3841 if (ret)
3842 goto err;
3843 } else if (rdma_cap_iw_cm(id->device, 1)) {
3844 ret = cma_iw_listen(id_priv, backlog);
3845 if (ret)
3846 goto err;
3847 } else {
3848 ret = -ENOSYS;
3849 goto err;
3850 }
3851 } else {
3852 ret = cma_listen_on_all(id_priv);
3853 if (ret)
3854 goto err;
3855 }
3856
3857 return 0;
3858 err:
3859 id_priv->backlog = 0;
3860 /*
3861 * None of the failure paths that lead here allow the req handlers to
3862 * have run.
3863 */
3864 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
3865 return ret;
3866 }
3867 EXPORT_SYMBOL(rdma_listen);
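
/*
 * Minimal passive-side sketch (not part of this file), assuming a
 * hypothetical handler example_listen_handler() and context ctx. If the id
 * was not bound beforehand, rdma_listen() binds it to the IPv4 wildcard
 * address as shown above; an explicit rdma_bind_addr() is only needed to
 * pick a specific address, family or port. Incoming connections are
 * delivered as RDMA_CM_EVENT_CONNECT_REQUEST events on new child ids.
 *
 *	listen_id = rdma_create_id(&init_net, example_listen_handler, ctx,
 *				   RDMA_PS_TCP, IB_QPT_RC);
 *	if (IS_ERR(listen_id))
 *		return PTR_ERR(listen_id);
 *	ret = rdma_listen(listen_id, 128);
 *	if (ret)
 *		rdma_destroy_id(listen_id);
 */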
3868
3869 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
3870 {
3871 struct rdma_id_private *id_priv;
3872 int ret;
3873 struct sockaddr *daddr;
3874
3875 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
3876 addr->sa_family != AF_IB)
3877 return -EAFNOSUPPORT;
3878
3879 id_priv = container_of(id, struct rdma_id_private, id);
3880 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
3881 return -EINVAL;
3882
3883 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
3884 if (ret)
3885 goto err1;
3886
3887 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
3888 if (!cma_any_addr(addr)) {
3889 ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
3890 if (ret)
3891 goto err1;
3892
3893 ret = cma_acquire_dev_by_src_ip(id_priv);
3894 if (ret)
3895 goto err1;
3896 }
3897
3898 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
3899 if (addr->sa_family == AF_INET)
3900 id_priv->afonly = 1;
3901 #if IS_ENABLED(CONFIG_IPV6)
3902 else if (addr->sa_family == AF_INET6) {
3903 struct net *net = id_priv->id.route.addr.dev_addr.net;
3904
3905 id_priv->afonly = net->ipv6.sysctl.bindv6only;
3906 }
3907 #endif
3908 }
3909 daddr = cma_dst_addr(id_priv);
3910 daddr->sa_family = addr->sa_family;
3911
3912 ret = cma_get_port(id_priv);
3913 if (ret)
3914 goto err2;
3915
3916 if (!cma_any_addr(addr))
3917 rdma_restrack_add(&id_priv->res);
3918 return 0;
3919 err2:
3920 if (id_priv->cma_dev)
3921 cma_release_dev(id_priv);
3922 err1:
3923 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
3924 return ret;
3925 }
3926 EXPORT_SYMBOL(rdma_bind_addr);
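
/*
 * Minimal sketch (not part of this file): binding a listener to a specific
 * IPv4 address and port before calling rdma_listen(). The loopback address
 * and port number below are placeholders.
 *
 *	struct sockaddr_in sin = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
 *		.sin_port = htons(12345),
 *	};
 *
 *	ret = rdma_bind_addr(listen_id, (struct sockaddr *)&sin);
 *	if (ret)
 *		return ret;
 *	ret = rdma_listen(listen_id, 128);
 */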
3927
3928 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
3929 {
3930 struct cma_hdr *cma_hdr;
3931
3932 cma_hdr = hdr;
3933 cma_hdr->cma_version = CMA_VERSION;
3934 if (cma_family(id_priv) == AF_INET) {
3935 struct sockaddr_in *src4, *dst4;
3936
3937 src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
3938 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
3939
3940 cma_set_ip_ver(cma_hdr, 4);
3941 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
3942 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
3943 cma_hdr->port = src4->sin_port;
3944 } else if (cma_family(id_priv) == AF_INET6) {
3945 struct sockaddr_in6 *src6, *dst6;
3946
3947 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
3948 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
3949
3950 cma_set_ip_ver(cma_hdr, 6);
3951 cma_hdr->src_addr.ip6 = src6->sin6_addr;
3952 cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
3953 cma_hdr->port = src6->sin6_port;
3954 }
3955 return 0;
3956 }
3957
3958 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
3959 const struct ib_cm_event *ib_event)
3960 {
3961 struct rdma_id_private *id_priv = cm_id->context;
3962 struct rdma_cm_event event = {};
3963 const struct ib_cm_sidr_rep_event_param *rep =
3964 &ib_event->param.sidr_rep_rcvd;
3965 int ret;
3966
3967 mutex_lock(&id_priv->handler_mutex);
3968 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
3969 goto out;
3970
3971 switch (ib_event->event) {
3972 case IB_CM_SIDR_REQ_ERROR:
3973 event.event = RDMA_CM_EVENT_UNREACHABLE;
3974 event.status = -ETIMEDOUT;
3975 break;
3976 case IB_CM_SIDR_REP_RECEIVED:
3977 event.param.ud.private_data = ib_event->private_data;
3978 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
3979 if (rep->status != IB_SIDR_SUCCESS) {
3980 event.event = RDMA_CM_EVENT_UNREACHABLE;
3981 event.status = ib_event->param.sidr_rep_rcvd.status;
3982 pr_debug_ratelimited("RDMA CM: UNREACHABLE: bad SIDR reply. status %d\n",
3983 event.status);
3984 break;
3985 }
3986 ret = cma_set_qkey(id_priv, rep->qkey);
3987 if (ret) {
3988 pr_debug_ratelimited("RDMA CM: ADDR_ERROR: failed to set qkey. status %d\n", ret);
3989 event.event = RDMA_CM_EVENT_ADDR_ERROR;
3990 event.status = ret;
3991 break;
3992 }
3993 ib_init_ah_attr_from_path(id_priv->id.device,
3994 id_priv->id.port_num,
3995 id_priv->id.route.path_rec,
3996 &event.param.ud.ah_attr,
3997 rep->sgid_attr);
3998 event.param.ud.qp_num = rep->qpn;
3999 event.param.ud.qkey = rep->qkey;
4000 event.event = RDMA_CM_EVENT_ESTABLISHED;
4001 event.status = 0;
4002 break;
4003 default:
4004 pr_err("RDMA CMA: unexpected IB CM event: %d\n",
4005 ib_event->event);
4006 goto out;
4007 }
4008
4009 ret = cma_cm_event_handler(id_priv, &event);
4010
4011 rdma_destroy_ah_attr(&event.param.ud.ah_attr);
4012 if (ret) {
4013 /* Destroy the CM ID by returning a non-zero value. */
4014 id_priv->cm_id.ib = NULL;
4015 destroy_id_handler_unlock(id_priv);
4016 return ret;
4017 }
4018 out:
4019 mutex_unlock(&id_priv->handler_mutex);
4020 return 0;
4021 }
4022
4023 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
4024 struct rdma_conn_param *conn_param)
4025 {
4026 struct ib_cm_sidr_req_param req;
4027 struct ib_cm_id *id;
4028 void *private_data;
4029 u8 offset;
4030 int ret;
4031
4032 memset(&req, 0, sizeof req);
4033 offset = cma_user_data_offset(id_priv);
4034 req.private_data_len = offset + conn_param->private_data_len;
4035 if (req.private_data_len < conn_param->private_data_len)
4036 return -EINVAL;
4037
4038 if (req.private_data_len) {
4039 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
4040 if (!private_data)
4041 return -ENOMEM;
4042 } else {
4043 private_data = NULL;
4044 }
4045
4046 if (conn_param->private_data && conn_param->private_data_len)
4047 memcpy(private_data + offset, conn_param->private_data,
4048 conn_param->private_data_len);
4049
4050 if (private_data) {
4051 ret = cma_format_hdr(private_data, id_priv);
4052 if (ret)
4053 goto out;
4054 req.private_data = private_data;
4055 }
4056
4057 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
4058 id_priv);
4059 if (IS_ERR(id)) {
4060 ret = PTR_ERR(id);
4061 goto out;
4062 }
4063 id_priv->cm_id.ib = id;
4064
4065 req.path = id_priv->id.route.path_rec;
4066 req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
4067 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
4068 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
4069 req.max_cm_retries = CMA_MAX_CM_RETRIES;
4070
4071 trace_cm_send_sidr_req(id_priv);
4072 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
4073 if (ret) {
4074 ib_destroy_cm_id(id_priv->cm_id.ib);
4075 id_priv->cm_id.ib = NULL;
4076 }
4077 out:
4078 kfree(private_data);
4079 return ret;
4080 }
4081
4082 static int cma_connect_ib(struct rdma_id_private *id_priv,
4083 struct rdma_conn_param *conn_param)
4084 {
4085 struct ib_cm_req_param req;
4086 struct rdma_route *route;
4087 void *private_data;
4088 struct ib_cm_id *id;
4089 u8 offset;
4090 int ret;
4091
4092 memset(&req, 0, sizeof req);
4093 offset = cma_user_data_offset(id_priv);
4094 req.private_data_len = offset + conn_param->private_data_len;
4095 if (req.private_data_len < conn_param->private_data_len)
4096 return -EINVAL;
4097
4098 if (req.private_data_len) {
4099 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
4100 if (!private_data)
4101 return -ENOMEM;
4102 } else {
4103 private_data = NULL;
4104 }
4105
4106 if (conn_param->private_data && conn_param->private_data_len)
4107 memcpy(private_data + offset, conn_param->private_data,
4108 conn_param->private_data_len);
4109
4110 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
4111 if (IS_ERR(id)) {
4112 ret = PTR_ERR(id);
4113 goto out;
4114 }
4115 id_priv->cm_id.ib = id;
4116
4117 route = &id_priv->id.route;
4118 if (private_data) {
4119 ret = cma_format_hdr(private_data, id_priv);
4120 if (ret)
4121 goto out;
4122 req.private_data = private_data;
4123 }
4124
4125 req.primary_path = &route->path_rec[0];
4126 if (route->num_paths == 2)
4127 req.alternate_path = &route->path_rec[1];
4128
4129 req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr;
4130 /* Alternate path SGID attribute currently unsupported */
4131 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
4132 req.qp_num = id_priv->qp_num;
4133 req.qp_type = id_priv->id.qp_type;
4134 req.starting_psn = id_priv->seq_num;
4135 req.responder_resources = conn_param->responder_resources;
4136 req.initiator_depth = conn_param->initiator_depth;
4137 req.flow_control = conn_param->flow_control;
4138 req.retry_count = min_t(u8, 7, conn_param->retry_count);
4139 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
4140 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
4141 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
4142 req.max_cm_retries = CMA_MAX_CM_RETRIES;
4143 req.srq = id_priv->srq ? 1 : 0;
4144 req.ece.vendor_id = id_priv->ece.vendor_id;
4145 req.ece.attr_mod = id_priv->ece.attr_mod;
4146
4147 trace_cm_send_req(id_priv);
4148 ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
4149 out:
4150 if (ret && !IS_ERR(id)) {
4151 ib_destroy_cm_id(id);
4152 id_priv->cm_id.ib = NULL;
4153 }
4154
4155 kfree(private_data);
4156 return ret;
4157 }
4158
4159 static int cma_connect_iw(struct rdma_id_private *id_priv,
4160 struct rdma_conn_param *conn_param)
4161 {
4162 struct iw_cm_id *cm_id;
4163 int ret;
4164 struct iw_cm_conn_param iw_param;
4165
4166 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
4167 if (IS_ERR(cm_id))
4168 return PTR_ERR(cm_id);
4169
4170 mutex_lock(&id_priv->qp_mutex);
4171 cm_id->tos = id_priv->tos;
4172 cm_id->tos_set = id_priv->tos_set;
4173 mutex_unlock(&id_priv->qp_mutex);
4174
4175 id_priv->cm_id.iw = cm_id;
4176
4177 memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
4178 rdma_addr_size(cma_src_addr(id_priv)));
4179 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
4180 rdma_addr_size(cma_dst_addr(id_priv)));
4181
4182 ret = cma_modify_qp_rtr(id_priv, conn_param);
4183 if (ret)
4184 goto out;
4185
4186 if (conn_param) {
4187 iw_param.ord = conn_param->initiator_depth;
4188 iw_param.ird = conn_param->responder_resources;
4189 iw_param.private_data = conn_param->private_data;
4190 iw_param.private_data_len = conn_param->private_data_len;
4191 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
4192 } else {
4193 memset(&iw_param, 0, sizeof iw_param);
4194 iw_param.qpn = id_priv->qp_num;
4195 }
4196 ret = iw_cm_connect(cm_id, &iw_param);
4197 out:
4198 if (ret) {
4199 iw_destroy_cm_id(cm_id);
4200 id_priv->cm_id.iw = NULL;
4201 }
4202 return ret;
4203 }
4204
4205 /**
4206 * rdma_connect_locked - Initiate an active connection request.
4207 * @id: Connection identifier to connect.
4208 * @conn_param: Connection information used for connected QPs.
4209 *
4210 * Same as rdma_connect() but can only be called from the
4211 * RDMA_CM_EVENT_ROUTE_RESOLVED handler callback.
4212 */
4213 int rdma_connect_locked(struct rdma_cm_id *id,
4214 struct rdma_conn_param *conn_param)
4215 {
4216 struct rdma_id_private *id_priv =
4217 container_of(id, struct rdma_id_private, id);
4218 int ret;
4219
4220 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
4221 return -EINVAL;
4222
4223 if (!id->qp) {
4224 id_priv->qp_num = conn_param->qp_num;
4225 id_priv->srq = conn_param->srq;
4226 }
4227
4228 if (rdma_cap_ib_cm(id->device, id->port_num)) {
4229 if (id->qp_type == IB_QPT_UD)
4230 ret = cma_resolve_ib_udp(id_priv, conn_param);
4231 else
4232 ret = cma_connect_ib(id_priv, conn_param);
4233 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4234 ret = cma_connect_iw(id_priv, conn_param);
4235 } else {
4236 ret = -ENOSYS;
4237 }
4238 if (ret)
4239 goto err_state;
4240 return 0;
4241 err_state:
4242 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
4243 return ret;
4244 }
4245 EXPORT_SYMBOL(rdma_connect_locked);
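
/*
 * Minimal sketch (not part of this file): connecting from the
 * RDMA_CM_EVENT_ROUTE_RESOLVED handler, where the handler_mutex is already
 * held and rdma_connect_locked() must be used instead of rdma_connect().
 * example_create_qp() is a hypothetical ULP helper (typically wrapping
 * rdma_create_qp()), and the conn_param values are placeholders.
 *
 *	case RDMA_CM_EVENT_ROUTE_RESOLVED: {
 *		struct rdma_conn_param param = {
 *			.responder_resources = 1,
 *			.initiator_depth = 1,
 *			.retry_count = 7,
 *			.rnr_retry_count = 7,
 *		};
 *
 *		ret = example_create_qp(id);
 *		if (ret)
 *			return ret;
 *		return rdma_connect_locked(id, &param);
 *	}
 */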
4246
4247 /**
4248 * rdma_connect - Initiate an active connection request.
4249 * @id: Connection identifier to connect.
4250 * @conn_param: Connection information used for connected QPs.
4251 *
4252 * Users must have resolved a route for the rdma_cm_id to connect with by having
4253 * called rdma_resolve_route before calling this routine.
4254 *
4255 * This call will either connect to a remote QP or obtain remote QP information
4256 * for unconnected rdma_cm_id's. The actual operation is based on the
4257 * rdma_cm_id's port space.
4258 */
4259 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
4260 {
4261 struct rdma_id_private *id_priv =
4262 container_of(id, struct rdma_id_private, id);
4263 int ret;
4264
4265 mutex_lock(&id_priv->handler_mutex);
4266 ret = rdma_connect_locked(id, conn_param);
4267 mutex_unlock(&id_priv->handler_mutex);
4268 return ret;
4269 }
4270 EXPORT_SYMBOL(rdma_connect);
4271
4272 /**
4273 * rdma_connect_ece - Initiate an active connection request with ECE data.
4274 * @id: Connection identifier to connect.
4275 * @conn_param: Connection information used for connected QPs.
4276 * @ece: ECE parameters
4277 *
4278 * See rdma_connect() explanation.
4279 */
4280 int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
4281 struct rdma_ucm_ece *ece)
4282 {
4283 struct rdma_id_private *id_priv =
4284 container_of(id, struct rdma_id_private, id);
4285
4286 id_priv->ece.vendor_id = ece->vendor_id;
4287 id_priv->ece.attr_mod = ece->attr_mod;
4288
4289 return rdma_connect(id, conn_param);
4290 }
4291 EXPORT_SYMBOL(rdma_connect_ece);
4292
4293 static int cma_accept_ib(struct rdma_id_private *id_priv,
4294 struct rdma_conn_param *conn_param)
4295 {
4296 struct ib_cm_rep_param rep;
4297 int ret;
4298
4299 ret = cma_modify_qp_rtr(id_priv, conn_param);
4300 if (ret)
4301 goto out;
4302
4303 ret = cma_modify_qp_rts(id_priv, conn_param);
4304 if (ret)
4305 goto out;
4306
4307 memset(&rep, 0, sizeof rep);
4308 rep.qp_num = id_priv->qp_num;
4309 rep.starting_psn = id_priv->seq_num;
4310 rep.private_data = conn_param->private_data;
4311 rep.private_data_len = conn_param->private_data_len;
4312 rep.responder_resources = conn_param->responder_resources;
4313 rep.initiator_depth = conn_param->initiator_depth;
4314 rep.failover_accepted = 0;
4315 rep.flow_control = conn_param->flow_control;
4316 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
4317 rep.srq = id_priv->srq ? 1 : 0;
4318 rep.ece.vendor_id = id_priv->ece.vendor_id;
4319 rep.ece.attr_mod = id_priv->ece.attr_mod;
4320
4321 trace_cm_send_rep(id_priv);
4322 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
4323 out:
4324 return ret;
4325 }
4326
4327 static int cma_accept_iw(struct rdma_id_private *id_priv,
4328 struct rdma_conn_param *conn_param)
4329 {
4330 struct iw_cm_conn_param iw_param;
4331 int ret;
4332
4333 if (!conn_param)
4334 return -EINVAL;
4335
4336 ret = cma_modify_qp_rtr(id_priv, conn_param);
4337 if (ret)
4338 return ret;
4339
4340 iw_param.ord = conn_param->initiator_depth;
4341 iw_param.ird = conn_param->responder_resources;
4342 iw_param.private_data = conn_param->private_data;
4343 iw_param.private_data_len = conn_param->private_data_len;
4344 if (id_priv->id.qp)
4345 iw_param.qpn = id_priv->qp_num;
4346 else
4347 iw_param.qpn = conn_param->qp_num;
4348
4349 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
4350 }
4351
4352 static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
4353 enum ib_cm_sidr_status status, u32 qkey,
4354 const void *private_data, int private_data_len)
4355 {
4356 struct ib_cm_sidr_rep_param rep;
4357 int ret;
4358
4359 memset(&rep, 0, sizeof rep);
4360 rep.status = status;
4361 if (status == IB_SIDR_SUCCESS) {
4362 ret = cma_set_qkey(id_priv, qkey);
4363 if (ret)
4364 return ret;
4365 rep.qp_num = id_priv->qp_num;
4366 rep.qkey = id_priv->qkey;
4367
4368 rep.ece.vendor_id = id_priv->ece.vendor_id;
4369 rep.ece.attr_mod = id_priv->ece.attr_mod;
4370 }
4371
4372 rep.private_data = private_data;
4373 rep.private_data_len = private_data_len;
4374
4375 trace_cm_send_sidr_rep(id_priv);
4376 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
4377 }
4378
4379 /**
4380 * rdma_accept - Called to accept a connection request or response.
4381 * @id: Connection identifier associated with the request.
4382 * @conn_param: Information needed to establish the connection. This must be
4383 * provided if accepting a connection request. If accepting a connection
4384 * response, this parameter must be NULL.
4385 *
4386 * Typically, this routine is only called by the listener to accept a connection
4387 * request. It must also be called on the active side of a connection if the
4388 * user is performing their own QP transitions.
4389 *
4390 * In the case of error, a reject message is sent to the remote side and the
4391 * state of the qp associated with the id is modified to error, such that any
4392 * previously posted receive buffers would be flushed.
4393 *
4394 * This function is for use by kernel ULPs and must be called from under the
4395 * handler callback.
4396 */
4397 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
4398 {
4399 struct rdma_id_private *id_priv =
4400 container_of(id, struct rdma_id_private, id);
4401 int ret;
4402
4403 lockdep_assert_held(&id_priv->handler_mutex);
4404
4405 if (READ_ONCE(id_priv->state) != RDMA_CM_CONNECT)
4406 return -EINVAL;
4407
4408 if (!id->qp && conn_param) {
4409 id_priv->qp_num = conn_param->qp_num;
4410 id_priv->srq = conn_param->srq;
4411 }
4412
4413 if (rdma_cap_ib_cm(id->device, id->port_num)) {
4414 if (id->qp_type == IB_QPT_UD) {
4415 if (conn_param)
4416 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
4417 conn_param->qkey,
4418 conn_param->private_data,
4419 conn_param->private_data_len);
4420 else
4421 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
4422 0, NULL, 0);
4423 } else {
4424 if (conn_param)
4425 ret = cma_accept_ib(id_priv, conn_param);
4426 else
4427 ret = cma_rep_recv(id_priv);
4428 }
4429 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4430 ret = cma_accept_iw(id_priv, conn_param);
4431 } else {
4432 ret = -ENOSYS;
4433 }
4434 if (ret)
4435 goto reject;
4436
4437 return 0;
4438 reject:
4439 cma_modify_qp_err(id_priv);
4440 rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
4441 return ret;
4442 }
4443 EXPORT_SYMBOL(rdma_accept);
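
/*
 * Minimal sketch (not part of this file): accepting from the listener's
 * RDMA_CM_EVENT_CONNECT_REQUEST handler, where the child id's handler_mutex
 * is already held by the core. example_create_qp() is a hypothetical ULP
 * helper and the conn_param values are placeholders; returning non-zero on
 * failure lets the core destroy the child id after the reject has been sent.
 *
 *	case RDMA_CM_EVENT_CONNECT_REQUEST: {
 *		struct rdma_conn_param param = {
 *			.responder_resources = 1,
 *			.initiator_depth = 1,
 *			.rnr_retry_count = 7,
 *		};
 *
 *		ret = example_create_qp(id);
 *		if (ret)
 *			return ret;
 *		return rdma_accept(id, &param);
 *	}
 */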
4444
4445 int rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
4446 struct rdma_ucm_ece *ece)
4447 {
4448 struct rdma_id_private *id_priv =
4449 container_of(id, struct rdma_id_private, id);
4450
4451 id_priv->ece.vendor_id = ece->vendor_id;
4452 id_priv->ece.attr_mod = ece->attr_mod;
4453
4454 return rdma_accept(id, conn_param);
4455 }
4456 EXPORT_SYMBOL(rdma_accept_ece);
4457
4458 void rdma_lock_handler(struct rdma_cm_id *id)
4459 {
4460 struct rdma_id_private *id_priv =
4461 container_of(id, struct rdma_id_private, id);
4462
4463 mutex_lock(&id_priv->handler_mutex);
4464 }
4465 EXPORT_SYMBOL(rdma_lock_handler);
4466
4467 void rdma_unlock_handler(struct rdma_cm_id *id)
4468 {
4469 struct rdma_id_private *id_priv =
4470 container_of(id, struct rdma_id_private, id);
4471
4472 mutex_unlock(&id_priv->handler_mutex);
4473 }
4474 EXPORT_SYMBOL(rdma_unlock_handler);
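
/*
 * Minimal sketch (not part of this file): a ULP that defers the accept to
 * another context (for example a workqueue) instead of completing it inside
 * the RDMA_CM_EVENT_CONNECT_REQUEST handler must re-take the handler lock,
 * since rdma_accept() has to run under the handler_mutex.
 * example_should_accept() and param are hypothetical.
 *
 *	rdma_lock_handler(id);
 *	if (example_should_accept(id))
 *		ret = rdma_accept(id, &param);
 *	else
 *		ret = rdma_reject(id, NULL, 0, IB_CM_REJ_CONSUMER_DEFINED);
 *	rdma_unlock_handler(id);
 */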
4475
4476 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
4477 {
4478 struct rdma_id_private *id_priv;
4479 int ret;
4480
4481 id_priv = container_of(id, struct rdma_id_private, id);
4482 if (!id_priv->cm_id.ib)
4483 return -EINVAL;
4484
4485 switch (id->device->node_type) {
4486 case RDMA_NODE_IB_CA:
4487 ret = ib_cm_notify(id_priv->cm_id.ib, event);
4488 break;
4489 default:
4490 ret = 0;
4491 break;
4492 }
4493 return ret;
4494 }
4495 EXPORT_SYMBOL(rdma_notify);
4496
4497 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
4498 u8 private_data_len, u8 reason)
4499 {
4500 struct rdma_id_private *id_priv;
4501 int ret;
4502
4503 id_priv = container_of(id, struct rdma_id_private, id);
4504 if (!id_priv->cm_id.ib)
4505 return -EINVAL;
4506
4507 if (rdma_cap_ib_cm(id->device, id->port_num)) {
4508 if (id->qp_type == IB_QPT_UD) {
4509 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
4510 private_data, private_data_len);
4511 } else {
4512 trace_cm_send_rej(id_priv);
4513 ret = ib_send_cm_rej(id_priv->cm_id.ib, reason, NULL, 0,
4514 private_data, private_data_len);
4515 }
4516 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4517 ret = iw_cm_reject(id_priv->cm_id.iw,
4518 private_data, private_data_len);
4519 } else {
4520 ret = -ENOSYS;
4521 }
4522
4523 return ret;
4524 }
4525 EXPORT_SYMBOL(rdma_reject);
4526
4527 int rdma_disconnect(struct rdma_cm_id *id)
4528 {
4529 struct rdma_id_private *id_priv;
4530 int ret;
4531
4532 id_priv = container_of(id, struct rdma_id_private, id);
4533 if (!id_priv->cm_id.ib)
4534 return -EINVAL;
4535
4536 if (rdma_cap_ib_cm(id->device, id->port_num)) {
4537 ret = cma_modify_qp_err(id_priv);
4538 if (ret)
4539 goto out;
4540 /* Initiate or respond to a disconnect. */
4541 trace_cm_disconnect(id_priv);
4542 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) {
4543 if (!ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0))
4544 trace_cm_sent_drep(id_priv);
4545 } else {
4546 trace_cm_sent_dreq(id_priv);
4547 }
4548 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
4549 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
4550 } else
4551 ret = -EINVAL;
4552
4553 out:
4554 return ret;
4555 }
4556 EXPORT_SYMBOL(rdma_disconnect);
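
/*
 * Minimal teardown sketch (not part of this file): a connected ULP usually
 * calls rdma_disconnect() from process context, waits for its own signal
 * that RDMA_CM_EVENT_DISCONNECTED (and, on IB, RDMA_CM_EVENT_TIMEWAIT_EXIT)
 * has been handled, and only then destroys the QP and the id.
 * example_wait_for_teardown() is a hypothetical ULP-side completion.
 *
 *	rdma_disconnect(id);
 *	example_wait_for_teardown(ctx);
 *	rdma_destroy_qp(id);
 *	rdma_destroy_id(id);
 */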
4557
4558 static void cma_make_mc_event(int status, struct rdma_id_private *id_priv,
4559 struct ib_sa_multicast *multicast,
4560 struct rdma_cm_event *event,
4561 struct cma_multicast *mc)
4562 {
4563 struct rdma_dev_addr *dev_addr;
4564 enum ib_gid_type gid_type;
4565 struct net_device *ndev;
4566
4567 if (!status)
4568 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
4569 else
4570 pr_debug_ratelimited("RDMA CM: MULTICAST_ERROR: failed to join multicast. status %d\n",
4571 status);
4572
4573 event->status = status;
4574 event->param.ud.private_data = mc->context;
4575 if (status) {
4576 event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
4577 return;
4578 }
4579
4580 dev_addr = &id_priv->id.route.addr.dev_addr;
4581 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
4582 gid_type =
4583 id_priv->cma_dev
4584 ->default_gid_type[id_priv->id.port_num -
4585 rdma_start_port(
4586 id_priv->cma_dev->device)];
4587
4588 event->event = RDMA_CM_EVENT_MULTICAST_JOIN;
4589 if (ib_init_ah_from_mcmember(id_priv->id.device, id_priv->id.port_num,
4590 &multicast->rec, ndev, gid_type,
4591 &event->param.ud.ah_attr)) {
4592 event->event = RDMA_CM_EVENT_MULTICAST_ERROR;
4593 goto out;
4594 }
4595
4596 event->param.ud.qp_num = 0xFFFFFF;
4597 event->param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
4598
4599 out:
4600 if (ndev)
4601 dev_put(ndev);
4602 }
4603
4604 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
4605 {
4606 struct cma_multicast *mc = multicast->context;
4607 struct rdma_id_private *id_priv = mc->id_priv;
4608 struct rdma_cm_event event = {};
4609 int ret = 0;
4610
4611 mutex_lock(&id_priv->handler_mutex);
4612 if (READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL ||
4613 READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING)
4614 goto out;
4615
4616 cma_make_mc_event(status, id_priv, multicast, &event, mc);
4617 ret = cma_cm_event_handler(id_priv, &event);
4618 rdma_destroy_ah_attr(&event.param.ud.ah_attr);
4619 WARN_ON(ret);
4620
4621 out:
4622 mutex_unlock(&id_priv->handler_mutex);
4623 return 0;
4624 }
4625
4626 static void cma_set_mgid(struct rdma_id_private *id_priv,
4627 struct sockaddr *addr, union ib_gid *mgid)
4628 {
4629 unsigned char mc_map[MAX_ADDR_LEN];
4630 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4631 struct sockaddr_in *sin = (struct sockaddr_in *) addr;
4632 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
4633
4634 if (cma_any_addr(addr)) {
4635 memset(mgid, 0, sizeof *mgid);
4636 } else if ((addr->sa_family == AF_INET6) &&
4637 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
4638 0xFF10A01B)) {
4639 /* IPv6 address is an SA assigned MGID. */
4640 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
4641 } else if (addr->sa_family == AF_IB) {
4642 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
4643 } else if (addr->sa_family == AF_INET6) {
4644 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
4645 if (id_priv->id.ps == RDMA_PS_UDP)
4646 mc_map[7] = 0x01; /* Use RDMA CM signature */
4647 *mgid = *(union ib_gid *) (mc_map + 4);
4648 } else {
4649 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
4650 if (id_priv->id.ps == RDMA_PS_UDP)
4651 mc_map[7] = 0x01; /* Use RDMA CM signature */
4652 *mgid = *(union ib_gid *) (mc_map + 4);
4653 }
4654 }
4655
4656 static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
4657 struct cma_multicast *mc)
4658 {
4659 struct ib_sa_mcmember_rec rec;
4660 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4661 ib_sa_comp_mask comp_mask;
4662 int ret;
4663
4664 ib_addr_get_mgid(dev_addr, &rec.mgid);
4665 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
4666 &rec.mgid, &rec);
4667 if (ret)
4668 return ret;
4669
4670 ret = cma_set_qkey(id_priv, 0);
4671 if (ret)
4672 return ret;
4673
4674 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
4675 rec.qkey = cpu_to_be32(id_priv->qkey);
4676 rdma_addr_get_sgid(dev_addr, &rec.port_gid);
4677 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
4678 rec.join_state = mc->join_state;
4679
4680 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
4681 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
4682 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
4683 IB_SA_MCMEMBER_REC_FLOW_LABEL |
4684 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
4685
4686 if (id_priv->id.ps == RDMA_PS_IPOIB)
4687 comp_mask |= IB_SA_MCMEMBER_REC_RATE |
4688 IB_SA_MCMEMBER_REC_RATE_SELECTOR |
4689 IB_SA_MCMEMBER_REC_MTU_SELECTOR |
4690 IB_SA_MCMEMBER_REC_MTU |
4691 IB_SA_MCMEMBER_REC_HOP_LIMIT;
4692
4693 mc->sa_mc = ib_sa_join_multicast(&sa_client, id_priv->id.device,
4694 id_priv->id.port_num, &rec, comp_mask,
4695 GFP_KERNEL, cma_ib_mc_handler, mc);
4696 return PTR_ERR_OR_ZERO(mc->sa_mc);
4697 }
4698
4699 static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
4700 enum ib_gid_type gid_type)
4701 {
4702 struct sockaddr_in *sin = (struct sockaddr_in *)addr;
4703 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
4704
4705 if (cma_any_addr(addr)) {
4706 memset(mgid, 0, sizeof *mgid);
4707 } else if (addr->sa_family == AF_INET6) {
4708 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
4709 } else {
4710 mgid->raw[0] =
4711 (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
4712 mgid->raw[1] =
4713 (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
4714 mgid->raw[2] = 0;
4715 mgid->raw[3] = 0;
4716 mgid->raw[4] = 0;
4717 mgid->raw[5] = 0;
4718 mgid->raw[6] = 0;
4719 mgid->raw[7] = 0;
4720 mgid->raw[8] = 0;
4721 mgid->raw[9] = 0;
4722 mgid->raw[10] = 0xff;
4723 mgid->raw[11] = 0xff;
4724 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
4725 }
4726 }
4727
4728 static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
4729 struct cma_multicast *mc)
4730 {
4731 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4732 int err = 0;
4733 struct sockaddr *addr = (struct sockaddr *)&mc->addr;
4734 struct net_device *ndev = NULL;
4735 struct ib_sa_multicast ib;
4736 enum ib_gid_type gid_type;
4737 bool send_only;
4738
4739 send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
4740
4741 if (cma_zero_addr(addr))
4742 return -EINVAL;
4743
4744 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
4745 rdma_start_port(id_priv->cma_dev->device)];
4746 cma_iboe_set_mgid(addr, &ib.rec.mgid, gid_type);
4747
4748 ib.rec.pkey = cpu_to_be16(0xffff);
4749 if (id_priv->id.ps == RDMA_PS_UDP)
4750 ib.rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
4751
4752 if (dev_addr->bound_dev_if)
4753 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
4754 if (!ndev)
4755 return -ENODEV;
4756
4757 ib.rec.rate = iboe_get_rate(ndev);
4758 ib.rec.hop_limit = 1;
4759 ib.rec.mtu = iboe_get_mtu(ndev->mtu);
4760
4761 if (addr->sa_family == AF_INET) {
4762 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
4763 ib.rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
4764 if (!send_only) {
4765 err = cma_igmp_send(ndev, &ib.rec.mgid,
4766 true);
4767 }
4768 }
4769 } else {
4770 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
4771 err = -ENOTSUPP;
4772 }
4773 dev_put(ndev);
4774 if (err || !ib.rec.mtu)
4775 return err ?: -EINVAL;
4776
4777 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
4778 &ib.rec.port_gid);
4779 INIT_WORK(&mc->iboe_join.work, cma_iboe_join_work_handler);
4780 cma_make_mc_event(0, id_priv, &ib, &mc->iboe_join.event, mc);
4781 queue_work(cma_wq, &mc->iboe_join.work);
4782 return 0;
4783 }
4784
4785 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
4786 u8 join_state, void *context)
4787 {
4788 struct rdma_id_private *id_priv =
4789 container_of(id, struct rdma_id_private, id);
4790 struct cma_multicast *mc;
4791 int ret;
4792
4793 /* Not supported for kernel QPs */
4794 if (WARN_ON(id->qp))
4795 return -EINVAL;
4796
4797 /* ULP is calling this wrong. */
4798 if (!id->device || (READ_ONCE(id_priv->state) != RDMA_CM_ADDR_BOUND &&
4799 READ_ONCE(id_priv->state) != RDMA_CM_ADDR_RESOLVED))
4800 return -EINVAL;
4801
4802 mc = kzalloc(sizeof(*mc), GFP_KERNEL);
4803 if (!mc)
4804 return -ENOMEM;
4805
4806 memcpy(&mc->addr, addr, rdma_addr_size(addr));
4807 mc->context = context;
4808 mc->id_priv = id_priv;
4809 mc->join_state = join_state;
4810
4811 if (rdma_protocol_roce(id->device, id->port_num)) {
4812 ret = cma_iboe_join_multicast(id_priv, mc);
4813 if (ret)
4814 goto out_err;
4815 } else if (rdma_cap_ib_mcast(id->device, id->port_num)) {
4816 ret = cma_join_ib_multicast(id_priv, mc);
4817 if (ret)
4818 goto out_err;
4819 } else {
4820 ret = -ENOSYS;
4821 goto out_err;
4822 }
4823
4824 spin_lock(&id_priv->lock);
4825 list_add(&mc->list, &id_priv->mc_list);
4826 spin_unlock(&id_priv->lock);
4827
4828 return 0;
4829 out_err:
4830 kfree(mc);
4831 return ret;
4832 }
4833 EXPORT_SYMBOL(rdma_join_multicast);
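
/*
 * Minimal sketch (not part of this file): a UD ULP with a bound or resolved
 * id joins a group and picks up the AH attributes, QPN and Q_Key from the
 * RDMA_CM_EVENT_MULTICAST_JOIN event. The group address, context and pd
 * below are placeholders.
 *
 *	ret = rdma_join_multicast(id, (struct sockaddr *)&example_mcast_addr,
 *				  BIT(FULLMEMBER_JOIN), mc_ctx);
 *
 * and, in the event handler:
 *
 *	case RDMA_CM_EVENT_MULTICAST_JOIN:
 *		remote_qpn = event->param.ud.qp_num;
 *		remote_qkey = event->param.ud.qkey;
 *		ah = rdma_create_ah(pd, &event->param.ud.ah_attr, 0);
 *		break;
 */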
4834
4835 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
4836 {
4837 struct rdma_id_private *id_priv;
4838 struct cma_multicast *mc;
4839
4840 id_priv = container_of(id, struct rdma_id_private, id);
4841 spin_lock_irq(&id_priv->lock);
4842 list_for_each_entry(mc, &id_priv->mc_list, list) {
4843 if (memcmp(&mc->addr, addr, rdma_addr_size(addr)) != 0)
4844 continue;
4845 list_del(&mc->list);
4846 spin_unlock_irq(&id_priv->lock);
4847
4848 WARN_ON(id_priv->cma_dev->device != id->device);
4849 destroy_mc(id_priv, mc);
4850 return;
4851 }
4852 spin_unlock_irq(&id_priv->lock);
4853 }
4854 EXPORT_SYMBOL(rdma_leave_multicast);
4855
4856 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
4857 {
4858 struct rdma_dev_addr *dev_addr;
4859 struct cma_work *work;
4860
4861 dev_addr = &id_priv->id.route.addr.dev_addr;
4862
4863 if ((dev_addr->bound_dev_if == ndev->ifindex) &&
4864 (net_eq(dev_net(ndev), dev_addr->net)) &&
4865 memcmp(dev_addr->src_dev_addr, ndev->dev_addr, ndev->addr_len)) {
4866 pr_info("RDMA CM addr change for ndev %s used by id %p\n",
4867 ndev->name, &id_priv->id);
4868 work = kzalloc(sizeof *work, GFP_KERNEL);
4869 if (!work)
4870 return -ENOMEM;
4871
4872 INIT_WORK(&work->work, cma_work_handler);
4873 work->id = id_priv;
4874 work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
4875 cma_id_get(id_priv);
4876 queue_work(cma_wq, &work->work);
4877 }
4878
4879 return 0;
4880 }
4881
4882 static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
4883 void *ptr)
4884 {
4885 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
4886 struct cma_device *cma_dev;
4887 struct rdma_id_private *id_priv;
4888 int ret = NOTIFY_DONE;
4889
4890 if (event != NETDEV_BONDING_FAILOVER)
4891 return NOTIFY_DONE;
4892
4893 if (!netif_is_bond_master(ndev))
4894 return NOTIFY_DONE;
4895
4896 mutex_lock(&lock);
4897 list_for_each_entry(cma_dev, &dev_list, list)
4898 list_for_each_entry(id_priv, &cma_dev->id_list, list) {
4899 ret = cma_netdev_change(ndev, id_priv);
4900 if (ret)
4901 goto out;
4902 }
4903
4904 out:
4905 mutex_unlock(&lock);
4906 return ret;
4907 }
4908
4909 static struct notifier_block cma_nb = {
4910 .notifier_call = cma_netdev_callback
4911 };
4912
4913 static void cma_send_device_removal_put(struct rdma_id_private *id_priv)
4914 {
4915 struct rdma_cm_event event = { .event = RDMA_CM_EVENT_DEVICE_REMOVAL };
4916 enum rdma_cm_state state;
4917 unsigned long flags;
4918
4919 mutex_lock(&id_priv->handler_mutex);
4920 /* Record that we want to remove the device */
4921 spin_lock_irqsave(&id_priv->lock, flags);
4922 state = id_priv->state;
4923 if (state == RDMA_CM_DESTROYING || state == RDMA_CM_DEVICE_REMOVAL) {
4924 spin_unlock_irqrestore(&id_priv->lock, flags);
4925 mutex_unlock(&id_priv->handler_mutex);
4926 cma_id_put(id_priv);
4927 return;
4928 }
4929 id_priv->state = RDMA_CM_DEVICE_REMOVAL;
4930 spin_unlock_irqrestore(&id_priv->lock, flags);
4931
4932 if (cma_cm_event_handler(id_priv, &event)) {
4933 /*
4934 * At this point the ULP promises it won't call
4935 * rdma_destroy_id() concurrently
4936 */
4937 cma_id_put(id_priv);
4938 mutex_unlock(&id_priv->handler_mutex);
4939 trace_cm_id_destroy(id_priv);
4940 _destroy_id(id_priv, state);
4941 return;
4942 }
4943 mutex_unlock(&id_priv->handler_mutex);
4944
4945 /*
4946 * If this races with destroy then the thread that first sets the state
4947 * to destroying does the cancel.
4948 */
4949 cma_cancel_operation(id_priv, state);
4950 cma_id_put(id_priv);
4951 }
4952
4953 static void cma_process_remove(struct cma_device *cma_dev)
4954 {
4955 mutex_lock(&lock);
4956 while (!list_empty(&cma_dev->id_list)) {
4957 struct rdma_id_private *id_priv = list_first_entry(
4958 &cma_dev->id_list, struct rdma_id_private, list);
4959
4960 list_del(&id_priv->listen_list);
4961 list_del_init(&id_priv->list);
4962 cma_id_get(id_priv);
4963 mutex_unlock(&lock);
4964
4965 cma_send_device_removal_put(id_priv);
4966
4967 mutex_lock(&lock);
4968 }
4969 mutex_unlock(&lock);
4970
4971 cma_dev_put(cma_dev);
4972 wait_for_completion(&cma_dev->comp);
4973 }
4974
4975 static bool cma_supported(struct ib_device *device)
4976 {
4977 u32 i;
4978
4979 rdma_for_each_port(device, i) {
4980 if (rdma_cap_ib_cm(device, i) || rdma_cap_iw_cm(device, i))
4981 return true;
4982 }
4983 return false;
4984 }
4985
4986 static int cma_add_one(struct ib_device *device)
4987 {
4988 struct rdma_id_private *to_destroy;
4989 struct cma_device *cma_dev;
4990 struct rdma_id_private *id_priv;
4991 unsigned long supported_gids = 0;
4992 int ret;
4993 u32 i;
4994
4995 if (!cma_supported(device))
4996 return -EOPNOTSUPP;
4997
4998 cma_dev = kmalloc(sizeof(*cma_dev), GFP_KERNEL);
4999 if (!cma_dev)
5000 return -ENOMEM;
5001
5002 cma_dev->device = device;
5003 cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
5004 sizeof(*cma_dev->default_gid_type),
5005 GFP_KERNEL);
5006 if (!cma_dev->default_gid_type) {
5007 ret = -ENOMEM;
5008 goto free_cma_dev;
5009 }
5010
5011 cma_dev->default_roce_tos = kcalloc(device->phys_port_cnt,
5012 sizeof(*cma_dev->default_roce_tos),
5013 GFP_KERNEL);
5014 if (!cma_dev->default_roce_tos) {
5015 ret = -ENOMEM;
5016 goto free_gid_type;
5017 }
5018
5019 rdma_for_each_port (device, i) {
5020 supported_gids = roce_gid_type_mask_support(device, i);
5021 WARN_ON(!supported_gids);
5022 if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
5023 cma_dev->default_gid_type[i - rdma_start_port(device)] =
5024 CMA_PREFERRED_ROCE_GID_TYPE;
5025 else
5026 cma_dev->default_gid_type[i - rdma_start_port(device)] =
5027 find_first_bit(&supported_gids, BITS_PER_LONG);
5028 cma_dev->default_roce_tos[i - rdma_start_port(device)] = 0;
5029 }
5030
5031 init_completion(&cma_dev->comp);
5032 refcount_set(&cma_dev->refcount, 1);
5033 INIT_LIST_HEAD(&cma_dev->id_list);
5034 ib_set_client_data(device, &cma_client, cma_dev);
5035
5036 mutex_lock(&lock);
5037 list_add_tail(&cma_dev->list, &dev_list);
5038 list_for_each_entry(id_priv, &listen_any_list, list) {
5039 ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
5040 if (ret)
5041 goto free_listen;
5042 }
5043 mutex_unlock(&lock);
5044
5045 trace_cm_add_one(device);
5046 return 0;
5047
5048 free_listen:
5049 list_del(&cma_dev->list);
5050 mutex_unlock(&lock);
5051
5052 /* cma_process_remove() will delete to_destroy */
5053 cma_process_remove(cma_dev);
5054 kfree(cma_dev->default_roce_tos);
5055 free_gid_type:
5056 kfree(cma_dev->default_gid_type);
5057
5058 free_cma_dev:
5059 kfree(cma_dev);
5060 return ret;
5061 }
5062
5063 static void cma_remove_one(struct ib_device *device, void *client_data)
5064 {
5065 struct cma_device *cma_dev = client_data;
5066
5067 trace_cm_remove_one(device);
5068
5069 mutex_lock(&lock);
5070 list_del(&cma_dev->list);
5071 mutex_unlock(&lock);
5072
5073 cma_process_remove(cma_dev);
5074 kfree(cma_dev->default_roce_tos);
5075 kfree(cma_dev->default_gid_type);
5076 kfree(cma_dev);
5077 }
5078
5079 static int cma_init_net(struct net *net)
5080 {
5081 struct cma_pernet *pernet = cma_pernet(net);
5082
5083 xa_init(&pernet->tcp_ps);
5084 xa_init(&pernet->udp_ps);
5085 xa_init(&pernet->ipoib_ps);
5086 xa_init(&pernet->ib_ps);
5087
5088 return 0;
5089 }
5090
5091 static void cma_exit_net(struct net *net)
5092 {
5093 struct cma_pernet *pernet = cma_pernet(net);
5094
5095 WARN_ON(!xa_empty(&pernet->tcp_ps));
5096 WARN_ON(!xa_empty(&pernet->udp_ps));
5097 WARN_ON(!xa_empty(&pernet->ipoib_ps));
5098 WARN_ON(!xa_empty(&pernet->ib_ps));
5099 }
5100
5101 static struct pernet_operations cma_pernet_operations = {
5102 .init = cma_init_net,
5103 .exit = cma_exit_net,
5104 .id = &cma_pernet_id,
5105 .size = sizeof(struct cma_pernet),
5106 };
5107
5108 static int __init cma_init(void)
5109 {
5110 int ret;
5111
5112 /*
5113 * There is a rare lock ordering dependency in cma_netdev_callback()
5114 * that only happens when bonding is enabled. Teach lockdep that rtnl
5115 * must never be nested under lock so it can find these without having
5116 * to test with bonding.
5117 */
5118 if (IS_ENABLED(CONFIG_LOCKDEP)) {
5119 rtnl_lock();
5120 mutex_lock(&lock);
5121 mutex_unlock(&lock);
5122 rtnl_unlock();
5123 }
5124
5125 cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
5126 if (!cma_wq)
5127 return -ENOMEM;
5128
5129 ret = register_pernet_subsys(&cma_pernet_operations);
5130 if (ret)
5131 goto err_wq;
5132
5133 ib_sa_register_client(&sa_client);
5134 register_netdevice_notifier(&cma_nb);
5135
5136 ret = ib_register_client(&cma_client);
5137 if (ret)
5138 goto err;
5139
5140 ret = cma_configfs_init();
5141 if (ret)
5142 goto err_ib;
5143
5144 return 0;
5145
5146 err_ib:
5147 ib_unregister_client(&cma_client);
5148 err:
5149 unregister_netdevice_notifier(&cma_nb);
5150 ib_sa_unregister_client(&sa_client);
5151 unregister_pernet_subsys(&cma_pernet_operations);
5152 err_wq:
5153 destroy_workqueue(cma_wq);
5154 return ret;
5155 }
5156
5157 static void __exit cma_cleanup(void)
5158 {
5159 cma_configfs_exit();
5160 ib_unregister_client(&cma_client);
5161 unregister_netdevice_notifier(&cma_nb);
5162 ib_sa_unregister_client(&sa_client);
5163 unregister_pernet_subsys(&cma_pernet_operations);
5164 destroy_workqueue(cma_wq);
5165 }
5166
5167 module_init(cma_init);
5168 module_exit(cma_cleanup);
5169