/*
 * Copyright (c) 2015, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "core_priv.h"

#include <linux/in.h>
#include <linux/in6.h>

/* For in6_dev_get/in6_dev_put */
#include <net/addrconf.h>
#include <net/bonding.h>

#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

static struct workqueue_struct *gid_cache_wq;

enum gid_op_type {
        GID_DEL = 0,
        GID_ADD
};

struct update_gid_event_work {
        struct work_struct work;
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        enum gid_op_type gid_op;
};

#define ROCE_NETDEV_CALLBACK_SZ 3
struct netdev_event_work_cmd {
        roce_netdev_callback cb;
        roce_netdev_filter filter;
        struct net_device *ndev;
        struct net_device *filter_ndev;
};

struct netdev_event_work {
        struct work_struct work;
        struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ];
};

static const struct {
        bool (*is_supported)(const struct ib_device *device, u8 port_num);
        enum ib_gid_type gid_type;
} PORT_CAP_TO_GID_TYPE[] = {
        {rdma_protocol_roce_eth_encap, IB_GID_TYPE_ROCE},
        {rdma_protocol_roce_udp_encap, IB_GID_TYPE_ROCE_UDP_ENCAP},
};

#define CAP_TO_GID_TABLE_SIZE ARRAY_SIZE(PORT_CAP_TO_GID_TYPE)

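/**
 * roce_gid_type_mask_support - GID types supported on a given port
 * @ib_dev: RDMA device to query
 * @port: Port number of the RDMA device
 *
 * Returns a bitmask with one bit set per supported enum ib_gid_type.
 * Ports that do not speak RoCE report only IB_GID_TYPE_IB.
 */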
unsigned long roce_gid_type_mask_support(struct ib_device *ib_dev, u8 port)
{
        int i;
        unsigned int ret_flags = 0;

        if (!rdma_protocol_roce(ib_dev, port))
                return 1UL << IB_GID_TYPE_IB;

        for (i = 0; i < CAP_TO_GID_TABLE_SIZE; i++)
                if (PORT_CAP_TO_GID_TYPE[i].is_supported(ib_dev, port))
                        ret_flags |= 1UL << PORT_CAP_TO_GID_TYPE[i].gid_type;

        return ret_flags;
}
EXPORT_SYMBOL(roce_gid_type_mask_support);

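/*
 * Add or delete the given GID in the cache of @port once for every GID
 * type the port supports, reusing the same GID attribute each time.
 */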
static void update_gid(enum gid_op_type gid_op, struct ib_device *ib_dev,
                       u8 port, union ib_gid *gid,
                       struct ib_gid_attr *gid_attr)
{
        int i;
        unsigned long gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        for (i = 0; i < IB_GID_TYPE_SIZE; i++) {
                if ((1UL << i) & gid_type_mask) {
                        gid_attr->gid_type = i;
                        switch (gid_op) {
                        case GID_ADD:
                                ib_cache_gid_add(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        case GID_DEL:
                                ib_cache_gid_del(ib_dev, port,
                                                 gid, gid_attr);
                                break;
                        }
                }
        }
}

enum bonding_slave_state {
        BONDING_SLAVE_STATE_ACTIVE   = 1UL << 0,
        BONDING_SLAVE_STATE_INACTIVE = 1UL << 1,
        /* No primary slave or the device isn't a slave in bonding */
        BONDING_SLAVE_STATE_NA       = 1UL << 2,
};

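/*
 * Report whether @dev is the active slave of the bond master @upper.
 * Must be called with rcu_read_lock() held.
 */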
static enum bonding_slave_state is_eth_active_slave_of_bonding_rcu(struct net_device *dev,
                                                                   struct net_device *upper)
{
        if (upper && netif_is_bond_master(upper)) {
                struct net_device *pdev =
                        bond_option_active_slave_get_rcu(netdev_priv(upper));

                if (pdev)
                        return dev == pdev ? BONDING_SLAVE_STATE_ACTIVE :
                                             BONDING_SLAVE_STATE_INACTIVE;
        }

        return BONDING_SLAVE_STATE_NA;
}

#define REQUIRED_BOND_STATES (BONDING_SLAVE_STATE_ACTIVE | \
                              BONDING_SLAVE_STATE_NA)
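/*
 * Match when the cookie netdevice is served by the RDMA netdevice of the
 * port: either the cookie's real (non-VLAN) device is the RDMA netdevice
 * itself, or the cookie is an upper device of it and the RDMA netdevice
 * is the active slave (or not a slave at all) of the bond.
 */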
static bool
is_eth_port_of_netdev_filter(struct ib_device *ib_dev, u8 port,
                             struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *real_dev;
        bool res;

        if (!rdma_ndev)
                return false;

        rcu_read_lock();
        real_dev = rdma_vlan_dev_real_dev(cookie);
        if (!real_dev)
                real_dev = cookie;

        res = ((rdma_is_upper_dev_rcu(rdma_ndev, cookie) &&
                (is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) &
                 REQUIRED_BOND_STATES)) ||
               real_dev == rdma_ndev);

        rcu_read_unlock();
        return res;
}

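/*
 * Match only when the RDMA netdevice is an inactive slave of its
 * bonding master.
 */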
static bool
is_eth_port_inactive_slave_filter(struct ib_device *ib_dev, u8 port,
                                  struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *master_dev;
        bool res;

        if (!rdma_ndev)
                return false;

        rcu_read_lock();
        master_dev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        res = is_eth_active_slave_of_bonding_rcu(rdma_ndev, master_dev) ==
              BONDING_SLAVE_STATE_INACTIVE;
        rcu_read_unlock();

        return res;
}

/**
 * is_ndev_for_default_gid_filter - Check if a given netdevice
 * can be considered for default GIDs or not.
 * @ib_dev: IB device to check
 * @port: Port to consider for adding default GID
 * @rdma_ndev: rdma netdevice pointer
 * @cookie: Netdevice to consider to form a default GID
 *
 * is_ndev_for_default_gid_filter() returns true if a given netdevice can be
 * considered for deriving a default RoCE GID, false otherwise.
 */
static bool
is_ndev_for_default_gid_filter(struct ib_device *ib_dev, u8 port,
                               struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *cookie_ndev = cookie;
        bool res;

        if (!rdma_ndev)
                return false;

        rcu_read_lock();

        /*
         * When the rdma netdevice is used in bonding, the bonding master
         * netdevice should be considered for default GIDs. Therefore,
         * ignore slave rdma netdevices when bonding is considered.
         * Additionally, when the event (cookie) netdevice is the bond
         * master device, make sure that it is the upper netdevice of the
         * rdma netdevice.
         */
        res = ((cookie_ndev == rdma_ndev && !netif_is_bond_slave(rdma_ndev)) ||
               (netif_is_bond_master(cookie_ndev) &&
                rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev)));

        rcu_read_unlock();
        return res;
}

static bool pass_all_filter(struct ib_device *ib_dev, u8 port,
                            struct net_device *rdma_ndev, void *cookie)
{
        return true;
}

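/*
 * Match when the cookie netdevice is the RDMA netdevice itself or any
 * upper device stacked on top of it (e.g. a VLAN or bond device).
 */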
static bool upper_device_filter(struct ib_device *ib_dev, u8 port,
                                struct net_device *rdma_ndev, void *cookie)
{
        bool res;

        if (!rdma_ndev)
                return false;

        if (rdma_ndev == cookie)
                return true;

        rcu_read_lock();
        res = rdma_is_upper_dev_rcu(rdma_ndev, cookie);
        rcu_read_unlock();

        return res;
}

/**
 * is_upper_ndev_bond_master_filter - Check if a given netdevice
 * is the bond master device of the netdevice of the RDMA device's port.
 * @ib_dev: IB device to check
 * @port: Port to consider for adding default GID
 * @rdma_ndev: Pointer to rdma netdevice
 * @cookie: Netdevice to consider to form a default GID
 *
 * is_upper_ndev_bond_master_filter() returns true if the cookie netdevice
 * is a bond master device and rdma_ndev is its lower netdevice. The slave
 * relationship might not have been fully established yet.
 */
static bool
is_upper_ndev_bond_master_filter(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev,
                                 void *cookie)
{
        struct net_device *cookie_ndev = cookie;
        bool match = false;

        rcu_read_lock();
        if (netif_is_bond_master(cookie_ndev) &&
            rdma_is_upper_dev_rcu(rdma_ndev, cookie_ndev))
                match = true;
        rcu_read_unlock();
        return match;
}

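/*
 * Convert an IPv4/IPv6 socket address to a GID and add or delete it on
 * the given port, associated with @ndev.
 */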
static void update_gid_ip(enum gid_op_type gid_op,
                          struct ib_device *ib_dev,
                          u8 port, struct net_device *ndev,
                          struct sockaddr *addr)
{
        union ib_gid gid;
        struct ib_gid_attr gid_attr;

        rdma_ip2gid(addr, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        update_gid(gid_op, ib_dev, port, &gid, &gid_attr);
}

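/*
 * Delete the port's default GIDs only when the RDMA netdevice still
 * backs the event netdevice (it is the device itself or a lower device
 * of it) and is not an inactive bonding slave.
 */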
static void bond_delete_netdev_default_gids(struct ib_device *ib_dev,
                                            u8 port,
                                            struct net_device *rdma_ndev,
                                            struct net_device *event_ndev)
{
        struct net_device *real_dev = rdma_vlan_dev_real_dev(event_ndev);
        unsigned long gid_type_mask;

        if (!rdma_ndev)
                return;

        if (!real_dev)
                real_dev = event_ndev;

        rcu_read_lock();

        if ((rdma_ndev != event_ndev &&
             !rdma_is_upper_dev_rcu(rdma_ndev, event_ndev)) ||
            is_eth_active_slave_of_bonding_rcu(rdma_ndev, real_dev) ==
            BONDING_SLAVE_STATE_INACTIVE) {
                rcu_read_unlock();
                return;
        }

        rcu_read_unlock();

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        ib_cache_gid_set_default_gid(ib_dev, port, rdma_ndev,
                                     gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_DELETE);
}

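/*
 * Walk all IPv4 addresses of @ndev and add a GID for each one. The
 * addresses are copied to a private list under RCU and the GID table is
 * updated after dropping the lock, since the cache update may sleep.
 */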
static void enum_netdev_ipv4_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct in_device *in_dev;
        struct sin_list {
                struct list_head list;
                struct sockaddr_in ip;
        };
        struct sin_list *sin_iter;
        struct sin_list *sin_temp;
        LIST_HEAD(sin_list);

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        rcu_read_lock();
        in_dev = __in_dev_get_rcu(ndev);
        if (!in_dev) {
                rcu_read_unlock();
                return;
        }

        for_ifa(in_dev) {
                struct sin_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->ip.sin_family = AF_INET;
                entry->ip.sin_addr.s_addr = ifa->ifa_address;
                list_add_tail(&entry->list, &sin_list);
        }
        endfor_ifa(in_dev);
        rcu_read_unlock();

        list_for_each_entry_safe(sin_iter, sin_temp, &sin_list, list) {
                update_gid_ip(GID_ADD, ib_dev, port, ndev,
                              (struct sockaddr *)&sin_iter->ip);
                list_del(&sin_iter->list);
                kfree(sin_iter);
        }
}

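/*
 * Walk all IPv6 addresses of @ndev and add a GID for each one, copying
 * the addresses out under the inet6_dev lock and updating the GID table
 * only after it has been dropped.
 */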
static void enum_netdev_ipv6_ips(struct ib_device *ib_dev,
                                 u8 port, struct net_device *ndev)
{
        struct inet6_ifaddr *ifp;
        struct inet6_dev *in6_dev;
        struct sin6_list {
                struct list_head list;
                struct sockaddr_in6 sin6;
        };
        struct sin6_list *sin6_iter;
        struct sin6_list *sin6_temp;
        struct ib_gid_attr gid_attr = {.ndev = ndev};
        LIST_HEAD(sin6_list);

        if (ndev->reg_state >= NETREG_UNREGISTERING)
                return;

        in6_dev = in6_dev_get(ndev);
        if (!in6_dev)
                return;

        read_lock_bh(&in6_dev->lock);
        list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
                struct sin6_list *entry = kzalloc(sizeof(*entry), GFP_ATOMIC);

                if (!entry)
                        continue;

                entry->sin6.sin6_family = AF_INET6;
                entry->sin6.sin6_addr = ifp->addr;
                list_add_tail(&entry->list, &sin6_list);
        }
        read_unlock_bh(&in6_dev->lock);

        in6_dev_put(in6_dev);

        list_for_each_entry_safe(sin6_iter, sin6_temp, &sin6_list, list) {
                union ib_gid gid;

                rdma_ip2gid((struct sockaddr *)&sin6_iter->sin6, &gid);
                update_gid(GID_ADD, ib_dev, port, &gid, &gid_attr);
                list_del(&sin6_iter->list);
                kfree(sin6_iter);
        }
}

static void _add_netdev_ips(struct ib_device *ib_dev, u8 port,
                            struct net_device *ndev)
{
        enum_netdev_ipv4_ips(ib_dev, port, ndev);
        if (IS_ENABLED(CONFIG_IPV6))
                enum_netdev_ipv6_ips(ib_dev, port, ndev);
}

static void add_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        _add_netdev_ips(ib_dev, port, cookie);
}

static void del_netdev_ips(struct ib_device *ib_dev, u8 port,
                           struct net_device *rdma_ndev, void *cookie)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, cookie);
}

/**
 * del_default_gids - Delete default GIDs of the event/cookie netdevice
 * @ib_dev: RDMA device pointer
 * @port: Port of the RDMA device whose GID table to consider
 * @rdma_ndev: Unused rdma netdevice
 * @cookie: Pointer to event netdevice
 *
 * del_default_gids() deletes the default GIDs of the event/cookie netdevice.
 */
static void del_default_gids(struct ib_device *ib_dev, u8 port,
                             struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *cookie_ndev = cookie;
        unsigned long gid_type_mask;

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);

        ib_cache_gid_set_default_gid(ib_dev, port, cookie_ndev, gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_DELETE);
}

static void add_default_gids(struct ib_device *ib_dev, u8 port,
                             struct net_device *rdma_ndev, void *cookie)
{
        struct net_device *event_ndev = cookie;
        unsigned long gid_type_mask;

        gid_type_mask = roce_gid_type_mask_support(ib_dev, port);
        ib_cache_gid_set_default_gid(ib_dev, port, event_ndev, gid_type_mask,
                                     IB_CACHE_GID_DEFAULT_MODE_SET);
}

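/*
 * Callback used by rdma_roce_rescan_device(): walk every netdevice in
 * every network namespace and populate the default and IP-based GIDs
 * that belong on this port.
 */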
static void enum_all_gids_of_dev_cb(struct ib_device *ib_dev,
                                    u8 port,
                                    struct net_device *rdma_ndev,
                                    void *cookie)
{
        struct net *net;
        struct net_device *ndev;

        /* Lock the rtnl to make sure the netdevs do not move under
         * our feet
         */
        rtnl_lock();
        down_read(&net_rwsem);
        for_each_net(net)
                for_each_netdev(net, ndev) {
                        /*
                         * Filter and add default GIDs of the primary netdevice
                         * when not in bonding mode, or add default GIDs
                         * of the bond master device, when in bonding mode.
                         */
                        if (is_ndev_for_default_gid_filter(ib_dev, port,
                                                           rdma_ndev, ndev))
                                add_default_gids(ib_dev, port, rdma_ndev, ndev);

                        if (is_eth_port_of_netdev_filter(ib_dev, port,
                                                         rdma_ndev, ndev))
                                _add_netdev_ips(ib_dev, port, ndev);
                }
        up_read(&net_rwsem);
        rtnl_unlock();
}

/**
 * rdma_roce_rescan_device - Rescan all of the network devices in the system
 * and add their gids, as needed, to the relevant RoCE devices.
 *
 * @ib_dev: the rdma device
 */
void rdma_roce_rescan_device(struct ib_device *ib_dev)
{
        ib_enum_roce_netdev(ib_dev, pass_all_filter, NULL,
                            enum_all_gids_of_dev_cb, NULL);
}
EXPORT_SYMBOL(rdma_roce_rescan_device);

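/*
 * Apply a queued address-change event (GID add or delete) to one
 * matching RDMA device port.
 */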
static void callback_for_addr_gid_device_scan(struct ib_device *device,
                                              u8 port,
                                              struct net_device *rdma_ndev,
                                              void *cookie)
{
        struct update_gid_event_work *parsed = cookie;

        update_gid(parsed->gid_op, device,
                   port, &parsed->gid,
                   &parsed->gid_attr);
}

struct upper_list {
        struct list_head list;
        struct net_device *upper;
};

static int netdev_upper_walk(struct net_device *upper, void *data)
{
        struct upper_list *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
        struct list_head *upper_list = data;

        if (!entry)
                return 0;

        list_add_tail(&entry->list, upper_list);
        dev_hold(upper);
        entry->upper = upper;

        return 0;
}

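/*
 * Collect all upper devices of @ndev under RCU (taking a reference on
 * each), then invoke @handle_netdev on @ndev and on every collected
 * upper device outside of the RCU read-side section.
 */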
static void handle_netdev_upper(struct ib_device *ib_dev, u8 port,
                                void *cookie,
                                void (*handle_netdev)(struct ib_device *ib_dev,
                                                      u8 port,
                                                      struct net_device *ndev))
{
        struct net_device *ndev = cookie;
        struct upper_list *upper_iter;
        struct upper_list *upper_temp;
        LIST_HEAD(upper_list);

        rcu_read_lock();
        netdev_walk_all_upper_dev_rcu(ndev, netdev_upper_walk, &upper_list);
        rcu_read_unlock();

        handle_netdev(ib_dev, port, ndev);
        list_for_each_entry_safe(upper_iter, upper_temp, &upper_list,
                                 list) {
                handle_netdev(ib_dev, port, upper_iter->upper);
                dev_put(upper_iter->upper);
                list_del(&upper_iter->list);
                kfree(upper_iter);
        }
}

static void _roce_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                      struct net_device *event_ndev)
{
        ib_cache_gid_del_all_netdev_gids(ib_dev, port, event_ndev);
}

static void del_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _roce_del_all_netdev_gids);
}

static void add_netdev_upper_ips(struct ib_device *ib_dev, u8 port,
                                 struct net_device *rdma_ndev, void *cookie)
{
        handle_netdev_upper(ib_dev, port, cookie, _add_netdev_ips);
}

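/*
 * Delete the default GIDs that were derived from the bond master of
 * the RDMA netdevice, if it has one.
 */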
static void del_netdev_default_ips_join(struct ib_device *ib_dev, u8 port,
                                        struct net_device *rdma_ndev,
                                        void *cookie)
{
        struct net_device *master_ndev;

        rcu_read_lock();
        master_ndev = netdev_master_upper_dev_get_rcu(rdma_ndev);
        if (master_ndev)
                dev_hold(master_ndev);
        rcu_read_unlock();

        if (master_ndev) {
                bond_delete_netdev_default_gids(ib_dev, port, rdma_ndev,
                                                master_ndev);
                dev_put(master_ndev);
        }
}

/* The following functions operate on all IB devices. netdevice_event and
 * addr_event execute ib_enum_all_roce_netdevs from a work item.
 * ib_enum_all_roce_netdevs iterates through all IB devices.
 */

static void netdevice_event_work_handler(struct work_struct *_work)
{
        struct netdev_event_work *work =
                container_of(_work, struct netdev_event_work, work);
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(work->cmds) && work->cmds[i].cb; i++) {
                ib_enum_all_roce_netdevs(work->cmds[i].filter,
                                         work->cmds[i].filter_ndev,
                                         work->cmds[i].cb,
                                         work->cmds[i].ndev);
                dev_put(work->cmds[i].ndev);
                dev_put(work->cmds[i].filter_ndev);
        }

        kfree(work);
}

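/*
 * Copy the command array into a freshly allocated work item, take a
 * reference on each netdevice it uses (defaulting to the event
 * netdevice), and queue it on the ordered GID cache workqueue.
 */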
static int netdevice_queue_work(struct netdev_event_work_cmd *cmds,
                                struct net_device *ndev)
{
        unsigned int i;
        struct netdev_event_work *ndev_work =
                kmalloc(sizeof(*ndev_work), GFP_KERNEL);

        if (!ndev_work)
                return NOTIFY_DONE;

        memcpy(ndev_work->cmds, cmds, sizeof(ndev_work->cmds));
        for (i = 0; i < ARRAY_SIZE(ndev_work->cmds) && ndev_work->cmds[i].cb; i++) {
                if (!ndev_work->cmds[i].ndev)
                        ndev_work->cmds[i].ndev = ndev;
                if (!ndev_work->cmds[i].filter_ndev)
                        ndev_work->cmds[i].filter_ndev = ndev;
                dev_hold(ndev_work->cmds[i].ndev);
                dev_hold(ndev_work->cmds[i].filter_ndev);
        }
        INIT_WORK(&ndev_work->work, netdevice_event_work_handler);

        queue_work(gid_cache_wq, &ndev_work->work);

        return NOTIFY_DONE;
}

static const struct netdev_event_work_cmd add_cmd = {
        .cb     = add_netdev_ips,
        .filter = is_eth_port_of_netdev_filter
};

static const struct netdev_event_work_cmd add_cmd_upper_ips = {
        .cb     = add_netdev_upper_ips,
        .filter = is_eth_port_of_netdev_filter
};

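/*
 * On CHANGEUPPER unlink: drop the IP-based GIDs that were added for the
 * former upper device, then re-add the GIDs that still match the lower
 * netdevice directly.
 */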
static void
ndev_event_unlink(struct netdev_notifier_changeupper_info *changeupper_info,
                  struct netdev_event_work_cmd *cmds)
{
        static const struct netdev_event_work_cmd upper_ips_del_cmd = {
                .cb     = del_netdev_upper_ips,
                .filter = upper_device_filter
        };

        cmds[0] = upper_ips_del_cmd;
        cmds[0].ndev = changeupper_info->upper_dev;
        cmds[1] = add_cmd;
}

static const struct netdev_event_work_cmd bonding_default_add_cmd = {
        .cb     = add_default_gids,
        .filter = is_upper_ndev_bond_master_filter
};

static void
ndev_event_link(struct net_device *event_ndev,
                struct netdev_notifier_changeupper_info *changeupper_info,
                struct netdev_event_work_cmd *cmds)
{
        static const struct netdev_event_work_cmd bonding_default_del_cmd = {
                .cb     = del_default_gids,
                .filter = is_upper_ndev_bond_master_filter
        };

        /*
         * When a lower netdev is linked to its upper bonding
         * netdev, delete the lower slave netdev's default GIDs.
         */
        cmds[0] = bonding_default_del_cmd;
        cmds[0].ndev = event_ndev;
        cmds[0].filter_ndev = changeupper_info->upper_dev;

        /* Now add bonding upper device default GIDs */
        cmds[1] = bonding_default_add_cmd;
        cmds[1].ndev = changeupper_info->upper_dev;
        cmds[1].filter_ndev = changeupper_info->upper_dev;

        /* Now add bonding upper device IP based GIDs */
        cmds[2] = add_cmd_upper_ips;
        cmds[2].ndev = changeupper_info->upper_dev;
        cmds[2].filter_ndev = changeupper_info->upper_dev;
}

static void netdevice_event_changeupper(struct net_device *event_ndev,
                struct netdev_notifier_changeupper_info *changeupper_info,
                struct netdev_event_work_cmd *cmds)
{
        if (changeupper_info->linking)
                ndev_event_link(event_ndev, changeupper_info, cmds);
        else
                ndev_event_unlink(changeupper_info, cmds);
}

static const struct netdev_event_work_cmd add_default_gid_cmd = {
        .cb     = add_default_gids,
        .filter = is_ndev_for_default_gid_filter,
};

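/*
 * Netdevice notifier: translate each event into up to
 * ROCE_NETDEV_CALLBACK_SZ filter/callback commands and hand them off to
 * the GID cache workqueue.
 */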
static int netdevice_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        static const struct netdev_event_work_cmd del_cmd = {
                .cb = del_netdev_ips, .filter = pass_all_filter};
        static const struct netdev_event_work_cmd bonding_default_del_cmd_join = {
                .cb     = del_netdev_default_ips_join,
                .filter = is_eth_port_inactive_slave_filter
        };
        static const struct netdev_event_work_cmd netdev_del_cmd = {
                .cb     = del_netdev_ips,
                .filter = is_eth_port_of_netdev_filter
        };
        static const struct netdev_event_work_cmd bonding_event_ips_del_cmd = {
                .cb = del_netdev_upper_ips, .filter = upper_device_filter};
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct netdev_event_work_cmd cmds[ROCE_NETDEV_CALLBACK_SZ] = { {NULL} };

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
        case NETDEV_UP:
                cmds[0] = bonding_default_del_cmd_join;
                cmds[1] = add_default_gid_cmd;
                cmds[2] = add_cmd;
                break;

        case NETDEV_UNREGISTER:
                if (ndev->reg_state < NETREG_UNREGISTERED)
                        cmds[0] = del_cmd;
                else
                        return NOTIFY_DONE;
                break;

        case NETDEV_CHANGEADDR:
                cmds[0] = netdev_del_cmd;
                cmds[1] = add_default_gid_cmd;
                cmds[2] = add_cmd;
                break;

        case NETDEV_CHANGEUPPER:
                netdevice_event_changeupper(ndev,
                        container_of(ptr, struct netdev_notifier_changeupper_info, info),
                        cmds);
                break;

        case NETDEV_BONDING_FAILOVER:
                cmds[0] = bonding_event_ips_del_cmd;
                /* Add default GIDs of the bond device */
                cmds[1] = bonding_default_add_cmd;
                /* Add IP based GIDs of the bond device */
                cmds[2] = add_cmd_upper_ips;
                break;

        default:
                return NOTIFY_DONE;
        }

        return netdevice_queue_work(cmds, ndev);
}

static void update_gid_event_work_handler(struct work_struct *_work)
{
        struct update_gid_event_work *work =
                container_of(_work, struct update_gid_event_work, work);

        ib_enum_all_roce_netdevs(is_eth_port_of_netdev_filter,
                                 work->gid_attr.ndev,
                                 callback_for_addr_gid_device_scan, work);

        dev_put(work->gid_attr.ndev);
        kfree(work);
}

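/*
 * Common handler for the inetaddr/inet6addr notifiers: translate
 * NETDEV_UP/NETDEV_DOWN for an address into a deferred GID add/delete
 * work item.
 */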
static int addr_event(struct notifier_block *this, unsigned long event,
                      struct sockaddr *sa, struct net_device *ndev)
{
        struct update_gid_event_work *work;
        enum gid_op_type gid_op;

        if (ndev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_UP:
                gid_op = GID_ADD;
                break;

        case NETDEV_DOWN:
                gid_op = GID_DEL;
                break;

        default:
                return NOTIFY_DONE;
        }

        work = kmalloc(sizeof(*work), GFP_ATOMIC);
        if (!work)
                return NOTIFY_DONE;

        INIT_WORK(&work->work, update_gid_event_work_handler);

        rdma_ip2gid(sa, &work->gid);
        work->gid_op = gid_op;

        memset(&work->gid_attr, 0, sizeof(work->gid_attr));
        dev_hold(ndev);
        work->gid_attr.ndev = ndev;

        queue_work(gid_cache_wq, &work->work);

        return NOTIFY_DONE;
}

static int inetaddr_event(struct notifier_block *this, unsigned long event,
                          void *ptr)
{
        struct sockaddr_in in;
        struct net_device *ndev;
        struct in_ifaddr *ifa = ptr;

        in.sin_family = AF_INET;
        in.sin_addr.s_addr = ifa->ifa_address;
        ndev = ifa->ifa_dev->dev;

        return addr_event(this, event, (struct sockaddr *)&in, ndev);
}

static int inet6addr_event(struct notifier_block *this, unsigned long event,
                           void *ptr)
{
        struct sockaddr_in6 in6;
        struct net_device *ndev;
        struct inet6_ifaddr *ifa6 = ptr;

        in6.sin6_family = AF_INET6;
        in6.sin6_addr = ifa6->addr;
        ndev = ifa6->idev->dev;

        return addr_event(this, event, (struct sockaddr *)&in6, ndev);
}

static struct notifier_block nb_netdevice = {
        .notifier_call = netdevice_event
};

static struct notifier_block nb_inetaddr = {
        .notifier_call = inetaddr_event
};

static struct notifier_block nb_inet6addr = {
        .notifier_call = inet6addr_event
};

int __init roce_gid_mgmt_init(void)
{
        gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0);
        if (!gid_cache_wq)
                return -ENOMEM;

        register_inetaddr_notifier(&nb_inetaddr);
        if (IS_ENABLED(CONFIG_IPV6))
                register_inet6addr_notifier(&nb_inet6addr);
        /* We rely on the netdevice notifier to enumerate all existing
         * devices in the system. Register to this notifier last to
         * make sure we will not miss any IP add/del callbacks.
         */
        register_netdevice_notifier(&nb_netdevice);

        return 0;
}

void __exit roce_gid_mgmt_cleanup(void)
{
        if (IS_ENABLED(CONFIG_IPV6))
                unregister_inet6addr_notifier(&nb_inet6addr);
        unregister_inetaddr_notifier(&nb_inetaddr);
        unregister_netdevice_notifier(&nb_netdevice);
        /* Ensure all gid deletion tasks complete before we go down,
         * to avoid any reference to freed memory. By the time
         * ib-core is removed, all physical devices have been removed,
         * so no issue with remaining hardware contexts.
         */
        destroy_workqueue(gid_cache_wq);
}