/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");
struct ib_client_data {
	struct list_head list;
	struct ib_client *client;
	void *data;
	/* The device or client is going down. Do not call client or device
	 * callbacks other than remove(). */
	bool going_down;
};

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list. device_mutex protects writer access by device and client
 * registration / de-registration. lists_rwsem protects reader access to
 * these lists. Iterators of these lists must lock them for read, while
 * updates to the lists must be done holding the write lock. A special
 * case is when device_mutex is held: then taking lists_rwsem for read
 * access is not necessary, as holding device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);
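
/*
 * A minimal read-side sketch of the scheme above (the function name is
 * hypothetical): readers that only walk device_list take lists_rwsem for
 * read, exactly as ib_policy_change_task() and ib_enum_all_roce_netdevs()
 * below do, while the registration paths take device_mutex and then
 * lists_rwsem for write around the actual list updates.
 *
 *	static void example_walk_devices(void)
 *	{
 *		struct ib_device *dev;
 *
 *		down_read(&lists_rwsem);
 *		list_for_each_entry(dev, &device_list, core_list)
 *			pr_info("found device %s\n", dev->name);
 *		up_read(&lists_rwsem);
 *	}
 */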

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
			pr_warn("Device %s is missing mandatory function %s\n",
				device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}
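
/*
 * For clarity, what the table-driven check above amounts to:
 * IB_MANDATORY_FUNC(query_device) expands to
 *
 *	{ offsetof(struct ib_device, query_device), "query_device" }
 *
 * so each loop iteration reads the function pointer stored at that byte
 * offset inside *device and fails registration if it is NULL. It is
 * equivalent to, but more compact than, a hand-written chain of checks:
 *
 *	if (!device->query_device || !device->query_port || ...)
 *		return -EINVAL;
 */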

static struct ib_device *__ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (device->index == index)
			return device;

	return NULL;
}

/*
 * The caller is responsible for dropping the reference by calling
 * put_device().
 */
struct ib_device *ib_device_get_by_index(u32 index)
{
	struct ib_device *device;

	down_read(&lists_rwsem);
	device = __ib_device_get_by_index(index);
	if (device)
		get_device(&device->dev);

	up_read(&lists_rwsem);
	return device;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}

static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}
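
/*
 * An illustrative walk-through of alloc_name(), assuming two devices named
 * "mlx4_0" and "mlx4_1" are already registered: called with name "mlx4_%d",
 * it uses the pattern as both an sscanf() and snprintf() format to mark
 * indices 0 and 1 as busy in the page-sized bitmap, then rewrites the
 * caller's buffer in place with the first free index, yielding "mlx4_2".
 */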

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
	if (dev->reg_state == IB_DEV_UNREGISTERED) {
		/*
		 * In the IB_DEV_UNINITIALIZED state the cache and port
		 * table have not even been created, so free them only
		 * when the device has reached the UNREGISTERED state.
		 */
		ib_cache_release_one(dev);
		kfree(dev->port_immutable);
	}
	kfree(dev);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	if (add_uevent_var(env, "NAME=%s", dev->name))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static struct class ib_class = {
	.name        = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent  = ib_device_uevent,
};

/**
 * ib_alloc_device - allocate an IB device struct
 * @size: size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device. @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	rdma_restrack_init(&device->res);

	device->dev.class = &ib_class;
	device_initialize(&device->dev);

	dev_set_drvdata(&device->dev, device);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);
	INIT_LIST_HEAD(&device->client_data_list);
	INIT_LIST_HEAD(&device->port_list);

	return device;
}
EXPORT_SYMBOL(ib_alloc_device);
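
/*
 * A minimal usage sketch (the driver structure and names are hypothetical):
 * a low-level driver embeds struct ib_device as the first member of its
 * private structure and sizes the allocation to cover both.
 *
 *	struct my_hca {
 *		struct ib_device ibdev;
 *		int		 private_state;
 *	};
 *
 * With ibdev first, the pointer returned by ib_alloc_device() can be cast
 * straight to struct my_hca *:
 *
 *	struct my_hca *hca;
 *
 *	hca = (struct my_hca *)ib_alloc_device(sizeof(*hca));
 *	if (!hca)
 *		return -ENOMEM;
 */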

/**
 * ib_dealloc_device - free an IB device struct
 * @device: structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
		device->reg_state != IB_DEV_UNINITIALIZED);
	rdma_restrack_clean(&device->res);
	put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->client = client;
	context->data = NULL;
	context->going_down = false;

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
			rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/*
	 * device->port_immutable is indexed directly by the port number to
	 * make access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1-based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kcalloc(end_port + 1,
					 sizeof(*device->port_immutable),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}

void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
	if (dev->get_dev_fw_str)
		dev->get_dev_fw_str(dev, str);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

static int setup_port_pkey_list(struct ib_device *device)
{
	int i;

	/*
	 * device->port_pkey_list is indexed directly by the port number.
	 * Therefore it is declared as a 1-based array with potential empty
	 * slots at the beginning.
	 */
	device->port_pkey_list = kcalloc(rdma_end_port(device) + 1,
					 sizeof(*device->port_pkey_list),
					 GFP_KERNEL);

	if (!device->port_pkey_list)
		return -ENOMEM;

	for (i = 0; i < (rdma_end_port(device) + 1); i++) {
		spin_lock_init(&device->port_pkey_list[i].list_lock);
		INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list);
	}

	return 0;
}

static void ib_policy_change_task(struct work_struct *work)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list) {
		int i;

		for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) {
			u64 sp;
			int ret = ib_get_cached_subnet_prefix(dev, i, &sp);

			WARN_ONCE(ret,
				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
				  ret);
			if (!ret)
				ib_security_cache_change(dev, i, sp);
		}
	}
	up_read(&lists_rwsem);
}

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	schedule_work(&ib_policy_change_work);

	return NOTIFY_OK;
}

/**
 * __dev_new_index - allocate a device index
 *
 * Returns a suitable unique value for a new device interface
 * number. It assumes that fewer than 2^32-1 IB devices are
 * present in the system.
 */
static u32 __dev_new_index(void)
{
	/*
	 * The device index to allow stable naming.
	 * Similar to struct net -> ifindex.
	 */
	static u32 index;

	for (;;) {
		if (!(++index))
			index = 1;

		if (!__ib_device_get_by_index(index))
			return index;
	}
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device: Device to register
 * @port_callback: Optional callback invoked while registering each port's
 *   sysfs kobject, so the driver can attach per-port attributes
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;
	struct ib_client *client;
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(device->dma_device);
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask) {
			if (parent)
				device->dev.dma_mask = parent->dma_mask;
			else
				WARN_ON_ONCE(true);
		}
		if (!device->dev.coherent_dma_mask) {
			if (parent)
				device->dev.coherent_dma_mask =
					parent->coherent_dma_mask;
			else
				WARN_ON_ONCE(true);
		}
	} else {
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		WARN_ON_ONCE(!parent);
		device->dma_device = parent;
	}

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	ret = read_port_immutable(device);
	if (ret) {
		pr_warn("Couldn't create per-port immutable data for %s\n",
			device->name);
		goto out;
	}

	ret = setup_port_pkey_list(device);
	if (ret) {
		pr_warn("Couldn't create per-port P_Key lists\n");
		goto out;
	}

	ret = ib_cache_setup_one(device);
	if (ret) {
		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
		goto port_cleanup;
	}

	ret = ib_device_register_rdmacg(device);
	if (ret) {
		pr_warn("Couldn't register device with rdma cgroup\n");
		goto cache_cleanup;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->query_device(device, &device->attrs, &uhw);
	if (ret) {
		pr_warn("Couldn't query the device attributes\n");
		goto cg_cleanup;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		pr_warn("Couldn't register device %s with driver model\n",
			device->name);
		goto cg_cleanup;
	}

	device->reg_state = IB_DEV_REGISTERED;

	list_for_each_entry(client, &client_list, list)
		if (!add_client_context(device, client) && client->add)
			client->add(device);

	device->index = __dev_new_index();
	down_write(&lists_rwsem);
	list_add_tail(&device->core_list, &device_list);
	up_write(&lists_rwsem);
	mutex_unlock(&device_mutex);
	return 0;

cg_cleanup:
	ib_device_unregister_rdmacg(device);
cache_cleanup:
	ib_cache_cleanup_one(device);
	ib_cache_release_one(device);
port_cleanup:
	kfree(device->port_immutable);
out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
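
/*
 * A condensed registration sketch, continuing the hypothetical my_hca
 * driver above: the driver fills in a printf-style name and the verbs
 * listed in mandatory_table before registering, and must pair a failed
 * or completed registration with ib_dealloc_device().
 *
 *	strlcpy(hca->ibdev.name, "my_hca_%d", IB_DEVICE_NAME_MAX);
 *	hca->ibdev.query_device       = my_query_device;
 *	hca->ibdev.query_port         = my_query_port;
 *	...
 *	hca->ibdev.get_port_immutable = my_get_port_immutable;
 *
 *	ret = ib_register_device(&hca->ibdev, NULL);
 *	if (ret)
 *		ib_dealloc_device(&hca->ibdev);
 */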

/**
 * ib_unregister_device - Unregister an IB device
 * @device: Device to unregister
 *
 * Unregister an IB device. All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&device->core_list);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		context->going_down = true;
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	downgrade_write(&lists_rwsem);

	list_for_each_entry_safe(context, tmp, &device->client_data_list,
				 list) {
		if (context->client->remove)
			context->client->remove(device, context->data);
	}
	up_read(&lists_rwsem);

	ib_device_unregister_rdmacg(device);
	ib_device_unregister_sysfs(device);

	mutex_unlock(&device_mutex);

	ib_cache_cleanup_one(device);

	ib_security_destroy_port_pkey_list(device);
	kfree(device->port_pkey_list);

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client: Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal. When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered). In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list)
		if (!add_client_context(device, client) && client->add)
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&client->list, &client_list);
	up_write(&lists_rwsem);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);
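
/*
 * A minimal client sketch (all my_* names are hypothetical): the add
 * callback typically allocates per-device state and stores it with
 * ib_set_client_data(); the remove callback receives that same pointer.
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add_one(struct ib_device *device)
 *	{
 *		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (st)
 *			ib_set_client_data(device, &my_client, st);
 *	}
 *
 *	static void my_remove_one(struct ib_device *device, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name	= "my_client",
 *		.add	= my_add_one,
 *		.remove	= my_remove_one,
 *	};
 *
 *	ret = ib_register_client(&my_client);
 */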

/**
 * ib_unregister_client - Unregister an IB client
 * @client: Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration. When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&client->list);
	up_write(&lists_rwsem);

	list_for_each_entry(device, &device_list, core_list) {
		struct ib_client_data *found_context = NULL;

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				context->going_down = true;
				found_context = context;
				break;
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);

		if (client->remove)
			client->remove(device, found_context ?
					       found_context->data : NULL);

		if (!found_context) {
			pr_warn("No client context found for %s/%s\n",
				device->name, client->name);
			continue;
		}

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_del(&found_context->list);
		kfree(found_context);
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);
	}

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device: Device to get context for
 * @client: Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device: Device to set context for
 * @client: Client to set context for
 * @data: Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	pr_warn("No client context found for %s/%s\n",
		device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
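
/*
 * Continuing the hypothetical client above: wherever a struct ib_device is
 * at hand, the state stored by my_add_one() can be fetched back. A NULL
 * return means this client never attached state to the device.
 *
 *	struct my_state *st = ib_get_client_data(device, &my_client);
 *
 *	if (!st)
 *		return;
 */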

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler: Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification). This
 * callback may occur in interrupt context.
 */
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_register_event_handler);
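
/*
 * A registration sketch with a hypothetical handler. INIT_IB_EVENT_HANDLER()
 * from <rdma/ib_verbs.h> binds the handler to a device; because dispatch can
 * happen in interrupt context, the handler must not sleep.
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ACTIVE)
 *			pr_info("port %u became active\n",
 *				event->element.port_num);
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&my_handler, device, my_event_handler);
 *	ib_register_event_handler(&my_handler);
 */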

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler: Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event: Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
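
/*
 * The producer side, as it might look in a hypothetical driver's interrupt
 * path: the driver fills a struct ib_event on the stack and dispatches it,
 * which fans out synchronously to every handler registered above.
 *
 *	struct ib_event event;
 *
 *	event.device		= &hca->ibdev;
 *	event.event		= IB_EVENT_PORT_ACTIVE;
 *	event.element.port_num	= port;
 *	ib_dispatch_event(&event);
 */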

/**
 * ib_query_port - Query IB port attributes
 * @device: Device to query
 * @port_num: Port number to query
 * @port_attr: Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	union ib_gid gid;
	int err;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	memset(port_attr, 0, sizeof(*port_attr));
	err = device->query_port(device, port_num, port_attr);
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = device->query_gid(device, port_num, 0, &gid);
	if (err)
		return err;

	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
	return 0;
}
EXPORT_SYMBOL(ib_query_port);
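
/*
 * Caller-side sketch: the core fills in *port_attr, so a consumer only
 * needs stack storage and an error check.
 *
 *	struct ib_port_attr attr;
 *
 *	if (!ib_query_port(device, port_num, &attr) &&
 *	    attr.state == IB_PORT_ACTIVE)
 *		pr_info("port %u is active, lid 0x%x\n",
 *			port_num, attr.lid);
 */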

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * which are related to a netdevice and calls cb() on each
 * port for which filter() returns a non-zero value.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	u8 port;

	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
	     port++)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev = NULL;

			if (ib_dev->get_netdev)
				idev = ib_dev->get_netdev(ib_dev, port);

			if (idev &&
			    idev->reg_state >= NETREG_UNREGISTERED) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all RoCE devices which are related
 * to a netdevice and calls cb() on each port for which filter()
 * returns a non-zero value.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&lists_rwsem);
}

/**
 * ib_enum_all_devs - enumerate all ib_devices
 * @nldev_cb: Callback to call for each found ib_device
 * @skb: Netlink buffer passed through to the callback
 * @cb: Netlink callback state passed through to the callback
 *
 * Enumerates all ib_devices and calls nldev_cb() on each device,
 * stopping early if the callback returns a non-zero value.
 */
int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
		     struct netlink_callback *cb)
{
	struct ib_device *dev;
	unsigned int idx = 0;
	int ret = 0;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list) {
		ret = nldev_cb(dev, skb, cb, idx);
		if (ret)
			break;
		idx++;
	}

	up_read(&lists_rwsem);
	return ret;
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device: Device to query
 * @port_num: Port number to query
 * @index: P_Key table index to query
 * @pkey: Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device: Device to modify
 * @device_modify_mask: Mask of attributes to change
 * @device_modify: New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	int rc;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	if (device->modify_port)
		rc = device->modify_port(device, port_num, port_modify_mask,
					 port_modify);
	else
		rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
	return rc;
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs. It searches only ports using the
 *   IB link layer.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found. This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (!rdma_protocol_ib(device, port))
			continue;

		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = rdma_query_gid(device, port, i, &tmp_gid);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* if there is a full-member pkey, take it */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* no full-member pkey found; if a limited-member one exists, take it */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
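
/*
 * Worked example of the matching rule above: the membership bit (0x8000)
 * is masked off for the comparison, so a search for the default partition
 * key matches either the full-member (0xffff) or the limited-member
 * (0x7fff) table entry, preferring the full-member one.
 *
 *	u16 index;
 *
 *	if (!ib_find_pkey(device, port_num, IB_DEFAULT_PKEY_FULL, &index))
 *		pr_info("default pkey at table index %u\n", index);
 */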

/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 *   for a received CM request
 * @dev: An RDMA device on which the request has been received.
 * @port: Port number on the RDMA device.
 * @pkey: The Pkey the request came on.
 * @gid: A GID that the net_dev uses to communicate.
 * @addr: Contains the IP address that the request specified as its
 *   destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	struct ib_client_data *context;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	down_read(&lists_rwsem);

	list_for_each_entry(context, &dev->client_data_list, list) {
		struct ib_client *client = context->client;

		if (context->going_down)
			continue;

		if (client->get_net_dev_by_params) {
			net_dev = client->get_net_dev_by_params(dev, port, pkey,
								gid, addr,
								context->data);
			if (net_dev)
				break;
		}
	}

	up_read(&lists_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.doit = ib_nl_handle_resolve_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.doit = ib_nl_handle_set_timeout,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_IP_RESOLVE] = {
		.doit = ib_nl_handle_ip_res_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp;
	}

	ret = rdma_nl_init();
	if (ret) {
		pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
		goto err_sysfs;
	}

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_ibnl;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ret = register_lsm_notifier(&ibdev_lsm_nb);
	if (ret) {
		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
		goto err_sa;
	}

	nldev_init();
	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
	roce_gid_mgmt_init();

	return 0;

err_sa:
	ib_sa_cleanup();
err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_ibnl:
	rdma_nl_exit();
err_sysfs:
	class_unregister(&ib_class);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	roce_gid_mgmt_cleanup();
	nldev_exit();
	rdma_nl_unregister(RDMA_NL_LS);
	unregister_lsm_notifier(&ibdev_lsm_nb);
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	rdma_nl_exit();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

subsys_initcall(ib_core_init);
module_exit(ib_core_cleanup);