/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/hashtable.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/rdma_counter.h>

#include "core_priv.h"
#include "restrack.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/*
 * Each of the three rwsem locks (devices, clients, client_data) protects the
 * xarray of the same name. Specifically it allows the caller to assert that
 * the MARK will/will not be changing under the lock, and for devices and
 * clients, that the value in the xarray is still a valid pointer. Change of
 * the MARK is linked to the object state, so holding the lock and testing the
 * MARK also asserts that the contained object is in a certain state.
 *
 * This is used to build a two stage register/unregister flow where objects
 * can continue to be in the xarray even though they are still in progress to
 * register/unregister.
 *
 * The xarray itself provides additional locking, and restartable iteration,
 * which is also relied on.
 *
 * Locks should not be nested, with the exception of client_data, which is
 * allowed to nest under the read side of the other two locks.
 *
 * The devices_rwsem also protects the device name list, any change or
 * assignment of device name must also hold the write side to guarantee unique
 * names.
 */
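
/*
 * Illustrative sketch (not part of the original file): how the mark + rwsem
 * scheme above is typically consumed. Holding devices_rwsem for read
 * guarantees the DEVICE_REGISTERED mark cannot change while we look:
 *
 *	struct ib_device *dev;
 *	unsigned long index;
 *
 *	down_read(&devices_rwsem);
 *	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
 *		// dev stays registered until up_read()
 *	}
 *	up_read(&devices_rwsem);
 */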

/*
 * devices contains devices that have had their names assigned. The
 * devices may not be registered. Users that care about the registration
 * status need to call ib_device_try_get() on the device to ensure it is
 * registered, and keep it registered, for the required duration.
 */
static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(devices_rwsem);
#define DEVICE_REGISTERED XA_MARK_1

static u32 highest_client_id;
#define CLIENT_REGISTERED XA_MARK_1
static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(clients_rwsem);

static void ib_client_put(struct ib_client *client)
{
	if (refcount_dec_and_test(&client->uses))
		complete(&client->uses_zero);
}

/*
 * If client_data is registered then the corresponding client must also still
 * be registered.
 */
#define CLIENT_DATA_REGISTERED XA_MARK_1

unsigned int rdma_dev_net_id;

/*
 * A list of net namespaces is maintained in an xarray. This is necessary
 * because we can't get the locking right using the existing net ns list. We
 * would require an init_net callback after the list is updated.
 */
static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
/*
 * rwsem to protect accessing the rdma_nets xarray entries.
 */
static DECLARE_RWSEM(rdma_nets_rwsem);

bool ib_devices_shared_netns = true;
module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
MODULE_PARM_DESC(netns_mode,
		 "Share device among net namespaces; default=1 (shared)");
/**
 * rdma_dev_access_netns() - Return whether an rdma device can be accessed
 *			     from a specified net namespace or not.
 * @dev: Pointer to rdma device which needs to be checked
 * @net: Pointer to net namespace for which access is to be checked
 *
 * When the rdma device is in shared mode, it ignores the net namespace.
 * When the rdma device is exclusive to a net namespace, the rdma device net
 * namespace is checked against the specified one.
 */
bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
{
	return (ib_devices_shared_netns ||
		net_eq(read_pnet(&dev->coredev.rdma_net), net));
}
EXPORT_SYMBOL(rdma_dev_access_netns);
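
/*
 * Example (a minimal sketch): a caller that must only act on devices visible
 * in its own namespace filters with rdma_dev_access_netns(). The skb here is
 * a hypothetical netlink request context; the loop and locking follow the
 * pattern used throughout this file:
 *
 *	down_read(&devices_rwsem);
 *	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
 *		if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
 *			continue;
 *		// dev is visible in the requester's namespace
 *	}
 *	up_read(&devices_rwsem);
 */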

/*
 * The xarray does not iterate over NULL values stored in an allocation
 * xarray, so we need our own iterator to see all values stored in the array.
 * This does the same thing as xa_for_each except that it also returns NULL
 * valued entries if the array is in allocating mode. Simplified to only work
 * on simple xarrays.
 */
static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
			     xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_find_marked(&xas, ULONG_MAX, filter);
		if (xa_is_zero(entry))
			break;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry) {
		*indexp = xas.xa_index;
		if (xa_is_zero(entry))
			return NULL;
		return entry;
	}
	return XA_ERROR(-ENOENT);
}
#define xan_for_each_marked(xa, index, entry, filter)                  \
	for (index = 0, entry = xan_find_marked(xa, &(index), filter); \
	     !xa_is_err(entry);                                        \
	     (index)++, entry = xan_find_marked(xa, &(index), filter))
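
/*
 * Example usage (sketch): iterate all registered client_data slots of a
 * device, including the NULL values that xa_for_each would skip. This is the
 * same pattern ib_device_rename() and __ib_get_client_nl_info() use below:
 *
 *	unsigned long index;
 *	void *client_data;
 *
 *	xan_for_each_marked (&ibdev->client_data, index, client_data,
 *			     CLIENT_DATA_REGISTERED) {
 *		struct ib_client *client = xa_load(&clients, index);
 *		// client_data may legitimately be NULL here
 *	}
 */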

/* RCU hash table mapping netdevice pointers to struct ib_port_data */
static DEFINE_SPINLOCK(ndev_hash_lock);
static DECLARE_HASHTABLE(ndev_hash, 5);

static void free_netdevs(struct ib_device *ib_dev);
static void ib_unregister_work(struct work_struct *work);
static void __ib_unregister_device(struct ib_device *device);
static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
			   struct va_format *vaf)
{
	if (ibdev && ibdev->dev.parent)
		dev_printk_emit(level[1] - '0',
				ibdev->dev.parent,
				"%s %s %s: %pV",
				dev_driver_string(ibdev->dev.parent),
				dev_name(ibdev->dev.parent),
				dev_name(&ibdev->dev),
				vaf);
	else if (ibdev)
		printk("%s%s: %pV",
		       level, dev_name(&ibdev->dev), vaf);
	else
		printk("%s(NULL ib_device): %pV", level, vaf);
}

void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__ibdev_printk(level, ibdev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ibdev_printk);

#define define_ibdev_printk_level(func, level)                 \
void func(const struct ib_device *ibdev, const char *fmt, ...) \
{                                                              \
	struct va_format vaf;                                  \
	va_list args;                                          \
							       \
	va_start(args, fmt);                                   \
							       \
	vaf.fmt = fmt;                                         \
	vaf.va = &args;                                        \
							       \
	__ibdev_printk(level, ibdev, &vaf);                    \
							       \
	va_end(args);                                          \
}                                                              \
EXPORT_SYMBOL(func);

define_ibdev_printk_level(ibdev_emerg, KERN_EMERG);
define_ibdev_printk_level(ibdev_alert, KERN_ALERT);
define_ibdev_printk_level(ibdev_crit, KERN_CRIT);
define_ibdev_printk_level(ibdev_err, KERN_ERR);
define_ibdev_printk_level(ibdev_warn, KERN_WARNING);
define_ibdev_printk_level(ibdev_notice, KERN_NOTICE);
define_ibdev_printk_level(ibdev_info, KERN_INFO);
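
/*
 * Example (sketch): drivers and core code log through these wrappers so the
 * parent device and IB device name are prefixed automatically:
 *
 *	ibdev_err(ibdev, "Couldn't query port %u\n", port);
 *	ibdev_info(ibdev, "device registered\n");
 */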

static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};

static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
				 struct net *net);

/* Pointer to the RCU head at the start of the ib_port_data array */
struct ib_port_data_rcu {
	struct rcu_head rcu_head;
	struct ib_port_data pdata[];
};

static void ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
	static const struct {
		size_t offset;
		char *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	device->kverbs_provider = true;
	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) &device->ops +
				 mandatory_table[i].offset)) {
			device->kverbs_provider = false;
			break;
		}
	}
}

/*
 * Caller must perform ib_device_put() to release the device reference count
 * when ib_device_get_by_index() returns a valid device pointer.
 */
struct ib_device *ib_device_get_by_index(const struct net *net, u32 index)
{
	struct ib_device *device;

	down_read(&devices_rwsem);
	device = xa_load(&devices, index);
	if (device) {
		if (!rdma_dev_access_netns(device, net)) {
			device = NULL;
			goto out;
		}

		if (!ib_device_try_get(device))
			device = NULL;
	}
out:
	up_read(&devices_rwsem);
	return device;
}

/**
 * ib_device_put - Release IB device reference
 * @device: device whose reference is to be released
 *
 * ib_device_put() releases the reference to the IB device to allow it to be
 * unregistered and eventually freed.
 */
void ib_device_put(struct ib_device *device)
{
	if (refcount_dec_and_test(&device->refcount))
		complete(&device->unreg_completion);
}
EXPORT_SYMBOL(ib_device_put);
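
/*
 * Example (sketch) of the try_get/put pairing expected by the comment on the
 * devices xarray above: a reader that must keep the device registered for a
 * while takes a reference and releases it when done. do_something() is a
 * hypothetical helper:
 *
 *	if (ib_device_try_get(device)) {
 *		do_something(device);
 *		ib_device_put(device);
 *	}
 */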

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;
	unsigned long index;

	xa_for_each (&devices, index, device)
		if (!strcmp(name, dev_name(&device->dev)))
			return device;

	return NULL;
}

/**
 * ib_device_get_by_name - Find an IB device by name
 * @name: The name to look for
 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
 *
 * Find and hold an ib_device by its name. The caller must call
 * ib_device_put() on the returned pointer.
 */
struct ib_device *ib_device_get_by_name(const char *name,
					enum rdma_driver_id driver_id)
{
	struct ib_device *device;

	down_read(&devices_rwsem);
	device = __ib_device_get_by_name(name);
	if (device && driver_id != RDMA_DRIVER_UNKNOWN &&
	    device->ops.driver_id != driver_id)
		device = NULL;

	if (device) {
		if (!ib_device_try_get(device))
			device = NULL;
	}
	up_read(&devices_rwsem);
	return device;
}
EXPORT_SYMBOL(ib_device_get_by_name);

static int rename_compat_devs(struct ib_device *device)
{
	struct ib_core_device *cdev;
	unsigned long index;
	int ret = 0;

	mutex_lock(&device->compat_devs_mutex);
	xa_for_each (&device->compat_devs, index, cdev) {
		ret = device_rename(&cdev->dev, dev_name(&device->dev));
		if (ret) {
			dev_warn(&cdev->dev,
				 "Failed to rename compatdev to new name %s\n",
				 dev_name(&device->dev));
			break;
		}
	}
	mutex_unlock(&device->compat_devs_mutex);
	return ret;
}

int ib_device_rename(struct ib_device *ibdev, const char *name)
{
	unsigned long index;
	void *client_data;
	int ret;

	down_write(&devices_rwsem);
	if (!strcmp(name, dev_name(&ibdev->dev))) {
		up_write(&devices_rwsem);
		return 0;
	}

	if (__ib_device_get_by_name(name)) {
		up_write(&devices_rwsem);
		return -EEXIST;
	}

	ret = device_rename(&ibdev->dev, name);
	if (ret) {
		up_write(&devices_rwsem);
		return ret;
	}

	strlcpy(ibdev->name, name, IB_DEVICE_NAME_MAX);
	ret = rename_compat_devs(ibdev);

	downgrade_write(&devices_rwsem);
	down_read(&ibdev->client_data_rwsem);
	xan_for_each_marked(&ibdev->client_data, index, client_data,
			    CLIENT_DATA_REGISTERED) {
		struct ib_client *client = xa_load(&clients, index);

		if (!client || !client->rename)
			continue;

		client->rename(ibdev, client_data);
	}
	up_read(&ibdev->client_data_rwsem);
	up_read(&devices_rwsem);
	return 0;
}

int ib_device_set_dim(struct ib_device *ibdev, u8 use_dim)
{
	if (use_dim > 1)
		return -EINVAL;
	ibdev->use_cq_dim = use_dim;

	return 0;
}

static int alloc_name(struct ib_device *ibdev, const char *name)
{
	struct ib_device *device;
	unsigned long index;
	struct ida inuse;
	int rc;
	int i;

	lockdep_assert_held_write(&devices_rwsem);
	ida_init(&inuse);
	xa_for_each (&devices, index, device) {
		char buf[IB_DEVICE_NAME_MAX];

		if (sscanf(dev_name(&device->dev), name, &i) != 1)
			continue;
		if (i < 0 || i >= INT_MAX)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (strcmp(buf, dev_name(&device->dev)) != 0)
			continue;

		rc = ida_alloc_range(&inuse, i, i, GFP_KERNEL);
		if (rc < 0)
			goto out;
	}

	rc = ida_alloc(&inuse, GFP_KERNEL);
	if (rc < 0)
		goto out;

	rc = dev_set_name(&ibdev->dev, name, rc);
out:
	ida_destroy(&inuse);
	return rc;
}
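
/*
 * Example (sketch): a printf-style name template containing '%', say the
 * hypothetical "mlx5_%d", is resolved here to the lowest free index:
 * "mlx5_0" if unused, otherwise "mlx5_1", and so on. Templates without '%'
 * are used verbatim by assign_name() further below.
 */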

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	free_netdevs(dev);
	WARN_ON(refcount_read(&dev->refcount));
	if (dev->port_data) {
		ib_cache_release_one(dev);
		ib_security_release_port_pkey_list(dev);
		rdma_counter_release(dev);
		kfree_rcu(container_of(dev->port_data, struct ib_port_data_rcu,
				       pdata[0]),
			  rcu_head);
	}

	mutex_destroy(&dev->unregistration_lock);
	mutex_destroy(&dev->compat_devs_mutex);

	xa_destroy(&dev->compat_devs);
	xa_destroy(&dev->client_data);
	kfree_rcu(dev, rcu_head);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "NAME=%s", dev_name(device)))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static const void *net_namespace(struct device *d)
{
	struct ib_core_device *coredev =
			container_of(d, struct ib_core_device, dev);

	return read_pnet(&coredev->rdma_net);
}

static struct class ib_class = {
	.name    = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent = ib_device_uevent,
	.ns_type = &net_ns_type_operations,
	.namespace = net_namespace,
};

static void rdma_init_coredev(struct ib_core_device *coredev,
			      struct ib_device *dev, struct net *net)
{
	/* This BUILD_BUG_ON is intended to catch layout change
	 * of union of ib_core_device and device.
	 * dev must be the first element as the ib_core and provider
	 * drivers use it. Adding anything in ib_core_device before
	 * device will break this assumption.
	 */
	BUILD_BUG_ON(offsetof(struct ib_device, coredev.dev) !=
		     offsetof(struct ib_device, dev));

	coredev->dev.class = &ib_class;
	coredev->dev.groups = dev->groups;
	device_initialize(&coredev->dev);
	coredev->owner = dev;
	INIT_LIST_HEAD(&coredev->port_list);
	write_pnet(&coredev->rdma_net, net);
}

/**
 * _ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device. @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *_ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	if (rdma_restrack_init(device)) {
		kfree(device);
		return NULL;
	}

	device->groups[0] = &ib_dev_attr_group;
	rdma_init_coredev(&device->coredev, device, &init_net);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->qp_open_list_lock);
	init_rwsem(&device->event_handler_rwsem);
	mutex_init(&device->unregistration_lock);
	/*
	 * client_data needs to be an allocating xarray because we don't want
	 * our mark to be destroyed if the user stores NULL in the client data.
	 */
	xa_init_flags(&device->client_data, XA_FLAGS_ALLOC);
	init_rwsem(&device->client_data_rwsem);
	xa_init_flags(&device->compat_devs, XA_FLAGS_ALLOC);
	mutex_init(&device->compat_devs_mutex);
	init_completion(&device->unreg_completion);
	INIT_WORK(&device->unregistration_work, ib_unregister_work);

	return device;
}
EXPORT_SYMBOL(_ib_alloc_device);
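
/*
 * Example (a minimal sketch, assuming the ib_alloc_device() wrapper macro
 * from <rdma/ib_verbs.h>): a driver embeds struct ib_device in its own
 * private structure and allocates both in one shot. "my_dev" and "ibdev" are
 * hypothetical names:
 *
 *	struct my_dev {
 *		struct ib_device ibdev;	// must allow container_of()
 *		int private_state;
 *	};
 *
 *	struct my_dev *dev = ib_alloc_device(my_dev, ibdev);
 *	if (!dev)
 *		return -ENOMEM;
 */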

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	if (device->ops.dealloc_driver)
		device->ops.dealloc_driver(device);

	/*
	 * ib_unregister_driver() requires all devices to remain in the xarray
	 * while their ops are callable. The last op we call is dealloc_driver
	 * above. This is needed to create a fence on op callbacks prior to
	 * allowing the driver module to unload.
	 */
	down_write(&devices_rwsem);
	if (xa_load(&devices, device->index) == device)
		xa_erase(&devices, device->index);
	up_write(&devices_rwsem);

	/* Expedite releasing netdev references */
	free_netdevs(device);

	WARN_ON(!xa_empty(&device->compat_devs));
	WARN_ON(!xa_empty(&device->client_data));
	WARN_ON(refcount_read(&device->refcount));
	rdma_restrack_clean(device);
	/* Balances with device_initialize */
	put_device(&device->dev);
}
EXPORT_SYMBOL(ib_dealloc_device);

/*
 * add_client_context() and remove_client_context() must be safe against
 * parallel calls on the same device - registration/unregistration of both the
 * device and client can be occurring in parallel.
 *
 * The routines need to be a fence, any caller must not return until the add
 * or remove is fully completed.
 */
static int add_client_context(struct ib_device *device,
			      struct ib_client *client)
{
	int ret = 0;

	if (!device->kverbs_provider && !client->no_kverbs_req)
		return 0;

	down_write(&device->client_data_rwsem);
	/*
	 * So long as the client is registered hold both the client and device
	 * unregistration locks.
	 */
	if (!refcount_inc_not_zero(&client->uses))
		goto out_unlock;
	refcount_inc(&device->refcount);

	/*
	 * Another caller to add_client_context got here first and has already
	 * completely initialized the context.
	 */
	if (xa_get_mark(&device->client_data, client->client_id,
			CLIENT_DATA_REGISTERED))
		goto out;

	ret = xa_err(xa_store(&device->client_data, client->client_id, NULL,
			      GFP_KERNEL));
	if (ret)
		goto out;
	downgrade_write(&device->client_data_rwsem);
	if (client->add) {
		if (client->add(device)) {
			/*
			 * If a client fails to add then the error code is
			 * ignored, but we won't call any more ops on this
			 * client.
			 */
			xa_erase(&device->client_data, client->client_id);
			up_read(&device->client_data_rwsem);
			ib_device_put(device);
			ib_client_put(client);
			return 0;
		}
	}

	/* Readers shall not see a client until add has been completed */
	xa_set_mark(&device->client_data, client->client_id,
		    CLIENT_DATA_REGISTERED);
	up_read(&device->client_data_rwsem);
	return 0;

out:
	ib_device_put(device);
	ib_client_put(client);
out_unlock:
	up_write(&device->client_data_rwsem);
	return ret;
}

static void remove_client_context(struct ib_device *device,
				  unsigned int client_id)
{
	struct ib_client *client;
	void *client_data;

	down_write(&device->client_data_rwsem);
	if (!xa_get_mark(&device->client_data, client_id,
			 CLIENT_DATA_REGISTERED)) {
		up_write(&device->client_data_rwsem);
		return;
	}
	client_data = xa_load(&device->client_data, client_id);
	xa_clear_mark(&device->client_data, client_id, CLIENT_DATA_REGISTERED);
	client = xa_load(&clients, client_id);
	up_write(&device->client_data_rwsem);

	/*
	 * Notice we cannot be holding any exclusive locks when calling the
	 * remove callback as the remove callback can recurse back into any
	 * public functions in this module and thus try for any locks those
	 * functions take.
	 *
	 * For this reason clients and drivers should not call the
	 * unregistration functions while holding any locks.
	 */
	if (client->remove)
		client->remove(device, client_data);

	xa_erase(&device->client_data, client_id);
	ib_device_put(device);
	ib_client_put(client);
}

static int alloc_port_data(struct ib_device *device)
{
	struct ib_port_data_rcu *pdata_rcu;
	unsigned int port;

	if (device->port_data)
		return 0;

	/* This can only be called once the physical port range is defined */
	if (WARN_ON(!device->phys_port_cnt))
		return -EINVAL;

	/*
	 * device->port_data is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_data is declared as a 1 based array with potential
	 * empty slots at the beginning.
	 */
	pdata_rcu = kzalloc(struct_size(pdata_rcu, pdata,
					rdma_end_port(device) + 1),
			    GFP_KERNEL);
	if (!pdata_rcu)
		return -ENOMEM;
	/*
	 * The rcu_head is put in front of the port data array and the stored
	 * pointer is adjusted since we never need to see that member until
	 * kfree_rcu.
	 */
	device->port_data = pdata_rcu->pdata;

	rdma_for_each_port (device, port) {
		struct ib_port_data *pdata = &device->port_data[port];

		pdata->ib_dev = device;
		spin_lock_init(&pdata->pkey_list_lock);
		INIT_LIST_HEAD(&pdata->pkey_list);
		spin_lock_init(&pdata->netdev_lock);
		INIT_HLIST_NODE(&pdata->ndev_hash_link);
	}
	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
			rdma_max_mad_size(dev, port) != 0);
}

static int setup_port_data(struct ib_device *device)
{
	unsigned int port;
	int ret;

	ret = alloc_port_data(device);
	if (ret)
		return ret;

	rdma_for_each_port (device, port) {
		struct ib_port_data *pdata = &device->port_data[port];

		ret = device->ops.get_port_immutable(device, port,
						     &pdata->immutable);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}

void ib_get_device_fw_str(struct ib_device *dev, char *str)
{
	if (dev->ops.get_dev_fw_str)
		dev->ops.get_dev_fw_str(dev, str);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

static void ib_policy_change_task(struct work_struct *work)
{
	struct ib_device *dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		unsigned int i;

		rdma_for_each_port (dev, i) {
			u64 sp;
			int ret = ib_get_cached_subnet_prefix(dev,
							      i,
							      &sp);

			WARN_ONCE(ret,
				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
				  ret);
			if (!ret)
				ib_security_cache_change(dev, i, sp);
		}
	}
	up_read(&devices_rwsem);
}

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	schedule_work(&ib_policy_change_work);
	ib_mad_agent_security_change();

	return NOTIFY_OK;
}

static void compatdev_release(struct device *dev)
{
	struct ib_core_device *cdev =
		container_of(dev, struct ib_core_device, dev);

	kfree(cdev);
}

static int add_one_compat_dev(struct ib_device *device,
			      struct rdma_dev_net *rnet)
{
	struct ib_core_device *cdev;
	int ret;

	lockdep_assert_held(&rdma_nets_rwsem);
	if (!ib_devices_shared_netns)
		return 0;

	/*
	 * Create and add compat device in all namespaces other than where it
	 * is currently bound to.
	 */
	if (net_eq(read_pnet(&rnet->net),
		   read_pnet(&device->coredev.rdma_net)))
		return 0;

	/*
	 * The first of init_net() or ib_register_device() to take the
	 * compat_devs_mutex wins and gets to add the device. Others will wait
	 * for completion here.
	 */
	mutex_lock(&device->compat_devs_mutex);
	cdev = xa_load(&device->compat_devs, rnet->id);
	if (cdev) {
		ret = 0;
		goto done;
	}
	ret = xa_reserve(&device->compat_devs, rnet->id, GFP_KERNEL);
	if (ret)
		goto done;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev) {
		ret = -ENOMEM;
		goto cdev_err;
	}

	cdev->dev.parent = device->dev.parent;
	rdma_init_coredev(cdev, device, read_pnet(&rnet->net));
	cdev->dev.release = compatdev_release;
	ret = dev_set_name(&cdev->dev, "%s", dev_name(&device->dev));
	if (ret)
		goto add_err;

	ret = device_add(&cdev->dev);
	if (ret)
		goto add_err;
	ret = ib_setup_port_attrs(cdev);
	if (ret)
		goto port_err;

	ret = xa_err(xa_store(&device->compat_devs, rnet->id,
			      cdev, GFP_KERNEL));
	if (ret)
		goto insert_err;

	mutex_unlock(&device->compat_devs_mutex);
	return 0;

insert_err:
	ib_free_port_attrs(cdev);
port_err:
	device_del(&cdev->dev);
add_err:
	put_device(&cdev->dev);
cdev_err:
	xa_release(&device->compat_devs, rnet->id);
done:
	mutex_unlock(&device->compat_devs_mutex);
	return ret;
}

static void remove_one_compat_dev(struct ib_device *device, u32 id)
{
	struct ib_core_device *cdev;

	mutex_lock(&device->compat_devs_mutex);
	cdev = xa_erase(&device->compat_devs, id);
	mutex_unlock(&device->compat_devs_mutex);
	if (cdev) {
		ib_free_port_attrs(cdev);
		device_del(&cdev->dev);
		put_device(&cdev->dev);
	}
}

static void remove_compat_devs(struct ib_device *device)
{
	struct ib_core_device *cdev;
	unsigned long index;

	xa_for_each (&device->compat_devs, index, cdev)
		remove_one_compat_dev(device, index);
}

static int add_compat_devs(struct ib_device *device)
{
	struct rdma_dev_net *rnet;
	unsigned long index;
	int ret = 0;

	lockdep_assert_held(&devices_rwsem);

	down_read(&rdma_nets_rwsem);
	xa_for_each (&rdma_nets, index, rnet) {
		ret = add_one_compat_dev(device, rnet);
		if (ret)
			break;
	}
	up_read(&rdma_nets_rwsem);
	return ret;
}

static void remove_all_compat_devs(void)
{
	struct ib_compat_device *cdev;
	struct ib_device *dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each (&devices, index, dev) {
		unsigned long c_index = 0;

		/* Hold nets_rwsem so that any other thread modifying this
		 * system param can sync with this thread.
		 */
		down_read(&rdma_nets_rwsem);
		xa_for_each (&dev->compat_devs, c_index, cdev)
			remove_one_compat_dev(dev, c_index);
		up_read(&rdma_nets_rwsem);
	}
	up_read(&devices_rwsem);
}

static int add_all_compat_devs(void)
{
	struct rdma_dev_net *rnet;
	struct ib_device *dev;
	unsigned long index;
	int ret = 0;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		unsigned long net_index = 0;

		/* Hold nets_rwsem so that any other thread modifying this
		 * system param can sync with this thread.
		 */
		down_read(&rdma_nets_rwsem);
		xa_for_each (&rdma_nets, net_index, rnet) {
			ret = add_one_compat_dev(dev, rnet);
			if (ret)
				break;
		}
		up_read(&rdma_nets_rwsem);
	}
	up_read(&devices_rwsem);
	if (ret)
		remove_all_compat_devs();
	return ret;
}

int rdma_compatdev_set(u8 enable)
{
	struct rdma_dev_net *rnet;
	unsigned long index;
	int ret = 0;

	down_write(&rdma_nets_rwsem);
	if (ib_devices_shared_netns == enable) {
		up_write(&rdma_nets_rwsem);
		return 0;
	}

	/* enable/disable of compat devices is not supported
	 * when more than default init_net exists.
	 */
	xa_for_each (&rdma_nets, index, rnet) {
		ret++;
		break;
	}
	if (!ret)
		ib_devices_shared_netns = enable;
	up_write(&rdma_nets_rwsem);
	if (ret)
		return -EBUSY;

	if (enable)
		ret = add_all_compat_devs();
	else
		remove_all_compat_devs();
	return ret;
}

static void rdma_dev_exit_net(struct net *net)
{
	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
	struct ib_device *dev;
	unsigned long index;
	int ret;

	down_write(&rdma_nets_rwsem);
	/*
	 * Prevent the ID from being re-used and hide the ID from xa_for_each.
	 */
	ret = xa_err(xa_store(&rdma_nets, rnet->id, NULL, GFP_KERNEL));
	WARN_ON(ret);
	up_write(&rdma_nets_rwsem);

	down_read(&devices_rwsem);
	xa_for_each (&devices, index, dev) {
		get_device(&dev->dev);
		/*
		 * Release the devices_rwsem so that the potentially blocking
		 * device_del doesn't hold the devices_rwsem for too long.
		 */
		up_read(&devices_rwsem);

		remove_one_compat_dev(dev, rnet->id);

		/*
		 * If the real device is in the NS then move it back to init.
		 */
		rdma_dev_change_netns(dev, net, &init_net);

		put_device(&dev->dev);
		down_read(&devices_rwsem);
	}
	up_read(&devices_rwsem);

	rdma_nl_net_exit(rnet);
	xa_erase(&rdma_nets, rnet->id);
}

static __net_init int rdma_dev_init_net(struct net *net)
{
	struct rdma_dev_net *rnet = rdma_net_to_dev_net(net);
	unsigned long index;
	struct ib_device *dev;
	int ret;

	write_pnet(&rnet->net, net);

	ret = rdma_nl_net_init(rnet);
	if (ret)
		return ret;

	/* No need to create any compat devices in default init_net. */
	if (net_eq(net, &init_net))
		return 0;

	ret = xa_alloc(&rdma_nets, &rnet->id, rnet, xa_limit_32b, GFP_KERNEL);
	if (ret) {
		rdma_nl_net_exit(rnet);
		return ret;
	}

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
		/* Hold nets_rwsem so that netlink command cannot change
		 * system configuration for device sharing mode.
		 */
		down_read(&rdma_nets_rwsem);
		ret = add_one_compat_dev(dev, rnet);
		up_read(&rdma_nets_rwsem);
		if (ret)
			break;
	}
	up_read(&devices_rwsem);

	if (ret)
		rdma_dev_exit_net(net);

	return ret;
}

/*
 * Assign the unique string device name and the unique device index. This is
 * undone by ib_dealloc_device.
 */
static int assign_name(struct ib_device *device, const char *name)
{
	static u32 last_id;
	int ret;

	down_write(&devices_rwsem);
	/* Assign a unique name to the device */
	if (strchr(name, '%'))
		ret = alloc_name(device, name);
	else
		ret = dev_set_name(&device->dev, name);
	if (ret)
		goto out;

	if (__ib_device_get_by_name(dev_name(&device->dev))) {
		ret = -ENFILE;
		goto out;
	}
	strlcpy(device->name, dev_name(&device->dev), IB_DEVICE_NAME_MAX);

	ret = xa_alloc_cyclic(&devices, &device->index, device, xa_limit_31b,
			      &last_id, GFP_KERNEL);
	if (ret > 0)
		ret = 0;

out:
	up_write(&devices_rwsem);
	return ret;
}

static void setup_dma_device(struct ib_device *device)
{
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(device->dma_device);
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask) {
			if (parent)
				device->dev.dma_mask = parent->dma_mask;
			else
				WARN_ON_ONCE(true);
		}
		if (!device->dev.coherent_dma_mask) {
			if (parent)
				device->dev.coherent_dma_mask =
					parent->coherent_dma_mask;
			else
				WARN_ON_ONCE(true);
		}
	} else {
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		WARN_ON_ONCE(!parent);
		device->dma_device = parent;
	}

	if (!device->dev.dma_parms) {
		if (parent) {
			/*
			 * The caller did not provide DMA parameters, so
			 * 'parent' probably represents a PCI device. The PCI
			 * core sets the maximum segment size to 64
			 * KB. Increase this parameter to 2 GB.
			 */
			device->dev.dma_parms = parent->dma_parms;
			dma_set_max_seg_size(device->dma_device, SZ_2G);
		} else {
			WARN_ON_ONCE(true);
		}
	}
}

/*
 * setup_device() allocates memory and sets up data that requires calling the
 * device ops, this is the only reason these actions are not done during
 * ib_alloc_device. It is undone by ib_dealloc_device().
 */
static int setup_device(struct ib_device *device)
{
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	int ret;

	setup_dma_device(device);
	ib_device_check_mandatory(device);

	ret = setup_port_data(device);
	if (ret) {
		dev_warn(&device->dev, "Couldn't create per-port data\n");
		return ret;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->ops.query_device(device, &device->attrs, &uhw);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't query the device attributes\n");
		return ret;
	}

	return 0;
}

static void disable_device(struct ib_device *device)
{
	u32 cid;

	WARN_ON(!refcount_read(&device->refcount));

	down_write(&devices_rwsem);
	xa_clear_mark(&devices, device->index, DEVICE_REGISTERED);
	up_write(&devices_rwsem);

	/*
	 * Remove clients in LIFO order, see assign_client_id. This could be
	 * more efficient if xarray learns to reverse iterate. Since no new
	 * clients can be added to this ib_device past this point we only need
	 * the maximum possible client_id value here.
	 */
	down_read(&clients_rwsem);
	cid = highest_client_id;
	up_read(&clients_rwsem);
	while (cid) {
		cid--;
		remove_client_context(device, cid);
	}

	/* Pairs with refcount_set in enable_device */
	ib_device_put(device);
	wait_for_completion(&device->unreg_completion);

	/*
	 * compat devices must be removed after device refcount drops to zero.
	 * Otherwise init_net() may add more compatdevs after removing compat
	 * devices and before device is disabled.
	 */
	remove_compat_devs(device);
}

/*
 * An enabled device is visible to all clients and to all the public facing
 * APIs that return a device pointer. This always returns with a new get, even
 * if it fails.
 */
static int enable_device_and_get(struct ib_device *device)
{
	struct ib_client *client;
	unsigned long index;
	int ret = 0;

	/*
	 * One ref belongs to the xa and the other belongs to this
	 * thread. This is needed to guard against parallel unregistration.
	 */
	refcount_set(&device->refcount, 2);
	down_write(&devices_rwsem);
	xa_set_mark(&devices, device->index, DEVICE_REGISTERED);

	/*
	 * By using downgrade_write() we ensure that no other thread can clear
	 * DEVICE_REGISTERED while we are completing the client setup.
	 */
	downgrade_write(&devices_rwsem);

	if (device->ops.enable_driver) {
		ret = device->ops.enable_driver(device);
		if (ret)
			goto out;
	}

	down_read(&clients_rwsem);
	xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
		ret = add_client_context(device, client);
		if (ret)
			break;
	}
	up_read(&clients_rwsem);
	if (!ret)
		ret = add_compat_devs(device);
out:
	up_read(&devices_rwsem);
	return ret;
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device: Device to register
 * @name: unique string device name. This may include a '%' which will
 * cause a unique index to be added to the passed device name.
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 *
 * If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
 * asynchronously then the device pointer may become freed as soon as this
 * function returns.
 */
int ib_register_device(struct ib_device *device, const char *name)
{
	int ret;

	ret = assign_name(device, name);
	if (ret)
		return ret;

	ret = setup_device(device);
	if (ret)
		return ret;

	ret = ib_cache_setup_one(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't set up InfiniBand P_Key/GID cache\n");
		return ret;
	}

	ib_device_register_rdmacg(device);

	rdma_counter_init(device);

	/*
	 * Ensure that ADD uevent is not fired because it
	 * is too early and the device is not initialized yet.
	 */
	dev_set_uevent_suppress(&device->dev, true);
	ret = device_add(&device->dev);
	if (ret)
		goto cg_cleanup;

	ret = ib_device_register_sysfs(device);
	if (ret) {
		dev_warn(&device->dev,
			 "Couldn't register device with driver model\n");
		goto dev_cleanup;
	}

	ib_cq_pool_init(device);
	ret = enable_device_and_get(device);
	dev_set_uevent_suppress(&device->dev, false);
	/* Mark for userspace that device is ready */
	kobject_uevent(&device->dev.kobj, KOBJ_ADD);
	if (ret) {
		void (*dealloc_fn)(struct ib_device *);

		/*
		 * If we hit this error flow then we don't want to
		 * automatically dealloc the device since the caller is
		 * expected to call ib_dealloc_device() after
		 * ib_register_device() fails. This is tricky due to the
		 * possibility for a parallel unregistration along with this
		 * error flow. Since we have a refcount here we know any
		 * parallel flow is stopped in disable_device and will see the
		 * NULL pointers, causing the responsibility to
		 * ib_dealloc_device() to revert back to this thread.
		 */
		dealloc_fn = device->ops.dealloc_driver;
		device->ops.dealloc_driver = NULL;
		ib_device_put(device);
		__ib_unregister_device(device);
		device->ops.dealloc_driver = dealloc_fn;
		return ret;
	}
	ib_device_put(device);

	return 0;

dev_cleanup:
	device_del(&device->dev);
cg_cleanup:
	dev_set_uevent_suppress(&device->dev, false);
	ib_device_unregister_rdmacg(device);
	ib_cache_cleanup_one(device);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);
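
/*
 * Example (a minimal sketch of the registration flow described above;
 * "my_dev", "my_ops" and "my%d" are hypothetical):
 *
 *	static const struct ib_device_ops my_ops = {
 *		.query_device = my_query_device,
 *		// ... the mandatory ops checked by ib_device_check_mandatory()
 *	};
 *
 *	ib_set_device_ops(&dev->ibdev, &my_ops);
 *	ret = ib_register_device(&dev->ibdev, "my%d");
 *	if (ret)
 *		ib_dealloc_device(&dev->ibdev);	// caller owns unwind on failure
 */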

/* Callers must hold a get on the device. */
static void __ib_unregister_device(struct ib_device *ib_dev)
{
	/*
	 * We have a registration lock so that all the calls to unregister are
	 * fully fenced, once any unregister returns the device is truly
	 * unregistered even if multiple callers are unregistering it at the
	 * same time. This also interacts with the registration flow and
	 * provides sane semantics if register and unregister are racing.
	 */
	mutex_lock(&ib_dev->unregistration_lock);
	if (!refcount_read(&ib_dev->refcount))
		goto out;

	disable_device(ib_dev);
	ib_cq_pool_destroy(ib_dev);

	/* Expedite removing unregistered pointers from the hash table */
	free_netdevs(ib_dev);

	ib_device_unregister_sysfs(ib_dev);
	device_del(&ib_dev->dev);
	ib_device_unregister_rdmacg(ib_dev);
	ib_cache_cleanup_one(ib_dev);

	/*
	 * Drivers using the new flow may not call ib_dealloc_device except
	 * in error unwind prior to registration success.
	 */
	if (ib_dev->ops.dealloc_driver) {
		WARN_ON(kref_read(&ib_dev->dev.kobj.kref) <= 1);
		ib_dealloc_device(ib_dev);
	}
out:
	mutex_unlock(&ib_dev->unregistration_lock);
}

/**
 * ib_unregister_device - Unregister an IB device
 * @ib_dev: The device to unregister
 *
 * Unregister an IB device. All clients will receive a remove callback.
 *
 * Callers should call this routine only once, and protect against races with
 * registration. Typically it should only be called as part of a remove
 * callback in an implementation of driver core's struct device_driver and
 * related.
 *
 * If ops.dealloc_driver is used then ib_dev will be freed upon return from
 * this function.
 */
void ib_unregister_device(struct ib_device *ib_dev)
{
	get_device(&ib_dev->dev);
	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_unregister_device_and_put - Unregister a device while holding a 'get'
 * @ib_dev: The device to unregister
 *
 * This is the same as ib_unregister_device(), except it includes an internal
 * ib_device_put() that should match a 'get' obtained by the caller.
 *
 * It is safe to call this routine concurrently from multiple threads while
 * holding the 'get'. When the function returns the device is fully
 * unregistered.
 *
 * Drivers using this flow MUST use the driver_unregister callback to clean up
 * their resources associated with the device and dealloc it.
 */
void ib_unregister_device_and_put(struct ib_device *ib_dev)
{
	WARN_ON(!ib_dev->ops.dealloc_driver);
	get_device(&ib_dev->dev);
	ib_device_put(ib_dev);
	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device_and_put);

/**
 * ib_unregister_driver - Unregister all IB devices for a driver
 * @driver_id: The driver to unregister
 *
 * This implements a fence for device unregistration. It only returns once all
 * devices associated with the driver_id have fully completed their
 * unregistration and returned from ib_unregister_device*().
 *
 * If devices are not yet unregistered it goes ahead and starts unregistering
 * them.
 *
 * This does not block creation of new devices with the given driver_id, that
 * is the responsibility of the caller.
 */
void ib_unregister_driver(enum rdma_driver_id driver_id)
{
	struct ib_device *ib_dev;
	unsigned long index;

	down_read(&devices_rwsem);
	xa_for_each (&devices, index, ib_dev) {
		if (ib_dev->ops.driver_id != driver_id)
			continue;

		get_device(&ib_dev->dev);
		up_read(&devices_rwsem);

		WARN_ON(!ib_dev->ops.dealloc_driver);
		__ib_unregister_device(ib_dev);

		put_device(&ib_dev->dev);
		down_read(&devices_rwsem);
	}
	up_read(&devices_rwsem);
}
EXPORT_SYMBOL(ib_unregister_driver);

static void ib_unregister_work(struct work_struct *work)
{
	struct ib_device *ib_dev =
		container_of(work, struct ib_device, unregistration_work);

	__ib_unregister_device(ib_dev);
	put_device(&ib_dev->dev);
}

/**
 * ib_unregister_device_queued - Unregister a device using a work queue
 * @ib_dev: The device to unregister
 *
 * This schedules an asynchronous unregistration using a WQ for the device. A
 * driver should use this to avoid holding locks while doing unregistration,
 * such as holding the RTNL lock.
 *
 * Drivers using this API must use ib_unregister_driver before module unload
 * to ensure that all scheduled unregistrations have completed.
 */
void ib_unregister_device_queued(struct ib_device *ib_dev)
{
	WARN_ON(!refcount_read(&ib_dev->refcount));
	WARN_ON(!ib_dev->ops.dealloc_driver);
	get_device(&ib_dev->dev);
	if (!queue_work(system_unbound_wq, &ib_dev->unregistration_work))
		put_device(&ib_dev->dev);
}
EXPORT_SYMBOL(ib_unregister_device_queued);
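
/*
 * Example (sketch): a driver that must not block in its removal path queues
 * the unregistration and fences it at module exit. my_driver_id is a
 * hypothetical enum rdma_driver_id value:
 *
 *	ib_unregister_device_queued(&dev->ibdev);	// e.g. from a notifier
 *	...
 *	// in module_exit(), fence all queued unregistrations:
 *	ib_unregister_driver(my_driver_id);
 */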

/*
 * The caller must pass in a device that has the kref held and the refcount
 * released. If the device is in cur_net and still registered then it is moved
 * into net.
 */
static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
				 struct net *net)
{
	int ret2 = -EINVAL;
	int ret;

	mutex_lock(&device->unregistration_lock);

	/*
	 * Unless the device is held via ib_device_get() or the
	 * unregistration_lock is held, the namespace can change or the device
	 * can be unregistered. Check again under the lock.
	 */
	if (refcount_read(&device->refcount) == 0 ||
	    !net_eq(cur_net, read_pnet(&device->coredev.rdma_net))) {
		ret = -ENODEV;
		goto out;
	}

	kobject_uevent(&device->dev.kobj, KOBJ_REMOVE);
	disable_device(device);

	/*
	 * At this point no one can be using the device, so it is safe to
	 * change the namespace.
	 */
	write_pnet(&device->coredev.rdma_net, net);

	down_read(&devices_rwsem);
	/*
	 * Currently rdma devices are system wide unique. So the device name
	 * is guaranteed free in the new namespace. Publish the new namespace
	 * at the sysfs level.
	 */
	ret = device_rename(&device->dev, dev_name(&device->dev));
	up_read(&devices_rwsem);
	if (ret) {
		dev_warn(&device->dev,
			 "%s: Couldn't rename device after namespace change\n",
			 __func__);
		/* Try and put things back and re-enable the device */
		write_pnet(&device->coredev.rdma_net, cur_net);
	}

	ret2 = enable_device_and_get(device);
	if (ret2) {
		/*
		 * This shouldn't really happen, but if it does, let the user
		 * retry at later point. So don't disable the device.
		 */
		dev_warn(&device->dev,
			 "%s: Couldn't re-enable device after namespace change\n",
			 __func__);
	}
	kobject_uevent(&device->dev.kobj, KOBJ_ADD);

	ib_device_put(device);
out:
	mutex_unlock(&device->unregistration_lock);
	if (ret)
		return ret;
	return ret2;
}

int ib_device_set_netns_put(struct sk_buff *skb,
			    struct ib_device *dev, u32 ns_fd)
{
	struct net *net;
	int ret;

	net = get_net_ns_by_fd(ns_fd);
	if (IS_ERR(net)) {
		ret = PTR_ERR(net);
		goto net_err;
	}

	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
		ret = -EPERM;
		goto ns_err;
	}

	/*
	 * Currently supported only for those providers which support
	 * disassociation and don't do port specific sysfs init. Once a
	 * port_cleanup infrastructure is implemented, this limitation will be
	 * removed.
	 */
	if (!dev->ops.disassociate_ucontext || dev->ops.init_port ||
	    ib_devices_shared_netns) {
		ret = -EOPNOTSUPP;
		goto ns_err;
	}

	get_device(&dev->dev);
	ib_device_put(dev);
	ret = rdma_dev_change_netns(dev, current->nsproxy->net_ns, net);
	put_device(&dev->dev);

	put_net(net);
	return ret;

ns_err:
	put_net(net);
net_err:
	ib_device_put(dev);
	return ret;
}

static struct pernet_operations rdma_dev_net_ops = {
	.init = rdma_dev_init_net,
	.exit = rdma_dev_exit_net,
	.id = &rdma_dev_net_id,
	.size = sizeof(struct rdma_dev_net),
};

static int assign_client_id(struct ib_client *client)
{
	int ret;

	down_write(&clients_rwsem);
	/*
	 * The add/remove callbacks must be called in FIFO/LIFO order. To
	 * achieve this we assign client_ids so they are sorted in
	 * registration order.
	 */
	client->client_id = highest_client_id;
	ret = xa_insert(&clients, client->client_id, client, GFP_KERNEL);
	if (ret)
		goto out;

	highest_client_id++;
	xa_set_mark(&clients, client->client_id, CLIENT_REGISTERED);

out:
	up_write(&clients_rwsem);
	return ret;
}

static void remove_client_id(struct ib_client *client)
{
	down_write(&clients_rwsem);
	xa_erase(&clients, client->client_id);
	for (; highest_client_id; highest_client_id--)
		if (xa_load(&clients, highest_client_id - 1))
			break;
	up_write(&clients_rwsem);
}

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal. When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered). In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;
	unsigned long index;
	int ret;

	refcount_set(&client->uses, 1);
	init_completion(&client->uses_zero);
	ret = assign_client_id(client);
	if (ret)
		return ret;

	down_read(&devices_rwsem);
	xa_for_each_marked (&devices, index, device, DEVICE_REGISTERED) {
		ret = add_client_context(device, client);
		if (ret) {
			up_read(&devices_rwsem);
			ib_unregister_client(client);
			return ret;
		}
	}
	up_read(&devices_rwsem);
	return 0;
}
EXPORT_SYMBOL(ib_register_client);
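
/*
 * Example (a minimal sketch of a client; all names are hypothetical):
 *
 *	static struct ib_client my_client;
 *
 *	static int my_add(struct ib_device *device)
 *	{
 *		struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
 *
 *		if (!ctx)
 *			return -ENOMEM;
 *		ib_set_client_data(device, &my_client, ctx);
 *		return 0;
 *	}
 *
 *	static void my_remove(struct ib_device *device, void *client_data)
 *	{
 *		kfree(client_data);
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name = "my_client",
 *		.add = my_add,
 *		.remove = my_remove,
 *	};
 *
 *	ret = ib_register_client(&my_client);
 */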

/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration. When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 *
 * This is a full fence, once it returns no client callbacks will be called,
 * or are running in another thread.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_device *device;
	unsigned long index;

	down_write(&clients_rwsem);
	ib_client_put(client);
	xa_clear_mark(&clients, client->client_id, CLIENT_REGISTERED);
	up_write(&clients_rwsem);

	/* We do not want to have locks while calling client->remove() */
	rcu_read_lock();
	xa_for_each (&devices, index, device) {
		if (!ib_device_try_get(device))
			continue;
		rcu_read_unlock();

		remove_client_context(device, client->client_id);

		ib_device_put(device);
		rcu_read_lock();
	}
	rcu_read_unlock();

	/*
	 * remove_client_context() is not a fence, it can return even though a
	 * removal is ongoing. Wait until all removals are completed.
	 */
	wait_for_completion(&client->uses_zero);
	remove_client_id(client);
}
EXPORT_SYMBOL(ib_unregister_client);

static int __ib_get_global_client_nl_info(const char *client_name,
					  struct ib_client_nl_info *res)
{
	struct ib_client *client;
	unsigned long index;
	int ret = -ENOENT;

	down_read(&clients_rwsem);
	xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) {
		if (strcmp(client->name, client_name) != 0)
			continue;
		if (!client->get_global_nl_info) {
			ret = -EOPNOTSUPP;
			break;
		}
		ret = client->get_global_nl_info(res);
		if (WARN_ON(ret == -ENOENT))
			ret = -EINVAL;
		if (!ret && res->cdev)
			get_device(res->cdev);
		break;
	}
	up_read(&clients_rwsem);
	return ret;
}

static int __ib_get_client_nl_info(struct ib_device *ibdev,
				   const char *client_name,
				   struct ib_client_nl_info *res)
{
	unsigned long index;
	void *client_data;
	int ret = -ENOENT;

	down_read(&ibdev->client_data_rwsem);
	xan_for_each_marked (&ibdev->client_data, index, client_data,
			     CLIENT_DATA_REGISTERED) {
		struct ib_client *client = xa_load(&clients, index);

		if (!client || strcmp(client->name, client_name) != 0)
			continue;
		if (!client->get_nl_info) {
			ret = -EOPNOTSUPP;
			break;
		}
		ret = client->get_nl_info(ibdev, client_data, res);
		if (WARN_ON(ret == -ENOENT))
			ret = -EINVAL;

		/*
		 * The cdev is guaranteed valid as long as we are inside the
		 * client_data_rwsem as remove_one can't be called. Keep it
		 * valid for the caller.
		 */
		if (!ret && res->cdev)
			get_device(res->cdev);
		break;
	}
	up_read(&ibdev->client_data_rwsem);

	return ret;
}

/**
 * ib_get_client_nl_info - Fetch the nl_info from a client
 * @ibdev: IB device
 * @client_name: Name of the client
 * @res: Result of the query
 */
int ib_get_client_nl_info(struct ib_device *ibdev, const char *client_name,
			  struct ib_client_nl_info *res)
{
	int ret;

	if (ibdev)
		ret = __ib_get_client_nl_info(ibdev, client_name, res);
	else
		ret = __ib_get_global_client_nl_info(client_name, res);
#ifdef CONFIG_MODULES
	if (ret == -ENOENT) {
		request_module("rdma-client-%s", client_name);
		if (ibdev)
			ret = __ib_get_client_nl_info(ibdev, client_name, res);
		else
			ret = __ib_get_global_client_nl_info(client_name, res);
	}
#endif
	if (ret) {
		if (ret == -ENOENT)
			return -EOPNOTSUPP;
		return ret;
	}

	if (WARN_ON(!res->cdev))
		return -EINVAL;
	return 0;
}

/**
 * ib_set_client_data - Set IB client context
 * @device: Device to set context for
 * @client: Client to set context for
 * @data: Context to set
 *
 * ib_set_client_data() sets client context data that can be retrieved with
 * ib_get_client_data(). This can only be called while the client is
 * registered to the device; once the ib_client remove() callback returns it
 * cannot be called.
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        void *data)
{
        void *rc;

        if (WARN_ON(IS_ERR(data)))
                data = NULL;

        rc = xa_store(&device->client_data, client->client_id, data,
                      GFP_KERNEL);
        WARN_ON(xa_is_err(rc));
}
EXPORT_SYMBOL(ib_set_client_data);
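
/*
 * Illustrative sketch, not part of the original file: retrieving the
 * context stored above. ib_get_client_data() is the standard accessor
 * declared in <rdma/ib_verbs.h>; example_use_client_data is hypothetical.
 */
static void example_use_client_data(struct ib_device *device,
                                    struct ib_client *client)
{
        void *data = ib_get_client_data(device, client);

        if (data) {
                /* ... operate on the per-device context ... */
        }
}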

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler: Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification). This
 * callback occurs in workqueue context.
 */
void ib_register_event_handler(struct ib_event_handler *event_handler)
{
        down_write(&event_handler->device->event_handler_rwsem);
        list_add_tail(&event_handler->list,
                      &event_handler->device->event_handler_list);
        up_write(&event_handler->device->event_handler_rwsem);
}
EXPORT_SYMBOL(ib_register_event_handler);

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler: Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
void ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
        down_write(&event_handler->device->event_handler_rwsem);
        list_del(&event_handler->list);
        up_write(&event_handler->device->event_handler_rwsem);
}
EXPORT_SYMBOL(ib_unregister_event_handler);
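
/*
 * Illustrative sketch, not part of the original file: a handler for the
 * asynchronous events dispatched below. example_event_handler is
 * hypothetical; INIT_IB_EVENT_HANDLER() comes from <rdma/ib_verbs.h>.
 */
static void example_event_handler(struct ib_event_handler *handler,
                                  struct ib_event *event)
{
        if (event->event == IB_EVENT_PORT_ACTIVE)
                pr_info("%s: port %u became active\n",
                        dev_name(&event->device->dev),
                        event->element.port_num);
}

/* In a consumer, assuming a per-device private struct holding "eh":
 *      INIT_IB_EVENT_HANDLER(&priv->eh, ibdev, example_event_handler);
 *      ib_register_event_handler(&priv->eh);
 */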

void ib_dispatch_event_clients(struct ib_event *event)
{
        struct ib_event_handler *handler;

        down_read(&event->device->event_handler_rwsem);

        list_for_each_entry(handler, &event->device->event_handler_list, list)
                handler->handler(handler, event);

        up_read(&event->device->event_handler_rwsem);
}

static int iw_query_port(struct ib_device *device,
                         u8 port_num,
                         struct ib_port_attr *port_attr)
{
        struct in_device *inetdev;
        struct net_device *netdev;

        memset(port_attr, 0, sizeof(*port_attr));

        netdev = ib_device_get_netdev(device, port_num);
        if (!netdev)
                return -ENODEV;

        port_attr->max_mtu = IB_MTU_4096;
        port_attr->active_mtu = ib_mtu_int_to_enum(netdev->mtu);

        if (!netif_carrier_ok(netdev)) {
                port_attr->state = IB_PORT_DOWN;
                port_attr->phys_state = IB_PORT_PHYS_STATE_DISABLED;
        } else {
                rcu_read_lock();
                inetdev = __in_dev_get_rcu(netdev);

                if (inetdev && inetdev->ifa_list) {
                        port_attr->state = IB_PORT_ACTIVE;
                        port_attr->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
                } else {
                        port_attr->state = IB_PORT_INIT;
                        port_attr->phys_state =
                                IB_PORT_PHYS_STATE_PORT_CONFIGURATION_TRAINING;
                }

                rcu_read_unlock();
        }

        dev_put(netdev);
        return device->ops.query_port(device, port_num, port_attr);
}

static int __ib_query_port(struct ib_device *device,
                           u8 port_num,
                           struct ib_port_attr *port_attr)
{
        union ib_gid gid = {};
        int err;

        memset(port_attr, 0, sizeof(*port_attr));

        err = device->ops.query_port(device, port_num, port_attr);
        if (err || port_attr->subnet_prefix)
                return err;

        if (rdma_port_get_link_layer(device, port_num) !=
            IB_LINK_LAYER_INFINIBAND)
                return 0;

        err = device->ops.query_gid(device, port_num, 0, &gid);
        if (err)
                return err;

        port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
        return 0;
}

/**
 * ib_query_port - Query IB port attributes
 * @device: Device to query
 * @port_num: Port number to query
 * @port_attr: Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
                  u8 port_num,
                  struct ib_port_attr *port_attr)
{
        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        if (rdma_protocol_iwarp(device, port_num))
                return iw_query_port(device, port_num, port_attr);
        else
                return __ib_query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);
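
/*
 * Illustrative sketch, not part of the original file: a typical caller,
 * exercising the iWarp/IB split above. example_report_port is
 * hypothetical.
 */
static int example_report_port(struct ib_device *device, u8 port_num)
{
        struct ib_port_attr attr;
        int ret;

        ret = ib_query_port(device, port_num, &attr);
        if (ret)
                return ret;

        pr_info("%s port %u: state %d active_mtu %d\n",
                dev_name(&device->dev), port_num, attr.state,
                attr.active_mtu);
        return 0;
}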

static void add_ndev_hash(struct ib_port_data *pdata)
{
        unsigned long flags;

        might_sleep();

        spin_lock_irqsave(&ndev_hash_lock, flags);
        if (hash_hashed(&pdata->ndev_hash_link)) {
                hash_del_rcu(&pdata->ndev_hash_link);
                spin_unlock_irqrestore(&ndev_hash_lock, flags);
                /*
                 * We cannot do hash_add_rcu after a hash_del_rcu until a
                 * grace period has elapsed.
                 */
                synchronize_rcu();
                spin_lock_irqsave(&ndev_hash_lock, flags);
        }
        if (pdata->netdev)
                hash_add_rcu(ndev_hash, &pdata->ndev_hash_link,
                             (uintptr_t)pdata->netdev);
        spin_unlock_irqrestore(&ndev_hash_lock, flags);
}

/**
 * ib_device_set_netdev - Associate the ib_dev with an underlying net_device
 * @ib_dev: Device to modify
 * @ndev: net_device to affiliate, may be NULL
 * @port: IB port the net_device is connected to
 *
 * Drivers should use this to link the ib_device to a netdev so the netdev
 * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be
 * affiliated with any port.
 *
 * The caller must ensure that the given ndev is not unregistered or
 * unregistering, and that either the ib_device is unregistered or
 * ib_device_set_netdev() is called with NULL when the ndev sends a
 * NETDEV_UNREGISTER event.
 */
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
                         unsigned int port)
{
        struct net_device *old_ndev;
        struct ib_port_data *pdata;
        unsigned long flags;
        int ret;

        /*
         * Drivers may wish to call this before ib_register_device(), so we
         * have to set up the port data early.
         */
        ret = alloc_port_data(ib_dev);
        if (ret)
                return ret;

        if (!rdma_is_port_valid(ib_dev, port))
                return -EINVAL;

        pdata = &ib_dev->port_data[port];
        spin_lock_irqsave(&pdata->netdev_lock, flags);
        old_ndev = rcu_dereference_protected(
                pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
        if (old_ndev == ndev) {
                spin_unlock_irqrestore(&pdata->netdev_lock, flags);
                return 0;
        }

        if (ndev)
                dev_hold(ndev);
        rcu_assign_pointer(pdata->netdev, ndev);
        spin_unlock_irqrestore(&pdata->netdev_lock, flags);

        add_ndev_hash(pdata);
        if (old_ndev)
                dev_put(old_ndev);

        return 0;
}
EXPORT_SYMBOL(ib_device_set_netdev);
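
/*
 * Illustrative sketch, not part of the original file: how a driver might
 * honour the rules documented above, assuming a single-port device.
 * example_handle_netdev_event is hypothetical.
 */
static int example_handle_netdev_event(struct ib_device *ibdev,
                                       struct net_device *ndev,
                                       unsigned long event)
{
        /* Break the association before the netdev finishes unregistering. */
        if (event == NETDEV_UNREGISTER)
                return ib_device_set_netdev(ibdev, NULL, 1);

        /* Otherwise (re)affiliate port 1 with this netdev. */
        return ib_device_set_netdev(ibdev, ndev, 1);
}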

static void free_netdevs(struct ib_device *ib_dev)
{
        unsigned long flags;
        unsigned int port;

        if (!ib_dev->port_data)
                return;

        rdma_for_each_port (ib_dev, port) {
                struct ib_port_data *pdata = &ib_dev->port_data[port];
                struct net_device *ndev;

                spin_lock_irqsave(&pdata->netdev_lock, flags);
                ndev = rcu_dereference_protected(
                        pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
                if (ndev) {
                        spin_lock(&ndev_hash_lock);
                        hash_del_rcu(&pdata->ndev_hash_link);
                        spin_unlock(&ndev_hash_lock);

                        /*
                         * If this is the last dev_put there is still a
                         * synchronize_rcu before the netdev is kfreed, so we
                         * can continue to rely on unlocked pointer
                         * comparisons after the put.
                         */
                        rcu_assign_pointer(pdata->netdev, NULL);
                        dev_put(ndev);
                }
                spin_unlock_irqrestore(&pdata->netdev_lock, flags);
        }
}

struct net_device *ib_device_get_netdev(struct ib_device *ib_dev,
                                        unsigned int port)
{
        struct ib_port_data *pdata;
        struct net_device *res;

        if (!rdma_is_port_valid(ib_dev, port))
                return NULL;

        pdata = &ib_dev->port_data[port];

        /*
         * New drivers should use ib_device_set_netdev() not the legacy
         * get_netdev().
         */
        if (ib_dev->ops.get_netdev)
                res = ib_dev->ops.get_netdev(ib_dev, port);
        else {
                spin_lock(&pdata->netdev_lock);
                res = rcu_dereference_protected(
                        pdata->netdev, lockdep_is_held(&pdata->netdev_lock));
                if (res)
                        dev_hold(res);
                spin_unlock(&pdata->netdev_lock);
        }

        /*
         * If we are starting to unregister, expedite things by preventing
         * propagation of an unregistering netdev.
         */
        if (res && res->reg_state != NETREG_REGISTERED) {
                dev_put(res);
                return NULL;
        }

        return res;
}

/**
 * ib_device_get_by_netdev - Find an IB device associated with a netdev
 * @ndev: netdev to locate
 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
 *
 * Find and hold an ib_device that is associated with a netdev via
 * ib_device_set_netdev(). The caller must call ib_device_put() on the
 * returned pointer.
 */
struct ib_device *ib_device_get_by_netdev(struct net_device *ndev,
                                          enum rdma_driver_id driver_id)
{
        struct ib_device *res = NULL;
        struct ib_port_data *cur;

        rcu_read_lock();
        hash_for_each_possible_rcu (ndev_hash, cur, ndev_hash_link,
                                    (uintptr_t)ndev) {
                if (rcu_access_pointer(cur->netdev) == ndev &&
                    (driver_id == RDMA_DRIVER_UNKNOWN ||
                     cur->ib_dev->ops.driver_id == driver_id) &&
                    ib_device_try_get(cur->ib_dev)) {
                        res = cur->ib_dev;
                        break;
                }
        }
        rcu_read_unlock();

        return res;
}
EXPORT_SYMBOL(ib_device_get_by_netdev);
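
/*
 * Illustrative sketch, not part of the original file: checking whether a
 * netdev has an associated RDMA device. example_netdev_is_rdma is
 * hypothetical.
 */
static bool example_netdev_is_rdma(struct net_device *ndev)
{
        struct ib_device *ibdev;

        ibdev = ib_device_get_by_netdev(ndev, RDMA_DRIVER_UNKNOWN);
        if (!ibdev)
                return false;

        ib_device_put(ibdev);   /* drop the reference taken by the lookup */
        return true;
}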

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each RoCE port found
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical RoCE ports of ib_dev that are related to a
 * netdevice and calls cb() on each port for which filter() returns
 * nonzero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
                         roce_netdev_filter filter,
                         void *filter_cookie,
                         roce_netdev_callback cb,
                         void *cookie)
{
        unsigned int port;

        rdma_for_each_port (ib_dev, port)
                if (rdma_protocol_roce(ib_dev, port)) {
                        struct net_device *idev =
                                ib_device_get_netdev(ib_dev, port);

                        if (filter(ib_dev, port, idev, filter_cookie))
                                cb(ib_dev, port, idev, cookie);

                        if (idev)
                                dev_put(idev);
                }
}
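
/*
 * Illustrative sketch, not part of the original file: a filter/callback
 * pair matching the roce_netdev_filter/roce_netdev_callback signatures
 * used above. Both example_* functions are hypothetical.
 */
static int example_filter(struct ib_device *device, u8 port_num,
                          struct net_device *idev, void *cookie)
{
        /* Only visit ports whose netdev matches the one in the cookie. */
        return idev == cookie;
}

static void example_cb(struct ib_device *device, u8 port_num,
                       struct net_device *idev, void *cookie)
{
        pr_info("%s: RoCE port %u matched\n", dev_name(&device->dev),
                port_num);
}

/* Usage: ib_enum_roce_netdev(ib_dev, example_filter, ndev, example_cb, NULL); */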

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each RoCE port found
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all registered RoCE devices that are
 * related to netdevices and calls cb() on each port for which filter()
 * returns nonzero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
                              void *filter_cookie,
                              roce_netdev_callback cb,
                              void *cookie)
{
        struct ib_device *dev;
        unsigned long index;

        down_read(&devices_rwsem);
        xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED)
                ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
        up_read(&devices_rwsem);
}

/**
 * ib_enum_all_devs - enumerate all ib_devices
 * @nldev_cb: Callback to call for each found ib_device
 * @skb: netlink message being filled in
 * @cb: netlink callback providing the dump context
 *
 * Enumerates all registered ib_devices visible in the caller's network
 * namespace and calls nldev_cb() on each one.
 */
int ib_enum_all_devs(nldev_callback nldev_cb, struct sk_buff *skb,
                     struct netlink_callback *cb)
{
        unsigned long index;
        struct ib_device *dev;
        unsigned int idx = 0;
        int ret = 0;

        down_read(&devices_rwsem);
        xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
                if (!rdma_dev_access_netns(dev, sock_net(skb->sk)))
                        continue;

                ret = nldev_cb(dev, skb, cb, idx);
                if (ret)
                        break;
                idx++;
        }
        up_read(&devices_rwsem);
        return ret;
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device: Device to query
 * @port_num: Port number to query
 * @index: P_Key table index to query
 * @pkey: Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey)
{
        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        return device->ops.query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device: Device to modify
 * @device_modify_mask: Mask of attributes to change
 * @device_modify: New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify)
{
        if (!device->ops.modify_device)
                return -EOPNOTSUPP;

        return device->ops.modify_device(device, device_modify_mask,
                                         device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify)
{
        int rc;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        if (device->ops.modify_port)
                rc = device->ops.modify_port(device, port_num,
                                             port_modify_mask,
                                             port_modify);
        else if (rdma_protocol_roce(device, port_num) &&
                 ((port_modify->set_port_cap_mask & ~IB_PORT_CM_SUP) == 0 ||
                  (port_modify->clr_port_cap_mask & ~IB_PORT_CM_SUP) == 0))
                rc = 0;
        else
                rc = -EOPNOTSUPP;
        return rc;
}
EXPORT_SYMBOL(ib_modify_port);
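
/*
 * Illustrative sketch, not part of the original file: setting the CM
 * capability bit, the case special-cased for RoCE above (mirroring how
 * the CM layer uses this API). example_set_cm_cap is hypothetical.
 */
static int example_set_cm_cap(struct ib_device *device, u8 port_num)
{
        struct ib_port_modify pm = {
                .set_port_cap_mask = IB_PORT_CM_SUP,
        };

        return ib_modify_port(device, port_num, 0, &pm);
}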

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs. It searches only ports with the IB
 *   link layer.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found. This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                u8 *port_num, u16 *index)
{
        union ib_gid tmp_gid;
        unsigned int port;
        int ret, i;

        rdma_for_each_port (device, port) {
                if (!rdma_protocol_ib(device, port))
                        continue;

                for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
                     ++i) {
                        ret = rdma_query_gid(device, port, i, &tmp_gid);
                        if (ret)
                                return ret;
                        if (!memcmp(&tmp_gid, gid, sizeof(*gid))) {
                                *port_num = port;
                                if (index)
                                        *index = i;
                                return 0;
                        }
                }
        }

        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index)
{
        int ret, i;
        u16 tmp_pkey;
        int partial_ix = -1;

        for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len;
             ++i) {
                ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
                if (ret)
                        return ret;
                if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
                        /* If there is a full-member pkey, take it. */
                        if (tmp_pkey & 0x8000) {
                                *index = i;
                                return 0;
                        }
                        if (partial_ix < 0)
                                partial_ix = i;
                }
        }

        /* No full member found; if a limited member exists, take it. */
        if (partial_ix >= 0) {
                *index = partial_ix;
                return 0;
        }
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
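
/*
 * Worked example for the membership logic above (added annotation): with
 * a table of { 0x0001, 0x8001 }, searching for pkey 0x0001 first records
 * index 0 as a limited-member partial match (bit 15 clear), then returns
 * index 1, since 0x8001 is the full-member variant (bit 15 set) of the
 * same key.
 */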

/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 *   for a received CM request
 * @dev: An RDMA device on which the request has been received.
 * @port: Port number on the RDMA device.
 * @pkey: The Pkey the request came on.
 * @gid: A GID that the net_dev uses to communicate.
 * @addr: Contains the IP address that the request specified as its
 *   destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
                                            u8 port,
                                            u16 pkey,
                                            const union ib_gid *gid,
                                            const struct sockaddr *addr)
{
        struct net_device *net_dev = NULL;
        unsigned long index;
        void *client_data;

        if (!rdma_protocol_ib(dev, port))
                return NULL;

        /*
         * Holding the read side guarantees that the client will not become
         * unregistered while we are calling get_net_dev_by_params().
         */
        down_read(&dev->client_data_rwsem);
        xan_for_each_marked (&dev->client_data, index, client_data,
                             CLIENT_DATA_REGISTERED) {
                struct ib_client *client = xa_load(&clients, index);

                if (!client || !client->get_net_dev_by_params)
                        continue;

                net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
                                                        addr, client_data);
                if (net_dev)
                        break;
        }
        up_read(&dev->client_data_rwsem);

        return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
{
        struct ib_device_ops *dev_ops = &dev->ops;
/* Copy a driver-provided op into the device unless one is already set. */
#define SET_DEVICE_OP(ptr, name)                                               \
        do {                                                                   \
                if (ops->name)                                                 \
                        if (!((ptr)->name))                                    \
                                (ptr)->name = ops->name;                       \
        } while (0)

#define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)

        if (ops->driver_id != RDMA_DRIVER_UNKNOWN) {
                WARN_ON(dev_ops->driver_id != RDMA_DRIVER_UNKNOWN &&
                        dev_ops->driver_id != ops->driver_id);
                dev_ops->driver_id = ops->driver_id;
        }
        if (ops->owner) {
                WARN_ON(dev_ops->owner && dev_ops->owner != ops->owner);
                dev_ops->owner = ops->owner;
        }
        if (ops->uverbs_abi_ver)
                dev_ops->uverbs_abi_ver = ops->uverbs_abi_ver;

        dev_ops->uverbs_no_driver_id_binding |=
                ops->uverbs_no_driver_id_binding;

        SET_DEVICE_OP(dev_ops, add_gid);
        SET_DEVICE_OP(dev_ops, advise_mr);
        SET_DEVICE_OP(dev_ops, alloc_dm);
        SET_DEVICE_OP(dev_ops, alloc_hw_stats);
        SET_DEVICE_OP(dev_ops, alloc_mr);
        SET_DEVICE_OP(dev_ops, alloc_mr_integrity);
        SET_DEVICE_OP(dev_ops, alloc_mw);
        SET_DEVICE_OP(dev_ops, alloc_pd);
        SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
        SET_DEVICE_OP(dev_ops, alloc_ucontext);
        SET_DEVICE_OP(dev_ops, alloc_xrcd);
        SET_DEVICE_OP(dev_ops, attach_mcast);
        SET_DEVICE_OP(dev_ops, check_mr_status);
        SET_DEVICE_OP(dev_ops, counter_alloc_stats);
        SET_DEVICE_OP(dev_ops, counter_bind_qp);
        SET_DEVICE_OP(dev_ops, counter_dealloc);
        SET_DEVICE_OP(dev_ops, counter_unbind_qp);
        SET_DEVICE_OP(dev_ops, counter_update_stats);
        SET_DEVICE_OP(dev_ops, create_ah);
        SET_DEVICE_OP(dev_ops, create_counters);
        SET_DEVICE_OP(dev_ops, create_cq);
        SET_DEVICE_OP(dev_ops, create_flow);
        SET_DEVICE_OP(dev_ops, create_flow_action_esp);
        SET_DEVICE_OP(dev_ops, create_qp);
        SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
        SET_DEVICE_OP(dev_ops, create_srq);
        SET_DEVICE_OP(dev_ops, create_wq);
        SET_DEVICE_OP(dev_ops, dealloc_dm);
        SET_DEVICE_OP(dev_ops, dealloc_driver);
        SET_DEVICE_OP(dev_ops, dealloc_mw);
        SET_DEVICE_OP(dev_ops, dealloc_pd);
        SET_DEVICE_OP(dev_ops, dealloc_ucontext);
        SET_DEVICE_OP(dev_ops, dealloc_xrcd);
        SET_DEVICE_OP(dev_ops, del_gid);
        SET_DEVICE_OP(dev_ops, dereg_mr);
        SET_DEVICE_OP(dev_ops, destroy_ah);
        SET_DEVICE_OP(dev_ops, destroy_counters);
        SET_DEVICE_OP(dev_ops, destroy_cq);
        SET_DEVICE_OP(dev_ops, destroy_flow);
        SET_DEVICE_OP(dev_ops, destroy_flow_action);
        SET_DEVICE_OP(dev_ops, destroy_qp);
        SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
        SET_DEVICE_OP(dev_ops, destroy_srq);
        SET_DEVICE_OP(dev_ops, destroy_wq);
        SET_DEVICE_OP(dev_ops, detach_mcast);
        SET_DEVICE_OP(dev_ops, disassociate_ucontext);
        SET_DEVICE_OP(dev_ops, drain_rq);
        SET_DEVICE_OP(dev_ops, drain_sq);
        SET_DEVICE_OP(dev_ops, enable_driver);
        SET_DEVICE_OP(dev_ops, fill_res_entry);
        SET_DEVICE_OP(dev_ops, fill_stat_entry);
        SET_DEVICE_OP(dev_ops, get_dev_fw_str);
        SET_DEVICE_OP(dev_ops, get_dma_mr);
        SET_DEVICE_OP(dev_ops, get_hw_stats);
        SET_DEVICE_OP(dev_ops, get_link_layer);
        SET_DEVICE_OP(dev_ops, get_netdev);
        SET_DEVICE_OP(dev_ops, get_port_immutable);
        SET_DEVICE_OP(dev_ops, get_vector_affinity);
        SET_DEVICE_OP(dev_ops, get_vf_config);
        SET_DEVICE_OP(dev_ops, get_vf_guid);
        SET_DEVICE_OP(dev_ops, get_vf_stats);
        SET_DEVICE_OP(dev_ops, init_port);
        SET_DEVICE_OP(dev_ops, iw_accept);
        SET_DEVICE_OP(dev_ops, iw_add_ref);
        SET_DEVICE_OP(dev_ops, iw_connect);
        SET_DEVICE_OP(dev_ops, iw_create_listen);
        SET_DEVICE_OP(dev_ops, iw_destroy_listen);
        SET_DEVICE_OP(dev_ops, iw_get_qp);
        SET_DEVICE_OP(dev_ops, iw_reject);
        SET_DEVICE_OP(dev_ops, iw_rem_ref);
        SET_DEVICE_OP(dev_ops, map_mr_sg);
        SET_DEVICE_OP(dev_ops, map_mr_sg_pi);
        SET_DEVICE_OP(dev_ops, mmap);
        SET_DEVICE_OP(dev_ops, mmap_free);
        SET_DEVICE_OP(dev_ops, modify_ah);
        SET_DEVICE_OP(dev_ops, modify_cq);
        SET_DEVICE_OP(dev_ops, modify_device);
        SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
        SET_DEVICE_OP(dev_ops, modify_port);
        SET_DEVICE_OP(dev_ops, modify_qp);
        SET_DEVICE_OP(dev_ops, modify_srq);
        SET_DEVICE_OP(dev_ops, modify_wq);
        SET_DEVICE_OP(dev_ops, peek_cq);
        SET_DEVICE_OP(dev_ops, poll_cq);
        SET_DEVICE_OP(dev_ops, post_recv);
        SET_DEVICE_OP(dev_ops, post_send);
        SET_DEVICE_OP(dev_ops, post_srq_recv);
        SET_DEVICE_OP(dev_ops, process_mad);
        SET_DEVICE_OP(dev_ops, query_ah);
        SET_DEVICE_OP(dev_ops, query_device);
        SET_DEVICE_OP(dev_ops, query_gid);
        SET_DEVICE_OP(dev_ops, query_pkey);
        SET_DEVICE_OP(dev_ops, query_port);
        SET_DEVICE_OP(dev_ops, query_qp);
        SET_DEVICE_OP(dev_ops, query_srq);
        SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
        SET_DEVICE_OP(dev_ops, read_counters);
        SET_DEVICE_OP(dev_ops, reg_dm_mr);
        SET_DEVICE_OP(dev_ops, reg_user_mr);
        SET_DEVICE_OP(dev_ops, req_ncomp_notif);
        SET_DEVICE_OP(dev_ops, req_notify_cq);
        SET_DEVICE_OP(dev_ops, rereg_user_mr);
        SET_DEVICE_OP(dev_ops, resize_cq);
        SET_DEVICE_OP(dev_ops, set_vf_guid);
        SET_DEVICE_OP(dev_ops, set_vf_link_state);

        SET_OBJ_SIZE(dev_ops, ib_ah);
        SET_OBJ_SIZE(dev_ops, ib_cq);
        SET_OBJ_SIZE(dev_ops, ib_pd);
        SET_OBJ_SIZE(dev_ops, ib_srq);
        SET_OBJ_SIZE(dev_ops, ib_ucontext);
}
EXPORT_SYMBOL(ib_set_device_ops);
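
/*
 * Illustrative sketch, not part of the original file: how a provider
 * might fill its ops before registration. example_dev_ops is
 * hypothetical; a real driver uses its own RDMA_DRIVER_* id and supplies
 * many more callbacks.
 */
static const struct ib_device_ops example_dev_ops = {
        .owner = THIS_MODULE,
        .driver_id = RDMA_DRIVER_UNKNOWN, /* placeholder; see note above */
        .uverbs_abi_ver = 1,
        /* .query_device = ..., .query_port = ..., .create_qp = ..., etc. */
};

/* In the driver, before ib_register_device():
 *      ib_set_device_ops(ibdev, &example_dev_ops);
 */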

static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
        [RDMA_NL_LS_OP_RESOLVE] = {
                .doit = ib_nl_handle_resolve_resp,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NL_LS_OP_SET_TIMEOUT] = {
                .doit = ib_nl_handle_set_timeout,
                .flags = RDMA_NL_ADMIN_PERM,
        },
        [RDMA_NL_LS_OP_IP_RESOLVE] = {
                .doit = ib_nl_handle_ip_res_resp,
                .flags = RDMA_NL_ADMIN_PERM,
        },
};

static int __init ib_core_init(void)
{
        int ret;

        ib_wq = alloc_workqueue("infiniband", 0, 0);
        if (!ib_wq)
                return -ENOMEM;

        ib_comp_wq = alloc_workqueue("ib-comp-wq",
                        WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
        if (!ib_comp_wq) {
                ret = -ENOMEM;
                goto err;
        }

        ib_comp_unbound_wq =
                alloc_workqueue("ib-comp-unb-wq",
                                WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
                                WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
        if (!ib_comp_unbound_wq) {
                ret = -ENOMEM;
                goto err_comp;
        }

        ret = class_register(&ib_class);
        if (ret) {
                pr_warn("Couldn't create InfiniBand device class\n");
                goto err_comp_unbound;
        }

        rdma_nl_init();

        ret = addr_init();
        if (ret) {
                pr_warn("Couldn't init IB address resolution\n");
                goto err_ibnl;
        }

        ret = ib_mad_init();
        if (ret) {
                pr_warn("Couldn't init IB MAD\n");
                goto err_addr;
        }

        ret = ib_sa_init();
        if (ret) {
                pr_warn("Couldn't init SA\n");
                goto err_mad;
        }

        ret = register_blocking_lsm_notifier(&ibdev_lsm_nb);
        if (ret) {
                pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
                goto err_sa;
        }

        ret = register_pernet_device(&rdma_dev_net_ops);
        if (ret) {
                pr_warn("Couldn't init compat dev. ret %d\n", ret);
                goto err_compat;
        }

        nldev_init();
        rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
        roce_gid_mgmt_init();

        return 0;

err_compat:
        unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
err_sa:
        ib_sa_cleanup();
err_mad:
        ib_mad_cleanup();
err_addr:
        addr_cleanup();
err_ibnl:
        class_unregister(&ib_class);
err_comp_unbound:
        destroy_workqueue(ib_comp_unbound_wq);
err_comp:
        destroy_workqueue(ib_comp_wq);
err:
        destroy_workqueue(ib_wq);
        return ret;
}

static void __exit ib_core_cleanup(void)
{
        roce_gid_mgmt_cleanup();
        nldev_exit();
        rdma_nl_unregister(RDMA_NL_LS);
        unregister_pernet_device(&rdma_dev_net_ops);
        unregister_blocking_lsm_notifier(&ibdev_lsm_nb);
        ib_sa_cleanup();
        ib_mad_cleanup();
        addr_cleanup();
        rdma_nl_exit();
        class_unregister(&ib_class);
        destroy_workqueue(ib_comp_unbound_wq);
        destroy_workqueue(ib_comp_wq);
        /* Make sure that any pending umem accounting work is done. */
        destroy_workqueue(ib_wq);
        flush_workqueue(system_unbound_wq);
        WARN_ON(!xa_empty(&clients));
        WARN_ON(!xa_empty(&devices));
}

MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

/*
 * The ib core relies on the netdev stack registering the
 * net_ns_type_operations ns kobject type before ib_core initialization.
 */
fs_initcall(ib_core_init);
module_exit(ib_core_cleanup);