/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/hashtable.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");
struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_comp_unbound_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
/*
 * Each of the three rwsem locks (devices, clients, client_data) protects the
 * xarray of the same name. Specifically it allows the caller to assert that
 * the MARK will/will not be changing under the lock, and for devices and
 * clients, that the value in the xarray is still a valid pointer. Change of
 * the MARK is linked to the object state, so holding the lock and testing the
 * MARK also asserts that the contained object is in a certain state.
 *
 * This is used to build a two stage register/unregister flow where objects
 * can continue to be in the xarray even though they are still in progress to
 * register/unregister.
 *
 * The xarray itself provides additional locking, and restartable iteration,
 * which is also relied on.
 *
 * Locks should not be nested, with the exception of client_data, which is
 * allowed to nest under the read side of the other two locks.
 *
 * The devices_rwsem also protects the device name list, any change or
 * assignment of device name must also hold the write side to guarantee unique
 * names.
 */

/*
 * devices contains devices that have had their names assigned. The
 * devices may not be registered. Users that care about the registration
 * status need to call ib_device_try_get() on the device to ensure it is
 * registered, and keep it registered, for the required duration.
 */
static DEFINE_XARRAY_FLAGS(devices, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(devices_rwsem);
#define DEVICE_REGISTERED XA_MARK_1

static LIST_HEAD(client_list);
#define CLIENT_REGISTERED XA_MARK_1
static DEFINE_XARRAY_FLAGS(clients, XA_FLAGS_ALLOC);
static DECLARE_RWSEM(clients_rwsem);

/*
 * If client_data is registered then the corresponding client must also still
 * be registered.
 */
#define CLIENT_DATA_REGISTERED XA_MARK_1
/**
 * struct rdma_dev_net - rdma net namespace metadata for a net
 * @net:	Pointer to owner net namespace
 * @id:		xarray id to identify the net namespace.
 */
struct rdma_dev_net {
	possible_net_t net;
	u32 id;
};

static unsigned int rdma_dev_net_id;

/*
 * A list of net namespaces is maintained in an xarray. This is necessary
 * because we can't get the locking right using the existing net ns list. We
 * would require an init_net callback after the list is updated.
 */
static DEFINE_XARRAY_FLAGS(rdma_nets, XA_FLAGS_ALLOC);
/*
 * rwsem to protect accessing the rdma_nets xarray entries.
 */
static DECLARE_RWSEM(rdma_nets_rwsem);
bool ib_devices_shared_netns = true;
module_param_named(netns_mode, ib_devices_shared_netns, bool, 0444);
MODULE_PARM_DESC(netns_mode,
		 "Share device among net namespaces; default=1 (shared)");
/**
 * rdma_dev_access_netns() - Return whether an rdma device can be accessed
 *			     from a specified net namespace or not.
 * @dev:	Pointer to rdma device which needs to be checked
 * @net:	Pointer to net namespace for which access is to be checked
 *
 * When the rdma device is in shared mode, it ignores the net namespace.
 * When the rdma device is exclusive to a net namespace, the device's net
 * namespace is checked against the specified one.
 */
bool rdma_dev_access_netns(const struct ib_device *dev, const struct net *net)
{
	return (ib_devices_shared_netns ||
		net_eq(read_pnet(&dev->coredev.rdma_net), net));
}
EXPORT_SYMBOL(rdma_dev_access_netns);
/*
 * xarray has this behavior where it won't iterate over NULL values stored in
 * allocated arrays. So we need our own iterator to see all values stored in
 * the array. This does the same thing as xa_for_each except that it also
 * returns NULL valued entries if the array is allocating. Simplified to only
 * work on simple xarrays.
 */
static void *xan_find_marked(struct xarray *xa, unsigned long *indexp,
			     xa_mark_t filter)
{
	XA_STATE(xas, xa, *indexp);
	void *entry;

	rcu_read_lock();
	do {
		entry = xas_find_marked(&xas, ULONG_MAX, filter);
		if (xa_is_zero(entry))
			break;
	} while (xas_retry(&xas, entry));
	rcu_read_unlock();

	if (entry) {
		*indexp = xas.xa_index;
		if (xa_is_zero(entry))
			return NULL;
		return entry;
	}
	return XA_ERROR(-ENOENT);
}
#define xan_for_each_marked(xa, index, entry, filter)                          \
	for (index = 0, entry = xan_find_marked(xa, &(index), filter);         \
	     !xa_is_err(entry);                                                \
	     (index)++, entry = xan_find_marked(xa, &(index), filter))
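
/*
 * Illustrative use of the iterator above, mirroring how this file later walks
 * client_data (see ib_device_rename() and ib_get_net_dev_by_params()); unlike
 * xa_for_each(), NULL entries are visited too:
 *
 *	xan_for_each_marked (&device->client_data, index, client_data,
 *			     CLIENT_DATA_REGISTERED) {
 *		... client_data may legitimately be NULL here ...
 *	}
 */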
/* RCU hash table mapping netdevice pointers to struct ib_port_data */
static DEFINE_SPINLOCK(ndev_hash_lock);
static DECLARE_HASHTABLE(ndev_hash, 5);

static void free_netdevs(struct ib_device *ib_dev);
static void ib_unregister_work(struct work_struct *work);
static void __ib_unregister_device(struct ib_device *device);
static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);
static void __ibdev_printk(const char *level, const struct ib_device *ibdev,
			   struct va_format *vaf)
{
	if (ibdev && ibdev->dev.parent)
		dev_printk_emit(level[1] - '0',
				ibdev->dev.parent,
				"%s %s %s: %pV",
				dev_driver_string(ibdev->dev.parent),
				dev_name(ibdev->dev.parent),
				dev_name(&ibdev->dev),
				vaf);
	else if (ibdev)
		printk("%s%s: %pV",
		       level, dev_name(&ibdev->dev), vaf);
	else
		printk("%s(NULL ib_device): %pV", level, vaf);
}

void ibdev_printk(const char *level, const struct ib_device *ibdev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, format);

	vaf.fmt = format;
	vaf.va = &args;

	__ibdev_printk(level, ibdev, &vaf);

	va_end(args);
}
EXPORT_SYMBOL(ibdev_printk);
#define define_ibdev_printk_level(func, level)                  \
void func(const struct ib_device *ibdev, const char *fmt, ...) \
{                                                               \
	struct va_format vaf;                                   \
	va_list args;                                           \
								\
	va_start(args, fmt);                                    \
								\
	vaf.fmt = fmt;                                          \
	vaf.va = &args;                                         \
								\
	__ibdev_printk(level, ibdev, &vaf);                     \
								\
	va_end(args);                                           \
}                                                               \
EXPORT_SYMBOL(func);

define_ibdev_printk_level(ibdev_emerg, KERN_EMERG);
define_ibdev_printk_level(ibdev_alert, KERN_ALERT);
define_ibdev_printk_level(ibdev_crit, KERN_CRIT);
define_ibdev_printk_level(ibdev_err, KERN_ERR);
define_ibdev_printk_level(ibdev_warn, KERN_WARNING);
define_ibdev_printk_level(ibdev_notice, KERN_NOTICE);
define_ibdev_printk_level(ibdev_info, KERN_INFO);
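
/*
 * Illustrative call site (hypothetical, not part of this file): drivers use
 * these helpers so the device name is prefixed automatically, e.g.
 *
 *	ibdev_warn(ibdev, "Couldn't modify QP, error %d\n", err);
 *
 * which logs at KERN_WARNING via __ibdev_printk().
 */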
static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};

static int rdma_dev_change_netns(struct ib_device *device, struct net *cur_net,
				 struct net *net);

/* Pointer to the RCU head at the start of the ib_port_data array */
struct ib_port_data_rcu {
	struct rcu_head rcu_head;
	struct ib_port_data pdata[];
};
static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device_ops, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	device->kverbs_provider = true;
	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) &device->ops +
				 mandatory_table[i].offset)) {
			device->kverbs_provider = false;
			break;
		}
	}

	return 0;
}
/*
 * The caller must call ib_device_put() to release the device reference count
 * when ib_device_get_by_index() returns a valid device pointer.
 */
struct ib_device *ib_device_get_by_index(const struct net *net, u32 index)
{
	struct ib_device *device;

	down_read(&devices_rwsem);
	device = xa_load(&devices, index);
	if (device) {
		if (!rdma_dev_access_netns(device, net)) {
			device = NULL;
			goto out;
		}

		if (!ib_device_try_get(device))
			device = NULL;
	}
out:
	up_read(&devices_rwsem);
	return device;
}

/**
 * ib_device_put - Release IB device reference
 * @device: device whose reference to be released
 *
 * ib_device_put() releases reference to the IB device to allow it to be
 * unregistered and eventually freed.
 */
void ib_device_put(struct ib_device *device)
{
	if (refcount_dec_and_test(&device->refcount))
		complete(&device->unreg_completion);
}
EXPORT_SYMBOL(ib_device_put);
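
/*
 * Illustrative pairing (sketch): any successful lookup must be balanced with
 * ib_device_put() once the caller is done with the device:
 *
 *	struct ib_device *dev = ib_device_get_by_index(net, index);
 *
 *	if (dev) {
 *		... use dev ...
 *		ib_device_put(dev);
 *	}
 */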
350 static struct ib_device
*__ib_device_get_by_name(const char *name
)
352 struct ib_device
*device
;
355 xa_for_each (&devices
, index
, device
)
356 if (!strcmp(name
, dev_name(&device
->dev
)))
363 * ib_device_get_by_name - Find an IB device by name
364 * @name: The name to look for
365 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
367 * Find and hold an ib_device by its name. The caller must call
368 * ib_device_put() on the returned pointer.
370 struct ib_device
*ib_device_get_by_name(const char *name
,
371 enum rdma_driver_id driver_id
)
373 struct ib_device
*device
;
375 down_read(&devices_rwsem
);
376 device
= __ib_device_get_by_name(name
);
377 if (device
&& driver_id
!= RDMA_DRIVER_UNKNOWN
&&
378 device
->driver_id
!= driver_id
)
382 if (!ib_device_try_get(device
))
385 up_read(&devices_rwsem
);
388 EXPORT_SYMBOL(ib_device_get_by_name
);
390 static int rename_compat_devs(struct ib_device
*device
)
392 struct ib_core_device
*cdev
;
396 mutex_lock(&device
->compat_devs_mutex
);
397 xa_for_each (&device
->compat_devs
, index
, cdev
) {
398 ret
= device_rename(&cdev
->dev
, dev_name(&device
->dev
));
401 "Fail to rename compatdev to new name %s\n",
402 dev_name(&device
->dev
));
406 mutex_unlock(&device
->compat_devs_mutex
);
410 int ib_device_rename(struct ib_device
*ibdev
, const char *name
)
416 down_write(&devices_rwsem
);
417 if (!strcmp(name
, dev_name(&ibdev
->dev
))) {
418 up_write(&devices_rwsem
);
422 if (__ib_device_get_by_name(name
)) {
423 up_write(&devices_rwsem
);
427 ret
= device_rename(&ibdev
->dev
, name
);
429 up_write(&devices_rwsem
);
433 strlcpy(ibdev
->name
, name
, IB_DEVICE_NAME_MAX
);
434 ret
= rename_compat_devs(ibdev
);
436 downgrade_write(&devices_rwsem
);
437 down_read(&ibdev
->client_data_rwsem
);
438 xan_for_each_marked(&ibdev
->client_data
, index
, client_data
,
439 CLIENT_DATA_REGISTERED
) {
440 struct ib_client
*client
= xa_load(&clients
, index
);
442 if (!client
|| !client
->rename
)
445 client
->rename(ibdev
, client_data
);
447 up_read(&ibdev
->client_data_rwsem
);
448 up_read(&devices_rwsem
);
452 static int alloc_name(struct ib_device
*ibdev
, const char *name
)
454 struct ib_device
*device
;
460 lockdep_assert_held_exclusive(&devices_rwsem
);
462 xa_for_each (&devices
, index
, device
) {
463 char buf
[IB_DEVICE_NAME_MAX
];
465 if (sscanf(dev_name(&device
->dev
), name
, &i
) != 1)
467 if (i
< 0 || i
>= INT_MAX
)
469 snprintf(buf
, sizeof buf
, name
, i
);
470 if (strcmp(buf
, dev_name(&device
->dev
)) != 0)
473 rc
= ida_alloc_range(&inuse
, i
, i
, GFP_KERNEL
);
478 rc
= ida_alloc(&inuse
, GFP_KERNEL
);
482 rc
= dev_set_name(&ibdev
->dev
, name
, rc
);
488 static void ib_device_release(struct device
*device
)
490 struct ib_device
*dev
= container_of(device
, struct ib_device
, dev
);
493 WARN_ON(refcount_read(&dev
->refcount
));
494 if (dev
->port_data
) {
495 ib_cache_release_one(dev
);
496 ib_security_release_port_pkey_list(dev
);
497 kfree_rcu(container_of(dev
->port_data
, struct ib_port_data_rcu
,
501 xa_destroy(&dev
->compat_devs
);
502 xa_destroy(&dev
->client_data
);
503 kfree_rcu(dev
, rcu_head
);
506 static int ib_device_uevent(struct device
*device
,
507 struct kobj_uevent_env
*env
)
509 if (add_uevent_var(env
, "NAME=%s", dev_name(device
)))
513 * It would be nice to pass the node GUID with the event...
519 static const void *net_namespace(struct device
*d
)
521 struct ib_core_device
*coredev
=
522 container_of(d
, struct ib_core_device
, dev
);
524 return read_pnet(&coredev
->rdma_net
);
527 static struct class ib_class
= {
528 .name
= "infiniband",
529 .dev_release
= ib_device_release
,
530 .dev_uevent
= ib_device_uevent
,
531 .ns_type
= &net_ns_type_operations
,
532 .namespace = net_namespace
,
535 static void rdma_init_coredev(struct ib_core_device
*coredev
,
536 struct ib_device
*dev
, struct net
*net
)
	/* This BUILD_BUG_ON is intended to catch layout change
	 * of union of ib_core_device and device.
	 * dev must be the first element as ib_core and provider
	 * drivers rely on it. Adding anything in ib_core_device before
	 * device will break this assumption.
	 */
544 BUILD_BUG_ON(offsetof(struct ib_device
, coredev
.dev
) !=
545 offsetof(struct ib_device
, dev
));
547 coredev
->dev
.class = &ib_class
;
548 coredev
->dev
.groups
= dev
->groups
;
549 device_initialize(&coredev
->dev
);
550 coredev
->owner
= dev
;
551 INIT_LIST_HEAD(&coredev
->port_list
);
552 write_pnet(&coredev
->rdma_net
, net
);
556 * _ib_alloc_device - allocate an IB device struct
557 * @size:size of structure to allocate
559 * Low-level drivers should use ib_alloc_device() to allocate &struct
560 * ib_device. @size is the size of the structure to be allocated,
561 * including any private data used by the low-level driver.
562 * ib_dealloc_device() must be used to free structures allocated with
565 struct ib_device
*_ib_alloc_device(size_t size
)
567 struct ib_device
*device
;
569 if (WARN_ON(size
< sizeof(struct ib_device
)))
572 device
= kzalloc(size
, GFP_KERNEL
);
576 if (rdma_restrack_init(device
)) {
581 device
->groups
[0] = &ib_dev_attr_group
;
582 rdma_init_coredev(&device
->coredev
, device
, &init_net
);
584 INIT_LIST_HEAD(&device
->event_handler_list
);
585 spin_lock_init(&device
->event_handler_lock
);
586 mutex_init(&device
->unregistration_lock
);
	/*
	 * client_data needs to be an allocating xarray (XA_FLAGS_ALLOC)
	 * because we don't want our mark to be destroyed if the user stores
	 * NULL in the client data.
	 */
591 xa_init_flags(&device
->client_data
, XA_FLAGS_ALLOC
);
592 init_rwsem(&device
->client_data_rwsem
);
593 xa_init_flags(&device
->compat_devs
, XA_FLAGS_ALLOC
);
594 mutex_init(&device
->compat_devs_mutex
);
595 init_completion(&device
->unreg_completion
);
596 INIT_WORK(&device
->unregistration_work
, ib_unregister_work
);
600 EXPORT_SYMBOL(_ib_alloc_device
);
603 * ib_dealloc_device - free an IB device struct
604 * @device:structure to free
606 * Free a structure allocated with ib_alloc_device().
608 void ib_dealloc_device(struct ib_device
*device
)
610 if (device
->ops
.dealloc_driver
)
611 device
->ops
.dealloc_driver(device
);
614 * ib_unregister_driver() requires all devices to remain in the xarray
615 * while their ops are callable. The last op we call is dealloc_driver
616 * above. This is needed to create a fence on op callbacks prior to
617 * allowing the driver module to unload.
619 down_write(&devices_rwsem
);
620 if (xa_load(&devices
, device
->index
) == device
)
621 xa_erase(&devices
, device
->index
);
622 up_write(&devices_rwsem
);
624 /* Expedite releasing netdev references */
625 free_netdevs(device
);
627 WARN_ON(!xa_empty(&device
->compat_devs
));
628 WARN_ON(!xa_empty(&device
->client_data
));
629 WARN_ON(refcount_read(&device
->refcount
));
630 rdma_restrack_clean(device
);
631 /* Balances with device_initialize */
632 put_device(&device
->dev
);
634 EXPORT_SYMBOL(ib_dealloc_device
);
637 * add_client_context() and remove_client_context() must be safe against
638 * parallel calls on the same device - registration/unregistration of both the
639 * device and client can be occurring in parallel.
641 * The routines need to be a fence, any caller must not return until the add
642 * or remove is fully completed.
644 static int add_client_context(struct ib_device
*device
,
645 struct ib_client
*client
)
649 if (!device
->kverbs_provider
&& !client
->no_kverbs_req
)
652 down_write(&device
->client_data_rwsem
);
654 * Another caller to add_client_context got here first and has already
655 * completely initialized context.
657 if (xa_get_mark(&device
->client_data
, client
->client_id
,
658 CLIENT_DATA_REGISTERED
))
661 ret
= xa_err(xa_store(&device
->client_data
, client
->client_id
, NULL
,
665 downgrade_write(&device
->client_data_rwsem
);
669 /* Readers shall not see a client until add has been completed */
670 xa_set_mark(&device
->client_data
, client
->client_id
,
671 CLIENT_DATA_REGISTERED
);
672 up_read(&device
->client_data_rwsem
);
676 up_write(&device
->client_data_rwsem
);
680 static void remove_client_context(struct ib_device
*device
,
681 unsigned int client_id
)
683 struct ib_client
*client
;
686 down_write(&device
->client_data_rwsem
);
687 if (!xa_get_mark(&device
->client_data
, client_id
,
688 CLIENT_DATA_REGISTERED
)) {
689 up_write(&device
->client_data_rwsem
);
692 client_data
= xa_load(&device
->client_data
, client_id
);
693 xa_clear_mark(&device
->client_data
, client_id
, CLIENT_DATA_REGISTERED
);
694 client
= xa_load(&clients
, client_id
);
695 downgrade_write(&device
->client_data_rwsem
);
	/*
	 * Notice we cannot be holding any exclusive locks when calling the
	 * remove callback as the remove callback can recurse back into any
	 * public functions in this module and thus try for any locks those
	 * functions take.
	 *
	 * For this reason clients and drivers should not call the
	 * unregistration functions while holding any locks.
	 *
	 * It is tempting to drop the client_data_rwsem too, but this is
	 * required to ensure that unregister_client does not return until all
	 * clients are completely unregistered, which is required to avoid
	 * module unloading races.
	 */
712 client
->remove(device
, client_data
);
714 xa_erase(&device
->client_data
, client_id
);
715 up_read(&device
->client_data_rwsem
);
718 static int alloc_port_data(struct ib_device
*device
)
720 struct ib_port_data_rcu
*pdata_rcu
;
723 if (device
->port_data
)
726 /* This can only be called once the physical port range is defined */
727 if (WARN_ON(!device
->phys_port_cnt
))
731 * device->port_data is indexed directly by the port number to make
732 * access to this data as efficient as possible.
734 * Therefore port_data is declared as a 1 based array with potential
735 * empty slots at the beginning.
737 pdata_rcu
= kzalloc(struct_size(pdata_rcu
, pdata
,
738 rdma_end_port(device
) + 1),
743 * The rcu_head is put in front of the port data array and the stored
744 * pointer is adjusted since we never need to see that member until
747 device
->port_data
= pdata_rcu
->pdata
;
749 rdma_for_each_port (device
, port
) {
750 struct ib_port_data
*pdata
= &device
->port_data
[port
];
752 pdata
->ib_dev
= device
;
753 spin_lock_init(&pdata
->pkey_list_lock
);
754 INIT_LIST_HEAD(&pdata
->pkey_list
);
755 spin_lock_init(&pdata
->netdev_lock
);
756 INIT_HLIST_NODE(&pdata
->ndev_hash_link
);
761 static int verify_immutable(const struct ib_device
*dev
, u8 port
)
763 return WARN_ON(!rdma_cap_ib_mad(dev
, port
) &&
764 rdma_max_mad_size(dev
, port
) != 0);
767 static int setup_port_data(struct ib_device
*device
)
772 ret
= alloc_port_data(device
);
776 rdma_for_each_port (device
, port
) {
777 struct ib_port_data
*pdata
= &device
->port_data
[port
];
779 ret
= device
->ops
.get_port_immutable(device
, port
,
784 if (verify_immutable(device
, port
))
790 void ib_get_device_fw_str(struct ib_device
*dev
, char *str
)
792 if (dev
->ops
.get_dev_fw_str
)
793 dev
->ops
.get_dev_fw_str(dev
, str
);
797 EXPORT_SYMBOL(ib_get_device_fw_str
);
799 static void ib_policy_change_task(struct work_struct
*work
)
801 struct ib_device
*dev
;
804 down_read(&devices_rwsem
);
805 xa_for_each_marked (&devices
, index
, dev
, DEVICE_REGISTERED
) {
808 rdma_for_each_port (dev
, i
) {
810 int ret
= ib_get_cached_subnet_prefix(dev
,
815 "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
818 ib_security_cache_change(dev
, i
, sp
);
821 up_read(&devices_rwsem
);
824 static int ib_security_change(struct notifier_block
*nb
, unsigned long event
,
827 if (event
!= LSM_POLICY_CHANGE
)
830 schedule_work(&ib_policy_change_work
);
831 ib_mad_agent_security_change();
836 static void compatdev_release(struct device
*dev
)
838 struct ib_core_device
*cdev
=
839 container_of(dev
, struct ib_core_device
, dev
);
844 static int add_one_compat_dev(struct ib_device
*device
,
845 struct rdma_dev_net
*rnet
)
847 struct ib_core_device
*cdev
;
850 lockdep_assert_held(&rdma_nets_rwsem
);
851 if (!ib_devices_shared_netns
)
855 * Create and add compat device in all namespaces other than where it
856 * is currently bound to.
858 if (net_eq(read_pnet(&rnet
->net
),
859 read_pnet(&device
->coredev
.rdma_net
)))
863 * The first of init_net() or ib_register_device() to take the
864 * compat_devs_mutex wins and gets to add the device. Others will wait
865 * for completion here.
867 mutex_lock(&device
->compat_devs_mutex
);
868 cdev
= xa_load(&device
->compat_devs
, rnet
->id
);
873 ret
= xa_reserve(&device
->compat_devs
, rnet
->id
, GFP_KERNEL
);
877 cdev
= kzalloc(sizeof(*cdev
), GFP_KERNEL
);
883 cdev
->dev
.parent
= device
->dev
.parent
;
884 rdma_init_coredev(cdev
, device
, read_pnet(&rnet
->net
));
885 cdev
->dev
.release
= compatdev_release
;
886 dev_set_name(&cdev
->dev
, "%s", dev_name(&device
->dev
));
888 ret
= device_add(&cdev
->dev
);
891 ret
= ib_setup_port_attrs(cdev
);
895 ret
= xa_err(xa_store(&device
->compat_devs
, rnet
->id
,
900 mutex_unlock(&device
->compat_devs_mutex
);
904 ib_free_port_attrs(cdev
);
906 device_del(&cdev
->dev
);
908 put_device(&cdev
->dev
);
910 xa_release(&device
->compat_devs
, rnet
->id
);
912 mutex_unlock(&device
->compat_devs_mutex
);
916 static void remove_one_compat_dev(struct ib_device
*device
, u32 id
)
918 struct ib_core_device
*cdev
;
920 mutex_lock(&device
->compat_devs_mutex
);
921 cdev
= xa_erase(&device
->compat_devs
, id
);
922 mutex_unlock(&device
->compat_devs_mutex
);
924 ib_free_port_attrs(cdev
);
925 device_del(&cdev
->dev
);
926 put_device(&cdev
->dev
);
930 static void remove_compat_devs(struct ib_device
*device
)
932 struct ib_core_device
*cdev
;
935 xa_for_each (&device
->compat_devs
, index
, cdev
)
936 remove_one_compat_dev(device
, index
);
939 static int add_compat_devs(struct ib_device
*device
)
941 struct rdma_dev_net
*rnet
;
945 lockdep_assert_held(&devices_rwsem
);
947 down_read(&rdma_nets_rwsem
);
948 xa_for_each (&rdma_nets
, index
, rnet
) {
949 ret
= add_one_compat_dev(device
, rnet
);
953 up_read(&rdma_nets_rwsem
);
957 static void remove_all_compat_devs(void)
959 struct ib_compat_device
*cdev
;
960 struct ib_device
*dev
;
963 down_read(&devices_rwsem
);
964 xa_for_each (&devices
, index
, dev
) {
965 unsigned long c_index
= 0;
967 /* Hold nets_rwsem so that any other thread modifying this
968 * system param can sync with this thread.
970 down_read(&rdma_nets_rwsem
);
971 xa_for_each (&dev
->compat_devs
, c_index
, cdev
)
972 remove_one_compat_dev(dev
, c_index
);
973 up_read(&rdma_nets_rwsem
);
975 up_read(&devices_rwsem
);
978 static int add_all_compat_devs(void)
980 struct rdma_dev_net
*rnet
;
981 struct ib_device
*dev
;
985 down_read(&devices_rwsem
);
986 xa_for_each_marked (&devices
, index
, dev
, DEVICE_REGISTERED
) {
987 unsigned long net_index
= 0;
989 /* Hold nets_rwsem so that any other thread modifying this
990 * system param can sync with this thread.
992 down_read(&rdma_nets_rwsem
);
993 xa_for_each (&rdma_nets
, net_index
, rnet
) {
994 ret
= add_one_compat_dev(dev
, rnet
);
998 up_read(&rdma_nets_rwsem
);
1000 up_read(&devices_rwsem
);
1002 remove_all_compat_devs();
1006 int rdma_compatdev_set(u8 enable
)
1008 struct rdma_dev_net
*rnet
;
1009 unsigned long index
;
1012 down_write(&rdma_nets_rwsem
);
1013 if (ib_devices_shared_netns
== enable
) {
1014 up_write(&rdma_nets_rwsem
);
	/* enable/disable of compat devices is not supported
	 * when more than the default init_net exists.
	 */
1021 xa_for_each (&rdma_nets
, index
, rnet
) {
1026 ib_devices_shared_netns
= enable
;
1027 up_write(&rdma_nets_rwsem
);
1032 ret
= add_all_compat_devs();
1034 remove_all_compat_devs();
1038 static void rdma_dev_exit_net(struct net
*net
)
1040 struct rdma_dev_net
*rnet
= net_generic(net
, rdma_dev_net_id
);
1041 struct ib_device
*dev
;
1042 unsigned long index
;
1045 down_write(&rdma_nets_rwsem
);
1047 * Prevent the ID from being re-used and hide the id from xa_for_each.
1049 ret
= xa_err(xa_store(&rdma_nets
, rnet
->id
, NULL
, GFP_KERNEL
));
1051 up_write(&rdma_nets_rwsem
);
1053 down_read(&devices_rwsem
);
1054 xa_for_each (&devices
, index
, dev
) {
1055 get_device(&dev
->dev
);
		/*
		 * Release the devices_rwsem so that the potentially blocking
		 * device_del() doesn't hold the devices_rwsem for too long.
		 */
1060 up_read(&devices_rwsem
);
1062 remove_one_compat_dev(dev
, rnet
->id
);
1065 * If the real device is in the NS then move it back to init.
1067 rdma_dev_change_netns(dev
, net
, &init_net
);
1069 put_device(&dev
->dev
);
1070 down_read(&devices_rwsem
);
1072 up_read(&devices_rwsem
);
1074 xa_erase(&rdma_nets
, rnet
->id
);
1077 static __net_init
int rdma_dev_init_net(struct net
*net
)
1079 struct rdma_dev_net
*rnet
= net_generic(net
, rdma_dev_net_id
);
1080 unsigned long index
;
1081 struct ib_device
*dev
;
1084 /* No need to create any compat devices in default init_net. */
1085 if (net_eq(net
, &init_net
))
1088 write_pnet(&rnet
->net
, net
);
1090 ret
= xa_alloc(&rdma_nets
, &rnet
->id
, rnet
, xa_limit_32b
, GFP_KERNEL
);
1094 down_read(&devices_rwsem
);
1095 xa_for_each_marked (&devices
, index
, dev
, DEVICE_REGISTERED
) {
1096 /* Hold nets_rwsem so that netlink command cannot change
1097 * system configuration for device sharing mode.
1099 down_read(&rdma_nets_rwsem
);
1100 ret
= add_one_compat_dev(dev
, rnet
);
1101 up_read(&rdma_nets_rwsem
);
1105 up_read(&devices_rwsem
);
1108 rdma_dev_exit_net(net
);
1114 * Assign the unique string device name and the unique device index. This is
1115 * undone by ib_dealloc_device.
1117 static int assign_name(struct ib_device
*device
, const char *name
)
1122 down_write(&devices_rwsem
);
1123 /* Assign a unique name to the device */
1124 if (strchr(name
, '%'))
1125 ret
= alloc_name(device
, name
);
1127 ret
= dev_set_name(&device
->dev
, name
);
1131 if (__ib_device_get_by_name(dev_name(&device
->dev
))) {
1135 strlcpy(device
->name
, dev_name(&device
->dev
), IB_DEVICE_NAME_MAX
);
1137 ret
= xa_alloc_cyclic(&devices
, &device
->index
, device
, xa_limit_31b
,
1138 &last_id
, GFP_KERNEL
);
1143 up_write(&devices_rwsem
);
1147 static void setup_dma_device(struct ib_device
*device
)
1149 struct device
*parent
= device
->dev
.parent
;
1151 WARN_ON_ONCE(device
->dma_device
);
1152 if (device
->dev
.dma_ops
) {
1154 * The caller provided custom DMA operations. Copy the
1155 * DMA-related fields that are used by e.g. dma_alloc_coherent()
1158 device
->dma_device
= &device
->dev
;
1159 if (!device
->dev
.dma_mask
) {
1161 device
->dev
.dma_mask
= parent
->dma_mask
;
1165 if (!device
->dev
.coherent_dma_mask
) {
1167 device
->dev
.coherent_dma_mask
=
1168 parent
->coherent_dma_mask
;
1174 * The caller did not provide custom DMA operations. Use the
1175 * DMA mapping operations of the parent device.
1177 WARN_ON_ONCE(!parent
);
1178 device
->dma_device
= parent
;
1180 /* Setup default max segment size for all IB devices */
1181 dma_set_max_seg_size(device
->dma_device
, SZ_2G
);
1186 * setup_device() allocates memory and sets up data that requires calling the
1187 * device ops, this is the only reason these actions are not done during
1188 * ib_alloc_device. It is undone by ib_dealloc_device().
1190 static int setup_device(struct ib_device
*device
)
1192 struct ib_udata uhw
= {.outlen
= 0, .inlen
= 0};
1195 setup_dma_device(device
);
1197 ret
= ib_device_check_mandatory(device
);
1201 ret
= setup_port_data(device
);
1203 dev_warn(&device
->dev
, "Couldn't create per-port data\n");
1207 memset(&device
->attrs
, 0, sizeof(device
->attrs
));
1208 ret
= device
->ops
.query_device(device
, &device
->attrs
, &uhw
);
1210 dev_warn(&device
->dev
,
1211 "Couldn't query the device attributes\n");
1218 static void disable_device(struct ib_device
*device
)
1220 struct ib_client
*client
;
1222 WARN_ON(!refcount_read(&device
->refcount
));
1224 down_write(&devices_rwsem
);
1225 xa_clear_mark(&devices
, device
->index
, DEVICE_REGISTERED
);
1226 up_write(&devices_rwsem
);
1228 down_read(&clients_rwsem
);
1229 list_for_each_entry_reverse(client
, &client_list
, list
)
1230 remove_client_context(device
, client
->client_id
);
1231 up_read(&clients_rwsem
);
1233 /* Pairs with refcount_set in enable_device */
1234 ib_device_put(device
);
1235 wait_for_completion(&device
->unreg_completion
);
1238 * compat devices must be removed after device refcount drops to zero.
1239 * Otherwise init_net() may add more compatdevs after removing compat
1240 * devices and before device is disabled.
1242 remove_compat_devs(device
);
1246 * An enabled device is visible to all clients and to all the public facing
1247 * APIs that return a device pointer. This always returns with a new get, even
1250 static int enable_device_and_get(struct ib_device
*device
)
1252 struct ib_client
*client
;
1253 unsigned long index
;
1257 * One ref belongs to the xa and the other belongs to this
1258 * thread. This is needed to guard against parallel unregistration.
1260 refcount_set(&device
->refcount
, 2);
1261 down_write(&devices_rwsem
);
1262 xa_set_mark(&devices
, device
->index
, DEVICE_REGISTERED
);
1265 * By using downgrade_write() we ensure that no other thread can clear
1266 * DEVICE_REGISTERED while we are completing the client setup.
1268 downgrade_write(&devices_rwsem
);
1270 if (device
->ops
.enable_driver
) {
1271 ret
= device
->ops
.enable_driver(device
);
1276 down_read(&clients_rwsem
);
1277 xa_for_each_marked (&clients
, index
, client
, CLIENT_REGISTERED
) {
1278 ret
= add_client_context(device
, client
);
1282 up_read(&clients_rwsem
);
1284 ret
= add_compat_devs(device
);
1286 up_read(&devices_rwsem
);
1291 * ib_register_device - Register an IB device with IB core
1292 * @device:Device to register
1294 * Low-level drivers use ib_register_device() to register their
1295 * devices with the IB core. All registered clients will receive a
1296 * callback for each device that is added. @device must be allocated
1297 * with ib_alloc_device().
1299 * If the driver uses ops.dealloc_driver and calls any ib_unregister_device()
1300 * asynchronously then the device pointer may become freed as soon as this
1303 int ib_register_device(struct ib_device
*device
, const char *name
)
1307 ret
= assign_name(device
, name
);
1311 ret
= setup_device(device
);
1315 ret
= ib_cache_setup_one(device
);
1317 dev_warn(&device
->dev
,
1318 "Couldn't set up InfiniBand P_Key/GID cache\n");
1322 ib_device_register_rdmacg(device
);
	/*
	 * Ensure that ADD uevent is not fired because it
	 * is too early and the device is not initialized yet.
	 */
1328 dev_set_uevent_suppress(&device
->dev
, true);
1329 ret
= device_add(&device
->dev
);
1333 ret
= ib_device_register_sysfs(device
);
1335 dev_warn(&device
->dev
,
1336 "Couldn't register device with driver model\n");
1340 ret
= enable_device_and_get(device
);
1341 dev_set_uevent_suppress(&device
->dev
, false);
1342 /* Mark for userspace that device is ready */
1343 kobject_uevent(&device
->dev
.kobj
, KOBJ_ADD
);
1345 void (*dealloc_fn
)(struct ib_device
*);
1348 * If we hit this error flow then we don't want to
1349 * automatically dealloc the device since the caller is
1350 * expected to call ib_dealloc_device() after
1351 * ib_register_device() fails. This is tricky due to the
1352 * possibility for a parallel unregistration along with this
1353 * error flow. Since we have a refcount here we know any
1354 * parallel flow is stopped in disable_device and will see the
1355 * NULL pointers, causing the responsibility to
1356 * ib_dealloc_device() to revert back to this thread.
1358 dealloc_fn
= device
->ops
.dealloc_driver
;
1359 device
->ops
.dealloc_driver
= NULL
;
1360 ib_device_put(device
);
1361 __ib_unregister_device(device
);
1362 device
->ops
.dealloc_driver
= dealloc_fn
;
1365 ib_device_put(device
);
1370 device_del(&device
->dev
);
1372 dev_set_uevent_suppress(&device
->dev
, false);
1373 ib_device_unregister_rdmacg(device
);
1374 ib_cache_cleanup_one(device
);
1377 EXPORT_SYMBOL(ib_register_device
);
1379 /* Callers must hold a get on the device. */
1380 static void __ib_unregister_device(struct ib_device
*ib_dev
)
	/*
	 * We have a registration lock so that all the calls to unregister are
	 * fully fenced; once any unregister returns, the device is truly
	 * unregistered even if multiple callers are unregistering it at the
	 * same time. This also interacts with the registration flow and
	 * provides sane semantics if register and unregister are racing.
	 */
1389 mutex_lock(&ib_dev
->unregistration_lock
);
1390 if (!refcount_read(&ib_dev
->refcount
))
1393 disable_device(ib_dev
);
1395 /* Expedite removing unregistered pointers from the hash table */
1396 free_netdevs(ib_dev
);
1398 ib_device_unregister_sysfs(ib_dev
);
1399 device_del(&ib_dev
->dev
);
1400 ib_device_unregister_rdmacg(ib_dev
);
1401 ib_cache_cleanup_one(ib_dev
);
1404 * Drivers using the new flow may not call ib_dealloc_device except
1405 * in error unwind prior to registration success.
1407 if (ib_dev
->ops
.dealloc_driver
) {
1408 WARN_ON(kref_read(&ib_dev
->dev
.kobj
.kref
) <= 1);
1409 ib_dealloc_device(ib_dev
);
1412 mutex_unlock(&ib_dev
->unregistration_lock
);
1416 * ib_unregister_device - Unregister an IB device
1417 * @device: The device to unregister
1419 * Unregister an IB device. All clients will receive a remove callback.
1421 * Callers should call this routine only once, and protect against races with
1422 * registration. Typically it should only be called as part of a remove
1423 * callback in an implementation of driver core's struct device_driver and
1426 * If ops.dealloc_driver is used then ib_dev will be freed upon return from
1429 void ib_unregister_device(struct ib_device
*ib_dev
)
1431 get_device(&ib_dev
->dev
);
1432 __ib_unregister_device(ib_dev
);
1433 put_device(&ib_dev
->dev
);
1435 EXPORT_SYMBOL(ib_unregister_device
);
1438 * ib_unregister_device_and_put - Unregister a device while holding a 'get'
 * @ib_dev: The device to unregister
1441 * This is the same as ib_unregister_device(), except it includes an internal
1442 * ib_device_put() that should match a 'get' obtained by the caller.
1444 * It is safe to call this routine concurrently from multiple threads while
1445 * holding the 'get'. When the function returns the device is fully
1448 * Drivers using this flow MUST use the driver_unregister callback to clean up
1449 * their resources associated with the device and dealloc it.
1451 void ib_unregister_device_and_put(struct ib_device
*ib_dev
)
1453 WARN_ON(!ib_dev
->ops
.dealloc_driver
);
1454 get_device(&ib_dev
->dev
);
1455 ib_device_put(ib_dev
);
1456 __ib_unregister_device(ib_dev
);
1457 put_device(&ib_dev
->dev
);
1459 EXPORT_SYMBOL(ib_unregister_device_and_put
);
1462 * ib_unregister_driver - Unregister all IB devices for a driver
1463 * @driver_id: The driver to unregister
1465 * This implements a fence for device unregistration. It only returns once all
1466 * devices associated with the driver_id have fully completed their
1467 * unregistration and returned from ib_unregister_device*().
 * If devices are not yet unregistered, it goes ahead and starts unregistering
 * them.
 *
1472 * This does not block creation of new devices with the given driver_id, that
1473 * is the responsibility of the caller.
1475 void ib_unregister_driver(enum rdma_driver_id driver_id
)
1477 struct ib_device
*ib_dev
;
1478 unsigned long index
;
1480 down_read(&devices_rwsem
);
1481 xa_for_each (&devices
, index
, ib_dev
) {
1482 if (ib_dev
->driver_id
!= driver_id
)
1485 get_device(&ib_dev
->dev
);
1486 up_read(&devices_rwsem
);
1488 WARN_ON(!ib_dev
->ops
.dealloc_driver
);
1489 __ib_unregister_device(ib_dev
);
1491 put_device(&ib_dev
->dev
);
1492 down_read(&devices_rwsem
);
1494 up_read(&devices_rwsem
);
1496 EXPORT_SYMBOL(ib_unregister_driver
);
1498 static void ib_unregister_work(struct work_struct
*work
)
1500 struct ib_device
*ib_dev
=
1501 container_of(work
, struct ib_device
, unregistration_work
);
1503 __ib_unregister_device(ib_dev
);
1504 put_device(&ib_dev
->dev
);
1508 * ib_unregister_device_queued - Unregister a device using a work queue
 * @ib_dev: The device to unregister
1511 * This schedules an asynchronous unregistration using a WQ for the device. A
1512 * driver should use this to avoid holding locks while doing unregistration,
1513 * such as holding the RTNL lock.
1515 * Drivers using this API must use ib_unregister_driver before module unload
1516 * to ensure that all scheduled unregistrations have completed.
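 *
 * Illustrative sequence (sketch): a driver that must not block, for example
 * while holding the RTNL lock, would call
 *
 *	ib_unregister_device_queued(ib_dev);
 *
 * and later, from its module exit path, call ib_unregister_driver() with its
 * driver_id to fence all queued unregistrations.
 */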
1518 void ib_unregister_device_queued(struct ib_device
*ib_dev
)
1520 WARN_ON(!refcount_read(&ib_dev
->refcount
));
1521 WARN_ON(!ib_dev
->ops
.dealloc_driver
);
1522 get_device(&ib_dev
->dev
);
1523 if (!queue_work(system_unbound_wq
, &ib_dev
->unregistration_work
))
1524 put_device(&ib_dev
->dev
);
1526 EXPORT_SYMBOL(ib_unregister_device_queued
);
1529 * The caller must pass in a device that has the kref held and the refcount
1530 * released. If the device is in cur_net and still registered then it is moved
1533 static int rdma_dev_change_netns(struct ib_device
*device
, struct net
*cur_net
,
1539 mutex_lock(&device
->unregistration_lock
);
	/*
	 * If a device is not under ib_device_get() or if the
	 * unregistration_lock is not held, the namespace can be changed, or
	 * the device can be unregistered. Check again under the lock.
	 */
1546 if (refcount_read(&device
->refcount
) == 0 ||
1547 !net_eq(cur_net
, read_pnet(&device
->coredev
.rdma_net
))) {
1552 kobject_uevent(&device
->dev
.kobj
, KOBJ_REMOVE
);
1553 disable_device(device
);
1556 * At this point no one can be using the device, so it is safe to
1557 * change the namespace.
1559 write_pnet(&device
->coredev
.rdma_net
, net
);
1561 down_read(&devices_rwsem
);
1563 * Currently rdma devices are system wide unique. So the device name
1564 * is guaranteed free in the new namespace. Publish the new namespace
1565 * at the sysfs level.
1567 ret
= device_rename(&device
->dev
, dev_name(&device
->dev
));
1568 up_read(&devices_rwsem
);
1570 dev_warn(&device
->dev
,
1571 "%s: Couldn't rename device after namespace change\n",
1573 /* Try and put things back and re-enable the device */
1574 write_pnet(&device
->coredev
.rdma_net
, cur_net
);
1577 ret2
= enable_device_and_get(device
);
1580 * This shouldn't really happen, but if it does, let the user
1581 * retry at later point. So don't disable the device.
1583 dev_warn(&device
->dev
,
1584 "%s: Couldn't re-enable device after namespace change\n",
1587 kobject_uevent(&device
->dev
.kobj
, KOBJ_ADD
);
1589 ib_device_put(device
);
1591 mutex_unlock(&device
->unregistration_lock
);
1597 int ib_device_set_netns_put(struct sk_buff
*skb
,
1598 struct ib_device
*dev
, u32 ns_fd
)
1603 net
= get_net_ns_by_fd(ns_fd
);
1609 if (!netlink_ns_capable(skb
, net
->user_ns
, CAP_NET_ADMIN
)) {
1615 * Currently supported only for those providers which support
1616 * disassociation and don't do port specific sysfs init. Once a
1617 * port_cleanup infrastructure is implemented, this limitation will be
1620 if (!dev
->ops
.disassociate_ucontext
|| dev
->ops
.init_port
||
1621 ib_devices_shared_netns
) {
1626 get_device(&dev
->dev
);
1628 ret
= rdma_dev_change_netns(dev
, current
->nsproxy
->net_ns
, net
);
1629 put_device(&dev
->dev
);
1641 static struct pernet_operations rdma_dev_net_ops
= {
1642 .init
= rdma_dev_init_net
,
1643 .exit
= rdma_dev_exit_net
,
1644 .id
= &rdma_dev_net_id
,
1645 .size
= sizeof(struct rdma_dev_net
),
1648 static int assign_client_id(struct ib_client
*client
)
1652 down_write(&clients_rwsem
);
1654 * The add/remove callbacks must be called in FIFO/LIFO order. To
1655 * achieve this we assign client_ids so they are sorted in
1656 * registration order, and retain a linked list we can reverse iterate
1657 * to get the LIFO order. The extra linked list can go away if xarray
1658 * learns to reverse iterate.
1660 if (list_empty(&client_list
)) {
1661 client
->client_id
= 0;
1663 struct ib_client
*last
;
1665 last
= list_last_entry(&client_list
, struct ib_client
, list
);
1666 client
->client_id
= last
->client_id
+ 1;
1668 ret
= xa_insert(&clients
, client
->client_id
, client
, GFP_KERNEL
);
1672 xa_set_mark(&clients
, client
->client_id
, CLIENT_REGISTERED
);
1673 list_add_tail(&client
->list
, &client_list
);
1676 up_write(&clients_rwsem
);
1681 * ib_register_client - Register an IB client
1682 * @client:Client to register
1684 * Upper level users of the IB drivers can use ib_register_client() to
1685 * register callbacks for IB device addition and removal. When an IB
1686 * device is added, each registered client's add method will be called
1687 * (in the order the clients were registered), and when a device is
1688 * removed, each client's remove method will be called (in the reverse
1689 * order that clients were registered). In addition, when
1690 * ib_register_client() is called, the client will receive an add
1691 * callback for all devices already registered.
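 *
 * Minimal illustrative sketch (hypothetical client, names invented):
 *
 *	static void my_add_one(struct ib_device *device) { ... }
 *	static void my_remove_one(struct ib_device *device, void *client_data) { ... }
 *
 *	static struct ib_client my_client = {
 *		.name	= "my_client",
 *		.add	= my_add_one,
 *		.remove	= my_remove_one,
 *	};
 *
 *	ib_register_client(&my_client);
 */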
1693 int ib_register_client(struct ib_client
*client
)
1695 struct ib_device
*device
;
1696 unsigned long index
;
1699 ret
= assign_client_id(client
);
1703 down_read(&devices_rwsem
);
1704 xa_for_each_marked (&devices
, index
, device
, DEVICE_REGISTERED
) {
1705 ret
= add_client_context(device
, client
);
1707 up_read(&devices_rwsem
);
1708 ib_unregister_client(client
);
1712 up_read(&devices_rwsem
);
1715 EXPORT_SYMBOL(ib_register_client
);
1718 * ib_unregister_client - Unregister an IB client
1719 * @client:Client to unregister
1721 * Upper level users use ib_unregister_client() to remove their client
1722 * registration. When ib_unregister_client() is called, the client
1723 * will receive a remove callback for each IB device still registered.
1725 * This is a full fence, once it returns no client callbacks will be called,
1726 * or are running in another thread.
1728 void ib_unregister_client(struct ib_client
*client
)
1730 struct ib_device
*device
;
1731 unsigned long index
;
1733 down_write(&clients_rwsem
);
1734 xa_clear_mark(&clients
, client
->client_id
, CLIENT_REGISTERED
);
1735 up_write(&clients_rwsem
);
1737 * Every device still known must be serialized to make sure we are
1738 * done with the client callbacks before we return.
1740 down_read(&devices_rwsem
);
1741 xa_for_each (&devices
, index
, device
)
1742 remove_client_context(device
, client
->client_id
);
1743 up_read(&devices_rwsem
);
1745 down_write(&clients_rwsem
);
1746 list_del(&client
->list
);
1747 xa_erase(&clients
, client
->client_id
);
1748 up_write(&clients_rwsem
);
1750 EXPORT_SYMBOL(ib_unregister_client
);
1753 * ib_set_client_data - Set IB client context
1754 * @device:Device to set context for
1755 * @client:Client to set context for
1756 * @data:Context to set
1758 * ib_set_client_data() sets client context data that can be retrieved with
1759 * ib_get_client_data(). This can only be called while the client is
1760 * registered to the device, once the ib_client remove() callback returns this
1763 void ib_set_client_data(struct ib_device
*device
, struct ib_client
*client
,
1768 if (WARN_ON(IS_ERR(data
)))
1771 rc
= xa_store(&device
->client_data
, client
->client_id
, data
,
1773 WARN_ON(xa_is_err(rc
));
1775 EXPORT_SYMBOL(ib_set_client_data
);
1778 * ib_register_event_handler - Register an IB event handler
1779 * @event_handler:Handler to register
1781 * ib_register_event_handler() registers an event handler that will be
1782 * called back when asynchronous IB events occur (as defined in
1783 * chapter 11 of the InfiniBand Architecture Specification). This
1784 * callback may occur in interrupt context.
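 *
 * Illustrative sketch (hypothetical handler; assumes the standard
 * INIT_IB_EVENT_HANDLER() initializer from <rdma/ib_verbs.h>):
 *
 *	static void my_event_cb(struct ib_event_handler *h,
 *				struct ib_event *event) { ... }
 *
 *	INIT_IB_EVENT_HANDLER(&handler, device, my_event_cb);
 *	ib_register_event_handler(&handler);
 */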
1786 void ib_register_event_handler(struct ib_event_handler
*event_handler
)
1788 unsigned long flags
;
1790 spin_lock_irqsave(&event_handler
->device
->event_handler_lock
, flags
);
1791 list_add_tail(&event_handler
->list
,
1792 &event_handler
->device
->event_handler_list
);
1793 spin_unlock_irqrestore(&event_handler
->device
->event_handler_lock
, flags
);
1795 EXPORT_SYMBOL(ib_register_event_handler
);
1798 * ib_unregister_event_handler - Unregister an event handler
1799 * @event_handler:Handler to unregister
1801 * Unregister an event handler registered with
1802 * ib_register_event_handler().
1804 void ib_unregister_event_handler(struct ib_event_handler
*event_handler
)
1806 unsigned long flags
;
1808 spin_lock_irqsave(&event_handler
->device
->event_handler_lock
, flags
);
1809 list_del(&event_handler
->list
);
1810 spin_unlock_irqrestore(&event_handler
->device
->event_handler_lock
, flags
);
1812 EXPORT_SYMBOL(ib_unregister_event_handler
);
1815 * ib_dispatch_event - Dispatch an asynchronous event
1816 * @event:Event to dispatch
1818 * Low-level drivers must call ib_dispatch_event() to dispatch the
1819 * event to all registered event handlers when an asynchronous event
1822 void ib_dispatch_event(struct ib_event
*event
)
1824 unsigned long flags
;
1825 struct ib_event_handler
*handler
;
1827 spin_lock_irqsave(&event
->device
->event_handler_lock
, flags
);
1829 list_for_each_entry(handler
, &event
->device
->event_handler_list
, list
)
1830 handler
->handler(handler
, event
);
1832 spin_unlock_irqrestore(&event
->device
->event_handler_lock
, flags
);
1834 EXPORT_SYMBOL(ib_dispatch_event
);
1837 * ib_query_port - Query IB port attributes
1838 * @device:Device to query
1839 * @port_num:Port number to query
1840 * @port_attr:Port attributes
1842 * ib_query_port() returns the attributes of a port through the
1843 * @port_attr pointer.
1845 int ib_query_port(struct ib_device
*device
,
1847 struct ib_port_attr
*port_attr
)
1852 if (!rdma_is_port_valid(device
, port_num
))
1855 memset(port_attr
, 0, sizeof(*port_attr
));
1856 err
= device
->ops
.query_port(device
, port_num
, port_attr
);
1857 if (err
|| port_attr
->subnet_prefix
)
1860 if (rdma_port_get_link_layer(device
, port_num
) != IB_LINK_LAYER_INFINIBAND
)
1863 err
= device
->ops
.query_gid(device
, port_num
, 0, &gid
);
1867 port_attr
->subnet_prefix
= be64_to_cpu(gid
.global
.subnet_prefix
);
1870 EXPORT_SYMBOL(ib_query_port
);
1872 static void add_ndev_hash(struct ib_port_data
*pdata
)
1874 unsigned long flags
;
1878 spin_lock_irqsave(&ndev_hash_lock
, flags
);
1879 if (hash_hashed(&pdata
->ndev_hash_link
)) {
1880 hash_del_rcu(&pdata
->ndev_hash_link
);
1881 spin_unlock_irqrestore(&ndev_hash_lock
, flags
);
1883 * We cannot do hash_add_rcu after a hash_del_rcu until the
1887 spin_lock_irqsave(&ndev_hash_lock
, flags
);
1890 hash_add_rcu(ndev_hash
, &pdata
->ndev_hash_link
,
1891 (uintptr_t)pdata
->netdev
);
1892 spin_unlock_irqrestore(&ndev_hash_lock
, flags
);
1896 * ib_device_set_netdev - Associate the ib_dev with an underlying net_device
1897 * @ib_dev: Device to modify
1898 * @ndev: net_device to affiliate, may be NULL
1899 * @port: IB port the net_device is connected to
1901 * Drivers should use this to link the ib_device to a netdev so the netdev
1902 * shows up in interfaces like ib_enum_roce_netdev. Only one netdev may be
1903 * affiliated with any port.
1905 * The caller must ensure that the given ndev is not unregistered or
1906 * unregistering, and that either the ib_device is unregistered or
1907 * ib_device_set_netdev() is called with NULL when the ndev sends a
1908 * NETDEV_UNREGISTER event.
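 *
 * Illustrative driver pattern (sketch, not from this file): on a
 * NETDEV_UNREGISTER notification the driver drops the association with
 *
 *	ib_device_set_netdev(ib_dev, NULL, port);
 *
 * and re-establishes it the same way for a new netdev.
 */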
1910 int ib_device_set_netdev(struct ib_device
*ib_dev
, struct net_device
*ndev
,
1913 struct net_device
*old_ndev
;
1914 struct ib_port_data
*pdata
;
1915 unsigned long flags
;
1919 * Drivers wish to call this before ib_register_driver, so we have to
1920 * setup the port data early.
1922 ret
= alloc_port_data(ib_dev
);
1926 if (!rdma_is_port_valid(ib_dev
, port
))
1929 pdata
= &ib_dev
->port_data
[port
];
1930 spin_lock_irqsave(&pdata
->netdev_lock
, flags
);
1931 old_ndev
= rcu_dereference_protected(
1932 pdata
->netdev
, lockdep_is_held(&pdata
->netdev_lock
));
1933 if (old_ndev
== ndev
) {
1934 spin_unlock_irqrestore(&pdata
->netdev_lock
, flags
);
1940 rcu_assign_pointer(pdata
->netdev
, ndev
);
1941 spin_unlock_irqrestore(&pdata
->netdev_lock
, flags
);
1943 add_ndev_hash(pdata
);
1949 EXPORT_SYMBOL(ib_device_set_netdev
);
1951 static void free_netdevs(struct ib_device
*ib_dev
)
1953 unsigned long flags
;
1956 if (!ib_dev
->port_data
)
1959 rdma_for_each_port (ib_dev
, port
) {
1960 struct ib_port_data
*pdata
= &ib_dev
->port_data
[port
];
1961 struct net_device
*ndev
;
1963 spin_lock_irqsave(&pdata
->netdev_lock
, flags
);
1964 ndev
= rcu_dereference_protected(
1965 pdata
->netdev
, lockdep_is_held(&pdata
->netdev_lock
));
1967 spin_lock(&ndev_hash_lock
);
1968 hash_del_rcu(&pdata
->ndev_hash_link
);
1969 spin_unlock(&ndev_hash_lock
);
1972 * If this is the last dev_put there is still a
1973 * synchronize_rcu before the netdev is kfreed, so we
1974 * can continue to rely on unlocked pointer
1975 * comparisons after the put
1977 rcu_assign_pointer(pdata
->netdev
, NULL
);
1980 spin_unlock_irqrestore(&pdata
->netdev_lock
, flags
);
1984 struct net_device
*ib_device_get_netdev(struct ib_device
*ib_dev
,
1987 struct ib_port_data
*pdata
;
1988 struct net_device
*res
;
1990 if (!rdma_is_port_valid(ib_dev
, port
))
1993 pdata
= &ib_dev
->port_data
[port
];
1996 * New drivers should use ib_device_set_netdev() not the legacy
1999 if (ib_dev
->ops
.get_netdev
)
2000 res
= ib_dev
->ops
.get_netdev(ib_dev
, port
);
2002 spin_lock(&pdata
->netdev_lock
);
2003 res
= rcu_dereference_protected(
2004 pdata
->netdev
, lockdep_is_held(&pdata
->netdev_lock
));
2007 spin_unlock(&pdata
->netdev_lock
);
2011 * If we are starting to unregister expedite things by preventing
2012 * propagation of an unregistering netdev.
2014 if (res
&& res
->reg_state
!= NETREG_REGISTERED
) {
2023 * ib_device_get_by_netdev - Find an IB device associated with a netdev
2024 * @ndev: netdev to locate
2025 * @driver_id: The driver ID that must match (RDMA_DRIVER_UNKNOWN matches all)
2027 * Find and hold an ib_device that is associated with a netdev via
2028 * ib_device_set_netdev(). The caller must call ib_device_put() on the
2031 struct ib_device
*ib_device_get_by_netdev(struct net_device
*ndev
,
2032 enum rdma_driver_id driver_id
)
2034 struct ib_device
*res
= NULL
;
2035 struct ib_port_data
*cur
;
2038 hash_for_each_possible_rcu (ndev_hash
, cur
, ndev_hash_link
,
2040 if (rcu_access_pointer(cur
->netdev
) == ndev
&&
2041 (driver_id
== RDMA_DRIVER_UNKNOWN
||
2042 cur
->ib_dev
->driver_id
== driver_id
) &&
2043 ib_device_try_get(cur
->ib_dev
)) {
2052 EXPORT_SYMBOL(ib_device_get_by_netdev
);
2055 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * which are related to a netdevice and calls callback() on each
 * port for which the filter() function returns non zero.
2066 void ib_enum_roce_netdev(struct ib_device
*ib_dev
,
2067 roce_netdev_filter filter
,
2068 void *filter_cookie
,
2069 roce_netdev_callback cb
,
2074 rdma_for_each_port (ib_dev
, port
)
2075 if (rdma_protocol_roce(ib_dev
, port
)) {
2076 struct net_device
*idev
=
2077 ib_device_get_netdev(ib_dev
, port
);
2079 if (filter(ib_dev
, port
, idev
, filter_cookie
))
2080 cb(ib_dev
, port
, idev
, cookie
);
2088 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
2089 * @filter: Should we call the callback?
2090 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all RoCE devices' physical ports which are related
 * to netdevices and calls callback() on each port for which the
 * filter() function returns non zero.
2098 void ib_enum_all_roce_netdevs(roce_netdev_filter filter
,
2099 void *filter_cookie
,
2100 roce_netdev_callback cb
,
2103 struct ib_device
*dev
;
2104 unsigned long index
;
2106 down_read(&devices_rwsem
);
2107 xa_for_each_marked (&devices
, index
, dev
, DEVICE_REGISTERED
)
2108 ib_enum_roce_netdev(dev
, filter
, filter_cookie
, cb
, cookie
);
2109 up_read(&devices_rwsem
);
2113 * ib_enum_all_devs - enumerate all ib_devices
2114 * @cb: Callback to call for each found ib_device
2116 * Enumerates all ib_devices and calls callback() on each device.
2118 int ib_enum_all_devs(nldev_callback nldev_cb
, struct sk_buff
*skb
,
2119 struct netlink_callback
*cb
)
2121 unsigned long index
;
2122 struct ib_device
*dev
;
2123 unsigned int idx
= 0;
2126 down_read(&devices_rwsem
);
2127 xa_for_each_marked (&devices
, index
, dev
, DEVICE_REGISTERED
) {
2128 if (!rdma_dev_access_netns(dev
, sock_net(skb
->sk
)))
2131 ret
= nldev_cb(dev
, skb
, cb
, idx
);
2136 up_read(&devices_rwsem
);
2141 * ib_query_pkey - Get P_Key table entry
2142 * @device:Device to query
2143 * @port_num:Port number to query
2144 * @index:P_Key table index to query
2145 * @pkey:Returned P_Key
2147 * ib_query_pkey() fetches the specified P_Key table entry.
2149 int ib_query_pkey(struct ib_device
*device
,
2150 u8 port_num
, u16 index
, u16
*pkey
)
2152 if (!rdma_is_port_valid(device
, port_num
))
2155 return device
->ops
.query_pkey(device
, port_num
, index
, pkey
);
2157 EXPORT_SYMBOL(ib_query_pkey
);
2160 * ib_modify_device - Change IB device attributes
2161 * @device:Device to modify
2162 * @device_modify_mask:Mask of attributes to change
2163 * @device_modify:New attribute values
2165 * ib_modify_device() changes a device's attributes as specified by
2166 * the @device_modify_mask and @device_modify structure.
2168 int ib_modify_device(struct ib_device
*device
,
2169 int device_modify_mask
,
2170 struct ib_device_modify
*device_modify
)
2172 if (!device
->ops
.modify_device
)
2175 return device
->ops
.modify_device(device
, device_modify_mask
,
2178 EXPORT_SYMBOL(ib_modify_device
);
/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	int rc;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	if (device->ops.modify_port)
		rc = device->ops.modify_port(device, port_num,
					     port_modify_mask,
					     port_modify);
	else
		rc = rdma_protocol_roce(device, port_num) ? 0 : -ENOSYS;
	return rc;
}
EXPORT_SYMBOL(ib_modify_port);
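
/*
 * A minimal sketch, not from this file: core clients such as the CM use
 * ib_modify_port() to toggle port capability bits.  The example_* name
 * is hypothetical; IB_PORT_CM_SUP and struct ib_port_modify are the
 * regular verbs definitions.
 */
static int example_advertise_cm_support(struct ib_device *device, u8 port_num)
{
	struct ib_port_modify port_modify = {
		.set_port_cap_mask = IB_PORT_CM_SUP,
	};

	/* port_modify_mask is 0: only the capability mask bits change */
	return ib_modify_port(device, port_num, 0, &port_modify);
}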
/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs. It searches only the IB link layer.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	unsigned int port;
	int ret, i;

	rdma_for_each_port (device, port) {
		if (!rdma_protocol_ib(device, port))
			continue;

		for (i = 0; i < device->port_data[port].immutable.gid_tbl_len;
		     ++i) {
			ret = rdma_query_gid(device, port, i, &tmp_gid);
			if (ret)
				return ret;

			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);
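
/*
 * A minimal sketch, not from this file: resolving which IB port owns a
 * given GID.  The example_* name is hypothetical.
 */
static int example_port_for_gid(struct ib_device *device, union ib_gid *gid,
				u8 *port_num)
{
	u16 index;

	/* The index argument may also be NULL when only the port matters */
	return ib_find_gid(device, gid, port_num, &index);
}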
/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_data[port_num].immutable.pkey_tbl_len;
	     ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;

		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* If there is a full-member pkey, take it. */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* No full-member pkey found; take the limited-member one if it exists. */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
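
/*
 * A minimal sketch, not from this file: looking up the table index of
 * the default partition key 0xffff, preferring the full-member entry as
 * ib_find_pkey() above does.  The example_* name is hypothetical.
 */
static int example_default_pkey_index(struct ib_device *device, u8 port_num,
				      u16 *index)
{
	return ib_find_pkey(device, port_num, 0xffff, index);
}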
/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev:	An RDMA device on which the request has been received.
 * @port:	Port number on the RDMA device.
 * @pkey:	The Pkey the request came on.
 * @gid:	A GID that the net_dev uses to communicate.
 * @addr:	Contains the IP address that the request specified as its
 *		destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	unsigned long index;
	void *client_data;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	/*
	 * Holding the read side guarantees that the client will not become
	 * unregistered while we are calling get_net_dev_by_params()
	 */
	down_read(&dev->client_data_rwsem);
	xan_for_each_marked (&dev->client_data, index, client_data,
			     CLIENT_DATA_REGISTERED) {
		struct ib_client *client = xa_load(&clients, index);

		if (!client || !client->get_net_dev_by_params)
			continue;

		net_dev = client->get_net_dev_by_params(dev, port, pkey, gid,
							addr, client_data);
		if (net_dev)
			break;
	}
	up_read(&dev->client_data_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);
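
/*
 * A minimal sketch, not from this file: how a CM-style caller might map
 * an incoming request to a netdev.  The example_* name is hypothetical;
 * the reference returned by ib_get_net_dev_by_params() is assumed to be
 * dropped with dev_put() once the caller is done with it.
 */
static bool example_request_has_netdev(struct ib_device *dev, u8 port,
				       u16 pkey, const union ib_gid *gid,
				       const struct sockaddr *addr)
{
	struct net_device *net_dev;

	net_dev = ib_get_net_dev_by_params(dev, port, pkey, gid, addr);
	if (!net_dev)
		return false;

	dev_put(net_dev);
	return true;
}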
void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
{
	struct ib_device_ops *dev_ops = &dev->ops;

#define SET_DEVICE_OP(ptr, name) \
	do { \
		if (ops->name) \
			if (!((ptr)->name)) \
				(ptr)->name = ops->name; \
	} while (0)

#define SET_OBJ_SIZE(ptr, name) SET_DEVICE_OP(ptr, size_##name)

	SET_DEVICE_OP(dev_ops, add_gid);
	SET_DEVICE_OP(dev_ops, advise_mr);
	SET_DEVICE_OP(dev_ops, alloc_dm);
	SET_DEVICE_OP(dev_ops, alloc_fmr);
	SET_DEVICE_OP(dev_ops, alloc_hw_stats);
	SET_DEVICE_OP(dev_ops, alloc_mr);
	SET_DEVICE_OP(dev_ops, alloc_mw);
	SET_DEVICE_OP(dev_ops, alloc_pd);
	SET_DEVICE_OP(dev_ops, alloc_rdma_netdev);
	SET_DEVICE_OP(dev_ops, alloc_ucontext);
	SET_DEVICE_OP(dev_ops, alloc_xrcd);
	SET_DEVICE_OP(dev_ops, attach_mcast);
	SET_DEVICE_OP(dev_ops, check_mr_status);
	SET_DEVICE_OP(dev_ops, create_ah);
	SET_DEVICE_OP(dev_ops, create_counters);
	SET_DEVICE_OP(dev_ops, create_cq);
	SET_DEVICE_OP(dev_ops, create_flow);
	SET_DEVICE_OP(dev_ops, create_flow_action_esp);
	SET_DEVICE_OP(dev_ops, create_qp);
	SET_DEVICE_OP(dev_ops, create_rwq_ind_table);
	SET_DEVICE_OP(dev_ops, create_srq);
	SET_DEVICE_OP(dev_ops, create_wq);
	SET_DEVICE_OP(dev_ops, dealloc_dm);
	SET_DEVICE_OP(dev_ops, dealloc_driver);
	SET_DEVICE_OP(dev_ops, dealloc_fmr);
	SET_DEVICE_OP(dev_ops, dealloc_mw);
	SET_DEVICE_OP(dev_ops, dealloc_pd);
	SET_DEVICE_OP(dev_ops, dealloc_ucontext);
	SET_DEVICE_OP(dev_ops, dealloc_xrcd);
	SET_DEVICE_OP(dev_ops, del_gid);
	SET_DEVICE_OP(dev_ops, dereg_mr);
	SET_DEVICE_OP(dev_ops, destroy_ah);
	SET_DEVICE_OP(dev_ops, destroy_counters);
	SET_DEVICE_OP(dev_ops, destroy_cq);
	SET_DEVICE_OP(dev_ops, destroy_flow);
	SET_DEVICE_OP(dev_ops, destroy_flow_action);
	SET_DEVICE_OP(dev_ops, destroy_qp);
	SET_DEVICE_OP(dev_ops, destroy_rwq_ind_table);
	SET_DEVICE_OP(dev_ops, destroy_srq);
	SET_DEVICE_OP(dev_ops, destroy_wq);
	SET_DEVICE_OP(dev_ops, detach_mcast);
	SET_DEVICE_OP(dev_ops, disassociate_ucontext);
	SET_DEVICE_OP(dev_ops, drain_rq);
	SET_DEVICE_OP(dev_ops, drain_sq);
	SET_DEVICE_OP(dev_ops, enable_driver);
	SET_DEVICE_OP(dev_ops, fill_res_entry);
	SET_DEVICE_OP(dev_ops, get_dev_fw_str);
	SET_DEVICE_OP(dev_ops, get_dma_mr);
	SET_DEVICE_OP(dev_ops, get_hw_stats);
	SET_DEVICE_OP(dev_ops, get_link_layer);
	SET_DEVICE_OP(dev_ops, get_netdev);
	SET_DEVICE_OP(dev_ops, get_port_immutable);
	SET_DEVICE_OP(dev_ops, get_vector_affinity);
	SET_DEVICE_OP(dev_ops, get_vf_config);
	SET_DEVICE_OP(dev_ops, get_vf_stats);
	SET_DEVICE_OP(dev_ops, init_port);
	SET_DEVICE_OP(dev_ops, iw_accept);
	SET_DEVICE_OP(dev_ops, iw_add_ref);
	SET_DEVICE_OP(dev_ops, iw_connect);
	SET_DEVICE_OP(dev_ops, iw_create_listen);
	SET_DEVICE_OP(dev_ops, iw_destroy_listen);
	SET_DEVICE_OP(dev_ops, iw_get_qp);
	SET_DEVICE_OP(dev_ops, iw_reject);
	SET_DEVICE_OP(dev_ops, iw_rem_ref);
	SET_DEVICE_OP(dev_ops, map_mr_sg);
	SET_DEVICE_OP(dev_ops, map_phys_fmr);
	SET_DEVICE_OP(dev_ops, mmap);
	SET_DEVICE_OP(dev_ops, modify_ah);
	SET_DEVICE_OP(dev_ops, modify_cq);
	SET_DEVICE_OP(dev_ops, modify_device);
	SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
	SET_DEVICE_OP(dev_ops, modify_port);
	SET_DEVICE_OP(dev_ops, modify_qp);
	SET_DEVICE_OP(dev_ops, modify_srq);
	SET_DEVICE_OP(dev_ops, modify_wq);
	SET_DEVICE_OP(dev_ops, peek_cq);
	SET_DEVICE_OP(dev_ops, poll_cq);
	SET_DEVICE_OP(dev_ops, post_recv);
	SET_DEVICE_OP(dev_ops, post_send);
	SET_DEVICE_OP(dev_ops, post_srq_recv);
	SET_DEVICE_OP(dev_ops, process_mad);
	SET_DEVICE_OP(dev_ops, query_ah);
	SET_DEVICE_OP(dev_ops, query_device);
	SET_DEVICE_OP(dev_ops, query_gid);
	SET_DEVICE_OP(dev_ops, query_pkey);
	SET_DEVICE_OP(dev_ops, query_port);
	SET_DEVICE_OP(dev_ops, query_qp);
	SET_DEVICE_OP(dev_ops, query_srq);
	SET_DEVICE_OP(dev_ops, rdma_netdev_get_params);
	SET_DEVICE_OP(dev_ops, read_counters);
	SET_DEVICE_OP(dev_ops, reg_dm_mr);
	SET_DEVICE_OP(dev_ops, reg_user_mr);
	SET_DEVICE_OP(dev_ops, req_ncomp_notif);
	SET_DEVICE_OP(dev_ops, req_notify_cq);
	SET_DEVICE_OP(dev_ops, rereg_user_mr);
	SET_DEVICE_OP(dev_ops, resize_cq);
	SET_DEVICE_OP(dev_ops, set_vf_guid);
	SET_DEVICE_OP(dev_ops, set_vf_link_state);
	SET_DEVICE_OP(dev_ops, unmap_fmr);

	SET_OBJ_SIZE(dev_ops, ib_ah);
	SET_OBJ_SIZE(dev_ops, ib_pd);
	SET_OBJ_SIZE(dev_ops, ib_srq);
	SET_OBJ_SIZE(dev_ops, ib_ucontext);
}
EXPORT_SYMBOL(ib_set_device_ops);
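
/*
 * A minimal sketch, not from this file: a driver normally declares one
 * const ops table and registers it when allocating its ib_device.  The
 * example_* symbols are hypothetical (and therefore left uncompiled);
 * only ib_set_device_ops(), the ib_device_ops fields and
 * INIT_RDMA_OBJ_SIZE() are real interfaces.
 */
#if 0	/* illustration only */
static const struct ib_device_ops example_dev_ops = {
	.alloc_pd = example_alloc_pd,
	.dealloc_pd = example_dealloc_pd,
	.query_device = example_query_device,
	.query_pkey = example_query_pkey,
	.query_port = example_query_port,

	INIT_RDMA_OBJ_SIZE(ib_pd, example_pd, ibpd),
};

static void example_init_ibdev(struct ib_device *ibdev)
{
	/* Only ops still unset in ibdev->ops are copied from the table */
	ib_set_device_ops(ibdev, &example_dev_ops);
}
#endif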
static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.doit = ib_nl_handle_resolve_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.doit = ib_nl_handle_set_timeout,
		.flags = RDMA_NL_ADMIN_PERM,
	},
	[RDMA_NL_LS_OP_IP_RESOLVE] = {
		.doit = ib_nl_handle_ip_res_resp,
		.flags = RDMA_NL_ADMIN_PERM,
	},
};
static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ib_comp_unbound_wq =
		alloc_workqueue("ib-comp-unb-wq",
				WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM |
				WQ_SYSFS, WQ_UNBOUND_MAX_ACTIVE);
	if (!ib_comp_unbound_wq) {
		ret = -ENOMEM;
		goto err_comp;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp_unbound;
	}

	ret = rdma_nl_init();
	if (ret) {
		pr_warn("Couldn't init IB netlink interface: err %d\n", ret);
		goto err_sysfs;
	}

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_ibnl;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ret = register_lsm_notifier(&ibdev_lsm_nb);
	if (ret) {
		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
		goto err_sa;
	}

	ret = register_pernet_device(&rdma_dev_net_ops);
	if (ret) {
		pr_warn("Couldn't init compat dev. ret %d\n", ret);
		goto err_compat;
	}

	nldev_init();
	rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table);
	roce_gid_mgmt_init();

	return 0;

err_compat:
	unregister_lsm_notifier(&ibdev_lsm_nb);
err_sa:
	ib_sa_cleanup();
err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_ibnl:
	rdma_nl_exit();
err_sysfs:
	class_unregister(&ib_class);
err_comp_unbound:
	destroy_workqueue(ib_comp_unbound_wq);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}
static void __exit ib_core_cleanup(void)
{
	roce_gid_mgmt_cleanup();
	nldev_exit();
	rdma_nl_unregister(RDMA_NL_LS);
	unregister_pernet_device(&rdma_dev_net_ops);
	unregister_lsm_notifier(&ibdev_lsm_nb);
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	rdma_nl_exit();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_unbound_wq);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
	flush_workqueue(system_unbound_wq);
	WARN_ON(!xa_empty(&clients));
	WARN_ON(!xa_empty(&devices));
}
MODULE_ALIAS_RDMA_NETLINK(RDMA_NL_LS, 4);

/* ib core relies on netdev stack to first register net_ns_type_operations
 * ns kobject type before ib_core initialization.
 */
fs_initcall(ib_core_init);
module_exit(ib_core_cleanup);