/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/security.h>
#include <linux/notifier.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
	struct list_head  list;
	struct ib_client *client;
	void             *data;
	/* The device or client is going down. Do not call client or device
	 * callbacks other than remove(). */
	bool		  going_down;
};

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
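
/*
 * Example (illustrative only): ib_wq is exported so that RDMA core modules
 * and ULPs can push non-performance-critical work off hot paths (umem
 * accounting is flushed through it in ib_core_cleanup() below).  A minimal
 * sketch, assuming a hypothetical deferred function my_deferred_fn():
 *
 *	static DECLARE_WORK(my_work, my_deferred_fn);
 *
 *	queue_work(ib_wq, &my_work);
 */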

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock. A special case is when the
 * device_mutex is locked. In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data);
static void ib_policy_change_task(struct work_struct *work);
static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task);

static struct notifier_block ibdev_lsm_nb = {
	.notifier_call = ib_security_change,
};

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
	static const struct {
		size_t offset;
		char  *name;
	} mandatory_table[] = {
		IB_MANDATORY_FUNC(query_device),
		IB_MANDATORY_FUNC(query_port),
		IB_MANDATORY_FUNC(query_pkey),
		IB_MANDATORY_FUNC(query_gid),
		IB_MANDATORY_FUNC(alloc_pd),
		IB_MANDATORY_FUNC(dealloc_pd),
		IB_MANDATORY_FUNC(create_ah),
		IB_MANDATORY_FUNC(destroy_ah),
		IB_MANDATORY_FUNC(create_qp),
		IB_MANDATORY_FUNC(modify_qp),
		IB_MANDATORY_FUNC(destroy_qp),
		IB_MANDATORY_FUNC(post_send),
		IB_MANDATORY_FUNC(post_recv),
		IB_MANDATORY_FUNC(create_cq),
		IB_MANDATORY_FUNC(destroy_cq),
		IB_MANDATORY_FUNC(poll_cq),
		IB_MANDATORY_FUNC(req_notify_cq),
		IB_MANDATORY_FUNC(get_dma_mr),
		IB_MANDATORY_FUNC(dereg_mr),
		IB_MANDATORY_FUNC(get_port_immutable)
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
		if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
			pr_warn("Device %s is missing mandatory function %s\n",
				device->name, mandatory_table[i].name);
			return -EINVAL;
		}
	}

	return 0;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
	struct ib_device *device;

	list_for_each_entry(device, &device_list, core_list)
		if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
			return device;

	return NULL;
}

static int alloc_name(char *name)
{
	unsigned long *inuse;
	char buf[IB_DEVICE_NAME_MAX];
	struct ib_device *device;
	int i;

	inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
	if (!inuse)
		return -ENOMEM;

	list_for_each_entry(device, &device_list, core_list) {
		if (!sscanf(device->name, name, &i))
			continue;
		if (i < 0 || i >= PAGE_SIZE * 8)
			continue;
		snprintf(buf, sizeof buf, name, i);
		if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
			set_bit(i, inuse);
	}

	i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
	free_page((unsigned long) inuse);
	snprintf(buf, sizeof buf, name, i);

	if (__ib_device_get_by_name(buf))
		return -ENFILE;

	strlcpy(name, buf, IB_DEVICE_NAME_MAX);
	return 0;
}

static void ib_device_release(struct device *device)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	WARN_ON(dev->reg_state == IB_DEV_REGISTERED);
	if (dev->reg_state == IB_DEV_UNREGISTERED) {
		/*
		 * In IB_DEV_UNINITIALIZED state, cache or port table
		 * is not even created. Free cache and port table only when
		 * device reaches UNREGISTERED state.
		 */
		ib_cache_release_one(dev);
		kfree(dev->port_immutable);
	}
	kfree(dev);
}

static int ib_device_uevent(struct device *device,
			    struct kobj_uevent_env *env)
{
	struct ib_device *dev = container_of(device, struct ib_device, dev);

	if (add_uevent_var(env, "NAME=%s", dev->name))
		return -ENOMEM;

	/*
	 * It would be nice to pass the node GUID with the event...
	 */

	return 0;
}

static struct class ib_class = {
	.name    = "infiniband",
	.dev_release = ib_device_release,
	.dev_uevent = ib_device_uevent,
};

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
	struct ib_device *device;

	if (WARN_ON(size < sizeof(struct ib_device)))
		return NULL;

	device = kzalloc(size, GFP_KERNEL);
	if (!device)
		return NULL;

	device->dev.class = &ib_class;
	device_initialize(&device->dev);

	dev_set_drvdata(&device->dev, device);

	INIT_LIST_HEAD(&device->event_handler_list);
	spin_lock_init(&device->event_handler_lock);
	spin_lock_init(&device->client_data_lock);
	INIT_LIST_HEAD(&device->client_data_list);
	INIT_LIST_HEAD(&device->port_list);

	return device;
}
EXPORT_SYMBOL(ib_alloc_device);

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
	WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
		device->reg_state != IB_DEV_UNINITIALIZED);
	kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	unsigned long flags;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->client = client;
	context->data   = NULL;
	context->going_down = false;

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_add(&context->list, &device->client_data_list);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
	return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
		       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
	int ret;
	u8 start_port = rdma_start_port(device);
	u8 end_port = rdma_end_port(device);
	u8 port;

	/*
	 * device->port_immutable is indexed directly by the port number to make
	 * access to this data as efficient as possible.
	 *
	 * Therefore port_immutable is declared as a 1 based array with
	 * potential empty slots at the beginning.
	 */
	device->port_immutable = kzalloc(sizeof(*device->port_immutable)
					 * (end_port + 1),
					 GFP_KERNEL);
	if (!device->port_immutable)
		return -ENOMEM;

	for (port = start_port; port <= end_port; ++port) {
		ret = device->get_port_immutable(device, port,
						 &device->port_immutable[port]);
		if (ret)
			return ret;

		if (verify_immutable(device, port))
			return -EINVAL;
	}
	return 0;
}

void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len)
{
	if (dev->get_dev_fw_str)
		dev->get_dev_fw_str(dev, str, str_len);
	else
		str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

static int setup_port_pkey_list(struct ib_device *device)
{
	int i;

	/*
	 * device->port_pkey_list is indexed directly by the port number;
	 * therefore it is declared as a 1 based array with potential empty
	 * slots at the beginning.
	 */
	device->port_pkey_list = kcalloc(rdma_end_port(device) + 1,
					 sizeof(*device->port_pkey_list),
					 GFP_KERNEL);

	if (!device->port_pkey_list)
		return -ENOMEM;

	for (i = 0; i < (rdma_end_port(device) + 1); i++) {
		spin_lock_init(&device->port_pkey_list[i].list_lock);
		INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list);
	}

	return 0;
}

static void ib_policy_change_task(struct work_struct *work)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list) {
		int i;

		for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) {
			u64 sp;
			int ret = ib_get_cached_subnet_prefix(dev, i, &sp);

			WARN_ONCE(ret,
				  "ib_get_cached_subnet_prefix err: %d, this should never happen here\n",
				  ret);
			if (!ret)
				ib_security_cache_change(dev, i, sp);
		}
	}
	up_read(&lists_rwsem);
}

static int ib_security_change(struct notifier_block *nb, unsigned long event,
			      void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	schedule_work(&ib_policy_change_work);

	return NOTIFY_OK;
}

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
		       int (*port_callback)(struct ib_device *,
					    u8, struct kobject *))
{
	int ret;
	struct ib_client *client;
	struct ib_udata uhw = {.outlen = 0, .inlen = 0};
	struct device *parent = device->dev.parent;

	WARN_ON_ONCE(!parent);
	WARN_ON_ONCE(device->dma_device);
	if (device->dev.dma_ops) {
		/*
		 * The caller provided custom DMA operations. Copy the
		 * DMA-related fields that are used by e.g. dma_alloc_coherent()
		 * into device->dev.
		 */
		device->dma_device = &device->dev;
		if (!device->dev.dma_mask)
			device->dev.dma_mask = parent->dma_mask;
		if (!device->dev.coherent_dma_mask)
			device->dev.coherent_dma_mask =
				parent->coherent_dma_mask;
	} else {
		/*
		 * The caller did not provide custom DMA operations. Use the
		 * DMA mapping operations of the parent device.
		 */
		device->dma_device = parent;
	}

	mutex_lock(&device_mutex);

	if (strchr(device->name, '%')) {
		ret = alloc_name(device->name);
		if (ret)
			goto out;
	}

	if (ib_device_check_mandatory(device)) {
		ret = -EINVAL;
		goto out;
	}

	ret = read_port_immutable(device);
	if (ret) {
		pr_warn("Couldn't create per port immutable data %s\n",
			device->name);
		goto out;
	}

	ret = setup_port_pkey_list(device);
	if (ret) {
		pr_warn("Couldn't create per port_pkey_list\n");
		goto out;
	}

	ret = ib_cache_setup_one(device);
	if (ret) {
		pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
		goto port_cleanup;
	}

	ret = ib_device_register_rdmacg(device);
	if (ret) {
		pr_warn("Couldn't register device with rdma cgroup\n");
		goto cache_cleanup;
	}

	memset(&device->attrs, 0, sizeof(device->attrs));
	ret = device->query_device(device, &device->attrs, &uhw);
	if (ret) {
		pr_warn("Couldn't query the device attributes\n");
		goto cache_cleanup;
	}

	ret = ib_device_register_sysfs(device, port_callback);
	if (ret) {
		pr_warn("Couldn't register device %s with driver model\n",
			device->name);
		goto cache_cleanup;
	}

	device->reg_state = IB_DEV_REGISTERED;

	list_for_each_entry(client, &client_list, list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&device->core_list, &device_list);
	up_write(&lists_rwsem);
	mutex_unlock(&device_mutex);
	return 0;

cache_cleanup:
	ib_cache_cleanup_one(device);
	ib_cache_release_one(device);
port_cleanup:
	kfree(device->port_immutable);
out:
	mutex_unlock(&device_mutex);
	return ret;
}
EXPORT_SYMBOL(ib_register_device);

/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
	struct ib_client_data *context, *tmp;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&device->core_list);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		context->going_down = true;
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	downgrade_write(&lists_rwsem);

	list_for_each_entry_safe(context, tmp, &device->client_data_list,
				 list) {
		if (context->client->remove)
			context->client->remove(device, context->data);
	}
	up_read(&lists_rwsem);

	mutex_unlock(&device_mutex);

	ib_device_unregister_rdmacg(device);
	ib_device_unregister_sysfs(device);
	ib_cache_cleanup_one(device);

	ib_security_destroy_port_pkey_list(device);
	kfree(device->port_pkey_list);

	down_write(&lists_rwsem);
	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
		kfree(context);
	spin_unlock_irqrestore(&device->client_data_lock, flags);
	up_write(&lists_rwsem);

	device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
	struct ib_device *device;

	mutex_lock(&device_mutex);

	list_for_each_entry(device, &device_list, core_list)
		if (client->add && !add_client_context(device, client))
			client->add(device);

	down_write(&lists_rwsem);
	list_add_tail(&client->list, &client_list);
	up_write(&lists_rwsem);

	mutex_unlock(&device_mutex);

	return 0;
}
EXPORT_SYMBOL(ib_register_client);

/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
	struct ib_client_data *context, *tmp;
	struct ib_device *device;
	unsigned long flags;

	mutex_lock(&device_mutex);

	down_write(&lists_rwsem);
	list_del(&client->list);
	up_write(&lists_rwsem);

	list_for_each_entry(device, &device_list, core_list) {
		struct ib_client_data *found_context = NULL;

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
			if (context->client == client) {
				context->going_down = true;
				found_context = context;
				break;
			}
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);

		if (client->remove)
			client->remove(device, found_context ?
					       found_context->data : NULL);

		if (!found_context) {
			pr_warn("No client context found for %s/%s\n",
				device->name, client->name);
			continue;
		}

		down_write(&lists_rwsem);
		spin_lock_irqsave(&device->client_data_lock, flags);
		list_del(&found_context->list);
		kfree(found_context);
		spin_unlock_irqrestore(&device->client_data_lock, flags);
		up_write(&lists_rwsem);
	}

	mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
	struct ib_client_data *context;
	void *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			ret = context->data;
			break;
		}
	spin_unlock_irqrestore(&device->client_data_lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
			void *data)
{
	struct ib_client_data *context;
	unsigned long flags;

	spin_lock_irqsave(&device->client_data_lock, flags);
	list_for_each_entry(context, &device->client_data_list, list)
		if (context->client == client) {
			context->data = data;
			goto out;
		}

	pr_warn("No client context found for %s/%s\n",
		device->name, client->name);

out:
	spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_add_tail(&event_handler->list,
		      &event_handler->device->event_handler_list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
	unsigned long flags;

	spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
	list_del(&event_handler->list);
	spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
	unsigned long flags;
	struct ib_event_handler *handler;

	spin_lock_irqsave(&event->device->event_handler_lock, flags);

	list_for_each_entry(handler, &event->device->event_handler_list, list)
		handler->handler(handler, event);

	spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
		  u8 port_num,
		  struct ib_port_attr *port_attr)
{
	union ib_gid gid;
	int err;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	memset(port_attr, 0, sizeof(*port_attr));
	err = device->query_port(device, port_num, port_attr);
	if (err || port_attr->subnet_prefix)
		return err;

	if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
		return 0;

	err = ib_query_gid(device, port_num, 0, &gid, NULL);
	if (err)
		return err;

	port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
	return 0;
}
EXPORT_SYMBOL(ib_query_port);

/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
		 u8 port_num, int index, union ib_gid *gid,
		 struct ib_gid_attr *attr)
{
	if (rdma_cap_roce_gid_table(device, port_num))
		return ib_get_cached_gid(device, port_num, index, gid, attr);

	if (attr)
		return -EINVAL;

	return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev : IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * which are related to a netdevice and calls the callback() on each
 * port for which the filter() function returns a non-zero value.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
			 roce_netdev_filter filter,
			 void *filter_cookie,
			 roce_netdev_callback cb,
			 void *cookie)
{
	u8 port;

	for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
	     port++)
		if (rdma_protocol_roce(ib_dev, port)) {
			struct net_device *idev = NULL;

			if (ib_dev->get_netdev)
				idev = ib_dev->get_netdev(ib_dev, port);

			if (idev &&
			    idev->reg_state >= NETREG_UNREGISTERED) {
				dev_put(idev);
				idev = NULL;
			}

			if (filter(ib_dev, port, idev, filter_cookie))
				cb(ib_dev, port, idev, cookie);

			if (idev)
				dev_put(idev);
		}
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all RoCE devices' physical ports which are related
 * to netdevices and calls the callback() on each port for which the
 * filter() function returns a non-zero value.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
			      void *filter_cookie,
			      roce_netdev_callback cb,
			      void *cookie)
{
	struct ib_device *dev;

	down_read(&lists_rwsem);
	list_for_each_entry(dev, &device_list, core_list)
		ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
	up_read(&lists_rwsem);
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
		  u8 port_num, u16 index, u16 *pkey)
{
	return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
		     int device_modify_mask,
		     struct ib_device_modify *device_modify)
{
	if (!device->modify_device)
		return -ENOSYS;

	return device->modify_device(device, device_modify_mask,
				     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
		   u8 port_num, int port_modify_mask,
		   struct ib_port_modify *port_modify)
{
	if (!device->modify_port)
		return -ENOSYS;

	if (!rdma_is_port_valid(device, port_num))
		return -EINVAL;

	return device->modify_port(device, port_num, port_modify_mask,
				   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: Type of GID.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
		enum ib_gid_type gid_type, struct net_device *ndev,
		u8 *port_num, u16 *index)
{
	union ib_gid tmp_gid;
	int ret, port, i;

	for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
		if (rdma_cap_roce_gid_table(device, port)) {
			if (!ib_find_cached_gid_by_port(device, gid, gid_type, port,
							ndev, index)) {
				*port_num = port;
				return 0;
			}
		}

		if (gid_type != IB_GID_TYPE_IB)
			continue;

		for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
			ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
			if (ret)
				return ret;
			if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
				*port_num = port;
				if (index)
					*index = i;
				return 0;
			}
		}
	}

	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
		 u8 port_num, u16 pkey, u16 *index)
{
	int ret, i;
	u16 tmp_pkey;
	int partial_ix = -1;

	for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
		ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
		if (ret)
			return ret;
		if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
			/* if there is a full-member pkey take it */
			if (tmp_pkey & 0x8000) {
				*index = i;
				return 0;
			}
			if (partial_ix < 0)
				partial_ix = i;
		}
	}

	/* no full-member; if a limited-member pkey exists, take it */
	if (partial_ix >= 0) {
		*index = partial_ix;
		return 0;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);

/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev:	An RDMA device on which the request has been received.
 * @port:	Port number on the RDMA device.
 * @pkey:	The Pkey the request came on.
 * @gid:	A GID that the net_dev uses to communicate.
 * @addr:	Contains the IP address that the request specified as its
 *		destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
					    u8 port,
					    u16 pkey,
					    const union ib_gid *gid,
					    const struct sockaddr *addr)
{
	struct net_device *net_dev = NULL;
	struct ib_client_data *context;

	if (!rdma_protocol_ib(dev, port))
		return NULL;

	down_read(&lists_rwsem);

	list_for_each_entry(context, &dev->client_data_list, list) {
		struct ib_client *client = context->client;

		if (context->going_down)
			continue;

		if (client->get_net_dev_by_params) {
			net_dev = client->get_net_dev_by_params(dev, port, pkey,
								gid, addr,
								context->data);
			if (net_dev)
				break;
		}
	}

	up_read(&lists_rwsem);

	return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static struct ibnl_client_cbs ibnl_ls_cb_table[] = {
	[RDMA_NL_LS_OP_RESOLVE] = {
		.dump = ib_nl_handle_resolve_resp,
		.module = THIS_MODULE },
	[RDMA_NL_LS_OP_SET_TIMEOUT] = {
		.dump = ib_nl_handle_set_timeout,
		.module = THIS_MODULE },
	[RDMA_NL_LS_OP_IP_RESOLVE] = {
		.dump = ib_nl_handle_ip_res_resp,
		.module = THIS_MODULE },
};

static int ib_add_ibnl_clients(void)
{
	return ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ibnl_ls_cb_table),
			       ibnl_ls_cb_table);
}

static void ib_remove_ibnl_clients(void)
{
	ibnl_remove_client(RDMA_NL_LS);
}

static int __init ib_core_init(void)
{
	int ret;

	ib_wq = alloc_workqueue("infiniband", 0, 0);
	if (!ib_wq)
		return -ENOMEM;

	ib_comp_wq = alloc_workqueue("ib-comp-wq",
			WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!ib_comp_wq) {
		ret = -ENOMEM;
		goto err;
	}

	ret = class_register(&ib_class);
	if (ret) {
		pr_warn("Couldn't create InfiniBand device class\n");
		goto err_comp;
	}

	ret = ibnl_init();
	if (ret) {
		pr_warn("Couldn't init IB netlink interface\n");
		goto err_sysfs;
	}

	ret = addr_init();
	if (ret) {
		pr_warn("Couldn't init IB address resolution\n");
		goto err_ibnl;
	}

	ret = ib_mad_init();
	if (ret) {
		pr_warn("Couldn't init IB MAD\n");
		goto err_addr;
	}

	ret = ib_sa_init();
	if (ret) {
		pr_warn("Couldn't init SA\n");
		goto err_mad;
	}

	ret = ib_add_ibnl_clients();
	if (ret) {
		pr_warn("Couldn't register ibnl clients\n");
		goto err_sa;
	}

	ret = register_lsm_notifier(&ibdev_lsm_nb);
	if (ret) {
		pr_warn("Couldn't register LSM notifier. ret %d\n", ret);
		goto err_ibnl_clients;
	}

	ib_cache_setup();

	return 0;

err_ibnl_clients:
	ib_remove_ibnl_clients();
err_sa:
	ib_sa_cleanup();
err_mad:
	ib_mad_cleanup();
err_addr:
	addr_cleanup();
err_ibnl:
	ibnl_cleanup();
err_sysfs:
	class_unregister(&ib_class);
err_comp:
	destroy_workqueue(ib_comp_wq);
err:
	destroy_workqueue(ib_wq);
	return ret;
}

static void __exit ib_core_cleanup(void)
{
	unregister_lsm_notifier(&ibdev_lsm_nb);
	ib_cache_cleanup();
	ib_remove_ibnl_clients();
	ib_sa_cleanup();
	ib_mad_cleanup();
	addr_cleanup();
	ibnl_cleanup();
	class_unregister(&ib_class);
	destroy_workqueue(ib_comp_wq);
	/* Make sure that any pending umem accounting work is done. */
	destroy_workqueue(ib_wq);
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);
);