/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");
struct ib_client_data {
        struct list_head  list;
        struct ib_client *client;
        void *            data;
        /* The device or client is going down.  Do not call client or device
         * callbacks other than remove(). */
        bool              going_down;
};
struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);
/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);
/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list.  device_mutex protects writer access by device and client
 * registration / de-registration.  lists_rwsem protects reader access to
 * these lists.  Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock.  A special case is when the
 * device_mutex is locked.  In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);
static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
        static const struct {
                size_t offset;
                char  *name;
        } mandatory_table[] = {
                IB_MANDATORY_FUNC(query_device),
                IB_MANDATORY_FUNC(query_port),
                IB_MANDATORY_FUNC(query_pkey),
                IB_MANDATORY_FUNC(query_gid),
                IB_MANDATORY_FUNC(alloc_pd),
                IB_MANDATORY_FUNC(dealloc_pd),
                IB_MANDATORY_FUNC(create_ah),
                IB_MANDATORY_FUNC(destroy_ah),
                IB_MANDATORY_FUNC(create_qp),
                IB_MANDATORY_FUNC(modify_qp),
                IB_MANDATORY_FUNC(destroy_qp),
                IB_MANDATORY_FUNC(post_send),
                IB_MANDATORY_FUNC(post_recv),
                IB_MANDATORY_FUNC(create_cq),
                IB_MANDATORY_FUNC(destroy_cq),
                IB_MANDATORY_FUNC(poll_cq),
                IB_MANDATORY_FUNC(req_notify_cq),
                IB_MANDATORY_FUNC(get_dma_mr),
                IB_MANDATORY_FUNC(dereg_mr),
                IB_MANDATORY_FUNC(get_port_immutable)
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
                if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
                        pr_warn("Device %s is missing mandatory function %s\n",
                                device->name, mandatory_table[i].name);
                        return -EINVAL;
                }
        }

        return 0;
}
static struct ib_device *__ib_device_get_by_name(const char *name)
{
        struct ib_device *device;

        list_for_each_entry(device, &device_list, core_list)
                if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
                        return device;

        return NULL;
}
static int alloc_name(char *name)
{
        unsigned long *inuse;
        char buf[IB_DEVICE_NAME_MAX];
        struct ib_device *device;
        int i;

        inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!inuse)
                return -ENOMEM;

        list_for_each_entry(device, &device_list, core_list) {
                if (!sscanf(device->name, name, &i))
                        continue;
                if (i < 0 || i >= PAGE_SIZE * 8)
                        continue;
                snprintf(buf, sizeof buf, name, i);
                if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
                        set_bit(i, inuse);
        }

        i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
        free_page((unsigned long) inuse);
        snprintf(buf, sizeof buf, name, i);

        if (__ib_device_get_by_name(buf))
                return -ENFILE;

        strlcpy(name, buf, IB_DEVICE_NAME_MAX);
        return 0;
}
static void ib_device_release(struct device *device)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        ib_cache_release_one(dev);
        kfree(dev->port_immutable);
        kfree(dev);
}
static int ib_device_uevent(struct device *device,
                            struct kobj_uevent_env *env)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        if (add_uevent_var(env, "NAME=%s", dev->name))
                return -ENOMEM;

        /*
         * It would be nice to pass the node GUID with the event...
         */

        return 0;
}
static struct class ib_class = {
        .name        = "infiniband",
        .dev_release = ib_device_release,
        .dev_uevent  = ib_device_uevent,
};
/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
        struct ib_device *device;

        if (WARN_ON(size < sizeof(struct ib_device)))
                return NULL;

        device = kzalloc(size, GFP_KERNEL);
        if (!device)
                return NULL;

        device->dev.class = &ib_class;
        device_initialize(&device->dev);

        dev_set_drvdata(&device->dev, device);

        INIT_LIST_HEAD(&device->event_handler_list);
        spin_lock_init(&device->event_handler_lock);
        spin_lock_init(&device->client_data_lock);
        INIT_LIST_HEAD(&device->client_data_list);
        INIT_LIST_HEAD(&device->port_list);

        return device;
}
EXPORT_SYMBOL(ib_alloc_device);
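
/*
 * Example (illustrative sketch, not part of this file): a low-level
 * driver typically embeds struct ib_device at the start of its own
 * private structure ("struct my_hca" is hypothetical) and allocates
 * both with a single call:
 *
 *      struct my_hca {
 *              struct ib_device ibdev;
 *              int              private_state;
 *      };
 *
 *      struct my_hca *hca = (struct my_hca *)
 *              ib_alloc_device(sizeof(struct my_hca));
 *      if (!hca)
 *              return -ENOMEM;
 */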
/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
        WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
                device->reg_state != IB_DEV_UNINITIALIZED);
        kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);
static int add_client_context(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        unsigned long flags;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        context->client = client;
        context->data   = NULL;
        context->going_down = false;

        down_write(&lists_rwsem);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_add(&context->list, &device->client_data_list);
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        up_write(&lists_rwsem);

        return 0;
}
static int verify_immutable(const struct ib_device *dev, u8 port)
{
        return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
                       rdma_max_mad_size(dev, port) != 0);
}
static int read_port_immutable(struct ib_device *device)
{
        int ret;
        u8 start_port = rdma_start_port(device);
        u8 end_port = rdma_end_port(device);
        u8 port;

        /**
         * device->port_immutable is indexed directly by the port number to make
         * access to this data as efficient as possible.
         *
         * Therefore port_immutable is declared as a 1 based array with
         * potential empty slots at the beginning.
         */
        device->port_immutable = kzalloc(sizeof(*device->port_immutable)
                                         * (end_port + 1),
                                         GFP_KERNEL);
        if (!device->port_immutable)
                return -ENOMEM;

        for (port = start_port; port <= end_port; ++port) {
                ret = device->get_port_immutable(device, port,
                                                 &device->port_immutable[port]);
                if (ret)
                        return ret;

                if (verify_immutable(device, port))
                        return -EINVAL;
        }
        return 0;
}
void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len)
{
        if (dev->get_dev_fw_str)
                dev->get_dev_fw_str(dev, str, str_len);
        else
                str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);
/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core.  All registered clients will receive a
 * callback for each device that is added.  @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
                       int (*port_callback)(struct ib_device *,
                                            u8, struct kobject *))
{
        int ret;
        struct ib_client *client;
        struct ib_udata uhw = {.outlen = 0, .inlen = 0};
        struct device *parent = device->dev.parent;

        WARN_ON_ONCE(!parent);
        if (!device->dev.dma_ops)
                device->dev.dma_ops = parent->dma_ops;
        if (!device->dev.dma_mask)
                device->dev.dma_mask = parent->dma_mask;
        if (!device->dev.coherent_dma_mask)
                device->dev.coherent_dma_mask = parent->coherent_dma_mask;

        mutex_lock(&device_mutex);

        if (strchr(device->name, '%')) {
                ret = alloc_name(device->name);
                if (ret)
                        goto out;
        }

        if (ib_device_check_mandatory(device)) {
                ret = -EINVAL;
                goto out;
        }

        ret = read_port_immutable(device);
        if (ret) {
                pr_warn("Couldn't create per port immutable data %s\n",
                        device->name);
                goto out;
        }

        ret = ib_cache_setup_one(device);
        if (ret) {
                pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
                goto out;
        }

        memset(&device->attrs, 0, sizeof(device->attrs));
        ret = device->query_device(device, &device->attrs, &uhw);
        if (ret) {
                pr_warn("Couldn't query the device attributes\n");
                ib_cache_cleanup_one(device);
                goto out;
        }

        ret = ib_device_register_sysfs(device, port_callback);
        if (ret) {
                pr_warn("Couldn't register device %s with driver model\n",
                        device->name);
                ib_cache_cleanup_one(device);
                goto out;
        }

        device->reg_state = IB_DEV_REGISTERED;

        list_for_each_entry(client, &client_list, list)
                if (client->add && !add_client_context(device, client))
                        client->add(device);

        down_write(&lists_rwsem);
        list_add_tail(&device->core_list, &device_list);
        up_write(&lists_rwsem);
out:
        mutex_unlock(&device_mutex);
        return ret;
}
EXPORT_SYMBOL(ib_register_device);
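
/*
 * Example (hypothetical driver probe path, for illustration): the driver
 * fills in its methods, picks a name containing a printf-style "%d" so
 * that alloc_name() above assigns a free index, and then registers;
 * my_query_device() is a placeholder for the driver's implementation:
 *
 *      strlcpy(device->name, "mydev%d", IB_DEVICE_NAME_MAX);
 *      device->query_device = my_query_device;
 *      (... remaining mandatory methods ...)
 *      ret = ib_register_device(device, NULL);
 *      if (ret)
 *              ib_dealloc_device(device);
 */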
/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
        struct ib_client_data *context, *tmp;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&device->core_list);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                context->going_down = true;
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        downgrade_write(&lists_rwsem);

        list_for_each_entry_safe(context, tmp, &device->client_data_list,
                                 list) {
                if (context->client->remove)
                        context->client->remove(device, context->data);
        }
        up_read(&lists_rwsem);

        mutex_unlock(&device_mutex);

        ib_device_unregister_sysfs(device);
        ib_cache_cleanup_one(device);

        down_write(&lists_rwsem);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                kfree(context);
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        up_write(&lists_rwsem);

        device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);
/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
        struct ib_device *device;

        mutex_lock(&device_mutex);

        list_for_each_entry(device, &device_list, core_list)
                if (client->add && !add_client_context(device, client))
                        client->add(device);

        down_write(&lists_rwsem);
        list_add_tail(&client->list, &client_list);
        up_write(&lists_rwsem);

        mutex_unlock(&device_mutex);

        return 0;
}
EXPORT_SYMBOL(ib_register_client);
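
/*
 * Example (hypothetical client, for illustration): once registered, the
 * callbacks below run for every existing and future IB device:
 *
 *      static void my_add_one(struct ib_device *device)
 *      {
 *              pr_info("my_client bound to %s\n", device->name);
 *      }
 *
 *      static void my_remove_one(struct ib_device *device, void *client_data)
 *      {
 *      }
 *
 *      static struct ib_client my_client = {
 *              .name   = "my_client",
 *              .add    = my_add_one,
 *              .remove = my_remove_one,
 *      };
 *
 *      ret = ib_register_client(&my_client);
 */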
/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
        struct ib_client_data *context, *tmp;
        struct ib_device *device;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&client->list);
        up_write(&lists_rwsem);

        list_for_each_entry(device, &device_list, core_list) {
                struct ib_client_data *found_context = NULL;

                down_write(&lists_rwsem);
                spin_lock_irqsave(&device->client_data_lock, flags);
                list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                        if (context->client == client) {
                                context->going_down = true;
                                found_context = context;
                                break;
                        }
                spin_unlock_irqrestore(&device->client_data_lock, flags);
                up_write(&lists_rwsem);

                if (client->remove)
                        client->remove(device, found_context ?
                                               found_context->data : NULL);

                if (!found_context) {
                        pr_warn("No client context found for %s/%s\n",
                                device->name, client->name);
                        continue;
                }

                down_write(&lists_rwsem);
                spin_lock_irqsave(&device->client_data_lock, flags);
                list_del(&found_context->list);
                kfree(found_context);
                spin_unlock_irqrestore(&device->client_data_lock, flags);
                up_write(&lists_rwsem);
        }

        mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);
/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        void *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        ret = context->data;
                        break;
                }
        spin_unlock_irqrestore(&device->client_data_lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_client_data);
/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        void *data)
{
        struct ib_client_data *context;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        context->data = data;
                        goto out;
                }

        pr_warn("No client context found for %s/%s\n",
                device->name, client->name);

out:
        spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
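
/*
 * Example (illustrative sketch): a client usually allocates a per-device
 * structure in its add callback, stashes it with ib_set_client_data(),
 * and looks it up later ("struct my_state" and my_client are
 * hypothetical):
 *
 *      static void my_add_one(struct ib_device *device)
 *      {
 *              struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *              if (!st)
 *                      return;
 *              ib_set_client_data(device, &my_client, st);
 *      }
 *
 *      struct my_state *st = ib_get_client_data(device, &my_client);
 */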
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_add_tail(&event_handler->list,
                      &event_handler->device->event_handler_list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);
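
/*
 * Example (illustrative sketch): a handler is normally initialized with
 * the INIT_IB_EVENT_HANDLER() macro from <rdma/ib_verbs.h> and then
 * registered; my_event_handler() is a hypothetical callback:
 *
 *      static void my_event_handler(struct ib_event_handler *handler,
 *                                   struct ib_event *event)
 *      {
 *              pr_info("async event %d on %s\n", event->event,
 *                      event->device->name);
 *      }
 *
 *      INIT_IB_EVENT_HANDLER(&eh, device, my_event_handler);
 *      ib_register_event_handler(&eh);
 */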
/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_del(&event_handler->list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);
/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
        unsigned long flags;
        struct ib_event_handler *handler;

        spin_lock_irqsave(&event->device->event_handler_lock, flags);

        list_for_each_entry(handler, &event->device->event_handler_list, list)
                handler->handler(handler, event);

        spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
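
/*
 * Example (hypothetical driver event path, for illustration): a driver
 * reports a port becoming active by filling in an ib_event and
 * dispatching it:
 *
 *      struct ib_event event;
 *
 *      event.device           = ibdev;
 *      event.element.port_num = port;
 *      event.event            = IB_EVENT_PORT_ACTIVE;
 *      ib_dispatch_event(&event);
 */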
/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
                  u8 port_num,
                  struct ib_port_attr *port_attr)
{
        union ib_gid gid;
        int err;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        memset(port_attr, 0, sizeof(*port_attr));
        err = device->query_port(device, port_num, port_attr);
        if (err || port_attr->subnet_prefix)
                return err;

        if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
                return 0;

        err = ib_query_gid(device, port_num, 0, &gid, NULL);
        if (err)
                return err;

        port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
        return 0;
}
EXPORT_SYMBOL(ib_query_port);
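
/*
 * Example (illustrative sketch): reading the state and LID of port 1:
 *
 *      struct ib_port_attr attr;
 *      int err = ib_query_port(device, 1, &attr);
 *
 *      if (!err)
 *              pr_info("port 1: state %d lid 0x%x\n", attr.state, attr.lid);
 */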
/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid,
                 struct ib_gid_attr *attr)
{
        if (rdma_cap_roce_gid_table(device, port_num))
                return ib_get_cached_gid(device, port_num, index, gid, attr);

        if (attr)
                return -EINVAL;

        return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);
/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev
 * which are related to netdevice and calls callback() on each
 * device for which filter() function returns non-zero.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
                         roce_netdev_filter filter,
                         void *filter_cookie,
                         roce_netdev_callback cb,
                         void *cookie)
{
        u8 port;

        for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
             port++)
                if (rdma_protocol_roce(ib_dev, port)) {
                        struct net_device *idev = NULL;

                        if (ib_dev->get_netdev)
                                idev = ib_dev->get_netdev(ib_dev, port);

                        if (idev &&
                            idev->reg_state >= NETREG_UNREGISTERED) {
                                dev_put(idev);
                                idev = NULL;
                        }

                        if (filter(ib_dev, port, idev, filter_cookie))
                                cb(ib_dev, port, idev, cookie);

                        if (idev)
                                dev_put(idev);
                }
}
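
/*
 * Example (hypothetical filter and callback, for illustration): count
 * the RoCE ports whose associated net_device is currently running:
 *
 *      static int my_filter(struct ib_device *dev, u8 port,
 *                           struct net_device *idev, void *cookie)
 *      {
 *              return idev && netif_running(idev);
 *      }
 *
 *      static void my_cb(struct ib_device *dev, u8 port,
 *                        struct net_device *idev, void *cookie)
 *      {
 *              (*(int *)cookie)++;
 *      }
 *
 *      int count = 0;
 *      ib_enum_roce_netdev(ib_dev, my_filter, NULL, my_cb, &count);
 */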
/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all RoCE devices' physical ports which are related
 * to netdevices and calls callback() on each device for which
 * filter() function returns non-zero.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
                              void *filter_cookie,
                              roce_netdev_callback cb,
                              void *cookie)
{
        struct ib_device *dev;

        down_read(&lists_rwsem);
        list_for_each_entry(dev, &device_list, core_list)
                ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
        up_read(&lists_rwsem);
}
/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey)
{
        return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);
/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify)
{
        if (!device->modify_device)
                return -ENOSYS;

        return device->modify_device(device, device_modify_mask,
                                     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);
/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify)
{
        if (!device->modify_port)
                return -ENOSYS;

        if (!rdma_is_port_valid(device, port_num))
                return -EINVAL;

        return device->modify_port(device, port_num, port_modify_mask,
                                   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);
/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: Type of GID.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                enum ib_gid_type gid_type, struct net_device *ndev,
                u8 *port_num, u16 *index)
{
        union ib_gid tmp_gid;
        int ret, port, i;

        for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
                if (rdma_cap_roce_gid_table(device, port)) {
                        if (!ib_find_cached_gid_by_port(device, gid, gid_type, port,
                                                        ndev, index)) {
                                *port_num = port;
                                return 0;
                        }
                }

                if (gid_type != IB_GID_TYPE_IB)
                        continue;

                for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
                        ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
                        if (ret)
                                return ret;
                        if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
                                *port_num = port;
                                if (index)
                                        *index = i;
                                return 0;
                        }
                }
        }

        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);
/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index)
{
        int ret, i;
        u16 tmp_pkey;
        int partial_ix = -1;

        for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
                ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
                if (ret)
                        return ret;
                if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
                        /* if there is a full-member pkey, take it */
                        if (tmp_pkey & 0x8000) {
                                *index = i;
                                return 0;
                        }
                        if (partial_ix < 0)
                                partial_ix = i;
                }
        }

        /* no full-member; if one exists, take the limited member */
        if (partial_ix >= 0) {
                *index = partial_ix;
                return 0;
        }
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
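
/*
 * Worked example (for illustration): searching for @pkey == 0x0001, a
 * table entry of 0x8001 (bit 15 set, full member) matches on the low
 * 15 bits and is returned immediately; an entry of 0x0001 (limited
 * member) is remembered in partial_ix and returned only if no
 * full-member entry with the same low 15 bits is found.
 */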
/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev:        An RDMA device on which the request has been received.
 * @port:       Port number on the RDMA device.
 * @pkey:       The Pkey the request came on.
 * @gid:        A GID that the net_dev uses to communicate.
 * @addr:       Contains the IP address that the request specified as its
 *              destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
                                            u8 port,
                                            u16 pkey,
                                            const union ib_gid *gid,
                                            const struct sockaddr *addr)
{
        struct net_device *net_dev = NULL;
        struct ib_client_data *context;

        if (!rdma_protocol_ib(dev, port))
                return NULL;

        down_read(&lists_rwsem);

        list_for_each_entry(context, &dev->client_data_list, list) {
                struct ib_client *client = context->client;

                if (context->going_down)
                        continue;

                if (client->get_net_dev_by_params) {
                        net_dev = client->get_net_dev_by_params(dev, port, pkey,
                                                                gid, addr,
                                                                context->data);
                        if (net_dev)
                                break;
                }
        }

        up_read(&lists_rwsem);

        return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);
static struct ibnl_client_cbs ibnl_ls_cb_table[] = {
        [RDMA_NL_LS_OP_RESOLVE] = {
                .dump = ib_nl_handle_resolve_resp,
                .module = THIS_MODULE },
        [RDMA_NL_LS_OP_SET_TIMEOUT] = {
                .dump = ib_nl_handle_set_timeout,
                .module = THIS_MODULE },
        [RDMA_NL_LS_OP_IP_RESOLVE] = {
                .dump = ib_nl_handle_ip_res_resp,
                .module = THIS_MODULE },
};
static int ib_add_ibnl_clients(void)
{
        return ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ibnl_ls_cb_table),
                               ibnl_ls_cb_table);
}

static void ib_remove_ibnl_clients(void)
{
        ibnl_remove_client(RDMA_NL_LS);
}
static int __init ib_core_init(void)
{
        int ret;

        ib_wq = alloc_workqueue("infiniband", 0, 0);
        if (!ib_wq)
                return -ENOMEM;

        ib_comp_wq = alloc_workqueue("ib-comp-wq",
                        WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
                        WQ_UNBOUND_MAX_ACTIVE);
        if (!ib_comp_wq) {
                ret = -ENOMEM;
                goto err;
        }

        ret = class_register(&ib_class);
        if (ret) {
                pr_warn("Couldn't create InfiniBand device class\n");
                goto err_comp;
        }

        ret = ibnl_init();
        if (ret) {
                pr_warn("Couldn't init IB netlink interface\n");
                goto err_sysfs;
        }

        ret = addr_init();
        if (ret) {
                pr_warn("Couldn't init IB address resolution\n");
                goto err_ibnl;
        }

        ret = ib_mad_init();
        if (ret) {
                pr_warn("Couldn't init IB MAD\n");
                goto err_addr;
        }

        ret = ib_sa_init();
        if (ret) {
                pr_warn("Couldn't init SA\n");
                goto err_mad;
        }

        ret = ib_add_ibnl_clients();
        if (ret) {
                pr_warn("Couldn't register ibnl clients\n");
                goto err_sa;
        }

        ib_cache_setup();

        return 0;

err_sa:
        ib_sa_cleanup();
err_mad:
        ib_mad_cleanup();
err_addr:
        addr_cleanup();
err_ibnl:
        ibnl_cleanup();
err_sysfs:
        class_unregister(&ib_class);
err_comp:
        destroy_workqueue(ib_comp_wq);
err:
        destroy_workqueue(ib_wq);
        return ret;
}
static void __exit ib_core_cleanup(void)
{
        ib_cache_cleanup();
        ib_remove_ibnl_clients();
        ib_sa_cleanup();
        ib_mad_cleanup();
        addr_cleanup();
        ibnl_cleanup();
        class_unregister(&ib_class);
        destroy_workqueue(ib_comp_wq);
        /* Make sure that any pending umem accounting work is done. */
        destroy_workqueue(ib_wq);
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);