/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <rdma/rdma_netlink.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
        struct list_head list;
        struct ib_client *client;
        void *data;
        /* The device or client is going down. Do not call client or device
         * callbacks other than remove(). */
        bool going_down;
};

struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list. device_mutex protects writer access by device and client
 * registration / de-registration. lists_rwsem protects reader access to
 * these lists. Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock. A special case is when the
 * device_mutex is locked. In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);

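/*
 * Illustrative sketch (not part of the original file): the read-side
 * pattern the comment above describes. A hypothetical iterator that
 * walks device_list without holding device_mutex must take lists_rwsem
 * for read:
 *
 *	down_read(&lists_rwsem);
 *	list_for_each_entry(device, &device_list, core_list)
 *		do_something(device);		// hypothetical per-device work
 *	up_read(&lists_rwsem);
 *
 * Writers (registration / unregistration) instead hold device_mutex and
 * take lists_rwsem for write around the actual list update, as
 * ib_register_device() and ib_unregister_device() below do.
 */
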
static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
        static const struct {
                size_t offset;
                char  *name;
        } mandatory_table[] = {
                IB_MANDATORY_FUNC(query_device),
                IB_MANDATORY_FUNC(query_port),
                IB_MANDATORY_FUNC(query_pkey),
                IB_MANDATORY_FUNC(query_gid),
                IB_MANDATORY_FUNC(alloc_pd),
                IB_MANDATORY_FUNC(dealloc_pd),
                IB_MANDATORY_FUNC(create_ah),
                IB_MANDATORY_FUNC(destroy_ah),
                IB_MANDATORY_FUNC(create_qp),
                IB_MANDATORY_FUNC(modify_qp),
                IB_MANDATORY_FUNC(destroy_qp),
                IB_MANDATORY_FUNC(post_send),
                IB_MANDATORY_FUNC(post_recv),
                IB_MANDATORY_FUNC(create_cq),
                IB_MANDATORY_FUNC(destroy_cq),
                IB_MANDATORY_FUNC(poll_cq),
                IB_MANDATORY_FUNC(req_notify_cq),
                IB_MANDATORY_FUNC(get_dma_mr),
                IB_MANDATORY_FUNC(dereg_mr),
                IB_MANDATORY_FUNC(get_port_immutable)
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
                if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
                        printk(KERN_WARNING "Device %s is missing mandatory function %s\n",
                               device->name, mandatory_table[i].name);
                        return -EINVAL;
                }
        }

        return 0;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
        struct ib_device *device;

        list_for_each_entry(device, &device_list, core_list)
                if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
                        return device;

        return NULL;
}


static int alloc_name(char *name)
{
        unsigned long *inuse;
        char buf[IB_DEVICE_NAME_MAX];
        struct ib_device *device;
        int i;

        inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!inuse)
                return -ENOMEM;

        list_for_each_entry(device, &device_list, core_list) {
                if (!sscanf(device->name, name, &i))
                        continue;
                if (i < 0 || i >= PAGE_SIZE * 8)
                        continue;
                snprintf(buf, sizeof buf, name, i);
                if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
                        set_bit(i, inuse);
        }

        i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
        free_page((unsigned long) inuse);
        snprintf(buf, sizeof buf, name, i);

        if (__ib_device_get_by_name(buf))
                return -ENFILE;

        strlcpy(name, buf, IB_DEVICE_NAME_MAX);
        return 0;
}

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device. @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
        BUG_ON(size < sizeof(struct ib_device));

        return kzalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(ib_alloc_device);

/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
        if (device->reg_state == IB_DEV_UNINITIALIZED) {
                kfree(device);
                return;
        }

        BUG_ON(device->reg_state != IB_DEV_UNREGISTERED);

        kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);
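
/*
 * Illustrative sketch (not part of the original file): a low-level
 * driver typically embeds struct ib_device at the start of its own
 * device structure and sizes the allocation accordingly. "my_dev" and
 * its fields are hypothetical names:
 *
 *	struct my_dev {
 *		struct ib_device ib_dev;	// must be the first member
 *		void __iomem    *regs;		// driver-private state
 *	};
 *
 *	struct my_dev *dev = (struct my_dev *)
 *		ib_alloc_device(sizeof(struct my_dev));
 *	if (!dev)
 *		return -ENOMEM;
 *	...
 *	ib_dealloc_device(&dev->ib_dev);	// never kfree() it directly
 */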

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        unsigned long flags;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context) {
                printk(KERN_WARNING "Couldn't allocate client context for %s/%s\n",
                       device->name, client->name);
                return -ENOMEM;
        }

        context->client = client;
        context->data   = NULL;
        context->going_down = false;

        down_write(&lists_rwsem);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_add(&context->list, &device->client_data_list);
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        up_write(&lists_rwsem);

        return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
        return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
                       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
        int ret = -ENOMEM;
        u8 start_port = rdma_start_port(device);
        u8 end_port = rdma_end_port(device);
        u8 port;

        /**
         * device->port_immutable is indexed directly by the port number to make
         * access to this data as efficient as possible.
         *
         * Therefore port_immutable is declared as a 1 based array with
         * potential empty slots at the beginning.
         */
        device->port_immutable = kzalloc(sizeof(*device->port_immutable)
                                         * (end_port + 1),
                                         GFP_KERNEL);
        if (!device->port_immutable)
                goto err;

        for (port = start_port; port <= end_port; ++port) {
                ret = device->get_port_immutable(device, port,
                                                 &device->port_immutable[port]);
                if (ret)
                        goto err;

                if (verify_immutable(device, port)) {
                        ret = -EINVAL;
                        goto err;
                }
        }

        ret = 0;
        goto out;
err:
        kfree(device->port_immutable);
out:
        return ret;
}
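
/*
 * Illustrative note (not part of the original file): because the array
 * is 1-based, callers elsewhere in this file index it directly by port
 * number, e.g.:
 *
 *	device->port_immutable[port_num].pkey_tbl_len
 *
 * Slot 0 is simply unused on devices whose first port is 1; the trade
 * is a little memory for a simpler, offset-free lookup.
 */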

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
                       int (*port_callback)(struct ib_device *,
                                            u8, struct kobject *))
{
        int ret;

        mutex_lock(&device_mutex);

        if (strchr(device->name, '%')) {
                ret = alloc_name(device->name);
                if (ret)
                        goto out;
        }

        if (ib_device_check_mandatory(device)) {
                ret = -EINVAL;
                goto out;
        }

        INIT_LIST_HEAD(&device->event_handler_list);
        INIT_LIST_HEAD(&device->client_data_list);
        spin_lock_init(&device->event_handler_lock);
        spin_lock_init(&device->client_data_lock);

        ret = read_port_immutable(device);
        if (ret) {
                printk(KERN_WARNING "Couldn't create per port immutable data %s\n",
                       device->name);
                goto out;
        }

        ret = ib_device_register_sysfs(device, port_callback);
        if (ret) {
                printk(KERN_WARNING "Couldn't register device %s with driver model\n",
                       device->name);
                kfree(device->port_immutable);
                goto out;
        }

        device->reg_state = IB_DEV_REGISTERED;

        {
                struct ib_client *client;

                list_for_each_entry(client, &client_list, list)
                        if (client->add && !add_client_context(device, client))
                                client->add(device);
        }

        down_write(&lists_rwsem);
        list_add_tail(&device->core_list, &device_list);
        up_write(&lists_rwsem);
out:
        mutex_unlock(&device_mutex);
        return ret;
}
EXPORT_SYMBOL(ib_register_device);
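
/*
 * Illustrative sketch (not part of the original file): a driver's probe
 * path, with hypothetical names. A '%d' in the name asks the core to
 * pick a free index via alloc_name():
 *
 *	strlcpy(dev->ib_dev.name, "mydev%d", IB_DEVICE_NAME_MAX);
 *	dev->ib_dev.query_device = my_query_device;
 *	...				// all mandatory methods; see
 *					// ib_device_check_mandatory()
 *	ret = ib_register_device(&dev->ib_dev, NULL);
 *	if (ret)
 *		goto err_free;
 */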

/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device. All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
        struct ib_client_data *context, *tmp;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&device->core_list);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                context->going_down = true;
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        downgrade_write(&lists_rwsem);

        list_for_each_entry_safe(context, tmp, &device->client_data_list,
                                 list) {
                if (context->client->remove)
                        context->client->remove(device, context->data);
        }
        up_read(&lists_rwsem);

        mutex_unlock(&device_mutex);

        ib_device_unregister_sysfs(device);

        down_write(&lists_rwsem);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                kfree(context);
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        up_write(&lists_rwsem);

        device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal. When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered). In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
        struct ib_device *device;

        mutex_lock(&device_mutex);

        list_for_each_entry(device, &device_list, core_list)
                if (client->add && !add_client_context(device, client))
                        client->add(device);

        down_write(&lists_rwsem);
        list_add_tail(&client->list, &client_list);
        up_write(&lists_rwsem);

        mutex_unlock(&device_mutex);

        return 0;
}
EXPORT_SYMBOL(ib_register_client);
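
/*
 * Illustrative sketch (not part of the original file): a minimal client.
 * "my_client", "my_add", "my_remove" and "struct my_state" are
 * hypothetical names:
 *
 *	static struct ib_client my_client;
 *
 *	static void my_add(struct ib_device *device)
 *	{
 *		struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *		if (st)
 *			ib_set_client_data(device, &my_client, st);
 *	}
 *
 *	static void my_remove(struct ib_device *device, void *client_data)
 *	{
 *		kfree(client_data);	// state set in my_add(), may be NULL
 *	}
 *
 *	static struct ib_client my_client = {
 *		.name   = "my_client",
 *		.add    = my_add,
 *		.remove = my_remove,
 *	};
 *
 *	ret = ib_register_client(&my_client);
 */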

/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration. When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
        struct ib_client_data *context, *tmp;
        struct ib_device *device;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&client->list);
        up_write(&lists_rwsem);

        list_for_each_entry(device, &device_list, core_list) {
                struct ib_client_data *found_context = NULL;

                down_write(&lists_rwsem);
                spin_lock_irqsave(&device->client_data_lock, flags);
                list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                        if (context->client == client) {
                                context->going_down = true;
                                found_context = context;
                                break;
                        }
                spin_unlock_irqrestore(&device->client_data_lock, flags);
                up_write(&lists_rwsem);

                if (client->remove)
                        client->remove(device, found_context ?
                                               found_context->data : NULL);

                if (!found_context) {
                        pr_warn("No client context found for %s/%s\n",
                                device->name, client->name);
                        continue;
                }

                down_write(&lists_rwsem);
                spin_lock_irqsave(&device->client_data_lock, flags);
                list_del(&found_context->list);
                kfree(found_context);
                spin_unlock_irqrestore(&device->client_data_lock, flags);
                up_write(&lists_rwsem);
        }

        mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        void *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        ret = context->data;
                        break;
                }
        spin_unlock_irqrestore(&device->client_data_lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        void *data)
{
        struct ib_client_data *context;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        context->data = data;
                        goto out;
                }

        printk(KERN_WARNING "No client context found for %s/%s\n",
               device->name, client->name);

out:
        spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);
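
/*
 * Illustrative sketch (not part of the original file): a client usually
 * stores its per-device state from its add callback and fetches it back
 * wherever it only has the device. "my_client" and "struct my_state"
 * are the hypothetical names from the sketch above:
 *
 *	struct my_state *st = ib_get_client_data(device, &my_client);
 *
 *	if (st)
 *		...			// use per-device state
 *
 * A NULL return means either no context was ever set or the client is
 * not registered on this device.
 */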

/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification). This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_add_tail(&event_handler->list,
                      &event_handler->device->event_handler_list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);

/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_del(&event_handler->list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
        unsigned long flags;
        struct ib_event_handler *handler;

        spin_lock_irqsave(&event->device->event_handler_lock, flags);

        list_for_each_entry(handler, &event->device->event_handler_list, list)
                handler->handler(handler, event);

        spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);
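
/*
 * Illustrative sketch (not part of the original file): consumers embed
 * a struct ib_event_handler and initialize it with the
 * INIT_IB_EVENT_HANDLER() macro from <rdma/ib_verbs.h>. Since the
 * callback may run in interrupt context (see above), it must not sleep.
 * "my_event_handler" and "priv" are hypothetical names:
 *
 *	static void my_event_handler(struct ib_event_handler *handler,
 *				     struct ib_event *event)
 *	{
 *		if (event->event == IB_EVENT_PORT_ERR)
 *			...		// schedule work; don't sleep here
 *	}
 *
 *	INIT_IB_EVENT_HANDLER(&priv->event_handler, device,
 *			      my_event_handler);
 *	ib_register_event_handler(&priv->event_handler);
 */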

/**
 * ib_query_device - Query IB device attributes
 * @device:Device to query
 * @device_attr:Device attributes
 *
 * ib_query_device() returns the attributes of a device through the
 * @device_attr pointer.
 */
int ib_query_device(struct ib_device *device,
                    struct ib_device_attr *device_attr)
{
        struct ib_udata uhw = {.outlen = 0, .inlen = 0};

        memset(device_attr, 0, sizeof(*device_attr));

        return device->query_device(device, device_attr, &uhw);
}
EXPORT_SYMBOL(ib_query_device);

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
                  u8 port_num,
                  struct ib_port_attr *port_attr)
{
        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        return device->query_port(device, port_num, port_attr);
}
EXPORT_SYMBOL(ib_query_port);

/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid)
{
        return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey)
{
        return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);
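
/*
 * Illustrative sketch (not part of the original file): checking whether
 * a port is usable. "attr" is a hypothetical local:
 *
 *	struct ib_port_attr attr;
 *
 *	if (!ib_query_port(device, port_num, &attr) &&
 *	    attr.state == IB_PORT_ACTIVE)
 *		...			// port is up and active
 */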

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify)
{
        if (!device->modify_device)
                return -ENOSYS;

        return device->modify_device(device, device_modify_mask,
                                     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify)
{
        if (!device->modify_port)
                return -ENOSYS;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        return device->modify_port(device, port_num, port_modify_mask,
                                   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found. This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                u8 *port_num, u16 *index)
{
        union ib_gid tmp_gid;
        int ret, port, i;

        for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
                for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
                        ret = ib_query_gid(device, port, i, &tmp_gid);
                        if (ret)
                                return ret;
                        if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
                                *port_num = port;
                                if (index)
                                        *index = i;
                                return 0;
                        }
                }
        }

        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index)
{
        int ret, i;
        u16 tmp_pkey;
        int partial_ix = -1;

        for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
                ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
                if (ret)
                        return ret;
                if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
                        /* if there is a full-member pkey, take it */
                        if (tmp_pkey & 0x8000) {
                                *index = i;
                                return 0;
                        }
                        if (partial_ix < 0)
                                partial_ix = i;
                }
        }

        /* no full-member; if a limited-member pkey exists, take it */
        if (partial_ix >= 0) {
                *index = partial_ix;
                return 0;
        }
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);
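
/*
 * Illustrative sketch (not part of the original file): looking up the
 * index of the default partition key. The high bit (0x8000) marks full
 * membership, which is why the search above compares only the low 15
 * bits and prefers full-member entries:
 *
 *	u16 index;
 *
 *	if (!ib_find_pkey(device, port_num, 0xffff, &index))
 *		...			// index of the default P_Key
 */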

static int __init ib_core_init(void)
{
        int ret;

        ib_wq = alloc_workqueue("infiniband", 0, 0);
        if (!ib_wq)
                return -ENOMEM;

        ret = ib_sysfs_setup();
        if (ret) {
                printk(KERN_WARNING "Couldn't create InfiniBand device class\n");
                goto err;
        }

        ret = ibnl_init();
        if (ret) {
                printk(KERN_WARNING "Couldn't init IB netlink interface\n");
                goto err_sysfs;
        }

        ret = ib_cache_setup();
        if (ret) {
                printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n");
                goto err_nl;
        }

        return 0;

err_nl:
        ibnl_cleanup();

err_sysfs:
        ib_sysfs_cleanup();

err:
        destroy_workqueue(ib_wq);
        return ret;
}

static void __exit ib_core_cleanup(void)
{
        ib_cache_cleanup();
        ibnl_cleanup();
        ib_sysfs_cleanup();
        /* Make sure that any pending umem accounting work is done. */
        destroy_workqueue(ib_wq);
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);