/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <rdma/rdma_netlink.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "core_priv.h"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("core kernel InfiniBand API");
MODULE_LICENSE("Dual BSD/GPL");

struct ib_client_data {
        struct list_head  list;
        struct ib_client *client;
        void             *data;
        /* The device or client is going down. Do not call client or device
         * callbacks other than remove(). */
        bool              going_down;
};

struct workqueue_struct *ib_comp_wq;
struct workqueue_struct *ib_wq;
EXPORT_SYMBOL_GPL(ib_wq);

/* The device_list and client_list contain devices and clients after their
 * registration has completed, and the devices and clients are removed
 * during unregistration. */
static LIST_HEAD(device_list);
static LIST_HEAD(client_list);

/*
 * device_mutex and lists_rwsem protect access to both device_list and
 * client_list. device_mutex protects writer access by device and client
 * registration / de-registration. lists_rwsem protects reader access to
 * these lists. Iterators of these lists must lock it for read, while updates
 * to the lists must be done with a write lock. A special case is when the
 * device_mutex is locked. In this case locking the lists for read access is
 * not necessary as the device_mutex implies it.
 *
 * lists_rwsem also protects access to the client data list.
 */
static DEFINE_MUTEX(device_mutex);
static DECLARE_RWSEM(lists_rwsem);

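/*
 * Usage sketch (hypothetical, for illustration only): a plain reader of
 * device_list takes lists_rwsem for read, exactly as
 * ib_enum_all_roce_netdevs() below does; handle_one_device() is a
 * hypothetical helper named only for this sketch:
 *
 *      struct ib_device *device;
 *
 *      down_read(&lists_rwsem);
 *      list_for_each_entry(device, &device_list, core_list)
 *              handle_one_device(device);
 *      up_read(&lists_rwsem);
 */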

static int ib_device_check_mandatory(struct ib_device *device)
{
#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x }
        static const struct {
                size_t offset;
                char  *name;
        } mandatory_table[] = {
                IB_MANDATORY_FUNC(query_device),
                IB_MANDATORY_FUNC(query_port),
                IB_MANDATORY_FUNC(query_pkey),
                IB_MANDATORY_FUNC(query_gid),
                IB_MANDATORY_FUNC(alloc_pd),
                IB_MANDATORY_FUNC(dealloc_pd),
                IB_MANDATORY_FUNC(create_ah),
                IB_MANDATORY_FUNC(destroy_ah),
                IB_MANDATORY_FUNC(create_qp),
                IB_MANDATORY_FUNC(modify_qp),
                IB_MANDATORY_FUNC(destroy_qp),
                IB_MANDATORY_FUNC(post_send),
                IB_MANDATORY_FUNC(post_recv),
                IB_MANDATORY_FUNC(create_cq),
                IB_MANDATORY_FUNC(destroy_cq),
                IB_MANDATORY_FUNC(poll_cq),
                IB_MANDATORY_FUNC(req_notify_cq),
                IB_MANDATORY_FUNC(get_dma_mr),
                IB_MANDATORY_FUNC(dereg_mr),
                IB_MANDATORY_FUNC(get_port_immutable)
        };
        int i;

        for (i = 0; i < ARRAY_SIZE(mandatory_table); ++i) {
                if (!*(void **) ((void *) device + mandatory_table[i].offset)) {
                        pr_warn("Device %s is missing mandatory function %s\n",
                                device->name, mandatory_table[i].name);
                        return -EINVAL;
                }
        }

        return 0;
}

static struct ib_device *__ib_device_get_by_name(const char *name)
{
        struct ib_device *device;

        list_for_each_entry(device, &device_list, core_list)
                if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX))
                        return device;

        return NULL;
}


static int alloc_name(char *name)
{
        unsigned long *inuse;
        char buf[IB_DEVICE_NAME_MAX];
        struct ib_device *device;
        int i;

        inuse = (unsigned long *) get_zeroed_page(GFP_KERNEL);
        if (!inuse)
                return -ENOMEM;

        list_for_each_entry(device, &device_list, core_list) {
                if (!sscanf(device->name, name, &i))
                        continue;
                if (i < 0 || i >= PAGE_SIZE * 8)
                        continue;
                snprintf(buf, sizeof buf, name, i);
                if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX))
                        set_bit(i, inuse);
        }

        i = find_first_zero_bit(inuse, PAGE_SIZE * 8);
        free_page((unsigned long) inuse);
        snprintf(buf, sizeof buf, name, i);

        if (__ib_device_get_by_name(buf))
                return -ENFILE;

        strlcpy(name, buf, IB_DEVICE_NAME_MAX);
        return 0;
}

static void ib_device_release(struct device *device)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        ib_cache_release_one(dev);
        kfree(dev->port_immutable);
        kfree(dev);
}

static int ib_device_uevent(struct device *device,
                            struct kobj_uevent_env *env)
{
        struct ib_device *dev = container_of(device, struct ib_device, dev);

        if (add_uevent_var(env, "NAME=%s", dev->name))
                return -ENOMEM;

        /*
         * It would be nice to pass the node GUID with the event...
         */

        return 0;
}

static struct class ib_class = {
        .name        = "infiniband",
        .dev_release = ib_device_release,
        .dev_uevent  = ib_device_uevent,
};

/**
 * ib_alloc_device - allocate an IB device struct
 * @size:size of structure to allocate
 *
 * Low-level drivers should use ib_alloc_device() to allocate &struct
 * ib_device.  @size is the size of the structure to be allocated,
 * including any private data used by the low-level driver.
 * ib_dealloc_device() must be used to free structures allocated with
 * ib_alloc_device().
 */
struct ib_device *ib_alloc_device(size_t size)
{
        struct ib_device *device;

        if (WARN_ON(size < sizeof(struct ib_device)))
                return NULL;

        device = kzalloc(size, GFP_KERNEL);
        if (!device)
                return NULL;

        device->dev.class = &ib_class;
        device_initialize(&device->dev);

        dev_set_drvdata(&device->dev, device);

        INIT_LIST_HEAD(&device->event_handler_list);
        spin_lock_init(&device->event_handler_lock);
        spin_lock_init(&device->client_data_lock);
        INIT_LIST_HEAD(&device->client_data_list);
        INIT_LIST_HEAD(&device->port_list);

        return device;
}
EXPORT_SYMBOL(ib_alloc_device);

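/*
 * Usage sketch (hypothetical "xyz" driver, for illustration only): a
 * low-level driver embeds struct ib_device as the first member of its
 * private structure and sizes the allocation to cover both:
 *
 *      struct xyz_ib_dev {
 *              struct ib_device ib_dev;
 *              u32              priv_state;
 *      };
 *
 *      struct xyz_ib_dev *xdev;
 *
 *      xdev = (struct xyz_ib_dev *)ib_alloc_device(sizeof(*xdev));
 *      if (!xdev)
 *              return -ENOMEM;
 */
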
/**
 * ib_dealloc_device - free an IB device struct
 * @device:structure to free
 *
 * Free a structure allocated with ib_alloc_device().
 */
void ib_dealloc_device(struct ib_device *device)
{
        WARN_ON(device->reg_state != IB_DEV_UNREGISTERED &&
                device->reg_state != IB_DEV_UNINITIALIZED);
        kobject_put(&device->dev.kobj);
}
EXPORT_SYMBOL(ib_dealloc_device);

static int add_client_context(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        unsigned long flags;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return -ENOMEM;

        context->client = client;
        context->data   = NULL;
        context->going_down = false;

        down_write(&lists_rwsem);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_add(&context->list, &device->client_data_list);
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        up_write(&lists_rwsem);

        return 0;
}

static int verify_immutable(const struct ib_device *dev, u8 port)
{
        return WARN_ON(!rdma_cap_ib_mad(dev, port) &&
                       rdma_max_mad_size(dev, port) != 0);
}

static int read_port_immutable(struct ib_device *device)
{
        int ret;
        u8 start_port = rdma_start_port(device);
        u8 end_port = rdma_end_port(device);
        u8 port;

        /*
         * device->port_immutable is indexed directly by the port number to make
         * access to this data as efficient as possible.
         *
         * Therefore port_immutable is declared as a 1-based array with
         * potential empty slots at the beginning.
         */
        device->port_immutable = kzalloc(sizeof(*device->port_immutable)
                                         * (end_port + 1),
                                         GFP_KERNEL);
        if (!device->port_immutable)
                return -ENOMEM;

        for (port = start_port; port <= end_port; ++port) {
                ret = device->get_port_immutable(device, port,
                                                 &device->port_immutable[port]);
                if (ret)
                        return ret;

                if (verify_immutable(device, port))
                        return -EINVAL;
        }
        return 0;
}

void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len)
{
        if (dev->get_dev_fw_str)
                dev->get_dev_fw_str(dev, str, str_len);
        else
                str[0] = '\0';
}
EXPORT_SYMBOL(ib_get_device_fw_str);

/**
 * ib_register_device - Register an IB device with IB core
 * @device:Device to register
 *
 * Low-level drivers use ib_register_device() to register their
 * devices with the IB core. All registered clients will receive a
 * callback for each device that is added. @device must be allocated
 * with ib_alloc_device().
 */
int ib_register_device(struct ib_device *device,
                       int (*port_callback)(struct ib_device *,
                                            u8, struct kobject *))
{
        int ret;
        struct ib_client *client;
        struct ib_udata uhw = {.outlen = 0, .inlen = 0};

        mutex_lock(&device_mutex);

        if (strchr(device->name, '%')) {
                ret = alloc_name(device->name);
                if (ret)
                        goto out;
        }

        if (ib_device_check_mandatory(device)) {
                ret = -EINVAL;
                goto out;
        }

        ret = read_port_immutable(device);
        if (ret) {
                pr_warn("Couldn't create per port immutable data %s\n",
                        device->name);
                goto out;
        }

        ret = ib_cache_setup_one(device);
        if (ret) {
                pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n");
                goto out;
        }

        memset(&device->attrs, 0, sizeof(device->attrs));
        ret = device->query_device(device, &device->attrs, &uhw);
        if (ret) {
                pr_warn("Couldn't query the device attributes\n");
                ib_cache_cleanup_one(device);
                goto out;
        }

        ret = ib_device_register_sysfs(device, port_callback);
        if (ret) {
                pr_warn("Couldn't register device %s with driver model\n",
                        device->name);
                ib_cache_cleanup_one(device);
                goto out;
        }

        device->reg_state = IB_DEV_REGISTERED;

        list_for_each_entry(client, &client_list, list)
                if (client->add && !add_client_context(device, client))
                        client->add(device);

        down_write(&lists_rwsem);
        list_add_tail(&device->core_list, &device_list);
        up_write(&lists_rwsem);
out:
        mutex_unlock(&device_mutex);
        return ret;
}
EXPORT_SYMBOL(ib_register_device);

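/*
 * Registration sketch (continuing the hypothetical "xyz" driver above):
 * a name containing a printf-style "%d" is filled in by alloc_name()
 * with the lowest free index, and port_callback may be NULL when no
 * extra per-port sysfs entries are needed:
 *
 *      strlcpy(xdev->ib_dev.name, "xyz%d", IB_DEVICE_NAME_MAX);
 *      xdev->ib_dev.query_device = xyz_query_device;
 *      (... every method listed in mandatory_table must be set ...)
 *
 *      ret = ib_register_device(&xdev->ib_dev, NULL);
 *      if (ret)
 *              ib_dealloc_device(&xdev->ib_dev);
 */
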
/**
 * ib_unregister_device - Unregister an IB device
 * @device:Device to unregister
 *
 * Unregister an IB device.  All clients will receive a remove callback.
 */
void ib_unregister_device(struct ib_device *device)
{
        struct ib_client_data *context, *tmp;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&device->core_list);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                context->going_down = true;
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        downgrade_write(&lists_rwsem);

        list_for_each_entry_safe(context, tmp, &device->client_data_list,
                                 list) {
                if (context->client->remove)
                        context->client->remove(device, context->data);
        }
        up_read(&lists_rwsem);

        mutex_unlock(&device_mutex);

        ib_device_unregister_sysfs(device);
        ib_cache_cleanup_one(device);

        down_write(&lists_rwsem);
        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                kfree(context);
        spin_unlock_irqrestore(&device->client_data_lock, flags);
        up_write(&lists_rwsem);

        device->reg_state = IB_DEV_UNREGISTERED;
}
EXPORT_SYMBOL(ib_unregister_device);

/**
 * ib_register_client - Register an IB client
 * @client:Client to register
 *
 * Upper level users of the IB drivers can use ib_register_client() to
 * register callbacks for IB device addition and removal.  When an IB
 * device is added, each registered client's add method will be called
 * (in the order the clients were registered), and when a device is
 * removed, each client's remove method will be called (in the reverse
 * order that clients were registered).  In addition, when
 * ib_register_client() is called, the client will receive an add
 * callback for all devices already registered.
 */
int ib_register_client(struct ib_client *client)
{
        struct ib_device *device;

        mutex_lock(&device_mutex);

        list_for_each_entry(device, &device_list, core_list)
                if (client->add && !add_client_context(device, client))
                        client->add(device);

        down_write(&lists_rwsem);
        list_add_tail(&client->list, &client_list);
        up_write(&lists_rwsem);

        mutex_unlock(&device_mutex);

        return 0;
}
EXPORT_SYMBOL(ib_register_client);

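/*
 * Client sketch (hypothetical "xyz" client, for illustration only): an
 * upper-level module declares a struct ib_client and registers it once
 * at module init; add() then fires for every device already present
 * and for each one registered later:
 *
 *      static void xyz_add_one(struct ib_device *device);
 *      static void xyz_remove_one(struct ib_device *device, void *data);
 *
 *      static struct ib_client xyz_client = {
 *              .name   = "xyz",
 *              .add    = xyz_add_one,
 *              .remove = xyz_remove_one,
 *      };
 *
 *      ret = ib_register_client(&xyz_client);
 */
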
/**
 * ib_unregister_client - Unregister an IB client
 * @client:Client to unregister
 *
 * Upper level users use ib_unregister_client() to remove their client
 * registration.  When ib_unregister_client() is called, the client
 * will receive a remove callback for each IB device still registered.
 */
void ib_unregister_client(struct ib_client *client)
{
        struct ib_client_data *context, *tmp;
        struct ib_device *device;
        unsigned long flags;

        mutex_lock(&device_mutex);

        down_write(&lists_rwsem);
        list_del(&client->list);
        up_write(&lists_rwsem);

        list_for_each_entry(device, &device_list, core_list) {
                struct ib_client_data *found_context = NULL;

                down_write(&lists_rwsem);
                spin_lock_irqsave(&device->client_data_lock, flags);
                list_for_each_entry_safe(context, tmp, &device->client_data_list, list)
                        if (context->client == client) {
                                context->going_down = true;
                                found_context = context;
                                break;
                        }
                spin_unlock_irqrestore(&device->client_data_lock, flags);
                up_write(&lists_rwsem);

                if (client->remove)
                        client->remove(device, found_context ?
                                       found_context->data : NULL);

                if (!found_context) {
                        pr_warn("No client context found for %s/%s\n",
                                device->name, client->name);
                        continue;
                }

                down_write(&lists_rwsem);
                spin_lock_irqsave(&device->client_data_lock, flags);
                list_del(&found_context->list);
                kfree(found_context);
                spin_unlock_irqrestore(&device->client_data_lock, flags);
                up_write(&lists_rwsem);
        }

        mutex_unlock(&device_mutex);
}
EXPORT_SYMBOL(ib_unregister_client);

/**
 * ib_get_client_data - Get IB client context
 * @device:Device to get context for
 * @client:Client to get context for
 *
 * ib_get_client_data() returns client context set with
 * ib_set_client_data().
 */
void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
{
        struct ib_client_data *context;
        void *ret = NULL;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        ret = context->data;
                        break;
                }
        spin_unlock_irqrestore(&device->client_data_lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_client_data);

/**
 * ib_set_client_data - Set IB client context
 * @device:Device to set context for
 * @client:Client to set context for
 * @data:Context to set
 *
 * ib_set_client_data() sets client context that can be retrieved with
 * ib_get_client_data().
 */
void ib_set_client_data(struct ib_device *device, struct ib_client *client,
                        void *data)
{
        struct ib_client_data *context;
        unsigned long flags;

        spin_lock_irqsave(&device->client_data_lock, flags);
        list_for_each_entry(context, &device->client_data_list, list)
                if (context->client == client) {
                        context->data = data;
                        goto out;
                }

        pr_warn("No client context found for %s/%s\n",
                device->name, client->name);

out:
        spin_unlock_irqrestore(&device->client_data_lock, flags);
}
EXPORT_SYMBOL(ib_set_client_data);

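/*
 * Sketch (continuing the hypothetical "xyz" client above): an add()
 * callback usually allocates per-device state and stores it with
 * ib_set_client_data(); remove() receives the same pointer back, and
 * other paths can look it up at any time:
 *
 *      static void xyz_add_one(struct ib_device *device)
 *      {
 *              struct xyz_state *st = kzalloc(sizeof(*st), GFP_KERNEL);
 *
 *              if (!st)
 *                      return;
 *              ib_set_client_data(device, &xyz_client, st);
 *      }
 *
 *      struct xyz_state *st = ib_get_client_data(device, &xyz_client);
 */
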
/**
 * ib_register_event_handler - Register an IB event handler
 * @event_handler:Handler to register
 *
 * ib_register_event_handler() registers an event handler that will be
 * called back when asynchronous IB events occur (as defined in
 * chapter 11 of the InfiniBand Architecture Specification).  This
 * callback may occur in interrupt context.
 */
int ib_register_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_add_tail(&event_handler->list,
                      &event_handler->device->event_handler_list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_register_event_handler);

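/*
 * Handler sketch (hypothetical, for illustration only): handlers are
 * normally initialized with the INIT_IB_EVENT_HANDLER() helper from
 * <rdma/ib_verbs.h>; because dispatch may happen in interrupt context,
 * the callback must not sleep and typically defers real work:
 *
 *      static void xyz_event(struct ib_event_handler *handler,
 *                            struct ib_event *event)
 *      {
 *              if (event->event == IB_EVENT_PORT_ACTIVE)
 *                      schedule_work(&xyz_port_work);
 *      }
 *
 *      INIT_IB_EVENT_HANDLER(&xdev->ev_handler, device, xyz_event);
 *      ib_register_event_handler(&xdev->ev_handler);
 *
 * xyz_port_work and xdev->ev_handler are assumed fields of the sketch's
 * driver, not of this file.
 */
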
/**
 * ib_unregister_event_handler - Unregister an event handler
 * @event_handler:Handler to unregister
 *
 * Unregister an event handler registered with
 * ib_register_event_handler().
 */
int ib_unregister_event_handler(struct ib_event_handler *event_handler)
{
        unsigned long flags;

        spin_lock_irqsave(&event_handler->device->event_handler_lock, flags);
        list_del(&event_handler->list);
        spin_unlock_irqrestore(&event_handler->device->event_handler_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_unregister_event_handler);

/**
 * ib_dispatch_event - Dispatch an asynchronous event
 * @event:Event to dispatch
 *
 * Low-level drivers must call ib_dispatch_event() to dispatch the
 * event to all registered event handlers when an asynchronous event
 * occurs.
 */
void ib_dispatch_event(struct ib_event *event)
{
        unsigned long flags;
        struct ib_event_handler *handler;

        spin_lock_irqsave(&event->device->event_handler_lock, flags);

        list_for_each_entry(handler, &event->device->event_handler_list, list)
                handler->handler(handler, event);

        spin_unlock_irqrestore(&event->device->event_handler_lock, flags);
}
EXPORT_SYMBOL(ib_dispatch_event);

/**
 * ib_query_port - Query IB port attributes
 * @device:Device to query
 * @port_num:Port number to query
 * @port_attr:Port attributes
 *
 * ib_query_port() returns the attributes of a port through the
 * @port_attr pointer.
 */
int ib_query_port(struct ib_device *device,
                  u8 port_num,
                  struct ib_port_attr *port_attr)
{
        union ib_gid gid;
        int err;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        memset(port_attr, 0, sizeof(*port_attr));
        err = device->query_port(device, port_num, port_attr);
        if (err || port_attr->subnet_prefix)
                return err;

        if (rdma_port_get_link_layer(device, port_num) != IB_LINK_LAYER_INFINIBAND)
                return 0;

        err = ib_query_gid(device, port_num, 0, &gid, NULL);
        if (err)
                return err;

        port_attr->subnet_prefix = be64_to_cpu(gid.global.subnet_prefix);
        return 0;
}
EXPORT_SYMBOL(ib_query_port);

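/*
 * Usage sketch (for illustration only): callers pass a stack-allocated
 * struct ib_port_attr and, on success, read the attributes out of it:
 *
 *      struct ib_port_attr attr;
 *
 *      if (!ib_query_port(device, port_num, &attr) &&
 *          attr.state == IB_PORT_ACTIVE)
 *              (... the port is up; attr.lid, attr.gid_tbl_len etc.
 *               are valid ...)
 */
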
/**
 * ib_query_gid - Get GID table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:GID table index to query
 * @gid:Returned GID
 * @attr: Returned GID attributes related to this GID index (only in RoCE).
 *   NULL means ignore.
 *
 * ib_query_gid() fetches the specified GID table entry.
 */
int ib_query_gid(struct ib_device *device,
                 u8 port_num, int index, union ib_gid *gid,
                 struct ib_gid_attr *attr)
{
        if (rdma_cap_roce_gid_table(device, port_num))
                return ib_get_cached_gid(device, port_num, index, gid, attr);

        if (attr)
                return -EINVAL;

        return device->query_gid(device, port_num, index, gid);
}
EXPORT_SYMBOL(ib_query_gid);

/**
 * ib_enum_roce_netdev - enumerate all RoCE ports
 * @ib_dev: IB device we want to query
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates all of the physical RoCE ports of ib_dev that are
 * related to a netdevice and calls the callback on each port for
 * which the filter function returns a non-zero value.
 */
void ib_enum_roce_netdev(struct ib_device *ib_dev,
                         roce_netdev_filter filter,
                         void *filter_cookie,
                         roce_netdev_callback cb,
                         void *cookie)
{
        u8 port;

        for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev);
             port++)
                if (rdma_protocol_roce(ib_dev, port)) {
                        struct net_device *idev = NULL;

                        if (ib_dev->get_netdev)
                                idev = ib_dev->get_netdev(ib_dev, port);

                        if (idev &&
                            idev->reg_state >= NETREG_UNREGISTERED) {
                                dev_put(idev);
                                idev = NULL;
                        }

                        if (filter(ib_dev, port, idev, filter_cookie))
                                cb(ib_dev, port, idev, cookie);

                        if (idev)
                                dev_put(idev);
                }
}

/**
 * ib_enum_all_roce_netdevs - enumerate all RoCE devices
 * @filter: Should we call the callback?
 * @filter_cookie: Cookie passed to filter
 * @cb: Callback to call for each found RoCE port
 * @cookie: Cookie passed back to the callback
 *
 * Enumerates the physical ports of all RoCE devices that are related
 * to netdevices and calls the callback on each port for which the
 * filter function returns a non-zero value.
 */
void ib_enum_all_roce_netdevs(roce_netdev_filter filter,
                              void *filter_cookie,
                              roce_netdev_callback cb,
                              void *cookie)
{
        struct ib_device *dev;

        down_read(&lists_rwsem);
        list_for_each_entry(dev, &device_list, core_list)
                ib_enum_roce_netdev(dev, filter, filter_cookie, cb, cookie);
        up_read(&lists_rwsem);
}

/**
 * ib_query_pkey - Get P_Key table entry
 * @device:Device to query
 * @port_num:Port number to query
 * @index:P_Key table index to query
 * @pkey:Returned P_Key
 *
 * ib_query_pkey() fetches the specified P_Key table entry.
 */
int ib_query_pkey(struct ib_device *device,
                  u8 port_num, u16 index, u16 *pkey)
{
        return device->query_pkey(device, port_num, index, pkey);
}
EXPORT_SYMBOL(ib_query_pkey);

/**
 * ib_modify_device - Change IB device attributes
 * @device:Device to modify
 * @device_modify_mask:Mask of attributes to change
 * @device_modify:New attribute values
 *
 * ib_modify_device() changes a device's attributes as specified by
 * the @device_modify_mask and @device_modify structure.
 */
int ib_modify_device(struct ib_device *device,
                     int device_modify_mask,
                     struct ib_device_modify *device_modify)
{
        if (!device->modify_device)
                return -ENOSYS;

        return device->modify_device(device, device_modify_mask,
                                     device_modify);
}
EXPORT_SYMBOL(ib_modify_device);

/**
 * ib_modify_port - Modifies the attributes for the specified port.
 * @device: The device to modify.
 * @port_num: The number of the port to modify.
 * @port_modify_mask: Mask used to specify which attributes of the port
 *   to change.
 * @port_modify: New attribute values for the port.
 *
 * ib_modify_port() changes a port's attributes as specified by the
 * @port_modify_mask and @port_modify structure.
 */
int ib_modify_port(struct ib_device *device,
                   u8 port_num, int port_modify_mask,
                   struct ib_port_modify *port_modify)
{
        if (!device->modify_port)
                return -ENOSYS;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        return device->modify_port(device, port_num, port_modify_mask,
                                   port_modify);
}
EXPORT_SYMBOL(ib_modify_port);

/**
 * ib_find_gid - Returns the port number and GID table index where
 *   a specified GID value occurs.
 * @device: The device to query.
 * @gid: The GID value to search for.
 * @gid_type: Type of GID.
 * @ndev: The ndev related to the GID to search for.
 * @port_num: The port number of the device where the GID value was found.
 * @index: The index into the GID table where the GID was found.  This
 *   parameter may be NULL.
 */
int ib_find_gid(struct ib_device *device, union ib_gid *gid,
                enum ib_gid_type gid_type, struct net_device *ndev,
                u8 *port_num, u16 *index)
{
        union ib_gid tmp_gid;
        int ret, port, i;

        for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) {
                if (rdma_cap_roce_gid_table(device, port)) {
                        if (!ib_find_cached_gid_by_port(device, gid, gid_type, port,
                                                        ndev, index)) {
                                *port_num = port;
                                return 0;
                        }
                }

                if (gid_type != IB_GID_TYPE_IB)
                        continue;

                for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) {
                        ret = ib_query_gid(device, port, i, &tmp_gid, NULL);
                        if (ret)
                                return ret;
                        if (!memcmp(&tmp_gid, gid, sizeof *gid)) {
                                *port_num = port;
                                if (index)
                                        *index = i;
                                return 0;
                        }
                }
        }

        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_gid);

/**
 * ib_find_pkey - Returns the PKey table index where a specified
 *   PKey value occurs.
 * @device: The device to query.
 * @port_num: The port number of the device to search for the PKey.
 * @pkey: The PKey value to search for.
 * @index: The index into the PKey table where the PKey was found.
 */
int ib_find_pkey(struct ib_device *device,
                 u8 port_num, u16 pkey, u16 *index)
{
        int ret, i;
        u16 tmp_pkey;
        int partial_ix = -1;

        for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) {
                ret = ib_query_pkey(device, port_num, i, &tmp_pkey);
                if (ret)
                        return ret;
                if ((pkey & 0x7fff) == (tmp_pkey & 0x7fff)) {
                        /* if there is a full-member pkey, take it */
                        if (tmp_pkey & 0x8000) {
                                *index = i;
                                return 0;
                        }
                        if (partial_ix < 0)
                                partial_ix = i;
                }
        }

        /* no full-member pkey found; if a limited-member one exists, take it */
        if (partial_ix >= 0) {
                *index = partial_ix;
                return 0;
        }
        return -ENOENT;
}
EXPORT_SYMBOL(ib_find_pkey);

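/*
 * Usage sketch (for illustration only): bit 15 of a P_Key indicates
 * full membership, and the search above compares only the low 15 bits,
 * so looking up the default P_Key finds the full-member entry when one
 * exists and otherwise falls back to the limited-member one:
 *
 *      u16 index;
 *
 *      if (!ib_find_pkey(device, port_num, 0xffff, &index))
 *              (... index is the slot holding 0xffff, or 0x7fff if only
 *               limited membership is configured ...)
 */
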
/**
 * ib_get_net_dev_by_params() - Return the appropriate net_dev
 * for a received CM request
 * @dev: An RDMA device on which the request has been received.
 * @port: Port number on the RDMA device.
 * @pkey: The Pkey the request came on.
 * @gid: A GID that the net_dev uses to communicate.
 * @addr: Contains the IP address that the request specified as its
 *   destination.
 */
struct net_device *ib_get_net_dev_by_params(struct ib_device *dev,
                                            u8 port,
                                            u16 pkey,
                                            const union ib_gid *gid,
                                            const struct sockaddr *addr)
{
        struct net_device *net_dev = NULL;
        struct ib_client_data *context;

        if (!rdma_protocol_ib(dev, port))
                return NULL;

        down_read(&lists_rwsem);

        list_for_each_entry(context, &dev->client_data_list, list) {
                struct ib_client *client = context->client;

                if (context->going_down)
                        continue;

                if (client->get_net_dev_by_params) {
                        net_dev = client->get_net_dev_by_params(dev, port, pkey,
                                                                gid, addr,
                                                                context->data);
                        if (net_dev)
                                break;
                }
        }

        up_read(&lists_rwsem);

        return net_dev;
}
EXPORT_SYMBOL(ib_get_net_dev_by_params);

static struct ibnl_client_cbs ibnl_ls_cb_table[] = {
        [RDMA_NL_LS_OP_RESOLVE] = {
                .dump = ib_nl_handle_resolve_resp,
                .module = THIS_MODULE },
        [RDMA_NL_LS_OP_SET_TIMEOUT] = {
                .dump = ib_nl_handle_set_timeout,
                .module = THIS_MODULE },
        [RDMA_NL_LS_OP_IP_RESOLVE] = {
                .dump = ib_nl_handle_ip_res_resp,
                .module = THIS_MODULE },
};

static int ib_add_ibnl_clients(void)
{
        return ibnl_add_client(RDMA_NL_LS, ARRAY_SIZE(ibnl_ls_cb_table),
                               ibnl_ls_cb_table);
}

static void ib_remove_ibnl_clients(void)
{
        ibnl_remove_client(RDMA_NL_LS);
}

static int __init ib_core_init(void)
{
        int ret;

        ib_wq = alloc_workqueue("infiniband", 0, 0);
        if (!ib_wq)
                return -ENOMEM;

        ib_comp_wq = alloc_workqueue("ib-comp-wq",
                        WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM,
                        WQ_UNBOUND_MAX_ACTIVE);
        if (!ib_comp_wq) {
                ret = -ENOMEM;
                goto err;
        }

        ret = class_register(&ib_class);
        if (ret) {
                pr_warn("Couldn't create InfiniBand device class\n");
                goto err_comp;
        }

        ret = ibnl_init();
        if (ret) {
                pr_warn("Couldn't init IB netlink interface\n");
                goto err_sysfs;
        }

        ret = addr_init();
        if (ret) {
                pr_warn("Couldn't init IB address resolution\n");
                goto err_ibnl;
        }

        ret = ib_mad_init();
        if (ret) {
                pr_warn("Couldn't init IB MAD\n");
                goto err_addr;
        }

        ret = ib_sa_init();
        if (ret) {
                pr_warn("Couldn't init SA\n");
                goto err_mad;
        }

        ret = ib_add_ibnl_clients();
        if (ret) {
                pr_warn("Couldn't register ibnl clients\n");
                goto err_sa;
        }

        ib_cache_setup();

        return 0;

err_sa:
        ib_sa_cleanup();
err_mad:
        ib_mad_cleanup();
err_addr:
        addr_cleanup();
err_ibnl:
        ibnl_cleanup();
err_sysfs:
        class_unregister(&ib_class);
err_comp:
        destroy_workqueue(ib_comp_wq);
err:
        destroy_workqueue(ib_wq);
        return ret;
}

static void __exit ib_core_cleanup(void)
{
        ib_cache_cleanup();
        ib_remove_ibnl_clients();
        ib_sa_cleanup();
        ib_mad_cleanup();
        addr_cleanup();
        ibnl_cleanup();
        class_unregister(&ib_class);
        destroy_workqueue(ib_comp_wq);
        /* Make sure that any pending umem accounting work is done. */
        destroy_workqueue(ib_wq);
}

module_init(ib_core_init);
module_exit(ib_core_cleanup);