drivers/infiniband/core/cache.c
/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Voltaire, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <net/addrconf.h>

#include <rdma/ib_cache.h>

#include "core_priv.h"

struct ib_pkey_cache {
        int             table_len;
        u16             table[0];
};

struct ib_update_work {
        struct work_struct work;
        struct ib_device  *device;
        u8                 port_num;
};

union ib_gid zgid;
EXPORT_SYMBOL(zgid);

static const struct ib_gid_attr zattr;

enum gid_attr_find_mask {
        GID_ATTR_FIND_MASK_GID          = 1UL << 0,
        GID_ATTR_FIND_MASK_NETDEV       = 1UL << 1,
        GID_ATTR_FIND_MASK_DEFAULT      = 1UL << 2,
};

enum gid_table_entry_props {
        GID_TABLE_ENTRY_INVALID         = 1UL << 0,
        GID_TABLE_ENTRY_DEFAULT         = 1UL << 1,
};

enum gid_table_write_action {
        GID_TABLE_WRITE_ACTION_ADD,
        GID_TABLE_WRITE_ACTION_DEL,
        /* MODIFY only updates the GID table. Currently only used by
         * ib_cache_update.
         */
        GID_TABLE_WRITE_ACTION_MODIFY
};

struct ib_gid_table_entry {
        /* This lock protects an entry from being
         * read and written simultaneously.
         */
        rwlock_t            lock;
        unsigned long       props;
        union ib_gid        gid;
        struct ib_gid_attr  attr;
        void               *context;
};

struct ib_gid_table {
        int                  sz;
        /* In RoCE, adding a GID to the table requires:
         * (a) Check whether this GID already exists.
         * (b) Find a free space.
         * (c) Write the new GID.
         *
         * Delete requires a different set of operations:
         * (a) Find the GID.
         * (b) Delete it.
         *
         * Add/delete should be carried out atomically.
         * This is done by taking this mutex in all writers.
         * We don't need this lock for IB, as the MAD
         * layer replaces all entries. All data_vec entries
         * are protected by this lock.
         */
        struct mutex         lock;
        struct ib_gid_table_entry *data_vec;
};

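/*
 * write_gid() is the single place a GID table slot is changed.  The slot
 * is marked GID_TABLE_ENTRY_INVALID around the RoCE provider's add_gid
 * or del_gid callback so that readers skip it in the meantime, the
 * netdev reference is moved from the old entry to the new one, and a
 * GID_CHANGE event is dispatched on success so that consumers can
 * requery the table.
 */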
static int write_gid(struct ib_device *ib_dev, u8 port,
                     struct ib_gid_table *table, int ix,
                     const union ib_gid *gid,
                     const struct ib_gid_attr *attr,
                     enum gid_table_write_action action,
                     bool default_gid)
{
        int ret = 0;
        struct net_device *old_net_dev;
        unsigned long flags;

        /* In the rdma_cap_roce_gid_table() case, this function must be
         * protected by a sleepable lock, as the provider's add_gid and
         * del_gid callbacks may sleep.
         */
        write_lock_irqsave(&table->data_vec[ix].lock, flags);

        if (rdma_cap_roce_gid_table(ib_dev, port)) {
                table->data_vec[ix].props |= GID_TABLE_ENTRY_INVALID;
                write_unlock_irqrestore(&table->data_vec[ix].lock, flags);
                /* GID_TABLE_WRITE_ACTION_MODIFY currently isn't supported by
                 * RoCE providers and thus only updates the cache.
                 */
                if (action == GID_TABLE_WRITE_ACTION_ADD)
                        ret = ib_dev->add_gid(ib_dev, port, ix, gid, attr,
                                              &table->data_vec[ix].context);
                else if (action == GID_TABLE_WRITE_ACTION_DEL)
                        ret = ib_dev->del_gid(ib_dev, port, ix,
                                              &table->data_vec[ix].context);
                write_lock_irqsave(&table->data_vec[ix].lock, flags);
        }

        old_net_dev = table->data_vec[ix].attr.ndev;
        if (old_net_dev && old_net_dev != attr->ndev)
                dev_put(old_net_dev);
        /* If the provider call failed, or this was a delete, store the zero GID */
        if (ret || action == GID_TABLE_WRITE_ACTION_DEL) {
                gid = &zgid;
                attr = &zattr;
                table->data_vec[ix].context = NULL;
        }
        if (default_gid)
                table->data_vec[ix].props |= GID_TABLE_ENTRY_DEFAULT;
        memcpy(&table->data_vec[ix].gid, gid, sizeof(*gid));
        memcpy(&table->data_vec[ix].attr, attr, sizeof(*attr));
        if (table->data_vec[ix].attr.ndev &&
            table->data_vec[ix].attr.ndev != old_net_dev)
                dev_hold(table->data_vec[ix].attr.ndev);

        table->data_vec[ix].props &= ~GID_TABLE_ENTRY_INVALID;

        write_unlock_irqrestore(&table->data_vec[ix].lock, flags);

        if (!ret && rdma_cap_roce_gid_table(ib_dev, port)) {
                struct ib_event event;

                event.device            = ib_dev;
                event.element.port_num  = port;
                event.event             = IB_EVENT_GID_CHANGE;

                ib_dispatch_event(&event);
        }
        return ret;
}

static int add_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   const union ib_gid *gid,
                   const struct ib_gid_attr *attr,
                   bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_ADD, default_gid);
}

static int modify_gid(struct ib_device *ib_dev, u8 port,
                      struct ib_gid_table *table, int ix,
                      const union ib_gid *gid,
                      const struct ib_gid_attr *attr,
                      bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, gid, attr,
                         GID_TABLE_WRITE_ACTION_MODIFY, default_gid);
}

static int del_gid(struct ib_device *ib_dev, u8 port,
                   struct ib_gid_table *table, int ix,
                   bool default_gid)
{
        return write_gid(ib_dev, port, table, ix, &zgid, &zattr,
                         GID_TABLE_WRITE_ACTION_DEL, default_gid);
}

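/*
 * Scan the table for the first valid entry that matches every field
 * selected in @mask: the GID value itself, the associated netdev and/or
 * the default-GID property.  Returns the matching index, or -1 if no
 * entry qualifies.
 */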
static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                    const struct ib_gid_attr *val, bool default_gid,
                    unsigned long mask)
{
        int i;

        for (i = 0; i < table->sz; i++) {
                unsigned long flags;
                struct ib_gid_attr *attr = &table->data_vec[i].attr;

                read_lock_irqsave(&table->data_vec[i].lock, flags);

                if (table->data_vec[i].props & GID_TABLE_ENTRY_INVALID)
                        goto next;

                if (mask & GID_ATTR_FIND_MASK_GID &&
                    memcmp(gid, &table->data_vec[i].gid, sizeof(*gid)))
                        goto next;

                if (mask & GID_ATTR_FIND_MASK_NETDEV &&
                    attr->ndev != val->ndev)
                        goto next;

                if (mask & GID_ATTR_FIND_MASK_DEFAULT &&
                    !!(table->data_vec[i].props & GID_TABLE_ENTRY_DEFAULT) !=
                    default_gid)
                        goto next;

                read_unlock_irqrestore(&table->data_vec[i].lock, flags);
                return i;
next:
                read_unlock_irqrestore(&table->data_vec[i].lock, flags);
        }

        return -1;
}

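/*
 * A default RoCE GID is laid out like an IPv6 link-local address: the
 * fe80::/64 prefix followed by an EUI-64 interface identifier derived
 * from the netdev's MAC address.
 */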
static void make_default_gid(struct net_device *dev, union ib_gid *gid)
{
        gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        addrconf_ifid_eui48(&gid->raw[8], dev);
}

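/*
 * Add @gid to @port's table unless the same (GID, netdev) pair is
 * already cached.  The zero GID and the netdev's default GID are
 * rejected up front; any zero, non-default entry counts as a free
 * slot, and -ENOSPC is returned when none is left.
 */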
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;
        int ret = 0;
        struct net_device *idev;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (!memcmp(gid, &zgid, sizeof(*gid)))
                return -EINVAL;

        if (ib_dev->get_netdev) {
                idev = ib_dev->get_netdev(ib_dev, port);
                if (idev && attr->ndev != idev) {
                        union ib_gid default_gid;

                        /* Adding default GIDs is not permitted */
                        make_default_gid(idev, &default_gid);
                        if (!memcmp(gid, &default_gid, sizeof(*gid))) {
                                dev_put(idev);
                                return -EPERM;
                        }
                }
                if (idev)
                        dev_put(idev);
        }

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, false, GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_NETDEV);
        if (ix >= 0)
                goto out_unlock;

        ix = find_gid(table, &zgid, NULL, false, GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_DEFAULT);
        if (ix < 0) {
                ret = -ENOSPC;
                goto out_unlock;
        }

        add_gid(ib_dev, port, table, ix, gid, attr, false);

out_unlock:
        mutex_unlock(&table->lock);
        return ret;
}

int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
                     union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);

        ix = find_gid(table, gid, attr, false,
                      GID_ATTR_FIND_MASK_GID |
                      GID_ATTR_FIND_MASK_NETDEV |
                      GID_ATTR_FIND_MASK_DEFAULT);
        if (ix < 0)
                goto out_unlock;

        del_gid(ib_dev, port, table, ix, false);

out_unlock:
        mutex_unlock(&table->lock);
        return 0;
}

int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
                                     struct net_device *ndev)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        int ix;

        table = ports_table[port - rdma_start_port(ib_dev)];

        mutex_lock(&table->lock);

        for (ix = 0; ix < table->sz; ix++)
                if (table->data_vec[ix].attr.ndev == ndev)
                        del_gid(ib_dev, port, table, ix, false);

        mutex_unlock(&table->lock);
        return 0;
}

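/*
 * Copy one cached entry out of the table.  An entry that is being
 * modified reports -EAGAIN.  When @attr is filled in, a reference is
 * taken on attr->ndev, which the caller is expected to release with
 * dev_put().
 */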
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
                              union ib_gid *gid, struct ib_gid_attr *attr)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned long flags;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (index < 0 || index >= table->sz)
                return -EINVAL;

        read_lock_irqsave(&table->data_vec[index].lock, flags);
        if (table->data_vec[index].props & GID_TABLE_ENTRY_INVALID) {
                read_unlock_irqrestore(&table->data_vec[index].lock, flags);
                return -EAGAIN;
        }

        memcpy(gid, &table->data_vec[index].gid, sizeof(*gid));
        if (attr) {
                memcpy(attr, &table->data_vec[index].attr, sizeof(*attr));
                if (attr->ndev)
                        dev_hold(attr->ndev);
        }

        read_unlock_irqrestore(&table->data_vec[index].lock, flags);
        return 0;
}

static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
                                    const union ib_gid *gid,
                                    const struct ib_gid_attr *val,
                                    unsigned long mask,
                                    u8 *port, u16 *index)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        u8 p;
        int local_index;

        for (p = 0; p < ib_dev->phys_port_cnt; p++) {
                table = ports_table[p];
                local_index = find_gid(table, gid, val, false, mask);
                if (local_index >= 0) {
                        if (index)
                                *index = local_index;
                        if (port)
                                *port = p + rdma_start_port(ib_dev);
                        return 0;
                }
        }

        return -ENOENT;
}

static int ib_cache_gid_find(struct ib_device *ib_dev,
                             const union ib_gid *gid,
                             struct net_device *ndev, u8 *port,
                             u16 *index)
{
        unsigned long mask = GID_ATTR_FIND_MASK_GID;
        struct ib_gid_attr gid_attr_val = {.ndev = ndev};

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        return _ib_cache_gid_table_find(ib_dev, gid, &gid_attr_val,
                                        mask, port, index);
}

int ib_cache_gid_find_by_port(struct ib_device *ib_dev,
                              const union ib_gid *gid,
                              u8 port, struct net_device *ndev,
                              u16 *index)
{
        int local_index;
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        struct ib_gid_table *table;
        unsigned long mask = GID_ATTR_FIND_MASK_GID;
        struct ib_gid_attr val = {.ndev = ndev};

        if (port < rdma_start_port(ib_dev) ||
            port > rdma_end_port(ib_dev))
                return -ENOENT;

        table = ports_table[port - rdma_start_port(ib_dev)];

        if (ndev)
                mask |= GID_ATTR_FIND_MASK_NETDEV;

        local_index = find_gid(table, gid, &val, false, mask);
        if (local_index >= 0) {
                if (index)
                        *index = local_index;
                return 0;
        }

        return -ENOENT;
}

static struct ib_gid_table *alloc_gid_table(int sz)
{
        unsigned int i;
        struct ib_gid_table *table =
                kzalloc(sizeof(struct ib_gid_table), GFP_KERNEL);
        if (!table)
                return NULL;

        table->data_vec = kcalloc(sz, sizeof(*table->data_vec), GFP_KERNEL);
        if (!table->data_vec)
                goto err_free_table;

        mutex_init(&table->lock);

        table->sz = sz;

        for (i = 0; i < sz; i++)
                rwlock_init(&table->data_vec[i].lock);

        return table;

err_free_table:
        kfree(table);
        return NULL;
}

static void release_gid_table(struct ib_gid_table *table)
{
        if (table) {
                kfree(table->data_vec);
                kfree(table);
        }
}

static void cleanup_gid_table_port(struct ib_device *ib_dev, u8 port,
                                   struct ib_gid_table *table)
{
        int i;

        if (!table)
                return;

        for (i = 0; i < table->sz; ++i) {
                if (memcmp(&table->data_vec[i].gid, &zgid,
                           sizeof(table->data_vec[i].gid)))
                        del_gid(ib_dev, port, table, i,
                                table->data_vec[i].props &
                                GID_TABLE_ENTRY_DEFAULT);
        }
}

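/*
 * Install or remove the default GID in its reserved slot (see
 * gid_table_reserve_default()).  For IB_CACHE_GID_DEFAULT_MODE_SET the
 * slot is rewritten only when the computed default differs from what
 * is currently cached; for any other mode the slot is simply cleared.
 */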
void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
                                  struct net_device *ndev,
                                  enum ib_cache_gid_default_mode mode)
{
        struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
        union ib_gid gid;
        struct ib_gid_attr gid_attr;
        struct ib_gid_table *table;
        int ix;
        union ib_gid current_gid;
        struct ib_gid_attr current_gid_attr = {};

        table = ports_table[port - rdma_start_port(ib_dev)];

        make_default_gid(ndev, &gid);
        memset(&gid_attr, 0, sizeof(gid_attr));
        gid_attr.ndev = ndev;

        mutex_lock(&table->lock);
        ix = find_gid(table, NULL, NULL, true, GID_ATTR_FIND_MASK_DEFAULT);

        /* Couldn't find the default GID location */
        WARN_ON(ix < 0);

        if (!__ib_cache_gid_get(ib_dev, port, ix,
                                &current_gid, &current_gid_attr) &&
            mode == IB_CACHE_GID_DEFAULT_MODE_SET &&
            !memcmp(&gid, &current_gid, sizeof(gid)) &&
            !memcmp(&gid_attr, &current_gid_attr, sizeof(gid_attr)))
                goto unlock;

        if ((memcmp(&current_gid, &zgid, sizeof(current_gid)) ||
             memcmp(&current_gid_attr, &zattr,
                    sizeof(current_gid_attr))) &&
            del_gid(ib_dev, port, table, ix, true)) {
                pr_warn("ib_cache_gid: can't delete index %d for default gid %pI6\n",
                        ix, gid.raw);
                goto unlock;
        }

        if (mode == IB_CACHE_GID_DEFAULT_MODE_SET)
                if (add_gid(ib_dev, port, table, ix, &gid, &gid_attr, true))
                        pr_warn("ib_cache_gid: unable to add default gid %pI6\n",
                                gid.raw);

unlock:
        if (current_gid_attr.ndev)
                dev_put(current_gid_attr.ndev);
        mutex_unlock(&table->lock);
}

static int gid_table_reserve_default(struct ib_device *ib_dev, u8 port,
                                     struct ib_gid_table *table)
{
        if (rdma_protocol_roce(ib_dev, port)) {
                struct ib_gid_table_entry *entry = &table->data_vec[0];

                entry->props |= GID_TABLE_ENTRY_DEFAULT;
        }

        return 0;
}

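/*
 * Allocate one GID table per physical port, sized from the port's
 * immutable gid_tbl_len, unwinding everything already set up if any
 * allocation fails.
 */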
static int _gid_table_setup_one(struct ib_device *ib_dev)
{
        u8 port;
        struct ib_gid_table **table;
        int err = 0;

        table = kcalloc(ib_dev->phys_port_cnt, sizeof(*table), GFP_KERNEL);

        if (!table) {
                pr_warn("failed to allocate ib gid cache for %s\n",
                        ib_dev->name);
                return -ENOMEM;
        }

        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                u8 rdma_port = port + rdma_start_port(ib_dev);

                table[port] =
                        alloc_gid_table(
                                ib_dev->port_immutable[rdma_port].gid_tbl_len);
                if (!table[port]) {
                        err = -ENOMEM;
                        goto rollback_table_setup;
                }

                err = gid_table_reserve_default(ib_dev,
                                                port + rdma_start_port(ib_dev),
                                                table[port]);
                if (err)
                        goto rollback_table_setup;
        }

        ib_dev->cache.gid_cache = table;
        return 0;

rollback_table_setup:
        for (port = 0; port < ib_dev->phys_port_cnt; port++) {
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
                release_gid_table(table[port]);
        }

        kfree(table);
        return err;
}

static void gid_table_release_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                release_gid_table(table[port]);

        kfree(table);
        ib_dev->cache.gid_cache = NULL;
}

static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
        struct ib_gid_table **table = ib_dev->cache.gid_cache;
        u8 port;

        if (!table)
                return;

        for (port = 0; port < ib_dev->phys_port_cnt; port++)
                cleanup_gid_table_port(ib_dev, port + rdma_start_port(ib_dev),
                                       table[port]);
}

static int gid_table_setup_one(struct ib_device *ib_dev)
{
        int err;

        err = _gid_table_setup_one(ib_dev);

        if (err)
                return err;

        err = roce_rescan_device(ib_dev);

        if (err) {
                gid_table_cleanup_one(ib_dev);
                gid_table_release_one(ib_dev);
        }

        return err;
}

int ib_get_cached_gid(struct ib_device *device,
                      u8                port_num,
                      int               index,
                      union ib_gid     *gid)
{
        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        return __ib_cache_gid_get(device, port_num, index, gid, NULL);
}
EXPORT_SYMBOL(ib_get_cached_gid);

int ib_find_cached_gid(struct ib_device *device,
                       const union ib_gid *gid,
                       u8               *port_num,
                       u16              *index)
{
        return ib_cache_gid_find(device, gid, NULL, port_num, index);
}
EXPORT_SYMBOL(ib_find_cached_gid);

int ib_get_cached_pkey(struct ib_device *device,
                       u8                port_num,
                       int               index,
                       u16              *pkey)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        if (index < 0 || index >= cache->table_len)
                ret = -EINVAL;
        else
                *pkey = cache->table[index];

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);

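/*
 * Bit 15 of a P_Key is the membership bit: set for full members and
 * clear for limited members.  The lookup compares only the low 15 bits
 * and prefers a full-member entry, falling back to a limited-member
 * match when that is all the table holds.
 */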
int ib_find_cached_pkey(struct ib_device *device,
                        u8                port_num,
                        u16               pkey,
                        u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;
        int partial_ix = -1;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
                        if (cache->table[i] & 0x8000) {
                                *index = i;
                                ret = 0;
                                break;
                        } else
                                partial_ix = i;
                }

        if (ret && partial_ix >= 0) {
                *index = partial_ix;
                ret = 0;
        }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);

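/*
 * Unlike ib_find_cached_pkey(), this lookup compares all 16 bits, so
 * the full- and limited-member variants of a P_Key are distinct.
 */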
int ib_find_exact_cached_pkey(struct ib_device *device,
                              u8                port_num,
                              u16               pkey,
                              u16              *index)
{
        struct ib_pkey_cache *cache;
        unsigned long flags;
        int i;
        int ret = -ENOENT;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);

        cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];

        *index = -1;

        for (i = 0; i < cache->table_len; ++i)
                if (cache->table[i] == pkey) {
                        *index = i;
                        ret = 0;
                        break;
                }

        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);

int ib_get_cached_lmc(struct ib_device *device,
                      u8                port_num,
                      u8               *lmc)
{
        unsigned long flags;
        int ret = 0;

        if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
                return -EINVAL;

        read_lock_irqsave(&device->cache.lock, flags);
        *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
        read_unlock_irqrestore(&device->cache.lock, flags);

        return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);

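/*
 * Rebuild the cached state of one port: the port attributes, P_Key
 * table and (for devices without a RoCE GID table) GID table are
 * queried into fresh buffers without the cache lock held, then swapped
 * in under write_lock_irq().  RoCE GID tables are maintained separately
 * through ib_cache_gid_add()/ib_cache_gid_del() and roce_gid_mgmt.
 */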
static void ib_cache_update(struct ib_device *device,
                            u8                port)
{
        struct ib_port_attr       *tprops = NULL;
        struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
        struct ib_gid_cache {
                int             table_len;
                union ib_gid    table[0];
        } *gid_cache = NULL;
        int                        i;
        int                        ret;
        struct ib_gid_table       *table;
        struct ib_gid_table      **ports_table = device->cache.gid_cache;
        bool                       use_roce_gid_table =
                                        rdma_cap_roce_gid_table(device, port);

        if (port < rdma_start_port(device) || port > rdma_end_port(device))
                return;

        table = ports_table[port - rdma_start_port(device)];

        tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
        if (!tprops)
                return;

        ret = ib_query_port(device, port, tprops);
        if (ret) {
                printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
                       ret, device->name);
                goto err;
        }

        pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
                             sizeof *pkey_cache->table, GFP_KERNEL);
        if (!pkey_cache)
                goto err;

        pkey_cache->table_len = tprops->pkey_tbl_len;

        if (!use_roce_gid_table) {
                gid_cache = kmalloc(sizeof(*gid_cache) + tprops->gid_tbl_len *
                                    sizeof(*gid_cache->table), GFP_KERNEL);
                if (!gid_cache)
                        goto err;

                gid_cache->table_len = tprops->gid_tbl_len;
        }

        for (i = 0; i < pkey_cache->table_len; ++i) {
                ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
                if (ret) {
                        printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
                               ret, device->name, i);
                        goto err;
                }
        }

        if (!use_roce_gid_table) {
                for (i = 0; i < gid_cache->table_len; ++i) {
                        ret = ib_query_gid(device, port, i,
                                           gid_cache->table + i);
                        if (ret) {
                                printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
                                       ret, device->name, i);
                                goto err;
                        }
                }
        }

        write_lock_irq(&device->cache.lock);

        old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];

        device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
        if (!use_roce_gid_table) {
                for (i = 0; i < gid_cache->table_len; i++) {
                        modify_gid(device, port, table, i, gid_cache->table + i,
                                   &zattr, false);
                }
        }

        device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;

        write_unlock_irq(&device->cache.lock);

        kfree(gid_cache);
        kfree(old_pkey_cache);
        kfree(tprops);
        return;

err:
        kfree(pkey_cache);
        kfree(gid_cache);
        kfree(tprops);
}

static void ib_cache_task(struct work_struct *_work)
{
        struct ib_update_work *work =
                container_of(_work, struct ib_update_work, work);

        ib_cache_update(work->device, work->port_num);
        kfree(work);
}

static void ib_cache_event(struct ib_event_handler *handler,
                           struct ib_event *event)
{
        struct ib_update_work *work;

        if (event->event == IB_EVENT_PORT_ERR    ||
            event->event == IB_EVENT_PORT_ACTIVE ||
            event->event == IB_EVENT_LID_CHANGE  ||
            event->event == IB_EVENT_PKEY_CHANGE ||
            event->event == IB_EVENT_SM_CHANGE   ||
            event->event == IB_EVENT_CLIENT_REREGISTER ||
            event->event == IB_EVENT_GID_CHANGE) {
                work = kmalloc(sizeof *work, GFP_ATOMIC);
                if (work) {
                        INIT_WORK(&work->work, ib_cache_task);
                        work->device   = event->device;
                        work->port_num = event->element.port_num;
                        queue_work(ib_wq, &work->work);
                }
        }
}

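/*
 * The order here matters: the per-port caches are allocated and primed
 * before the event handler is registered, so by the time an event can
 * trigger an asynchronous update the cache state is fully allocated.
 */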
int ib_cache_setup_one(struct ib_device *device)
{
        int p;
        int err;

        rwlock_init(&device->cache.lock);

        device->cache.pkey_cache =
                kzalloc(sizeof *device->cache.pkey_cache *
                        (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
        device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
                                          (rdma_end_port(device) -
                                           rdma_start_port(device) + 1),
                                          GFP_KERNEL);
        if (!device->cache.pkey_cache ||
            !device->cache.lmc_cache) {
                printk(KERN_WARNING "Couldn't allocate cache for %s\n",
                       device->name);
                return -ENOMEM;
        }

        err = gid_table_setup_one(device);
        if (err)
                /* Allocated memory will be cleaned in the release function */
                return err;

        for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                ib_cache_update(device, p + rdma_start_port(device));

        INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
                              device, ib_cache_event);
        err = ib_register_event_handler(&device->cache.event_handler);
        if (err)
                goto err;

        return 0;

err:
        gid_table_cleanup_one(device);
        return err;
}

void ib_cache_release_one(struct ib_device *device)
{
        int p;

        /*
         * The release function frees all the cache elements.
         * This function should be called as part of freeing
         * all the device's resources when the cache could no
         * longer be accessed.
         */
        if (device->cache.pkey_cache)
                for (p = 0;
                     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
                        kfree(device->cache.pkey_cache[p]);

        gid_table_release_one(device);
        kfree(device->cache.pkey_cache);
        kfree(device->cache.lmc_cache);
}

void ib_cache_cleanup_one(struct ib_device *device)
{
        /* The cleanup function unregisters the event handler,
         * waits for all in-progress workqueue elements and cleans
         * up the GID cache. This function should be called after
         * the device was removed from the devices list and all
         * clients were removed, so the cache exists but is
         * non-functional and shouldn't be updated anymore.
         */
        ib_unregister_event_handler(&device->cache.event_handler);
        flush_workqueue(ib_wq);
        gid_table_cleanup_one(device);
}

void __init ib_cache_setup(void)
{
        roce_gid_mgmt_init();
}

void __exit ib_cache_cleanup(void)
{
        roce_gid_mgmt_cleanup();
}