/* drivers/infiniband/hw/mlx5/mr.c */

/*
 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"

enum {
	DEF_CACHE_SIZE	= 10,
	MAX_PENDING_REG_MR = 8,
};

enum {
	MLX5_UMR_ALIGN	= 2048
};

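/*
 * Buffers posted through the UMR QP apparently must sit on a
 * MLX5_UMR_ALIGN (2048-byte) boundary; mr_align() rounds a pointer up
 * to the requested power-of-two alignment.
 *
 * order2idx() maps an allocation order to its slot in the MR cache;
 * the first cache entry holds the smallest supported order.
 */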
static __be64 *mr_align(__be64 *ptr, int align)
{
	unsigned long mask = align - 1;

	return (__be64 *)(((unsigned long)ptr + mask) & ~mask);
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

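/*
 * Completion callback for the asynchronous mkey creation started in
 * add_keys(); it presumably runs from the command-interface completion
 * path, hence the irqsave locking.  On any failure the MR is freed and
 * fill_delay is set so cache refills back off for about a second.  On
 * success the returned mkey index is combined with a fresh variable
 * key byte and the MR is added to its cache bucket.
 */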
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long delta = jiffies - mr->start;
	unsigned long index;
	unsigned long flags;

	index = find_last_bit(&delta, 8 * sizeof(delta));
	if (index == 64)
		index = 0;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	if (mr->out.hdr.status) {
		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
			     mr->out.hdr.status,
			     be32_to_cpu(mr->out.hdr.syndrome));
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags);
	key = dev->mdev.priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags);
	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);
}

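/*
 * Try to add num free mkeys to cache entry c.  Creation completes
 * asynchronously in reg_mr_callback(), and at most MAX_PENDING_REG_MR
 * requests may be in flight per entry, so -EAGAIN here means "retry
 * later", not failure.
 */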
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;
	int err = 0;
	int i;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->umred = 1;
		mr->dev = dev;
		in->seg.status = 1 << 6; /* free */
		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
		in->seg.log2_page_size = 12;

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		mr->start = jiffies;
		err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
					    sizeof(*in), reg_mr_callback,
					    mr, &mr->out);
		if (err) {
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

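/*
 * Destroy up to num cached mkeys from entry c, stopping early if the
 * free list empties.  An MR whose destroy command fails is leaked
 * rather than freed, apparently to avoid recycling memory the device
 * may still reference.
 */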
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

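/*
 * debugfs interface: each cache entry exposes a writable "size" (total
 * mkeys owned by the bucket) and "limit" (the low-water mark the
 * background worker refills toward), plus "cur" and "miss" counters.
 */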
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	size_t cnt;
	u32 var;
	int err;
	int c;

	/* copy no more than the user actually wrote, and leave room to
	 * NUL-terminate */
	cnt = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, cnt))
		return -EFAULT;

	lbuf[cnt] = 0;
	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	size_t cnt;
	u32 var;
	int err;
	int c;

	cnt = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, cnt))
		return -EFAULT;

	lbuf[cnt] = 0;
	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

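/*
 * Background maintenance for one cache entry: grow toward 2 * limit
 * one mkey at a time (backing off briefly on -EAGAIN, longer on hard
 * errors or while fill_delay is set), and shrink oversized entries
 * only after 300 seconds without additions anywhere in the cache.
 */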
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		if (!someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

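/*
 * Take an MR from the smallest cache entry that can serve the
 * requested order, falling back to larger orders when a bucket is
 * empty.  Each bucket touched gets its worker kicked to replenish;
 * a complete miss is counted and leaves the caller to register the
 * region without the cache.
 */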
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c;
	int i;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);

		if (mr)
			break;
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

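/*
 * Return an MR to its cache bucket; if the bucket ends up more than
 * twice over its limit, queue the worker to shrink it.
 */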
static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}
	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

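/*
 * Teardown helper: drain cache entry c completely, destroying every
 * cached mkey.  As in remove_keys(), an MR whose destroy fails is
 * leaked instead of freed.
 */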
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev.priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			return -ENOMEM;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			return -ENOMEM;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			return -ENOMEM;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			return -ENOMEM;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			return -ENOMEM;
	}

	return 0;
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
}

static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}

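/*
 * Build the MR cache: a single-threaded workqueue shared by all
 * entries, the fill_delay back-off timer, and one bucket per order
 * starting at order 2.  Bucket limits come from the device profile
 * when MLX5_PROF_MASK_MR_CACHE is set; every worker is queued right
 * away so the buckets pre-fill in the background.
 */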
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int limit;
	int size;
	int err;
	int i;

	cache->wq = create_singlethread_workqueue("mkey_cache");
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;

		if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE) {
			size = dev->mdev.profile->mr_cache[i].size;
			limit = dev->mdev.profile->mr_cache[i].limit;
		} else {
			size = DEF_CACHE_SIZE;
			limit = 0;
		}
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		ent->limit = limit;
		queue_work(cache->wq, &ent->work);
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	return 0;
}

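/*
 * Mirror of mlx5_mr_cache_init(): stop refills, flush the worker,
 * remove debugfs, destroy all cached mkeys, then tear down the
 * workqueue and the back-off timer.
 */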
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

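/*
 * ib_get_dma_mr() entry point: a physical-address (MLX5_ACCESS_MODE_PA)
 * mkey starting at address 0 with the MLX5_MKEY_LEN64 length flag, so
 * it spans the whole address space.  Created synchronously and never
 * cached.
 */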
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_core_dev *mdev = &dev->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	seg = &in->seg;
	seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	seg->start_addr = 0;

	err = mlx5_core_create_mkey(mdev, &mr->mmr, in, sizeof(*in), NULL, NULL,
				    NULL);
	if (err)
		goto err_in;

	kfree(in);
	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

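/*
 * Translation-table sizes are expressed in 16-byte octowords, each
 * holding two 8-byte page entries: get_octo_len() is the page count of
 * a possibly unaligned region, rounded up and divided by two.  Regions
 * up to order 17 go through the UMR QP; larger ones use the command
 * interface (see reg_create() below).
 */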
static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}

static int use_umr(int order)
{
	return order <= 17;
}

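/*
 * Build the UMR work request for a registration: the DMA-mapped page
 * array is described by a single SGE under the reserved UMR MR, and
 * the fast_reg union carries the mkey parameters.  The page_list
 * pointer is overloaded to carry the PD, apparently for the UMR WQE
 * builder on the send path to consume.
 */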
static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_mr *mr = dev->umrc.mr;

	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = mr->lkey;

	wr->next = NULL;
	wr->send_flags = 0;
	wr->sg_list = sg;
	if (n)
		wr->num_sge = 1;
	else
		wr->num_sge = 0;

	wr->opcode = MLX5_IB_WR_UMR;
	wr->wr.fast_reg.page_list_len = n;
	wr->wr.fast_reg.page_shift = page_shift;
	wr->wr.fast_reg.rkey = key;
	wr->wr.fast_reg.iova_start = virt_addr;
	wr->wr.fast_reg.length = len;
	wr->wr.fast_reg.access_flags = access_flags;
	wr->wr.fast_reg.page_list = (struct ib_fast_reg_page_list *)pd;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	wr->send_flags = MLX5_IB_SEND_UMR_UNREG;
	wr->opcode = MLX5_IB_WR_UMR;
	wr->wr.fast_reg.rkey = key;
}

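/*
 * Completion handler for the UMR CQ: drain all completions, record
 * each work request's status in its MR and wake the waiter, then
 * re-arm the CQ.
 */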
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
{
	struct mlx5_ib_mr *mr;
	struct ib_wc wc;
	int err;

	while (1) {
		err = ib_poll_cq(cq, 1, &wc);
		if (err < 0) {
			pr_warn("poll cq error %d\n", err);
			return;
		}
		if (err == 0)
			break;

		mr = (struct mlx5_ib_mr *)(unsigned long)wc.wr_id;
		mr->status = wc.status;
		complete(&mr->done);
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}

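/*
 * Fast-path registration through the UMR QP.  Grab a cached MR of the
 * right order (on a miss, kick off one asynchronous refill and return
 * -EAGAIN), build the page list in a DMA-mapped buffer aligned to
 * MLX5_UMR_ALIGN, post the UMR work request and sleep until the CQ
 * handler completes it.
 */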
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct ib_send_wr wr, *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size = sizeof(u64) * npages;
	int err;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->pas = kmalloc(size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!mr->pas) {
		err = -ENOMEM;
		goto error;
	}

	mlx5_ib_populate_pas(dev, umem, page_shift,
			     mr_align(mr->pas, MLX5_UMR_ALIGN), 1);

	mr->dma = dma_map_single(ddev, mr_align(mr->pas, MLX5_UMR_ALIGN), size,
				 DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, mr->dma)) {
		kfree(mr->pas);
		err = -ENOMEM;
		goto error;
	}

	memset(&wr, 0, sizeof(wr));
	wr.wr_id = (u64)(unsigned long)mr;
	prep_umr_reg_wqe(pd, &wr, &sg, mr->dma, npages, mr->mmr.key,
			 page_shift, virt_addr, len, access_flags);

	/*
	 * We serialize polls so one process does not kidnap another's
	 * completion.  This is not a problem since the WR completes in
	 * around 1 usec.
	 */
	down(&umrc->sem);
	init_completion(&mr->done);
	err = ib_post_send(umrc->qp, &wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		up(&umrc->sem);
		dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
		kfree(mr->pas);
		goto error;
	}
	wait_for_completion(&mr->done);
	up(&umrc->sem);

	dma_unmap_single(ddev, mr->dma, size, DMA_TO_DEVICE);
	kfree(mr->pas);

	if (mr->status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "reg umr failed\n");
		err = -EFAULT;
		goto error;
	}

	return mr;

error:
	free_cached_mr(dev, mr);
	return ERR_PTR(err);
}

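/*
 * Slow-path registration through the create_mkey command: the whole
 * translation table travels in the (vmalloc'ed) mailbox, so this works
 * for regions of any size and is used when the region is too large for
 * UMR or the cache is empty.
 */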
static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
				     u64 length, struct ib_umem *umem,
				     int npages, int page_shift,
				     int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int inlen;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas, 0);

	in->seg.flags = convert_access(access_flags) |
		MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->seg.start_addr = cpu_to_be64(virt_addr);
	in->seg.len = cpu_to_be64(length);
	in->seg.bsfs_octo_size = 0;
	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length,
							1 << page_shift));
	in->seg.log2_page_size = page_shift;
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
							 1 << page_shift));
	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL,
				    NULL, NULL);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->umem = umem;
	mlx5_vfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmr.key);

	return mr;

err_2:
	mlx5_vfree(in);

err_1:
	kfree(mr);

	return ERR_PTR(err);
}

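/*
 * ib_reg_user_mr() entry point.  Pin the user pages, compute the best
 * page size and allocation order for the region, then try the UMR fast
 * path, falling back to reg_create() when the cache is empty or the
 * region is too large.
 */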
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx\n",
		    start, virt_addr, length);
	umem = ib_umem_get(pd->uobject->context, start, length, access_flags,
			   0);
	if (IS_ERR(umem)) {
		mlx5_ib_dbg(dev, "umem get failed\n");
		return (void *)umem;
	}

	mlx5_ib_cont_pages(umem, start, &npages, &page_shift, &ncont, &order);
	if (!npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		err = -EINVAL;
		goto error;
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    npages, ncont, order, page_shift);

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
			mr = NULL;
		}
	}

	if (!mr)
		mr = reg_create(pd, virt_addr, length, umem, ncont, page_shift,
				access_flags);

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmr.key);

	mr->umem = umem;
	mr->npages = npages;
	spin_lock(&dev->mr_lock);
	dev->mdev.priv.reg_pages += npages;
	spin_unlock(&dev->mr_lock);
	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

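/*
 * Invalidate a cached mkey through the UMR QP so its MR can go back to
 * the cache.  mlx5_ib_dereg_mr() below chooses the matching teardown:
 * UMR unregister plus cache return for cached MRs, a synchronous
 * destroy command otherwise, followed by releasing the umem and the
 * pinned-page accounting.
 */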
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct umr_common *umrc = &dev->umrc;
	struct ib_send_wr wr, *bad;
	int err;

	memset(&wr, 0, sizeof(wr));
	wr.wr_id = (u64)(unsigned long)mr;
	prep_umr_unreg_wqe(dev, &wr, mr->mmr.key);

	down(&umrc->sem);
	init_completion(&mr->done);
	err = ib_post_send(umrc->qp, &wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	}
	wait_for_completion(&mr->done);
	up(&umrc->sem);
	if (mr->status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct ib_umem *umem = mr->umem;
	int npages = mr->npages;
	int umred = mr->umred;
	int err;

	if (!umred) {
		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmr.key, err);
			return err;
		}
	} else {
		err = unreg_umr(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed unregister\n");
			return err;
		}
		free_cached_mr(dev, mr);
	}

	if (umem) {
		ib_umem_release(umem);
		spin_lock(&dev->mr_lock);
		dev->mdev.priv.reg_pages -= npages;
		spin_unlock(&dev->mr_lock);
	}

	if (!umred)
		kfree(mr);

	return 0;
}

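/*
 * Fast-register (FRMR) support: allocate a free mkey for the consumer
 * to populate later with an IB_WR_FAST_REG_MR work request, and the
 * page list it posts.  The WARN_ON checks the 64-byte alignment the
 * hardware apparently expects of the mapped page list, which
 * dma_alloc_coherent() should already provide.
 */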
struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
					int max_page_list_len)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	in->seg.status = 1 << 6; /* free */
	in->seg.xlt_oct_size = cpu_to_be32((max_page_list_len + 1) / 2);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->seg.flags = MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	/* TBD not needed - issue 197292 */
	in->seg.log2_page_size = PAGE_SHIFT;

	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
				    NULL, NULL);
	kfree(in);
	if (err)
		goto err_free;

	mr->ibmr.lkey = mr->mmr.key;
	mr->ibmr.rkey = mr->mmr.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_free:
	kfree(mr);
	return ERR_PTR(err);
}

struct ib_fast_reg_page_list *mlx5_ib_alloc_fast_reg_page_list(struct ib_device *ibdev,
							       int page_list_len)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl;
	int size = page_list_len * sizeof(u64);

	mfrpl = kmalloc(sizeof(*mfrpl), GFP_KERNEL);
	if (!mfrpl)
		return ERR_PTR(-ENOMEM);

	mfrpl->ibfrpl.page_list = kmalloc(size, GFP_KERNEL);
	if (!mfrpl->ibfrpl.page_list)
		goto err_free;

	mfrpl->mapped_page_list = dma_alloc_coherent(ibdev->dma_device,
						     size, &mfrpl->map,
						     GFP_KERNEL);
	if (!mfrpl->mapped_page_list)
		goto err_free;

	WARN_ON(mfrpl->map & 0x3f);

	return &mfrpl->ibfrpl;

err_free:
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
	return ERR_PTR(-ENOMEM);
}

void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(page_list);
	struct mlx5_ib_dev *dev = to_mdev(page_list->device);
	int size = page_list->max_page_list_len * sizeof(u64);

	dma_free_coherent(&dev->mdev.pdev->dev, size, mfrpl->mapped_page_list,
			  mfrpl->map);
	kfree(mfrpl->ibfrpl.page_list);
	kfree(mfrpl);
}