/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static __be64 mlx5_ib_update_mtt_emergency_buffer[
		MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
	__aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif

static int clean_mr(struct mlx5_ib_mr *mr);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

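/*
 * Map an MR allocation order to its index in the MR cache entry array.
 * Orders below the first cache entry fall back to index 0.
 */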
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

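/*
 * Return true when the MTT space reserved for this cached MR (based on its
 * allocation order) is large enough to cover the new range, so the
 * translation can be updated with a UMR WQE instead of recreating the mkey.
 */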
static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization have finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

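/*
 * Completion callback for the asynchronous mkey creation issued by
 * add_keys(). On success the new MR is added to its cache bucket and the
 * mkey is inserted into the device's mkey radix tree; on failure the MR is
 * freed and cache refilling is throttled via the delay timer.
 */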
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	if (mr->out.hdr.status) {
		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
			     mr->out.hdr.status,
			     be32_to_cpu(mr->out.hdr.syndrome));
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);
}

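/*
 * Asynchronously create @num mkeys for cache entry @c. Requests beyond
 * MAX_PENDING_REG_MR outstanding commands return -EAGAIN so the caller can
 * retry later; completions are handled by reg_mr_callback().
 */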
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;
	int err = 0;
	int i;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->umred = 1;
		mr->dev = dev;
		in->seg.status = MLX5_MKEY_STATUS_FREE;
		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
		in->seg.log2_page_size = 12;

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in,
					    sizeof(*in), reg_mr_callback,
					    mr, &mr->out);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

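/*
 * Destroy up to @num cached MRs from the head of cache entry @c, shrinking
 * the entry.
 */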
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

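/*
 * debugfs interface: each cache entry exposes "size" and "limit" files that
 * let user space grow or shrink the entry and adjust its refill watermark,
 * plus read-only "cur" and "miss" counters.
 */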
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

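/*
 * Work handler shared by cache_work_func() and delayed_cache_work_func():
 * refills a cache entry up to twice its limit and, once the entry has grown
 * beyond that, lazily shrinks it back as described in the comment below.
 */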
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as garbage collection
		 * task. Such task is intended to be run when no other active
		 * processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in near future.
		 *
		 * In such case, we don't execute remove_keys() and postpone
		 * the garbage collection work to try to run in next cycle,
		 * in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

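/*
 * Take an MR from the cache. Start at the entry matching @order and fall
 * back to larger orders if the preferred entry is empty; every entry
 * touched gets a refill work item queued. Returns NULL (and counts a miss)
 * if nothing suitable is cached.
 */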
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c;
	int i;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}
	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			return -ENOMEM;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			return -ENOMEM;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			return -ENOMEM;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			return -ENOMEM;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			return -ENOMEM;
	}

	return 0;
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
}

static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}

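/*
 * Initialize the MR cache: one workqueue plus MAX_MR_CACHE_ENTRIES entries
 * of increasing order (starting at order 2), each with its own refill and
 * shrink work. Limits come from the device profile when one is provided.
 */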
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int limit;
	int err;
	int i;

	cache->wq = create_singlethread_workqueue("mkey_cache");
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		INIT_LIST_HEAD(&cache->ent[i].head);
		spin_lock_init(&cache->ent[i].lock);

		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;

		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
			limit = dev->mdev->profile->mr_cache[i].limit;
		else
			limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		ent->limit = limit;
		queue_work(cache->wq, &ent->work);
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	return 0;
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

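/*
 * Allocate a DMA MR: a mkey in physical-address mode with the length-64
 * bit set (MLX5_MKEY_LEN64), carrying the requested access rights.
 */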
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	seg = &in->seg;
	seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	seg->start_addr = 0;

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL,
				    NULL);
	if (err)
		goto err_in;

	kfree(in);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

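/*
 * Number of 16-byte octowords needed for the MTT translation of a region of
 * @len bytes starting at @addr with the given page size (two 8-byte MTT
 * entries per octoword).
 */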
static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}

static int use_umr(int order)
{
	return order <= MLX5_MAX_UMR_SHIFT;
}

static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int npages, int page_shift, int *size,
			  __be64 **mr_pas, dma_addr_t *dma)
{
	__be64 *pas;
	struct device *ddev = dev->ib_dev.dma_device;

	/*
	 * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the pas array, we allocate
	 * a little more.
	 */
	*size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
	*mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!(*mr_pas))
		return -ENOMEM;

	pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
	/* Clear padding after the actual pages. */
	memset(pas + npages, 0, *size - npages * sizeof(u64));

	*dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, *dma)) {
		kfree(*mr_pas);
		return -ENOMEM;
	}

	return 0;
}

static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
				struct ib_sge *sg, u64 dma, int n, u32 key,
				int page_shift)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = dev->umrc.pd->local_dma_lkey;

	wr->next = NULL;
	wr->sg_list = sg;
	if (n)
		wr->num_sge = 1;
	else
		wr->num_sge = 0;

	wr->opcode = MLX5_IB_WR_UMR;

	umrwr->npages = n;
	umrwr->page_shift = page_shift;
	umrwr->mkey = key;
}

static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);

	wr->send_flags = 0;

	umrwr->target.virt_addr = virt_addr;
	umrwr->length = len;
	umrwr->access_flags = access_flags;
	umrwr->pd = pd;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr->opcode = MLX5_IB_WR_UMR;
	umrwr->mkey = key;
}

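/*
 * Pin the user memory for [start, start + length) and report its page
 * count, the best page shift to use, the number of pages at that shift
 * (ncont) and the allocation order.
 */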
static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
				   int access_flags, int *npages,
				   int *page_shift, int *ncont, int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
					   access_flags, 0);
	if (IS_ERR(umem)) {
		mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
		return (void *)umem;
	}

	mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(umem);
		return ERR_PTR(-EINVAL);
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return umem;
}

void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
{
	struct mlx5_ib_umr_context *context;
	struct ib_wc wc;
	int err;

	while (1) {
		err = ib_poll_cq(cq, 1, &wc);
		if (err < 0) {
			pr_warn("poll cq error %d\n", err);
			return;
		}
		if (err == 0)
			break;

		context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
		context->status = wc.status;
		complete(&context->done);
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}

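/*
 * Register a user MR through the UMR QP: take a pre-created mkey from the
 * cache (refilling once if needed), DMA-map the page list and post a UMR
 * WQE that points the mkey at it. Returns ERR_PTR(-EAGAIN) when the cache
 * has nothing suitable so the caller can fall back to reg_create().
 */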
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr;
	struct ib_send_wr *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size;
	__be64 *mr_pas;
	dma_addr_t dma;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
			     &dma);
	if (err)
		goto free_mr;

	memset(&umrwr, 0, sizeof(umrwr));
	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			 page_shift, virt_addr, len, access_flags);

	mlx5_ib_init_umr_context(&umr_context);
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		goto unmap_dma;
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed\n");
			err = -EFAULT;
		}
	}

	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	mr->live = 1;

unmap_dma:
	up(&umrc->sem);
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

	kfree(mr_pas);

free_mr:
	if (err) {
		free_cached_mr(dev, mr);
		return ERR_PTR(err);
	}

	return mr;
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
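/*
 * Update (or zap) a range of MTT entries of an ODP MR via UMR WQEs,
 * chunking the page list to at most one page of MTTs per post. This can run
 * from the invalidation path, hence the GFP_ATOMIC allocation and the
 * preallocated emergency buffer fallback.
 */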
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
		       int zap)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_umem *umem = mr->umem;
	int size;
	__be64 *pas;
	dma_addr_t dma;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
	const int page_index_mask = page_index_alignment - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	int use_emergency_buf = 0;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly */
	if (start_page_index & page_index_mask) {
		npages += start_page_index & page_index_mask;
		start_page_index &= ~page_index_mask;
	}

	pages_to_map = ALIGN(npages, page_index_alignment);

	if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
		return -EINVAL;

	size = sizeof(u64) * pages_to_map;
	size = min_t(int, PAGE_SIZE, size);
	/* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
	 * code, when we are called from an invalidation. The pas buffer must
	 * be 2k-aligned for Connect-IB. */
	pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
	if (!pas) {
		mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
		pas = mlx5_ib_update_mtt_emergency_buffer;
		size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
		use_emergency_buf = 1;
		mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
		memset(pas, 0, size);
	}
	pages_iter = size / sizeof(u64);
	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
		err = -ENOMEM;
		goto free_pas;
	}

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, start_page_index += pages_iter) {
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

		npages = min_t(size_t,
			       pages_iter,
			       ib_umem_num_pages(umem) - start_page_index);

		if (!zap) {
			__mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
					       start_page_index, npages, pas,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages brought from the
			 * umem. */
			memset(pas + npages, 0, size - npages * sizeof(u64));
		}

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		memset(&wr, 0, sizeof(wr));
		wr.wr.wr_id = (u64)(unsigned long)&umr_context;

		sg.addr = dma;
		sg.length = ALIGN(npages * sizeof(u64),
				  MLX5_UMR_MTT_ALIGNMENT);
		sg.lkey = dev->umrc.pd->local_dma_lkey;

		wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				   MLX5_IB_SEND_UMR_UPDATE_MTT;
		wr.wr.sg_list = &sg;
		wr.wr.num_sge = 1;
		wr.wr.opcode = MLX5_IB_WR_UMR;
		wr.npages = sg.length / sizeof(u64);
		wr.page_shift = PAGE_SHIFT;
		wr.mkey = mr->mmkey.key;
		wr.target.offset = start_page_index;

		mlx5_ib_init_umr_context(&umr_context);
		down(&umrc->sem);
		err = ib_post_send(umrc->qp, &wr.wr, &bad);
		if (err) {
			mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
		} else {
			wait_for_completion(&umr_context.done);
			if (umr_context.status != IB_WC_SUCCESS) {
				mlx5_ib_err(dev, "UMR completion failed, code %d\n",
					    umr_context.status);
				err = -EFAULT;
			}
		}
		up(&umrc->sem);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
	if (!use_emergency_buf)
		free_page((unsigned long)pas);
	else
		mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);

	return err;
}
#endif

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int inlen;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
	 * in the page list submitted with the command. */
	in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
	in->seg.flags = convert_access(access_flags) |
		MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->seg.start_addr = cpu_to_be64(virt_addr);
	in->seg.len = cpu_to_be64(length);
	in->seg.bsfs_octo_size = 0;
	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
	in->seg.log2_page_size = page_shift;
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
							 1 << page_shift));
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen, NULL,
				    NULL, NULL);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->umem = umem;
	mr->dev = dev;
	mr->live = 1;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

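/*
 * ib_reg_user_mr handler: pin the user memory, then register it either via
 * the UMR path (cached mkey) when the order is small enough, or with a
 * blocking mkey creation through reg_create().
 */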
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);
	umem = mr_umem_get(pd, start, length, access_flags, &npages,
			   &page_shift, &ncont, &order);

	if (IS_ERR(umem))
		return (void *)umem;

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
			mr = NULL;
		}
	} else if (access_flags & IB_ACCESS_ON_DEMAND) {
		err = -EINVAL;
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
		goto error;
	}

	if (!mr)
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags);

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr;
	struct ib_send_wr *bad;
	int err;

	memset(&umrwr.wr, 0, sizeof(umrwr));
	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);

	mlx5_ib_init_umr_context(&umr_context);
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	} else {
		wait_for_completion(&umr_context.done);
		up(&umrc->sem);
	}
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
}

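/*
 * Modify an existing mkey in place with a single UMR WQE, updating its
 * translation, PD and/or access flags according to @flags.
 */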
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
		     u64 length, int npages, int page_shift, int order,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr umrwr = {};
	struct ib_sge sg;
	struct umr_common *umrc = &dev->umrc;
	dma_addr_t dma = 0;
	__be64 *mr_pas = NULL;
	int size;
	int err;

	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	if (flags & IB_MR_REREG_TRANS) {
		err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
				     &mr_pas, &dma);
		if (err)
			return err;

		umrwr.target.virt_addr = virt_addr;
		umrwr.length = length;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
	}

	prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			    page_shift);

	if (flags & IB_MR_REREG_PD) {
		umrwr.pd = pd;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
	}

	mlx5_ib_init_umr_context(&umr_context);

	/* post send request to UMR QP */
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);

	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}

	up(&umrc->sem);
	if (flags & IB_MR_REREG_TRANS) {
		dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
		kfree(mr_pas);
	}
	return err;
}

int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
	int page_shift = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
				       &page_shift, &ncont, &order);
		if (IS_ERR(mr->umem)) {
			err = PTR_ERR(mr->umem);
			mr->umem = NULL;
			return err;
		}
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->umred) {
			err = unreg_umr(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to unregister MR\n");
		} else {
			err = destroy_mkey(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to destroy MKey\n");
		}
		if (err)
			return err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags);

		if (IS_ERR(mr))
			return PTR_ERR(mr);

		mr->umred = 0;
	} else {
		/*
		 * Send a UMR WQE
		 */
		err = rereg_umr(pd, mr, addr, len, npages, page_shift,
				order, access_flags, flags);
		if (err) {
			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
			return err;
		}
	}

	if (flags & IB_MR_REREG_PD) {
		ib_mr->pd = pd;
		mr->mmkey.pd = to_mpd(pd)->pdn;
	}

	if (flags & IB_MR_REREG_ACCESS)
		mr->access_flags = access_flags;

	if (flags & IB_MR_REREG_TRANS) {
		atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
		set_mr_fields(dev, mr, npages, len, access_flags);
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
	}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return 0;
}

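/*
 * Allocate and DMA-map the descriptor list used by mlx5_ib_map_mr_sg();
 * the buffer is over-allocated so the descriptors can be aligned to
 * MLX5_UMR_ALIGN.
 */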
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dma_device, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dma_device, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dma_device, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

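/*
 * Release everything attached to an MR except its umem: signature PSVs,
 * private descriptors and the mkey itself. Cached (UMR-allocated) mkeys are
 * invalidated and returned to the cache instead of being destroyed.
 */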
static int clean_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int umred = mr->umred;
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!umred) {
		err = destroy_mkey(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmkey.key, err);
			return err;
		}
	} else {
		err = unreg_umr(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed unregister\n");
			return err;
		}
		free_cached_mr(dev, mr);
	}

	if (!umred)
		kfree(mr);

	return 0;
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
					 ib_umem_end(umem));
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	return 0;
}

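/*
 * ib_alloc_mr handler: create a UMR-enabled mkey for fast registration.
 * IB_MR_TYPE_MEM_REG uses MTT translation with a private descriptor list;
 * IB_MR_TYPE_SIGNATURE uses KLM mode and allocates the memory and wire PSVs
 * needed for signature offload.
 */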
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int access_mode, err;
	int ndescs = roundup(max_num_sg, 4);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	in->seg.status = MLX5_MKEY_STATUS_FREE;
	in->seg.xlt_oct_size = cpu_to_be32(ndescs);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		access_mode = MLX5_ACCESS_MODE_MTT;
		in->seg.log2_page_size = PAGE_SHIFT;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(u64));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(u64);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
					       MLX5_MKEY_BSF_EN);
		in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		access_mode = MLX5_ACCESS_MODE_KLM;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in),
				    NULL, NULL, NULL);
	if (err)
		goto err_destroy_psv;

	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

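/*
 * Page-list construction for fast registration: ib_sg_to_pages() calls
 * mlx5_set_page() for each page, filling the MR's private descriptor array,
 * which mlx5_ib_map_mr_sg() syncs for device access around the conversion.
 */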
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
		      struct scatterlist *sg,
		      int sg_nents)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}