/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"
enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048

static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);

static bool umr_can_use_indirect_mkey(struct mlx5_ib_dev *dev)
{
	return !MLX5_CAP_GEN(dev->mdev, umr_indirect_mkey_disabled);
}
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		/* Wait until all page fault handlers using the mr complete. */
		synchronize_srcu(&dev->mr_srcu);

	return err;
}
static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}
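/*
 * A cached mkey of order N covers (1 << N) pages of
 * MLX5_ADAPTER_PAGE_SIZE bytes. The check below allows reusing such
 * an mkey through a UMR MTT update only while the new range, i.e.
 * its length plus the offset of 'start' within the first adapter
 * page, still fits inside that coverage.
 */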
static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (is_odp_mr(mr)) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		to_ib_umem_odp(mr->umem)->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
static void reg_mr_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5_ib_mr *mr =
		container_of(context, struct mlx5_ib_mr, cb_work);
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct xarray *mkeys = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(MLX5_GET(create_mkey_out, mr->out, mkey_index)) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	xa_lock_irqsave(mkeys, flags);
	err = xa_err(__xa_store(mkeys, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey, GFP_ATOMIC));
	xa_unlock_irqrestore(mkeys, flags);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);

	if (!completion_done(&ent->compl))
		complete(&ent->compl);
}
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err = 0;
	int i;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->allocated_from_cache = 1;
		mr->dev = dev;

		MLX5_SET(mkc, mkc, free, 1);
		MLX5_SET(mkc, mkc, umr_en, 1);
		MLX5_SET(mkc, mkc, access_mode_1_0, ent->access_mode & 0x3);
		MLX5_SET(mkc, mkc, access_mode_4_2,
			 (ent->access_mode >> 2) & 0x7);

		MLX5_SET(mkc, mkc, qpn, 0xffffff);
		MLX5_SET(mkc, mkc, translations_octword_size, ent->xlt);
		MLX5_SET(mkc, mkc, log_page_size, ent->page);

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey_cb(dev->mdev, &mr->mmkey,
					       &dev->async_ctx, in, inlen,
					       mr->out, sizeof(mr->out),
					       reg_mr_callback, &mr->cb_work);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
		synchronize_srcu(&dev->mr_srcu);

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}
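/*
 * The debugfs handlers below expose each cache entry as a directory
 * with writable "size" and "limit" files: writing "size" grows or
 * shrinks the pool via add_keys()/remove_keys(), and writing "limit"
 * adjusts the low-water mark, refilling the pool when it runs short.
 */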
static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}
static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};
static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20] = {0};
	u32 var;
	int err;
	int c;

	count = min(count, sizeof(lbuf) - 1);
	if (copy_from_user(lbuf, buf, count))
		return -EFAULT;

	c = order2idx(dev, ent->order);

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	return simple_read_from_buffer(buf, count, pos, lbuf, err);
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};
static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as garbage collection
		 * task. Such task is intended to be run when no other active
		 * processes are running.
		 *
		 * The need_resched() will return TRUE if there are user tasks
		 * to be activated in near future.
		 *
		 * In such case, we don't execute remove_keys() and postpone
		 * the garbage collection work to try to run in next cycle,
		 * in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}
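/*
 * Both the immediate (work) and the delayed (dwork) items funnel
 * into __cache_work_func(); the delayed variant implements the
 * EAGAIN backoff, command-failure retry and garbage-collection
 * rescheduling used above.
 */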
static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}
struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, int entry)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct mlx5_ib_mr *mr;
	int err;

	if (entry < 0 || entry >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_err(dev, "cache entry %d is out of range\n", entry);
		return ERR_PTR(-EINVAL);
	}

	ent = &cache->ent[entry];
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);

			err = add_keys(dev, entry, 1);
			if (err && err != -EAGAIN)
				return ERR_PTR(err);

			wait_for_completion(&ent->compl);
		} else {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			return mr;
		}
	}
}
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int last_umr_cache_entry;
	int c;
	int i;

	c = order2idx(dev, order);
	last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
	if (c < 0 || c > last_umr_cache_entry) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i <= last_umr_cache_entry; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}
void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	if (!mr->allocated_from_cache)
		return;

	c = order2idx(dev, mr->order);
	WARN_ON(c < 0 || c >= MAX_MR_CACHE_ENTRIES);

	if (unreg_umr(dev, mr)) {
		mr->allocated_from_cache = false;
		destroy_mkey(dev, mr);
		ent = &cache->ent[c];
		if (ent->cur < ent->limit)
			queue_work(cache->wq, &ent->work);
		return;
	}

	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}
static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *tmp_mr;
	struct mlx5_ib_mr *mr;
	LIST_HEAD(del_list);

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			break;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	synchronize_srcu(&dev->mr_srcu);
#endif

	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}
static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	debugfs_remove_recursive(dev->cache.root);
	dev->cache.root = NULL;
}
static void mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	struct dentry *dir;
	int i;

	if (!mlx5_debugfs_root || dev->is_rep)
		return;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		dir = debugfs_create_dir(ent->name, cache->root);
		debugfs_create_file("size", 0600, dir, ent, &size_fops);
		debugfs_create_file("limit", 0600, dir, ent, &limit_fops);
		debugfs_create_u32("cur", 0400, dir, &ent->cur);
		debugfs_create_u32("miss", 0600, dir, &ent->miss);
	}
}
static void delay_time_func(struct timer_list *t)
{
	struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);

	dev->fill_delay = 0;
}
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	mutex_init(&dev->slow_path_mutex);
	cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM);
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	mlx5_cmd_init_async_ctx(dev->mdev, &dev->async_ctx);
	timer_setup(&dev->delay_timer, delay_time_func, 0);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;
		ent->limit = 0;

		init_completion(&ent->compl);
		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);

		if (i > MR_CACHE_LAST_STD_ENTRY) {
			mlx5_odp_init_mr_cache_entry(ent);
			continue;
		}

		if (ent->order > mr_cache_max_order(dev))
			continue;

		ent->page = PAGE_SHIFT;
		ent->xlt = (1 << ent->order) * sizeof(struct mlx5_mtt) /
			   MLX5_IB_UMR_OCTOWORD;
		ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
		if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
		    !dev->is_rep &&
		    mlx5_core_is_pf(dev->mdev))
			ent->limit = dev->mdev->profile->mr_cache[i].limit;
		else
			ent->limit = 0;
		queue_work(cache->wq, &ent->work);
	}

	mlx5_mr_cache_debugfs_init(dev);

	return 0;
}
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	if (!dev->cache.wq)
		return 0;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);
	mlx5_cmd_cleanup_async_ctx(&dev->async_ctx);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_mr *mr;
	void *mkc;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, a, !!(acc & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(acc & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(acc & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(acc & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);

	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, 0);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_in;

	kfree(in);
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}
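/*
 * Translation sizes are expressed in octowords (16 bytes). An MTT
 * entry is 8 bytes, so one octoword holds two entries; for example,
 * a page-aligned 3-page region yields npages = 3 and therefore
 * (3 + 1) / 2 = 2 octowords.
 */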
static int get_octo_len(u64 addr, u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> page_shift;
	return (npages + 1) / 2;
}
static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
		return MR_CACHE_LAST_STD_ENTRY + 2;
	return MLX5_MAX_UMR_SHIFT;
}
static int mr_umem_get(struct mlx5_ib_dev *dev, struct ib_udata *udata,
		       u64 start, u64 length, int access_flags,
		       struct ib_umem **umem, int *npages, int *page_shift,
		       int *ncont, int *order)
{
	struct ib_umem *u;

	*umem = NULL;

	if (access_flags & IB_ACCESS_ON_DEMAND) {
		struct ib_umem_odp *odp;

		odp = ib_umem_odp_get(udata, start, length, access_flags);
		if (IS_ERR(odp)) {
			mlx5_ib_dbg(dev, "umem get failed (%ld)\n",
				    PTR_ERR(odp));
			return PTR_ERR(odp);
		}

		u = &odp->umem;

		*page_shift = odp->page_shift;
		*ncont = ib_umem_odp_num_pages(odp);
		*npages = *ncont << (*page_shift - PAGE_SHIFT);
		if (order)
			*order = ilog2(roundup_pow_of_two(*ncont));
	} else {
		u = ib_umem_get(udata, start, length, access_flags, 0);
		if (IS_ERR(u)) {
			mlx5_ib_dbg(dev, "umem get failed (%ld)\n", PTR_ERR(u));
			return PTR_ERR(u);
		}

		mlx5_ib_cont_pages(u, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages,
				   page_shift, ncont, order);
	}

	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(u);
		return -EINVAL;
	}

	*umem = u;

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return 0;
}
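/*
 * UMR work requests are posted on the driver's dedicated QP and
 * carry a CQE whose handler below records the work-completion status
 * and wakes the waiter in mlx5_ib_post_send_wait().
 */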
static void mlx5_ib_umr_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct mlx5_ib_umr_context *context =
		container_of(wc->wr_cqe, struct mlx5_ib_umr_context, cqe);

	context->status = wc->status;
	complete(&context->done);
}

static inline void mlx5_ib_init_umr_context(struct mlx5_ib_umr_context *context)
{
	context->cqe.done = mlx5_ib_umr_done;
	context->status = -1;
	init_completion(&context->done);
}

static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
				  struct mlx5_umr_wr *umrwr)
{
	struct umr_common *umrc = &dev->umrc;
	const struct ib_send_wr *bad;
	int err;
	struct mlx5_ib_umr_context umr_context;

	mlx5_ib_init_umr_context(&umr_context);
	umrwr->wr.wr_cqe = &umr_context.cqe;

	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr->wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "UMR post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}
	up(&umrc->sem);
	return err;
}
static struct mlx5_ib_mr *alloc_mr_from_cache(
				  struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	mr->ibmr.pd = pd;
	mr->umem = umem;
	mr->access_flags = access_flags;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	return mr;
}
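/*
 * populate_xlt() fills one chunk of an XLT buffer: KLM descriptors
 * for indirect (ODP) mkeys, otherwise MTT entries taken from the
 * umem page list. With MLX5_IB_UPD_XLT_ZAP the pre-zeroed buffer is
 * left untouched so that the corresponding translation entries are
 * invalidated.
 */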
static inline int populate_xlt(struct mlx5_ib_mr *mr, int idx, int npages,
			       void *xlt, int page_shift, size_t size,
			       int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct ib_umem *umem = mr->umem;

	if (flags & MLX5_IB_UPD_XLT_INDIRECT) {
		if (!umr_can_use_indirect_mkey(dev))
			return -EPERM;
		mlx5_odp_populate_klm(xlt, idx, npages, mr, flags);
		return npages;
	}

	npages = min_t(size_t, npages, ib_umem_num_pages(umem) - idx);

	if (!(flags & MLX5_IB_UPD_XLT_ZAP)) {
		__mlx5_ib_populate_pas(dev, umem, page_shift,
				       idx, npages, xlt,
				       MLX5_IB_MTT_PRESENT);
		/* Clear padding after the pages
		 * brought from the umem.
		 */
		memset(xlt + (npages * sizeof(struct mlx5_mtt)), 0,
		       size - npages * sizeof(struct mlx5_mtt));
	}

	return npages;
}
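/*
 * UMR moves translation entries in MLX5_UMR_MTT_ALIGNMENT-sized
 * units: MLX5_MAX_UMR_CHUNK is the largest aligned payload a single
 * UMR operation can carry, and MLX5_SPARE_UMR_CHUNK (64KB) is the
 * smaller fallback tried when the full-size allocation fails.
 */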
#define MLX5_MAX_UMR_CHUNK ((1 << (MLX5_MAX_UMR_SHIFT + 4)) - \
			    MLX5_UMR_MTT_ALIGNMENT)
#define MLX5_SPARE_UMR_CHUNK 0x10000

int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dev.parent;
	int size;
	void *xlt;
	dma_addr_t dma;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	int desc_size = (flags & MLX5_IB_UPD_XLT_INDIRECT)
			       ? sizeof(struct mlx5_klm)
			       : sizeof(struct mlx5_mtt);
	const int page_align = MLX5_UMR_MTT_ALIGNMENT / desc_size;
	const int page_mask = page_align - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	gfp_t gfp;
	bool use_emergency_page = false;

	if ((flags & MLX5_IB_UPD_XLT_INDIRECT) &&
	    !umr_can_use_indirect_mkey(dev))
		return -EPERM;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly
	 */
	if (idx & page_mask) {
		npages += idx & page_mask;
		idx &= ~page_mask;
	}

	gfp = flags & MLX5_IB_UPD_XLT_ATOMIC ? GFP_ATOMIC : GFP_KERNEL;
	gfp |= __GFP_ZERO | __GFP_NOWARN;

	pages_to_map = ALIGN(npages, page_align);
	size = desc_size * pages_to_map;
	size = min_t(int, size, MLX5_MAX_UMR_CHUNK);

	xlt = (void *)__get_free_pages(gfp, get_order(size));
	if (!xlt && size > MLX5_SPARE_UMR_CHUNK) {
		mlx5_ib_dbg(dev, "Failed to allocate %d bytes of order %d. fallback to spare UMR allocation of %d bytes\n",
			    size, get_order(size), MLX5_SPARE_UMR_CHUNK);

		size = MLX5_SPARE_UMR_CHUNK;
		xlt = (void *)__get_free_pages(gfp, get_order(size));
	}

	if (!xlt) {
		mlx5_ib_warn(dev, "Using XLT emergency buffer\n");
		xlt = (void *)mlx5_ib_get_xlt_emergency_page();
		size = PAGE_SIZE;
		memset(xlt, 0, size);
		use_emergency_page = true;
	}
	pages_iter = size / desc_size;
	dma = dma_map_single(ddev, xlt, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during XLT update.\n");
		err = -ENOMEM;
		goto free_xlt;
	}

	sg.addr = dma;
	sg.lkey = dev->umrc.pd->local_dma_lkey;

	memset(&wr, 0, sizeof(wr));
	wr.wr.send_flags = MLX5_IB_SEND_UMR_UPDATE_XLT;
	if (!(flags & MLX5_IB_UPD_XLT_ENABLE))
		wr.wr.send_flags |= MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr.wr.sg_list = &sg;
	wr.wr.num_sge = 1;
	wr.wr.opcode = MLX5_IB_WR_UMR;

	wr.pd = mr->ibmr.pd;
	wr.mkey = mr->mmkey.key;
	wr.length = mr->mmkey.size;
	wr.virt_addr = mr->mmkey.iova;
	wr.access_flags = mr->access_flags;
	wr.page_shift = page_shift;

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, idx += pages_iter) {
		npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);
		npages = populate_xlt(mr, idx, npages, xlt,
				      page_shift, size, flags);

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		sg.length = ALIGN(npages * desc_size,
				  MLX5_UMR_MTT_ALIGNMENT);

		if (pages_mapped + pages_iter >= pages_to_map) {
			if (flags & MLX5_IB_UPD_XLT_ENABLE)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_ENABLE_MR |
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS |
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
			if (flags & MLX5_IB_UPD_XLT_PD ||
			    flags & MLX5_IB_UPD_XLT_ACCESS)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
			if (flags & MLX5_IB_UPD_XLT_ADDR)
				wr.wr.send_flags |=
					MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
		}

		wr.offset = idx * desc_size;
		wr.xlt_size = sg.length;

		err = mlx5_ib_post_send_wait(dev, &wr);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_xlt:
	if (use_emergency_page)
		mlx5_ib_put_xlt_emergency_page();
	else
		free_pages((unsigned long)xlt, get_order(size));

	return err;
}
/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags,
				     bool populate)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr;
	__be64 *pas;
	void *mkc;
	int inlen;
	u32 *in;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->access_flags = access_flags;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	if (populate)
		inlen += sizeof(*pas) * roundup(npages, 2);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
		mlx5_ib_populate_pas(dev, umem, page_shift, pas,
				     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The pg_access bit allows setting the access flags
	 * in the page list submitted with the command. */
	MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, free, !populate);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
	MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
	MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
	MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);

	MLX5_SET64(mkc, mkc, start_addr, virt_addr);
	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, bsf_octword_size, 0);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(virt_addr, length, page_shift));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	if (populate) {
		MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
			 get_octo_len(virt_addr, length, page_shift));
	}

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->mmkey.type = MLX5_MKEY_MR;
	mr->desc_size = sizeof(struct mlx5_mtt);
	mr->dev = dev;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}
*mlx5_ib_get_dm_mr(struct ib_pd
*pd
, u64 start_addr
,
1174 u64 length
, int acc
, int mode
)
1176 struct mlx5_ib_dev
*dev
= to_mdev(pd
->device
);
1177 int inlen
= MLX5_ST_SZ_BYTES(create_mkey_in
);
1178 struct mlx5_core_dev
*mdev
= dev
->mdev
;
1179 struct mlx5_ib_mr
*mr
;
1184 mr
= kzalloc(sizeof(*mr
), GFP_KERNEL
);
1186 return ERR_PTR(-ENOMEM
);
1188 in
= kzalloc(inlen
, GFP_KERNEL
);
1194 mkc
= MLX5_ADDR_OF(create_mkey_in
, in
, memory_key_mkey_entry
);
1196 MLX5_SET(mkc
, mkc
, access_mode_1_0
, mode
& 0x3);
1197 MLX5_SET(mkc
, mkc
, access_mode_4_2
, (mode
>> 2) & 0x7);
1198 MLX5_SET(mkc
, mkc
, a
, !!(acc
& IB_ACCESS_REMOTE_ATOMIC
));
1199 MLX5_SET(mkc
, mkc
, rw
, !!(acc
& IB_ACCESS_REMOTE_WRITE
));
1200 MLX5_SET(mkc
, mkc
, rr
, !!(acc
& IB_ACCESS_REMOTE_READ
));
1201 MLX5_SET(mkc
, mkc
, lw
, !!(acc
& IB_ACCESS_LOCAL_WRITE
));
1202 MLX5_SET(mkc
, mkc
, lr
, 1);
1204 MLX5_SET64(mkc
, mkc
, len
, length
);
1205 MLX5_SET(mkc
, mkc
, pd
, to_mpd(pd
)->pdn
);
1206 MLX5_SET(mkc
, mkc
, qpn
, 0xffffff);
1207 MLX5_SET64(mkc
, mkc
, start_addr
, start_addr
);
1209 err
= mlx5_core_create_mkey(mdev
, &mr
->mmkey
, in
, inlen
);
1216 set_mr_fields(dev
, mr
, 0, length
, acc
);
1226 return ERR_PTR(err
);
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs)
{
	if (advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH &&
	    advice != IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE)
		return -EOPNOTSUPP;

	return mlx5_ib_advise_mr_prefetch(pd, advice, flags,
					  sg_list, num_sge);
}
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_dm *mdm = to_mdm(dm);
	struct mlx5_core_dev *dev = to_mdev(dm->device)->mdev;
	u64 start_addr = mdm->dev_addr + attr->offset;
	int mode;

	switch (mdm->type) {
	case MLX5_IB_UAPI_DM_TYPE_MEMIC:
		if (attr->access_flags & ~MLX5_IB_DM_MEMIC_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_MEMIC;
		start_addr -= pci_resource_start(dev->pdev, 0);
		break;
	case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
	case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
		if (attr->access_flags & ~MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS)
			return ERR_PTR(-EINVAL);

		mode = MLX5_MKC_ACCESS_MODE_SW_ICM;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	return mlx5_ib_get_dm_mr(pd, start_addr, attr->length,
				 attr->access_flags, mode);
}
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	bool use_umr;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	if (!IS_ENABLED(CONFIG_INFINIBAND_USER_MEM))
		return ERR_PTR(-EOPNOTSUPP);

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && !start &&
	    length == U64_MAX) {
		if (!(access_flags & IB_ACCESS_ON_DEMAND) ||
		    !(dev->odp_caps.general_caps & IB_ODP_SUPPORT_IMPLICIT))
			return ERR_PTR(-EINVAL);

		mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
		if (IS_ERR(mr))
			return ERR_CAST(mr);
		return &mr->ibmr;
	}

	err = mr_umem_get(dev, udata, start, length, access_flags, &umem,
			  &npages, &page_shift, &ncont, &order);

	if (err < 0)
		return ERR_PTR(err);

	use_umr = mlx5_ib_can_use_umr(dev, true);

	if (order <= mr_cache_max_order(dev) && use_umr) {
		mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
					 page_shift, order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
			mr = NULL;
		}
	} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
		if (access_flags & IB_ACCESS_ON_DEMAND) {
			err = -EINVAL;
			pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
			goto error;
		}
		use_umr = false;
	}

	if (!mr) {
		mutex_lock(&dev->slow_path_mutex);
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags, !use_umr);
		mutex_unlock(&dev->slow_path_mutex);
	}

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fields(dev, mr, npages, length, access_flags);

	update_odp_mr(mr);

	if (use_umr) {
		int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;

		if (access_flags & IB_ACCESS_ON_DEMAND)
			update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;

		err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
					 update_xlt_flags);
		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		mr->live = 1;
		atomic_set(&mr->num_pending_prefetch, 0);
	}

	return &mr->ibmr;
error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}
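/*
 * unreg_umr() revokes a UMR-capable mkey by posting a UMR WQE that
 * disables it and points it back at the driver's own PD, allowing a
 * cached mkey to be recycled. It is skipped when the device is in
 * internal error state, since no WQE would complete.
 */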
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_umr_wr umrwr = {};

	if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |
			      MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.pd = dev->umrc.pd;
	umrwr.mkey = mr->mmkey.key;
	umrwr.ignore_free_state = 1;

	return mlx5_ib_post_send_wait(dev, &umrwr);
}
static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr umrwr = {};
	int err;

	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	umrwr.wr.opcode = MLX5_IB_WR_UMR;
	umrwr.mkey = mr->mmkey.key;

	if (flags & IB_MR_REREG_PD || flags & IB_MR_REREG_ACCESS) {
		umrwr.pd = pd;
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS;
	}

	err = mlx5_ib_post_send_wait(dev, &umrwr);

	return err;
}
int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	int page_shift = 0;
	int upd_flags = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	u64 addr, len;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);

	if (!mr->umem)
		return -EINVAL;

	if (flags & IB_MR_REREG_TRANS) {
		addr = virt_addr;
		len = length;
	} else {
		addr = mr->umem->address;
		len = mr->umem->length;
	}

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = NULL;
		err = mr_umem_get(dev, udata, addr, len, access_flags,
				  &mr->umem, &npages, &page_shift, &ncont,
				  &order);
		if (err)
			goto err;
	}

	if (!mlx5_ib_can_use_umr(dev, true) ||
	    (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len))) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->allocated_from_cache)
			err = unreg_umr(dev, mr);
		else
			err = destroy_mkey(dev, mr);
		if (err)
			goto err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags, true);

		if (IS_ERR(mr)) {
			err = PTR_ERR(mr);
			mr = to_mmr(ib_mr);
			goto err;
		}

		mr->allocated_from_cache = 0;
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			mr->live = 1;
	} else {
		/*
		 * Send a UMR WQE
		 */
		mr->ibmr.pd = pd;
		mr->access_flags = access_flags;
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
		mr->mmkey.pd = to_mpd(pd)->pdn;

		if (flags & IB_MR_REREG_TRANS) {
			upd_flags = MLX5_IB_UPD_XLT_ADDR;
			if (flags & IB_MR_REREG_PD)
				upd_flags |= MLX5_IB_UPD_XLT_PD;
			if (flags & IB_MR_REREG_ACCESS)
				upd_flags |= MLX5_IB_UPD_XLT_ACCESS;
			err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
						 upd_flags);
		} else {
			err = rereg_umr(pd, mr, access_flags, flags);
		}

		if (err)
			goto err;
	}

	set_mr_fields(dev, mr, npages, len, access_flags);

	return 0;

err:
	ib_umem_release(mr->umem);
	mr->umem = NULL;

	clean_mr(dev, mr);
	return err;
}
static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}
static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dev.parent, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}
static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int allocated_from_cache = mr->allocated_from_cache;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	if (!allocated_from_cache) {
		destroy_mkey(dev, mr);
		mlx5_free_priv_descs(mr);
	}
}
static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

	if (is_odp_mr(mr)) {
		struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);

		/* Prevent new page faults and
		 * prefetch requests from succeeding
		 */
		mr->live = 0;

		/* dequeue pending prefetch requests for the mr */
		if (atomic_read(&mr->num_pending_prefetch))
			flush_workqueue(system_unbound_wq);
		WARN_ON(atomic_read(&mr->num_pending_prefetch));

		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		if (!umem_odp->is_implicit_odp)
			mlx5_ib_invalidate_range(umem_odp,
						 ib_umem_start(umem_odp),
						 ib_umem_end(umem_odp));
		else
			mlx5_ib_free_implicit_mr(mr);
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_odp_release(umem_odp);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}

	clean_mr(dev, mr);

	/*
	 * We should unregister the DMA address from the HCA before
	 * removing the DMA mapping.
	 */
	mlx5_mr_cache_free(dev, mr);
	ib_umem_release(umem);
	if (umem)
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

	if (!mr->allocated_from_cache)
		kfree(mr);
}
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);

	if (ibmr->type == IB_MR_TYPE_INTEGRITY) {
		dereg_mr(to_mdev(mmr->mtt_mr->ibmr.device), mmr->mtt_mr);
		dereg_mr(to_mdev(mmr->klm_mr->ibmr.device), mmr->klm_mr);
	}

	dereg_mr(to_mdev(ibmr->device), mmr);

	return 0;
}
static void mlx5_set_umr_free_mkey(struct ib_pd *pd, u32 *in, int ndescs,
				   int access_mode, int page_shift)
{
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, access_mode_1_0, access_mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, (access_mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, log_page_size, page_shift);
}
static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				  int ndescs, int desc_size, int page_shift,
				  int access_mode, u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int err;

	mr->access_mode = access_mode;
	mr->desc_size = desc_size;
	mr->max_descs = ndescs;

	err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
	if (err)
		return err;

	mlx5_set_umr_free_mkey(pd, in, ndescs, access_mode, page_shift);

	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
	if (err)
		goto err_free_descs;

	mr->mmkey.type = MLX5_MKEY_MR;
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;

	return 0;

err_free_descs:
	mlx5_free_priv_descs(mr);
	return err;
}
static struct mlx5_ib_mr *mlx5_ib_alloc_pi_mr(struct ib_pd *pd,
				u32 max_num_sg, u32 max_num_meta_sg,
				int desc_size, int access_mode)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg + max_num_meta_sg, 4);
	int page_shift = 0;
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->ibmr.pd = pd;
	mr->ibmr.device = pd->device;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	if (access_mode == MLX5_MKC_ACCESS_MODE_MTT)
		page_shift = PAGE_SHIFT;

	err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
				     access_mode, in, inlen);
	if (err)
		goto err_free_in;

	mr->umem = NULL;
	kfree(in);

	return mr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
				      PAGE_SHIFT, MLX5_MKC_ACCESS_MODE_MTT, in,
				      inlen);
}

static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				    int ndescs, u32 *in, int inlen)
{
	return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
				      0, MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
}
static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
				      int max_num_sg, int max_num_meta_sg,
				      u32 *in, int inlen)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	u32 psv_index[2];
	void *mkc;
	int err;

	mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
	if (!mr->sig)
		return -ENOMEM;

	/* create mem & wire PSVs */
	err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn, 2, psv_index);
	if (err)
		goto err_free_sig;

	mr->sig->psv_memory.psv_idx = psv_index[0];
	mr->sig->psv_wire.psv_idx = psv_index[1];

	mr->sig->sig_status_checked = true;
	mr->sig->sig_err_exists = false;
	/* Next UMR, Arm SIGERR */
	++mr->sig->sigerr_count;
	mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_klm),
					 MLX5_MKC_ACCESS_MODE_KLMS);
	if (IS_ERR(mr->klm_mr)) {
		err = PTR_ERR(mr->klm_mr);
		goto err_destroy_psv;
	}
	mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
					 sizeof(struct mlx5_mtt),
					 MLX5_MKC_ACCESS_MODE_MTT);
	if (IS_ERR(mr->mtt_mr)) {
		err = PTR_ERR(mr->mtt_mr);
		goto err_free_klm_mr;
	}

	/* Set bsf descriptors for mkey */
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, bsf_en, 1);
	MLX5_SET(mkc, mkc, bsf_octword_size, MLX5_MKEY_BSF_OCTO_SIZE);

	err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
				     MLX5_MKC_ACCESS_MODE_KLMS, in, inlen);
	if (err)
		goto err_free_mtt_mr;

	return 0;

err_free_mtt_mr:
	dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
	mr->mtt_mr = NULL;
err_free_klm_mr:
	dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
	mr->klm_mr = NULL;
err_destroy_psv:
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
			     mr->sig->psv_memory.psv_idx);
	if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
		mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
			     mr->sig->psv_wire.psv_idx);
err_free_sig:
	kfree(mr->sig);
	mr->sig = NULL;

	return err;
}
static struct ib_mr *__mlx5_ib_alloc_mr(struct ib_pd *pd,
					enum ib_mr_type mr_type, u32 max_num_sg,
					u32 max_num_meta_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	int ndescs = ALIGN(max_num_sg, 4);
	struct mlx5_ib_mr *mr;
	u32 *in;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	mr->ibmr.device = pd->device;
	mr->umem = NULL;

	switch (mr_type) {
	case IB_MR_TYPE_MEM_REG:
		err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_SG_GAPS:
		err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
		break;
	case IB_MR_TYPE_INTEGRITY:
		err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
						 max_num_meta_sg, in, inlen);
		break;
	default:
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
	}

	if (err)
		goto err_free_in;

	kfree(in);

	return &mr->ibmr;

err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg, struct ib_udata *udata)
{
	return __mlx5_ib_alloc_mr(pd, mr_type, max_num_sg, 0);
}

struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg, u32 max_num_meta_sg)
{
	return __mlx5_ib_alloc_mr(pd, IB_MR_TYPE_INTEGRITY, max_num_sg,
				  max_num_meta_sg);
}
struct ib_mw *mlx5_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
			       struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	struct mlx5_ib_mw *mw = NULL;
	u32 *in = NULL;
	void *mkc;
	int ndescs;
	int err;
	struct mlx5_ib_alloc_mw req = {};
	struct {
		__u32	comp_mask;
		__u32	response_length;
	} resp = {};

	err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req)));
	if (err)
		return ERR_PTR(err);

	if (req.comp_mask || req.reserved1 || req.reserved2)
		return ERR_PTR(-EOPNOTSUPP);

	if (udata->inlen > sizeof(req) &&
	    !ib_is_udata_cleared(udata, sizeof(req),
				 udata->inlen - sizeof(req)))
		return ERR_PTR(-EOPNOTSUPP);

	ndescs = req.num_klms ? roundup(req.num_klms, 4) : roundup(1, 4);

	mw = kzalloc(sizeof(*mw), GFP_KERNEL);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!mw || !in) {
		err = -ENOMEM;
		goto free;
	}

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, translations_octword_size, ndescs);
	MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, en_rinval, !!((type == IB_MW_TYPE_2)));
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_core_create_mkey(dev->mdev, &mw->mmkey, in, inlen);
	if (err)
		goto free;

	mw->mmkey.type = MLX5_MKEY_MW;
	mw->ibmw.rkey = mw->mmkey.key;
	mw->ndescs = ndescs;

	resp.response_length = min(offsetof(typeof(resp), response_length) +
				   sizeof(resp.response_length), udata->outlen);
	if (resp.response_length) {
		err = ib_copy_to_udata(udata, &resp, resp.response_length);
		if (err) {
			mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
			goto free;
		}
	}

	kfree(in);
	return &mw->ibmw;

free:
	kfree(mw);
	kfree(in);
	return ERR_PTR(err);
}
int mlx5_ib_dealloc_mw(struct ib_mw *mw)
{
	struct mlx5_ib_mw *mmw = to_mmw(mw);
	int err;

	err = mlx5_core_destroy_mkey((to_mdev(mw->device))->mdev,
				     &mmw->mmkey);
	if (err)
		return err;

	kfree(mmw);
	return 0;
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}
static int
mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			int data_sg_nents, unsigned int *data_sg_offset,
			struct scatterlist *meta_sg, int meta_sg_nents,
			unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	unsigned int sg_offset = 0;
	int n = 0;

	mr->meta_length = 0;
	if (data_sg_nents == 1) {
		n++;
		mr->ndescs = 1;
		if (data_sg_offset)
			sg_offset = *data_sg_offset;
		mr->data_length = sg_dma_len(data_sg) - sg_offset;
		mr->data_iova = sg_dma_address(data_sg) + sg_offset;
		if (meta_sg_nents == 1) {
			n++;
			mr->meta_ndescs = 1;
			if (meta_sg_offset)
				sg_offset = *meta_sg_offset;
			else
				sg_offset = 0;
			mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
			mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
		}
		ibmr->length = mr->data_length + mr->meta_length;
	}

	return n;
}
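/*
 * mlx5_ib_sg_to_klms() maps the data (and optionally metadata)
 * scatterlists to KLM descriptors. Each KLM carries the DMA address,
 * the byte count and the PD's local_dma_lkey; a caller-supplied
 * sg_offset is consumed by the first entry of each list and reset to
 * zero for the rest.
 */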
static int
mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
		   struct scatterlist *sgl,
		   unsigned short sg_nents,
		   unsigned int *sg_offset_p,
		   struct scatterlist *meta_sgl,
		   unsigned short meta_sg_nents,
		   unsigned int *meta_sg_offset_p)
{
	struct scatterlist *sg = sgl;
	struct mlx5_klm *klms = mr->descs;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	u32 lkey = mr->ibmr.pd->local_dma_lkey;
	int i, j = 0;

	mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
	mr->ibmr.length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		if (unlikely(i >= mr->max_descs))
			break;
		klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset);
		klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset);
		klms[i].key = cpu_to_be32(lkey);
		mr->ibmr.length += sg_dma_len(sg) - sg_offset;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = sg_offset;

	mr->ndescs = i;
	mr->data_length = mr->ibmr.length;

	if (meta_sg_nents) {
		sg = meta_sgl;
		sg_offset = meta_sg_offset_p ? *meta_sg_offset_p : 0;
		for_each_sg(meta_sgl, sg, meta_sg_nents, j) {
			if (unlikely(i + j >= mr->max_descs))
				break;
			klms[i + j].va = cpu_to_be64(sg_dma_address(sg) +
						     sg_offset);
			klms[i + j].bcount = cpu_to_be32(sg_dma_len(sg) -
							 sg_offset);
			klms[i + j].key = cpu_to_be32(lkey);
			mr->ibmr.length += sg_dma_len(sg) - sg_offset;

			sg_offset = 0;
		}
		if (meta_sg_offset_p)
			*meta_sg_offset_p = sg_offset;

		mr->meta_ndescs = j;
		mr->meta_length = mr->ibmr.length - mr->data_length;
	}

	return i + j;
}
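/*
 * ib_sg_to_pages() callbacks: each page address is written into the
 * private descriptor array with the read/write enable bits OR-ed in;
 * the PI variant appends after the data descriptors so that data and
 * metadata share one array.
 */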
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}
static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs + mr->meta_ndescs++] =
		cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}
static int
mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
	int n;

	pi_mr->ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	pi_mr->ibmr.page_size = ibmr->page_size;
	n = ib_sg_to_pages(&pi_mr->ibmr, data_sg, data_sg_nents, data_sg_offset,
			   mlx5_set_page);
	if (n != data_sg_nents)
		return n;

	pi_mr->data_iova = pi_mr->ibmr.iova;
	pi_mr->data_length = pi_mr->ibmr.length;
	pi_mr->ibmr.length = pi_mr->data_length;
	ibmr->length = pi_mr->data_length;

	if (meta_sg_nents) {
		u64 page_mask = ~((u64)ibmr->page_size - 1);
		u64 iova = pi_mr->data_iova;

		n += ib_sg_to_pages(&pi_mr->ibmr, meta_sg, meta_sg_nents,
				    meta_sg_offset, mlx5_set_page_pi);

		pi_mr->meta_length = pi_mr->ibmr.length;
		/*
		 * PI address for the HW is the offset of the metadata address
		 * relative to the first data page address.
		 * It equals the first data page address + size of data pages +
		 * metadata offset at the first metadata page.
		 */
		pi_mr->pi_iova = (iova & page_mask) +
				 pi_mr->ndescs * ibmr->page_size +
				 (pi_mr->ibmr.iova & ~page_mask);
		/*
		 * In order to use one MTT MR for data and metadata, we register
		 * also the gaps between the end of the data and the start of
		 * the metadata (the sig MR will verify that the HW accesses
		 * the right addresses). This mapping is safe because we use
		 * an internal mkey for the registration.
		 */
		pi_mr->ibmr.length = pi_mr->pi_iova + pi_mr->meta_length - iova;
		pi_mr->ibmr.iova = iova;
		ibmr->length += pi_mr->meta_length;
	}

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}
static int
mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = mr->klm_mr;
	int n;

	pi_mr->ndescs = 0;
	pi_mr->meta_ndescs = 0;
	pi_mr->meta_length = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, pi_mr->desc_map,
				   pi_mr->desc_size * pi_mr->max_descs,
				   DMA_TO_DEVICE);

	n = mlx5_ib_sg_to_klms(pi_mr, data_sg, data_sg_nents, data_sg_offset,
			       meta_sg, meta_sg_nents, meta_sg_offset);

	ib_dma_sync_single_for_device(ibmr->device, pi_mr->desc_map,
				      pi_mr->desc_size * pi_mr->max_descs,
				      DMA_TO_DEVICE);

	/* This is zero-based memory region */
	pi_mr->data_iova = 0;
	pi_mr->ibmr.iova = 0;
	pi_mr->pi_iova = pi_mr->data_length;
	ibmr->length = pi_mr->ibmr.length;

	return n;
}
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct mlx5_ib_mr *pi_mr = NULL;
	int n;

	WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);

	mr->ndescs = 0;
	mr->data_length = 0;
	mr->data_iova = 0;
	mr->meta_ndescs = 0;
	mr->pi_iova = 0;
	/*
	 * As a performance optimization, if possible, there is no need to
	 * perform UMR operation to register the data/metadata buffers.
	 * First try to map the sg lists to PA descriptors with local_dma_lkey.
	 * Fallback to UMR only in case of a failure.
	 */
	n = mlx5_ib_map_pa_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				    data_sg_offset, meta_sg, meta_sg_nents,
				    meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;
	/*
	 * As a performance optimization, if possible, there is no need to map
	 * the sg lists to KLM descriptors. First try to map the sg lists to MTT
	 * descriptors and fallback to KLM only in case of a failure.
	 * It's more efficient for the HW to work with MTT descriptors
	 * (especially in high load).
	 * Use KLM (indirect access) only if it's mandatory.
	 */
	pi_mr = mr->mtt_mr;
	n = mlx5_ib_map_mtt_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (n == data_sg_nents + meta_sg_nents)
		goto out;

	pi_mr = mr->klm_mr;
	n = mlx5_ib_map_klm_mr_sg_pi(ibmr, data_sg, data_sg_nents,
				     data_sg_offset, meta_sg, meta_sg_nents,
				     meta_sg_offset);
	if (unlikely(n != data_sg_nents + meta_sg_nents))
		return -ENOMEM;

out:
	/* This is zero-based memory region */
	ibmr->iova = 0;
	mr->pi_mr = pi_mr;
	if (pi_mr)
		ibmr->sig_attrs->meta_length = pi_mr->meta_length;
	else
		ibmr->sig_attrs->meta_length = mr->meta_length;

	return 0;
}
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
				       NULL);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}