/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kref.h>
#include <linux/random.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/ib_verbs.h>
#include "mlx5_ib.h"

enum {
	MAX_PENDING_REG_MR = 8,
};

#define MLX5_UMR_ALIGN 2048
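/*
 * Fallback buffer for mlx5_ib_update_mtt(): used when the GFP_ATOMIC page
 * allocation in the invalidation path fails, serialized by the mutex below.
 */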
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static __be64 mlx5_ib_update_mtt_emergency_buffer[
		MLX5_UMR_MTT_MIN_CHUNK_SIZE/sizeof(__be64)]
	__aligned(MLX5_UMR_ALIGN);
static DEFINE_MUTEX(mlx5_ib_update_mtt_emergency_buffer_mutex);
#endif

static int clean_mr(struct mlx5_ib_mr *mr);

static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	int err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	/* Wait until all page fault handlers using the mr complete. */
	synchronize_srcu(&dev->mr_srcu);
#endif

	return err;
}

static int order2idx(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;

	if (order < cache->ent[0].order)
		return 0;
	else
		return order - cache->ent[0].order;
}

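/*
 * A cached mkey of a given order has MTT space for (1 << order) pages.
 * Rereg can reuse it with a UMR MTT update only if that space still covers
 * the new range (length plus the offset within the first page).
 */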
static bool use_umr_mtt_update(struct mlx5_ib_mr *mr, u64 start, u64 length)
{
	return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
		length + (start & (MLX5_ADAPTER_PAGE_SIZE - 1));
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
static void update_odp_mr(struct mlx5_ib_mr *mr)
{
	if (mr->umem->odp_data) {
		/*
		 * This barrier prevents the compiler from moving the
		 * setting of umem->odp_data->private to point to our
		 * MR, before reg_umr finished, to ensure that the MR
		 * initialization has finished before starting to
		 * handle invalidations.
		 */
		smp_wmb();
		mr->umem->odp_data->private = mr;
		/*
		 * Make sure we will see the new
		 * umem->odp_data->private value in the invalidation
		 * routines, before we can get page faults on the
		 * MR. Page faults can happen once we put the MR in
		 * the tree, below this line. Without the barrier,
		 * there can be a fault handling and an invalidation
		 * before umem->odp_data->private == mr is visible to
		 * the invalidation handler.
		 */
		smp_wmb();
	}
}
#endif

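/*
 * Completion callback for the asynchronous mkey creation issued by
 * add_keys(): on success it derives the mkey value, adds the MR to its
 * cache bucket and inserts the mkey into the device's mkey radix tree.
 */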
static void reg_mr_callback(int status, void *context)
{
	struct mlx5_ib_mr *mr = context;
	struct mlx5_ib_dev *dev = mr->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int c = order2idx(dev, mr->order);
	struct mlx5_cache_ent *ent = &cache->ent[c];
	u8 key;
	unsigned long flags;
	struct mlx5_mkey_table *table = &dev->mdev->priv.mkey_table;
	int err;

	spin_lock_irqsave(&ent->lock, flags);
	ent->pending--;
	spin_unlock_irqrestore(&ent->lock, flags);
	if (status) {
		mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	if (mr->out.hdr.status) {
		mlx5_ib_warn(dev, "failed - status %d, syndrome 0x%x\n",
			     mr->out.hdr.status,
			     be32_to_cpu(mr->out.hdr.syndrome));
		kfree(mr);
		dev->fill_delay = 1;
		mod_timer(&dev->delay_timer, jiffies + HZ);
		return;
	}

	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
	key = dev->mdev->priv.mkey_key++;
	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
	mr->mmkey.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

	cache->last_add = jiffies;

	spin_lock_irqsave(&ent->lock, flags);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	ent->size++;
	spin_unlock_irqrestore(&ent->lock, flags);

	write_lock_irqsave(&table->lock, flags);
	err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->mmkey.key),
				&mr->mmkey);
	if (err)
		pr_err("Error inserting to mkey tree. 0x%x\n", -err);
	write_unlock_irqrestore(&table->lock, flags);
}

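/*
 * Asynchronously create up to @num free mkeys for cache entry @c.
 * Completions are handled in reg_mr_callback(); at most MAX_PENDING_REG_MR
 * creations are kept in flight per entry.
 */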
static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int npages = 1 << ent->order;
	int err = 0;
	int i;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (i = 0; i < num; i++) {
		if (ent->pending >= MAX_PENDING_REG_MR) {
			err = -EAGAIN;
			break;
		}

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr) {
			err = -ENOMEM;
			break;
		}
		mr->order = ent->order;
		mr->umred = 1;
		mr->dev = dev;
		in->seg.status = MLX5_MKEY_STATUS_FREE;
		in->seg.xlt_oct_size = cpu_to_be32((npages + 1) / 2);
		in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
		in->seg.flags = MLX5_ACCESS_MODE_MTT | MLX5_PERM_UMR_EN;
		in->seg.log2_page_size = 12;

		spin_lock_irq(&ent->lock);
		ent->pending++;
		spin_unlock_irq(&ent->lock);
		err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in,
					    sizeof(*in), reg_mr_callback,
					    mr, &mr->out);
		if (err) {
			spin_lock_irq(&ent->lock);
			ent->pending--;
			spin_unlock_irq(&ent->lock);
			mlx5_ib_warn(dev, "create mkey failed %d\n", err);
			kfree(mr);
			break;
		}
	}

	kfree(in);
	return err;
}

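/* Destroy up to @num cached mkeys from cache entry @c. */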
static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;
	int i;

	for (i = 0; i < num; i++) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static ssize_t size_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var < ent->limit)
		return -EINVAL;

	if (var > ent->size) {
		do {
			err = add_keys(dev, c, var - ent->size);
			if (err && err != -EAGAIN)
				return err;

			usleep_range(3000, 5000);
		} while (err);
	} else if (var < ent->size) {
		remove_keys(dev, c, ent->size - var);
	}

	return count;
}

static ssize_t size_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->size);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations size_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= size_write,
	.read	= size_read,
};

static ssize_t limit_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	struct mlx5_ib_dev *dev = ent->dev;
	char lbuf[20];
	u32 var;
	int err;
	int c;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	c = order2idx(dev, ent->order);
	lbuf[sizeof(lbuf) - 1] = 0;

	if (sscanf(lbuf, "%u", &var) != 1)
		return -EINVAL;

	if (var > ent->size)
		return -EINVAL;

	ent->limit = var;

	if (ent->cur < ent->limit) {
		err = add_keys(dev, c, 2 * ent->limit - ent->cur);
		if (err)
			return err;
	}

	return count;
}

static ssize_t limit_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct mlx5_cache_ent *ent = filp->private_data;
	char lbuf[20];
	int err;

	if (*pos)
		return 0;

	err = snprintf(lbuf, sizeof(lbuf), "%d\n", ent->limit);
	if (err < 0)
		return err;

	if (copy_to_user(buf, lbuf, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static const struct file_operations limit_fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= limit_write,
	.read	= limit_read,
};

static int someone_adding(struct mlx5_mr_cache *cache)
{
	int i;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		if (cache->ent[i].cur < cache->ent[i].limit)
			return 1;
	}

	return 0;
}

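/*
 * Cache maintenance worker: refills a bucket while it holds fewer than
 * twice its limit, and lazily trims it (one mkey per run) once it has been
 * over-full and idle for a long time.
 */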
static void __cache_work_func(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_dev *dev = ent->dev;
	struct mlx5_mr_cache *cache = &dev->cache;
	int i = order2idx(dev, ent->order);
	int err;

	if (cache->stopped)
		return;

	ent = &dev->cache.ent[i];
	if (ent->cur < 2 * ent->limit && !dev->fill_delay) {
		err = add_keys(dev, i, 1);
		if (ent->cur < 2 * ent->limit) {
			if (err == -EAGAIN) {
				mlx5_ib_dbg(dev, "returned eagain, order %d\n",
					    i + 2);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(3));
			} else if (err) {
				mlx5_ib_warn(dev, "command failed order %d, err %d\n",
					     i + 2, err);
				queue_delayed_work(cache->wq, &ent->dwork,
						   msecs_to_jiffies(1000));
			} else {
				queue_work(cache->wq, &ent->work);
			}
		}
	} else if (ent->cur > 2 * ent->limit) {
		/*
		 * The remove_keys() logic is performed as a garbage
		 * collection task. Such a task is intended to run when no
		 * other active processes are running.
		 *
		 * The need_resched() will return TRUE if there are user
		 * tasks to be activated in the near future.
		 *
		 * In such a case, we don't execute remove_keys() and
		 * postpone the garbage collection work to the next cycle,
		 * in order to free CPU resources to other tasks.
		 */
		if (!need_resched() && !someone_adding(cache) &&
		    time_after(jiffies, cache->last_add + 300 * HZ)) {
			remove_keys(dev, i, 1);
			if (ent->cur > ent->limit)
				queue_work(cache->wq, &ent->work);
		} else {
			queue_delayed_work(cache->wq, &ent->dwork, 300 * HZ);
		}
	}
}

static void delayed_cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, dwork.work);
	__cache_work_func(ent);
}

static void cache_work_func(struct work_struct *work)
{
	struct mlx5_cache_ent *ent;

	ent = container_of(work, struct mlx5_cache_ent, work);
	__cache_work_func(ent);
}

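/*
 * Take a pre-created mkey from the smallest cache bucket that fits @order,
 * kicking the worker to refill it; count a miss if every suitable bucket
 * is empty.
 */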
static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_ib_mr *mr = NULL;
	struct mlx5_cache_ent *ent;
	int c;
	int i;

	c = order2idx(dev, order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
		return NULL;
	}

	for (i = c; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];

		mlx5_ib_dbg(dev, "order %d, cache index %d\n", ent->order, i);

		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
					      list);
			list_del(&mr->list);
			ent->cur--;
			spin_unlock_irq(&ent->lock);
			if (ent->cur < ent->limit)
				queue_work(cache->wq, &ent->work);
			break;
		}
		spin_unlock_irq(&ent->lock);

		queue_work(cache->wq, &ent->work);
	}

	if (!mr)
		cache->ent[c].miss++;

	return mr;
}

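/*
 * Return a cache-allocated MR to its bucket and schedule trimming if the
 * bucket now holds more than twice its limit.
 */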
static void free_cached_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int shrink = 0;
	int c;

	c = order2idx(dev, mr->order);
	if (c < 0 || c >= MAX_MR_CACHE_ENTRIES) {
		mlx5_ib_warn(dev, "order %d, cache index %d\n", mr->order, c);
		return;
	}
	ent = &cache->ent[c];
	spin_lock_irq(&ent->lock);
	list_add_tail(&mr->list, &ent->head);
	ent->cur++;
	if (ent->cur > 2 * ent->limit)
		shrink = 1;
	spin_unlock_irq(&ent->lock);

	if (shrink)
		queue_work(cache->wq, &ent->work);
}

static void clean_keys(struct mlx5_ib_dev *dev, int c)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent = &cache->ent[c];
	struct mlx5_ib_mr *mr;
	int err;

	cancel_delayed_work(&ent->dwork);
	while (1) {
		spin_lock_irq(&ent->lock);
		if (list_empty(&ent->head)) {
			spin_unlock_irq(&ent->lock);
			return;
		}
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_del(&mr->list);
		ent->cur--;
		ent->size--;
		spin_unlock_irq(&ent->lock);
		err = destroy_mkey(dev, mr);
		if (err)
			mlx5_ib_warn(dev, "failed destroy mkey\n");
		else
			kfree(mr);
	}
}

static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int i;

	if (!mlx5_debugfs_root)
		return 0;

	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
	if (!cache->root)
		return -ENOMEM;

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		ent = &cache->ent[i];
		sprintf(ent->name, "%d", ent->order);
		ent->dir = debugfs_create_dir(ent->name, cache->root);
		if (!ent->dir)
			return -ENOMEM;

		ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent,
						 &size_fops);
		if (!ent->fsize)
			return -ENOMEM;

		ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent,
						  &limit_fops);
		if (!ent->flimit)
			return -ENOMEM;

		ent->fcur = debugfs_create_u32("cur", 0400, ent->dir,
					       &ent->cur);
		if (!ent->fcur)
			return -ENOMEM;

		ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir,
						&ent->miss);
		if (!ent->fmiss)
			return -ENOMEM;
	}

	return 0;
}

static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
	if (!mlx5_debugfs_root)
		return;

	debugfs_remove_recursive(dev->cache.root);
}

static void delay_time_func(unsigned long ctx)
{
	struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;

	dev->fill_delay = 0;
}

int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_mr_cache *cache = &dev->cache;
	struct mlx5_cache_ent *ent;
	int limit;
	int err;
	int i;

	cache->wq = create_singlethread_workqueue("mkey_cache");
	if (!cache->wq) {
		mlx5_ib_warn(dev, "failed to create work queue\n");
		return -ENOMEM;
	}

	setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
		INIT_LIST_HEAD(&cache->ent[i].head);
		spin_lock_init(&cache->ent[i].lock);

		ent = &cache->ent[i];
		INIT_LIST_HEAD(&ent->head);
		spin_lock_init(&ent->lock);
		ent->order = i + 2;
		ent->dev = dev;

		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
			limit = dev->mdev->profile->mr_cache[i].limit;
		else
			limit = 0;

		INIT_WORK(&ent->work, cache_work_func);
		INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
		ent->limit = limit;
		queue_work(cache->wq, &ent->work);
	}

	err = mlx5_mr_cache_debugfs_init(dev);
	if (err)
		mlx5_ib_warn(dev, "cache debugfs failure\n");

	return 0;
}

int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
{
	int i;

	dev->cache.stopped = 1;
	flush_workqueue(dev->cache.wq);

	mlx5_mr_cache_debugfs_cleanup(dev);

	for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++)
		clean_keys(dev, i);

	destroy_workqueue(dev->cache.wq);
	del_timer_sync(&dev->delay_timer);

	return 0;
}

struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_mkey_seg *seg;
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	seg = &in->seg;
	seg->flags = convert_access(acc) | MLX5_ACCESS_MODE_PA;
	seg->flags_pd = cpu_to_be32(to_mpd(pd)->pdn | MLX5_MKEY_LEN64);
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);

	err = mlx5_core_create_mkey(mdev, &mr->mmkey, in, sizeof(*in), NULL, NULL,
				    NULL);
	if (err)
		goto err_in;

	kfree(in);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->umem = NULL;

	return &mr->ibmr;

err_in:
	kfree(in);

err_free:
	kfree(mr);

	return ERR_PTR(err);
}

static int get_octo_len(u64 addr, u64 len, int page_size)
{
	u64 offset;
	int npages;

	offset = addr & (page_size - 1);
	npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
	return (npages + 1) / 2;
}

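/*
 * Registrations of at most 2^MLX5_MAX_UMR_SHIFT pages can be posted through
 * the UMR QP (reg_umr); anything larger falls back to the slower
 * reg_create() path.
 */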
static int use_umr(int order)
{
	return order <= MLX5_MAX_UMR_SHIFT;
}

static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
			  int npages, int page_shift, int *size,
			  __be64 **mr_pas, dma_addr_t *dma)
{
	__be64 *pas;
	struct device *ddev = dev->ib_dev.dma_device;

	/*
	 * UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the pas array, we allocate
	 * a little more.
	 */
	*size = ALIGN(sizeof(u64) * npages, MLX5_UMR_MTT_ALIGNMENT);
	*mr_pas = kmalloc(*size + MLX5_UMR_ALIGN - 1, GFP_KERNEL);
	if (!(*mr_pas))
		return -ENOMEM;

	pas = PTR_ALIGN(*mr_pas, MLX5_UMR_ALIGN);
	mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);
	/* Clear padding after the actual pages. */
	memset(pas + npages, 0, *size - npages * sizeof(u64));

	*dma = dma_map_single(ddev, pas, *size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, *dma)) {
		kfree(*mr_pas);
		return -ENOMEM;
	}

	return 0;
}

static void prep_umr_wqe_common(struct ib_pd *pd, struct ib_send_wr *wr,
				struct ib_sge *sg, u64 dma, int n, u32 key,
				int page_shift)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	sg->addr = dma;
	sg->length = ALIGN(sizeof(u64) * n, 64);
	sg->lkey = dev->umrc.pd->local_dma_lkey;

	wr->next = NULL;
	wr->sg_list = sg;
	if (n)
		wr->num_sge = 1;
	else
		wr->num_sge = 0;

	wr->opcode = MLX5_IB_WR_UMR;

	umrwr->npages = n;
	umrwr->page_shift = page_shift;
	umrwr->mkey = key;
}

static void prep_umr_reg_wqe(struct ib_pd *pd, struct ib_send_wr *wr,
			     struct ib_sge *sg, u64 dma, int n, u32 key,
			     int page_shift, u64 virt_addr, u64 len,
			     int access_flags)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	prep_umr_wqe_common(pd, wr, sg, dma, n, key, page_shift);

	wr->send_flags = 0;

	umrwr->target.virt_addr = virt_addr;
	umrwr->length = len;
	umrwr->access_flags = access_flags;
	umrwr->pd = pd;
}

static void prep_umr_unreg_wqe(struct mlx5_ib_dev *dev,
			       struct ib_send_wr *wr, u32 key)
{
	struct mlx5_umr_wr *umrwr = umr_wr(wr);

	wr->send_flags = MLX5_IB_SEND_UMR_UNREG | MLX5_IB_SEND_UMR_FAIL_IF_FREE;
	wr->opcode = MLX5_IB_WR_UMR;
	umrwr->mkey = key;
}

static struct ib_umem *mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
				   int access_flags, int *npages,
				   int *page_shift, int *ncont, int *order)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,
					   access_flags, 0);
	if (IS_ERR(umem)) {
		mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
		return (void *)umem;
	}

	mlx5_ib_cont_pages(umem, start, npages, page_shift, ncont, order);
	if (!*npages) {
		mlx5_ib_warn(dev, "avoid zero region\n");
		ib_umem_release(umem);
		return ERR_PTR(-EINVAL);
	}

	mlx5_ib_dbg(dev, "npages %d, ncont %d, order %d, page_shift %d\n",
		    *npages, *ncont, *order, *page_shift);

	return umem;
}

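/*
 * Completion handler for the UMR CQ: each work request id carries a pointer
 * to the poster's mlx5_ib_umr_context, which is completed here.
 */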
void mlx5_umr_cq_handler(struct ib_cq *cq, void *cq_context)
{
	struct mlx5_ib_umr_context *context;
	struct ib_wc wc;
	int err;

	while (1) {
		err = ib_poll_cq(cq, 1, &wc);
		if (err < 0) {
			pr_warn("poll cq error %d\n", err);
			return;
		}
		if (err == 0)
			break;

		context = (struct mlx5_ib_umr_context *) (unsigned long) wc.wr_id;
		context->status = wc.status;
		complete(&context->done);
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
}

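/*
 * Register a user MR through the UMR QP: take an mkey from the cache
 * (topping the cache up if needed), DMA-map the page list, post a UMR work
 * request and wait for its completion.
 */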
static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
				  u64 virt_addr, u64 len, int npages,
				  int page_shift, int order, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr;
	struct ib_send_wr *bad;
	struct mlx5_ib_mr *mr;
	struct ib_sge sg;
	int size;
	__be64 *mr_pas;
	dma_addr_t dma;
	int err = 0;
	int i;

	for (i = 0; i < 1; i++) {
		mr = alloc_cached_mr(dev, order);
		if (mr)
			break;

		err = add_keys(dev, order2idx(dev, order), 1);
		if (err && err != -EAGAIN) {
			mlx5_ib_warn(dev, "add_keys failed, err %d\n", err);
			break;
		}
	}

	if (!mr)
		return ERR_PTR(-EAGAIN);

	err = dma_map_mr_pas(dev, umem, npages, page_shift, &size, &mr_pas,
			     &dma);
	if (err)
		goto free_mr;

	memset(&umrwr, 0, sizeof(umrwr));
	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_reg_wqe(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			 page_shift, virt_addr, len, access_flags);

	mlx5_ib_init_umr_context(&umr_context);
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
		goto unmap_dma;
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed\n");
			err = -EFAULT;
		}
	}

	mr->mmkey.iova = virt_addr;
	mr->mmkey.size = len;
	mr->mmkey.pd = to_mpd(pd)->pdn;

	mr->live = 1;

unmap_dma:
	up(&umrc->sem);
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

	kfree(mr_pas);

free_mr:
	if (err) {
		free_cached_mr(dev, mr);
		return ERR_PTR(err);
	}

	return mr;
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_update_mtt(struct mlx5_ib_mr *mr, u64 start_page_index, int npages,
		       int zap)
{
	struct mlx5_ib_dev *dev = mr->dev;
	struct device *ddev = dev->ib_dev.dma_device;
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct ib_umem *umem = mr->umem;
	int size;
	__be64 *pas;
	dma_addr_t dma;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr wr;
	struct ib_sge sg;
	int err = 0;
	const int page_index_alignment = MLX5_UMR_MTT_ALIGNMENT / sizeof(u64);
	const int page_index_mask = page_index_alignment - 1;
	size_t pages_mapped = 0;
	size_t pages_to_map = 0;
	size_t pages_iter = 0;
	int use_emergency_buf = 0;

	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes,
	 * so we need to align the offset and length accordingly */
	if (start_page_index & page_index_mask) {
		npages += start_page_index & page_index_mask;
		start_page_index &= ~page_index_mask;
	}

	pages_to_map = ALIGN(npages, page_index_alignment);

	if (start_page_index + pages_to_map > MLX5_MAX_UMR_PAGES)
		return -EINVAL;

	size = sizeof(u64) * pages_to_map;
	size = min_t(int, PAGE_SIZE, size);
	/* We allocate with GFP_ATOMIC to avoid recursion into page-reclaim
	 * code, when we are called from an invalidation. The pas buffer must
	 * be 2k-aligned for Connect-IB. */
	pas = (__be64 *)get_zeroed_page(GFP_ATOMIC);
	if (!pas) {
		mlx5_ib_warn(dev, "unable to allocate memory during MTT update, falling back to slower chunked mechanism.\n");
		pas = mlx5_ib_update_mtt_emergency_buffer;
		size = MLX5_UMR_MTT_MIN_CHUNK_SIZE;
		use_emergency_buf = 1;
		mutex_lock(&mlx5_ib_update_mtt_emergency_buffer_mutex);
		memset(pas, 0, size);
	}
	pages_iter = size / sizeof(u64);
	dma = dma_map_single(ddev, pas, size, DMA_TO_DEVICE);
	if (dma_mapping_error(ddev, dma)) {
		mlx5_ib_err(dev, "unable to map DMA during MTT update.\n");
		err = -ENOMEM;
		goto free_pas;
	}

	for (pages_mapped = 0;
	     pages_mapped < pages_to_map && !err;
	     pages_mapped += pages_iter, start_page_index += pages_iter) {
		dma_sync_single_for_cpu(ddev, dma, size, DMA_TO_DEVICE);

		npages = min_t(size_t,
			       pages_iter,
			       ib_umem_num_pages(umem) - start_page_index);

		if (!zap) {
			__mlx5_ib_populate_pas(dev, umem, PAGE_SHIFT,
					       start_page_index, npages, pas,
					       MLX5_IB_MTT_PRESENT);
			/* Clear padding after the pages brought from the
			 * umem. */
			memset(pas + npages, 0, size - npages * sizeof(u64));
		}

		dma_sync_single_for_device(ddev, dma, size, DMA_TO_DEVICE);

		memset(&wr, 0, sizeof(wr));
		wr.wr.wr_id = (u64)(unsigned long)&umr_context;

		sg.addr = dma;
		sg.length = ALIGN(npages * sizeof(u64),
				  MLX5_UMR_MTT_ALIGNMENT);
		sg.lkey = dev->umrc.pd->local_dma_lkey;

		wr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE |
				   MLX5_IB_SEND_UMR_UPDATE_MTT;
		wr.wr.sg_list = &sg;
		wr.wr.num_sge = 1;
		wr.wr.opcode = MLX5_IB_WR_UMR;
		wr.npages = sg.length / sizeof(u64);
		wr.page_shift = PAGE_SHIFT;
		wr.mkey = mr->mmkey.key;
		wr.target.offset = start_page_index;

		mlx5_ib_init_umr_context(&umr_context);
		down(&umrc->sem);
		err = ib_post_send(umrc->qp, &wr.wr, &bad);
		if (err) {
			mlx5_ib_err(dev, "UMR post send failed, err %d\n", err);
		} else {
			wait_for_completion(&umr_context.done);
			if (umr_context.status != IB_WC_SUCCESS) {
				mlx5_ib_err(dev, "UMR completion failed, code %d\n",
					    umr_context.status);
				err = -EFAULT;
			}
		}
		up(&umrc->sem);
	}
	dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);

free_pas:
	if (!use_emergency_buf)
		free_page((unsigned long)pas);
	else
		mutex_unlock(&mlx5_ib_update_mtt_emergency_buffer_mutex);

	return err;
}
#endif

/*
 * If ibmr is NULL it will be allocated by reg_create.
 * Else, the given ibmr will be used.
 */
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
				     u64 virt_addr, u64 length,
				     struct ib_umem *umem, int npages,
				     int page_shift, int access_flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int inlen;
	int err;
	bool pg_cap = !!(MLX5_CAP_GEN(dev->mdev, pg));

	mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	inlen = sizeof(*in) + sizeof(*in->pas) * ((npages + 1) / 2) * 2;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		goto err_1;
	}
	mlx5_ib_populate_pas(dev, umem, page_shift, in->pas,
			     pg_cap ? MLX5_IB_MTT_PRESENT : 0);

	/* The MLX5_MKEY_INBOX_PG_ACCESS bit allows setting the access flags
	 * in the page list submitted with the command. */
	in->flags = pg_cap ? cpu_to_be32(MLX5_MKEY_INBOX_PG_ACCESS) : 0;
	in->seg.flags = convert_access(access_flags) |
		MLX5_ACCESS_MODE_MTT;
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);
	in->seg.start_addr = cpu_to_be64(virt_addr);
	in->seg.len = cpu_to_be64(length);
	in->seg.bsfs_octo_size = 0;
	in->seg.xlt_oct_size = cpu_to_be32(get_octo_len(virt_addr, length, 1 << page_shift));
	in->seg.log2_page_size = page_shift;
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
							 1 << page_shift));
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen, NULL,
				    NULL, NULL);
	if (err) {
		mlx5_ib_warn(dev, "create mkey failed\n");
		goto err_2;
	}
	mr->umem = umem;
	mr->dev = dev;
	mr->live = 1;
	kvfree(in);

	mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);

	return mr;

err_2:
	kvfree(in);

err_1:
	if (!ibmr)
		kfree(mr);

	return ERR_PTR(err);
}

static void set_mr_fileds(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
			  int npages, u64 length, int access_flags)
{
	mr->npages = npages;
	atomic_add(npages, &dev->mdev->priv.reg_pages);
	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	mr->ibmr.length = length;
	mr->access_flags = access_flags;
}

struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_ib_mr *mr = NULL;
	struct ib_umem *umem;
	int page_shift;
	int npages;
	int ncont;
	int order;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);
	umem = mr_umem_get(pd, start, length, access_flags, &npages,
			   &page_shift, &ncont, &order);

	if (IS_ERR(umem))
		return (void *)umem;

	if (use_umr(order)) {
		mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
			     order, access_flags);
		if (PTR_ERR(mr) == -EAGAIN) {
			mlx5_ib_dbg(dev, "cache empty for order %d", order);
			mr = NULL;
		}
	} else if (access_flags & IB_ACCESS_ON_DEMAND) {
		err = -EINVAL;
		pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
		goto error;
	}

	if (!mr)
		mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
				page_shift, access_flags);

	if (IS_ERR(mr)) {
		err = PTR_ERR(mr);
		goto error;
	}

	mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);

	mr->umem = umem;
	set_mr_fileds(dev, mr, npages, length, access_flags);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return &mr->ibmr;

error:
	ib_umem_release(umem);
	return ERR_PTR(err);
}

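/*
 * Invalidate a cache-allocated mkey with a UMR "unreg" work request so that
 * the caller can hand it back to the cache.
 */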
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
	struct umr_common *umrc = &dev->umrc;
	struct mlx5_ib_umr_context umr_context;
	struct mlx5_umr_wr umrwr;
	struct ib_send_wr *bad;
	int err;

	memset(&umrwr.wr, 0, sizeof(umrwr));
	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
	prep_umr_unreg_wqe(dev, &umrwr.wr, mr->mmkey.key);

	mlx5_ib_init_umr_context(&umr_context);
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);
	if (err) {
		up(&umrc->sem);
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto error;
	} else {
		wait_for_completion(&umr_context.done);
		up(&umrc->sem);
	}
	if (umr_context.status != IB_WC_SUCCESS) {
		mlx5_ib_warn(dev, "unreg umr failed\n");
		err = -EFAULT;
		goto error;
	}
	return 0;

error:
	return err;
}

static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr, u64 virt_addr,
		     u64 length, int npages, int page_shift, int order,
		     int access_flags, int flags)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct device *ddev = dev->ib_dev.dma_device;
	struct mlx5_ib_umr_context umr_context;
	struct ib_send_wr *bad;
	struct mlx5_umr_wr umrwr = {};
	struct ib_sge sg;
	struct umr_common *umrc = &dev->umrc;
	dma_addr_t dma = 0;
	__be64 *mr_pas = NULL;
	int size;
	int err;

	umrwr.wr.wr_id = (u64)(unsigned long)&umr_context;
	umrwr.wr.send_flags = MLX5_IB_SEND_UMR_FAIL_IF_FREE;

	if (flags & IB_MR_REREG_TRANS) {
		err = dma_map_mr_pas(dev, mr->umem, npages, page_shift, &size,
				     &mr_pas, &dma);
		if (err)
			return err;

		umrwr.target.virt_addr = virt_addr;
		umrwr.length = length;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_TRANSLATION;
	}

	prep_umr_wqe_common(pd, &umrwr.wr, &sg, dma, npages, mr->mmkey.key,
			    page_shift);

	if (flags & IB_MR_REREG_PD) {
		umrwr.pd = pd;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_PD;
	}

	if (flags & IB_MR_REREG_ACCESS) {
		umrwr.access_flags = access_flags;
		umrwr.wr.send_flags |= MLX5_IB_SEND_UMR_UPDATE_ACCESS;
	}

	mlx5_ib_init_umr_context(&umr_context);

	/* post send request to UMR QP */
	down(&umrc->sem);
	err = ib_post_send(umrc->qp, &umrwr.wr, &bad);

	if (err) {
		mlx5_ib_warn(dev, "post send failed, err %d\n", err);
	} else {
		wait_for_completion(&umr_context.done);
		if (umr_context.status != IB_WC_SUCCESS) {
			mlx5_ib_warn(dev, "reg umr failed (%u)\n",
				     umr_context.status);
			err = -EFAULT;
		}
	}

	up(&umrc->sem);
	if (flags & IB_MR_REREG_TRANS) {
		dma_unmap_single(ddev, dma, size, DMA_TO_DEVICE);
		kfree(mr_pas);
	}
	return err;
}

int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
			  u64 length, u64 virt_addr, int new_access_flags,
			  struct ib_pd *new_pd, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ib_mr->device);
	struct mlx5_ib_mr *mr = to_mmr(ib_mr);
	struct ib_pd *pd = (flags & IB_MR_REREG_PD) ? new_pd : ib_mr->pd;
	int access_flags = flags & IB_MR_REREG_ACCESS ?
			    new_access_flags :
			    mr->access_flags;
	u64 addr = (flags & IB_MR_REREG_TRANS) ? virt_addr : mr->umem->address;
	u64 len = (flags & IB_MR_REREG_TRANS) ? length : mr->umem->length;
	int page_shift = 0;
	int npages = 0;
	int ncont = 0;
	int order = 0;
	int err;

	mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
		    start, virt_addr, length, access_flags);

	if (flags != IB_MR_REREG_PD) {
		/*
		 * Replace umem. This needs to be done whether or not UMR is
		 * used.
		 */
		flags |= IB_MR_REREG_TRANS;
		ib_umem_release(mr->umem);
		mr->umem = mr_umem_get(pd, addr, len, access_flags, &npages,
				       &page_shift, &ncont, &order);
		if (IS_ERR(mr->umem)) {
			err = PTR_ERR(mr->umem);
			mr->umem = NULL;
			return err;
		}
	}

	if (flags & IB_MR_REREG_TRANS && !use_umr_mtt_update(mr, addr, len)) {
		/*
		 * UMR can't be used - MKey needs to be replaced.
		 */
		if (mr->umred) {
			err = unreg_umr(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to unregister MR\n");
		} else {
			err = destroy_mkey(dev, mr);
			if (err)
				mlx5_ib_warn(dev, "Failed to destroy MKey\n");
		}
		if (err)
			return err;

		mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
				page_shift, access_flags);

		if (IS_ERR(mr))
			return PTR_ERR(mr);

		mr->umred = 0;
	} else {
		/*
		 * Send a UMR WQE
		 */
		err = rereg_umr(pd, mr, addr, len, npages, page_shift,
				order, access_flags, flags);
		if (err) {
			mlx5_ib_warn(dev, "Failed to rereg UMR\n");
			return err;
		}
	}

	if (flags & IB_MR_REREG_PD) {
		ib_mr->pd = pd;
		mr->mmkey.pd = to_mpd(pd)->pdn;
	}

	if (flags & IB_MR_REREG_ACCESS)
		mr->access_flags = access_flags;

	if (flags & IB_MR_REREG_TRANS) {
		atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
		set_mr_fileds(dev, mr, npages, len, access_flags);
		mr->mmkey.iova = addr;
		mr->mmkey.size = len;
	}
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	update_odp_mr(mr);
#endif

	return 0;
}

static int
mlx5_alloc_priv_descs(struct ib_device *device,
		      struct mlx5_ib_mr *mr,
		      int ndescs,
		      int desc_size)
{
	int size = ndescs * desc_size;
	int add_size;
	int ret;

	add_size = max_t(int, MLX5_UMR_ALIGN - ARCH_KMALLOC_MINALIGN, 0);

	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dma_device, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dma_device, mr->desc_map)) {
		ret = -ENOMEM;
		goto err;
	}

	return 0;
err:
	kfree(mr->descs_alloc);

	return ret;
}

static void
mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
{
	if (mr->descs) {
		struct ib_device *device = mr->ibmr.device;
		int size = mr->max_descs * mr->desc_size;

		dma_unmap_single(device->dma_device, mr->desc_map,
				 size, DMA_TO_DEVICE);
		kfree(mr->descs_alloc);
		mr->descs = NULL;
	}
}

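/*
 * Release the resources behind an MR: signature PSVs and private
 * descriptors first, then either destroy the mkey outright or, for
 * cache-allocated (umred) MRs, invalidate it and return it to the cache.
 */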
static int clean_mr(struct mlx5_ib_mr *mr)
{
	struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
	int umred = mr->umred;
	int err;

	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
		kfree(mr->sig);
		mr->sig = NULL;
	}

	mlx5_free_priv_descs(mr);

	if (!umred) {
		err = destroy_mkey(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmkey.key, err);
			return err;
		}
	} else {
		err = unreg_umr(dev, mr);
		if (err) {
			mlx5_ib_warn(dev, "failed unregister\n");
			return err;
		}
		free_cached_mr(dev, mr);
	}

	if (!umred)
		kfree(mr);

	return 0;
}

int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibmr->device);
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int npages = mr->npages;
	struct ib_umem *umem = mr->umem;

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (umem && umem->odp_data) {
		/* Prevent new page faults from succeeding */
		mr->live = 0;
		/* Wait for all running page-fault handlers to finish. */
		synchronize_srcu(&dev->mr_srcu);
		/* Destroy all page mappings */
		mlx5_ib_invalidate_range(umem, ib_umem_start(umem),
					 ib_umem_end(umem));
		/*
		 * We kill the umem before the MR for ODP,
		 * so that there will not be any invalidations in
		 * flight, looking at the *mr struct.
		 */
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);

		/* Avoid double-freeing the umem. */
		umem = NULL;
	}
#endif

	clean_mr(mr);

	if (umem) {
		ib_umem_release(umem);
		atomic_sub(npages, &dev->mdev->priv.reg_pages);
	}

	return 0;
}

struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd,
			       enum ib_mr_type mr_type,
			       u32 max_num_sg)
{
	struct mlx5_ib_dev *dev = to_mdev(pd->device);
	struct mlx5_create_mkey_mbox_in *in;
	struct mlx5_ib_mr *mr;
	int access_mode, err;
	int ndescs = roundup(max_num_sg, 4);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free;
	}

	in->seg.status = MLX5_MKEY_STATUS_FREE;
	in->seg.xlt_oct_size = cpu_to_be32(ndescs);
	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
	in->seg.flags_pd = cpu_to_be32(to_mpd(pd)->pdn);

	if (mr_type == IB_MR_TYPE_MEM_REG) {
		access_mode = MLX5_ACCESS_MODE_MTT;
		in->seg.log2_page_size = PAGE_SHIFT;

		err = mlx5_alloc_priv_descs(pd->device, mr,
					    ndescs, sizeof(u64));
		if (err)
			goto err_free_in;

		mr->desc_size = sizeof(u64);
		mr->max_descs = ndescs;
	} else if (mr_type == IB_MR_TYPE_SIGNATURE) {
		u32 psv_index[2];

		in->seg.flags_pd = cpu_to_be32(be32_to_cpu(in->seg.flags_pd) |
					       MLX5_MKEY_BSF_EN);
		in->seg.bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
		mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
		if (!mr->sig) {
			err = -ENOMEM;
			goto err_free_in;
		}

		/* create mem & wire PSVs */
		err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
					   2, psv_index);
		if (err)
			goto err_free_sig;

		access_mode = MLX5_ACCESS_MODE_KLM;
		mr->sig->psv_memory.psv_idx = psv_index[0];
		mr->sig->psv_wire.psv_idx = psv_index[1];

		mr->sig->sig_status_checked = true;
		mr->sig->sig_err_exists = false;
		/* Next UMR, Arm SIGERR */
		++mr->sig->sigerr_count;
	} else {
		mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
		err = -EINVAL;
		goto err_free_in;
	}

	in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
	err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, sizeof(*in),
				    NULL, NULL, NULL);
	if (err)
		goto err_destroy_psv;

	mr->ibmr.lkey = mr->mmkey.key;
	mr->ibmr.rkey = mr->mmkey.key;
	kfree(in);

	return &mr->ibmr;

err_destroy_psv:
	if (mr->sig) {
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
	}
	mlx5_free_priv_descs(mr);
err_free_sig:
	kfree(mr->sig);
err_free_in:
	kfree(in);
err_free:
	kfree(mr);
	return ERR_PTR(err);
}

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status)
{
	struct mlx5_ib_mr *mmr = to_mmr(ibmr);
	int ret = 0;

	if (check_mask & ~IB_MR_CHECK_SIG_STATUS) {
		pr_err("Invalid status check mask\n");
		ret = -EINVAL;
		goto done;
	}

	mr_status->fail_status = 0;
	if (check_mask & IB_MR_CHECK_SIG_STATUS) {
		if (!mmr->sig) {
			ret = -EINVAL;
			pr_err("signature status check requested on a non-signature enabled MR\n");
			goto done;
		}

		mmr->sig->sig_status_checked = true;
		if (!mmr->sig->sig_err_exists)
			goto done;

		if (ibmr->lkey == mmr->sig->err_item.key)
			memcpy(&mr_status->sig_err, &mmr->sig->err_item,
			       sizeof(mr_status->sig_err));
		else {
			mr_status->sig_err.err_type = IB_SIG_BAD_GUARD;
			mr_status->sig_err.sig_err_offset = 0;
			mr_status->sig_err.key = mmr->sig->err_item.key;
		}

		mmr->sig->sig_err_exists = false;
		mr_status->fail_status |= IB_MR_CHECK_SIG_STATUS;
	}

done:
	return ret;
}

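/*
 * ib_sg_to_pages() callback: append one page address (with read/write
 * permissions) to the MR's descriptor array.
 */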
static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}

int mlx5_ib_map_mr_sg(struct ib_mr *ibmr,
		      struct scatterlist *sg,
		      int sg_nents)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	int n;

	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	n = ib_sg_to_pages(ibmr, sg, sg_nents, mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;
}