/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <rdma/ib_umem.h>
#include <rdma/rdma_vt.h>
#include "vt.h"
#include "mr.h"
/**
 * rvt_driver_mr_init - Init MR resources per driver
 * @rdi: rvt dev struct
 *
 * Do any initialization needed when a driver registers with rdmavt.
 *
 * Return: 0 on success or errno on failure
 */
int rvt_driver_mr_init(struct rvt_dev_info *rdi)
{
	unsigned int lkey_table_size = rdi->dparms.lkey_table_size;
	unsigned lk_tab_size;
	int i;
	/*
	 * The top hfi1_lkey_table_size bits are used to index the
	 * table.  The lower 8 bits can be owned by the user (copied from
	 * the LKEY).  The remaining bits act as a generation number or tag.
	 */
	spin_lock_init(&rdi->lkey_table.lock);
	/* ensure generation is at least 4 bits */
	if (lkey_table_size > RVT_MAX_LKEY_TABLE_BITS) {
		rvt_pr_warn(rdi, "lkey bits %u too large, reduced to %u\n",
			    lkey_table_size, RVT_MAX_LKEY_TABLE_BITS);
		rdi->dparms.lkey_table_size = RVT_MAX_LKEY_TABLE_BITS;
		lkey_table_size = rdi->dparms.lkey_table_size;
	}
	rdi->lkey_table.max = 1 << lkey_table_size;
	lk_tab_size = rdi->lkey_table.max * sizeof(*rdi->lkey_table.table);
	rdi->lkey_table.table = (struct rvt_mregion __rcu **)
			       vmalloc_node(lk_tab_size, rdi->dparms.node);
	if (!rdi->lkey_table.table)
		return -ENOMEM;

	RCU_INIT_POINTER(rdi->dma_mr, NULL);
	for (i = 0; i < rdi->lkey_table.max; i++)
		RCU_INIT_POINTER(rdi->lkey_table.table[i], NULL);

	return 0;
}
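/*
 * Illustrative sketch (not part of the original file): how an LKEY built by
 * this module decomposes, given the layout described above — the top
 * lkey_table_size bits index the table, a generation tag sits in the middle,
 * and the low 8 bits are left for the user.  rvt_lkey_index() is a
 * hypothetical helper name.
 */
static inline u32 rvt_lkey_index(struct rvt_dev_info *rdi, u32 lkey)
{
	/* top lkey_table_size bits select the lkey_table slot */
	return lkey >> (32 - rdi->dparms.lkey_table_size);
}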
/**
 * rvt_mr_exit - clean up MR
 * @rdi: rvt dev structure
 *
 * called when drivers have unregistered or perhaps failed to register with us
 */
void rvt_mr_exit(struct rvt_dev_info *rdi)
{
	if (rdi->dma_mr)
		rvt_pr_err(rdi, "DMA MR not null!\n");

	vfree(rdi->lkey_table.table);
}
static void rvt_deinit_mregion(struct rvt_mregion *mr)
{
	int i = mr->mapsz;

	mr->mapsz = 0;
	while (i)
		kfree(mr->map[--i]);
}
static int rvt_init_mregion(struct rvt_mregion *mr, struct ib_pd *pd,
			    int count)
{
	int m, i = 0;
	struct rvt_dev_info *dev = ib_to_rvt(pd->device);

	mr->mapsz = 0;
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	for (; i < m; i++) {
		mr->map[i] = kzalloc_node(sizeof(*mr->map[0]), GFP_KERNEL,
					  dev->dparms.node);
		if (!mr->map[i]) {
			rvt_deinit_mregion(mr);
			return -ENOMEM;
		}
		mr->mapsz++;
	}
	init_completion(&mr->comp);
	/* count returning the ptr to user */
	atomic_set(&mr->refcount, 1);
	mr->pd = pd;
	mr->max_segs = count;
	return 0;
}
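/*
 * Illustrative sketch (not part of the original file): the map sizing in
 * rvt_init_mregion() is a ceiling division; if RVT_SEGSZ were 8, a count of
 * 1..8 segments would need one first-level map block and a count of 9 would
 * need two.  rvt_num_maps() is a hypothetical helper name making that
 * explicit:
 */
static inline int rvt_num_maps(int count)
{
	/* ceil(count / RVT_SEGSZ) first-level map blocks */
	return (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
}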
/**
 * rvt_alloc_lkey - allocate an lkey
 * @mr: memory region that this lkey protects
 * @dma_region: 0->normal key, 1->restricted DMA key
 *
 * Returns 0 if successful, otherwise returns -errno.
 *
 * Increments mr reference count as required.
 *
 * Sets the lkey field of mr for non-dma regions.
 */
static int rvt_alloc_lkey(struct rvt_mregion *mr, int dma_region)
{
	unsigned long flags;
	u32 r;
	u32 n;
	int ret = 0;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;

	rvt_get_mr(mr);
	spin_lock_irqsave(&rkt->lock, flags);

	/* special case for dma_mr lkey == 0 */
	if (dma_region) {
		struct rvt_mregion *tmr;

		tmr = rcu_access_pointer(dev->dma_mr);
		if (!tmr) {
			rcu_assign_pointer(dev->dma_mr, mr);
			mr->lkey_published = 1;
		} else {
			rvt_put_mr(mr);
		}
		goto success;
	}

	/* Find the next available LKEY */
	r = rkt->next;
	n = r;
	for (;;) {
		if (!rcu_access_pointer(rkt->table[r]))
			break;
		r = (r + 1) & (rkt->max - 1);
		if (r == n)
			goto bail;
	}
	rkt->next = (r + 1) & (rkt->max - 1);
	/*
	 * Make sure lkey is never zero which is reserved to indicate an
	 * unrestricted LKEY.
	 */
	rkt->gen++;
	/*
	 * bits are capped to ensure enough bits for generation number
	 */
	mr->lkey = (r << (32 - dev->dparms.lkey_table_size)) |
		((((1 << (24 - dev->dparms.lkey_table_size)) - 1) & rkt->gen)
		 << 8);
	if (mr->lkey == 0) {
		mr->lkey |= 1 << 8;
		rkt->gen++;
	}
	rcu_assign_pointer(rkt->table[r], mr);
	mr->lkey_published = 1;
success:
	spin_unlock_irqrestore(&rkt->lock, flags);
out:
	return ret;
bail:
	rvt_put_mr(mr);
	spin_unlock_irqrestore(&rkt->lock, flags);
	ret = -ENOMEM;
	goto out;
}
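/*
 * Illustrative sketch (not part of the original file): a worked example of
 * the lkey composition above.  Assume dev->dparms.lkey_table_size == 16,
 * slot r == 5, and rkt->gen == 3.  Then:
 *
 *	r << (32 - 16)			= 0x00050000  (table index, top bits)
 *	((1 << (24 - 16)) - 1) & gen	= 0x03	      (generation, capped)
 *	... << 8			= 0x00000300
 *	lkey				= 0x00050300
 *
 * The low 8 bits stay zero so the user may own them, and a zero result is
 * bumped because lkey == 0 is reserved for the unrestricted DMA key.
 */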
/**
 * rvt_free_lkey - free an lkey
 * @mr: mr to free from tables
 */
static void rvt_free_lkey(struct rvt_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	struct rvt_dev_info *dev = ib_to_rvt(mr->pd->device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	int freed = 0;

	spin_lock_irqsave(&rkt->lock, flags);
	if (!mr->lkey_published)
		goto out;
	if (lkey == 0) {
		RCU_INIT_POINTER(dev->dma_mr, NULL);
	} else {
		r = lkey >> (32 - dev->dparms.lkey_table_size);
		RCU_INIT_POINTER(rkt->table[r], NULL);
	}
	mr->lkey_published = 0;
	freed++;
out:
	spin_unlock_irqrestore(&rkt->lock, flags);
	if (freed) {
		synchronize_rcu();
		rvt_put_mr(mr);
	}
}
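/*
 * Illustrative sketch (not part of the original file): the unpublish path
 * above (clear the slot, synchronize_rcu(), then drop the reference) pairs
 * with lockless readers such as rvt_lkey_ok().  A minimal reader under the
 * same rules, with rvt_lookup_mregion() as a hypothetical name:
 */
static inline struct rvt_mregion *rvt_lookup_mregion(struct rvt_lkey_table *rkt,
						     u32 index)
{
	struct rvt_mregion *mr;

	rcu_read_lock();
	mr = rcu_dereference(rkt->table[index]);
	if (mr)
		atomic_inc(&mr->refcount);	/* pin before leaving RCU */
	rcu_read_unlock();
	return mr;
}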
static struct rvt_mr *__rvt_alloc_mr(int count, struct ib_pd *pd)
{
	struct rvt_mr *mr;
	int rval = -ENOMEM;
	int m;

	/* Allocate struct plus pointers to first level page tables. */
	m = (count + RVT_SEGSZ - 1) / RVT_SEGSZ;
	mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
	if (!mr)
		goto bail;

	rval = rvt_init_mregion(&mr->mr, pd, count);
	if (rval)
		goto bail;
	/*
	 * ib_reg_phys_mr() will initialize mr->ibmr except for
	 * lkey and rkey.
	 */
	rval = rvt_alloc_lkey(&mr->mr, 0);
	if (rval)
		goto bail_mregion;
	mr->ibmr.lkey = mr->mr.lkey;
	mr->ibmr.rkey = mr->mr.lkey;

	return mr;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	return ERR_PTR(rval);
}
static void __rvt_free_mr(struct rvt_mr *mr)
{
	rvt_deinit_mregion(&mr->mr);
	rvt_free_lkey(&mr->mr);
	kfree(mr);
}
/**
 * rvt_get_dma_mr - get a DMA memory region
 * @pd: protection domain for this memory region
 * @acc: access flags
 *
 * Return: the memory region on success, otherwise returns an errno.
 * Note that all DMA addresses should be created via the
 * struct ib_dma_mapping_ops functions (see dma.c).
 */
struct ib_mr *rvt_get_dma_mr(struct ib_pd *pd, int acc)
{
	struct rvt_mr *mr;
	struct ib_mr *ret;
	int rval;

	if (ibpd_to_rvtpd(pd)->user)
		return ERR_PTR(-EPERM);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	rval = rvt_init_mregion(&mr->mr, pd, 0);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail;
	}

	rval = rvt_alloc_lkey(&mr->mr, 1);
	if (rval) {
		ret = ERR_PTR(rval);
		goto bail_mregion;
	}

	mr->mr.access_flags = acc;
	ret = &mr->ibmr;
done:
	return ret;

bail_mregion:
	rvt_deinit_mregion(&mr->mr);
bail:
	kfree(mr);
	goto done;
}
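/*
 * Illustrative note (not part of the original file): consumers normally do
 * not call this directly; in kernels of this era the IB core invokes the
 * driver's get_dma_mr verb while setting up a PD for a device that lacks a
 * local DMA lkey.  The EPERM check above rejects user PDs because the
 * unrestricted key exposes raw kernel virtual addresses.
 */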
/**
 * rvt_reg_user_mr - register a userspace memory region
 * @pd: protection domain for this memory region
 * @start: starting userspace address
 * @length: length of region to register
 * @virt_addr: associated virtual address
 * @mr_access_flags: access flags for this memory region
 * @udata: unused by the driver
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_mr *rvt_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
			      u64 virt_addr, int mr_access_flags,
			      struct ib_udata *udata)
{
	struct rvt_mr *mr;
	struct ib_umem *umem;
	struct scatterlist *sg;
	int n, m, entry;
	struct ib_mr *ret;

	if (length == 0)
		return ERR_PTR(-EINVAL);

	umem = ib_umem_get(pd->uobject->context, start, length,
			   mr_access_flags, 0);
	if (IS_ERR(umem))
		return (void *)umem;

	n = umem->nmap;

	mr = __rvt_alloc_mr(n, pd);
	if (IS_ERR(mr)) {
		ret = (struct ib_mr *)mr;
		goto bail_umem;
	}

	mr->mr.user_base = start;
	mr->mr.iova = virt_addr;
	mr->mr.length = length;
	mr->mr.offset = ib_umem_offset(umem);
	mr->mr.access_flags = mr_access_flags;
	mr->umem = umem;

	if (is_power_of_2(umem->page_size))
		mr->mr.page_shift = ilog2(umem->page_size);
	m = 0;
	n = 0;
	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
		void *vaddr;

		vaddr = page_address(sg_page(sg));
		if (!vaddr) {
			ret = ERR_PTR(-EINVAL);
			goto bail_inval;
		}
		mr->mr.map[m]->segs[n].vaddr = vaddr;
		mr->mr.map[m]->segs[n].length = umem->page_size;
		n++;
		if (n == RVT_SEGSZ) {
			m++;
			n = 0;
		}
	}
	return &mr->ibmr;

bail_inval:
	__rvt_free_mr(mr);
bail_umem:
	ib_umem_release(umem);
	return ret;
}
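/*
 * Illustrative sketch (not part of the original file): the (m, n) walk above
 * stores pages two levels deep, so global segment index k lives at
 * map[k / RVT_SEGSZ]->segs[k % RVT_SEGSZ].  rvt_seg_at() is a hypothetical
 * accessor spelling that out:
 */
static inline struct rvt_seg *rvt_seg_at(struct rvt_mregion *mr, u32 k)
{
	return &mr->map[k / RVT_SEGSZ]->segs[k % RVT_SEGSZ];
}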
/**
 * rvt_dereg_mr - unregister and free a memory region
 * @ibmr: the memory region to free
 *
 * Note that this is called to free MRs created by rvt_get_dma_mr()
 * or rvt_reg_user_mr().
 *
 * Returns 0 on success.
 */
int rvt_dereg_mr(struct ib_mr *ibmr)
{
	struct rvt_mr *mr = to_imr(ibmr);
	struct rvt_dev_info *rdi = ib_to_rvt(ibmr->pd->device);
	int ret = 0;
	unsigned long timeout;

	rvt_free_lkey(&mr->mr);

	rvt_put_mr(&mr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ);
	if (!timeout) {
		rvt_pr_err(rdi,
			   "rvt_dereg_mr timeout mr %p pd %p refcount %u\n",
			   mr, mr->mr.pd, atomic_read(&mr->mr.refcount));
		rvt_get_mr(&mr->mr);
		ret = -EBUSY;
		goto out;
	}
	rvt_deinit_mregion(&mr->mr);
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);
out:
	return ret;
}
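/*
 * Illustrative note (not part of the original file): the teardown above
 * relies on the reference taken at init time (atomic_set(&mr->refcount, 1)
 * in rvt_init_mregion()).  The put/complete pairing looks roughly like:
 *
 *	if (atomic_dec_and_test(&mr->refcount))
 *		complete(&mr->comp);
 *
 * so wait_for_completion_timeout() returns once every reference taken by
 * rvt_lkey_ok()/rvt_rkey_ok() has been dropped.
 */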
/**
 * rvt_alloc_mr - Allocate a memory region usable for fast registration
 * @pd: protection domain for this memory region
 * @mr_type: mem region type
 * @max_num_sg: Max number of segments allowed
 *
 * Return: the memory region on success, otherwise return an errno.
 */
struct ib_mr *rvt_alloc_mr(struct ib_pd *pd,
			   enum ib_mr_type mr_type,
			   u32 max_num_sg)
{
	struct rvt_mr *mr;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = __rvt_alloc_mr(max_num_sg, pd);
	if (IS_ERR(mr))
		return (struct ib_mr *)mr;

	return &mr->ibmr;
}
/**
 * rvt_alloc_fmr - allocate a fast memory region
 * @pd: the protection domain for this memory region
 * @mr_access_flags: access flags for this memory region
 * @fmr_attr: fast memory region attributes
 *
 * Return: the memory region on success, otherwise returns an errno.
 */
struct ib_fmr *rvt_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
			     struct ib_fmr_attr *fmr_attr)
{
	struct rvt_fmr *fmr;
	int m;
	struct ib_fmr *ret;
	int rval = -ENOMEM;

	/* Allocate struct plus pointers to first level page tables. */
	m = (fmr_attr->max_pages + RVT_SEGSZ - 1) / RVT_SEGSZ;
	fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
	if (!fmr)
		goto bail;

	rval = rvt_init_mregion(&fmr->mr, pd, fmr_attr->max_pages);
	if (rval)
		goto bail;

	/*
	 * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey &
	 * rkey.
	 */
	rval = rvt_alloc_lkey(&fmr->mr, 0);
	if (rval)
		goto bail_mregion;
	fmr->ibfmr.rkey = fmr->mr.lkey;
	fmr->ibfmr.lkey = fmr->mr.lkey;
	/*
	 * Resources are allocated but no valid mapping (RKEY can't be
	 * used).
	 */
	fmr->mr.access_flags = mr_access_flags;
	fmr->mr.max_segs = fmr_attr->max_pages;
	fmr->mr.page_shift = fmr_attr->page_shift;

	ret = &fmr->ibfmr;
done:
	return ret;

bail_mregion:
	rvt_deinit_mregion(&fmr->mr);
bail:
	kfree(fmr);
	ret = ERR_PTR(rval);
	goto done;
}
/**
 * rvt_map_phys_fmr - set up a fast memory region
 * @ibfmr: the fast memory region to set up
 * @page_list: the list of pages to associate with the fast memory region
 * @list_len: the number of pages to associate with the fast memory region
 * @iova: the virtual address of the start of the fast memory region
 *
 * This may be called from interrupt context.
 *
 * Return: 0 on success
 */
int rvt_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
		     int list_len, u64 iova)
{
	struct rvt_fmr *fmr = to_ifmr(ibfmr);
	struct rvt_lkey_table *rkt;
	unsigned long flags;
	int m, n, i;
	u32 ps;
	struct rvt_dev_info *rdi = ib_to_rvt(ibfmr->device);

	i = atomic_read(&fmr->mr.refcount);
	if (i > 2)
		return -EBUSY;

	if (list_len > fmr->mr.max_segs)
		return -EINVAL;

	rkt = &rdi->lkey_table;
	spin_lock_irqsave(&rkt->lock, flags);
	fmr->mr.user_base = iova;
	fmr->mr.iova = iova;
	ps = 1 << fmr->mr.page_shift;
	fmr->mr.length = list_len * ps;
	m = 0;
	n = 0;
	for (i = 0; i < list_len; i++) {
		fmr->mr.map[m]->segs[n].vaddr = (void *)page_list[i];
		fmr->mr.map[m]->segs[n].length = ps;
		if (++n == RVT_SEGSZ) {
			m++;
			n = 0;
		}
	}
	spin_unlock_irqrestore(&rkt->lock, flags);
	return 0;
}
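/*
 * Illustrative note (not part of the original file): a consumer reaches this
 * through the verbs wrapper, roughly:
 *
 *	u64 pages[2] = { dma_addr0, dma_addr1 };   (hypothetical addresses)
 *	ret = ib_map_phys_fmr(fmr, pages, 2, iova);
 *
 * ib_map_phys_fmr() dispatches to this function; the refcount test above
 * (> 2) rejects remapping while the FMR is still referenced by an SGE.
 */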
/**
 * rvt_unmap_fmr - unmap fast memory regions
 * @fmr_list: the list of fast memory regions to unmap
 *
 * Return: 0 on success.
 */
int rvt_unmap_fmr(struct list_head *fmr_list)
{
	struct rvt_fmr *fmr;
	struct rvt_lkey_table *rkt;
	unsigned long flags;
	struct rvt_dev_info *rdi;

	list_for_each_entry(fmr, fmr_list, ibfmr.list) {
		rdi = ib_to_rvt(fmr->ibfmr.device);
		rkt = &rdi->lkey_table;
		spin_lock_irqsave(&rkt->lock, flags);
		fmr->mr.user_base = 0;
		fmr->mr.iova = 0;
		fmr->mr.length = 0;
		spin_unlock_irqrestore(&rkt->lock, flags);
	}
	return 0;
}
/**
 * rvt_dealloc_fmr - deallocate a fast memory region
 * @ibfmr: the fast memory region to deallocate
 *
 * Return: 0 on success.
 */
int rvt_dealloc_fmr(struct ib_fmr *ibfmr)
{
	struct rvt_fmr *fmr = to_ifmr(ibfmr);
	int ret = 0;
	unsigned long timeout;

	rvt_free_lkey(&fmr->mr);
	rvt_put_mr(&fmr->mr); /* will set completion if last */
	timeout = wait_for_completion_timeout(&fmr->mr.comp, 5 * HZ);
	if (!timeout) {
		rvt_get_mr(&fmr->mr);
		ret = -EBUSY;
		goto out;
	}
	rvt_deinit_mregion(&fmr->mr);
	kfree(fmr);
out:
	return ret;
}
/**
 * rvt_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @sge: SGE to check and initialize
 * @acc: access flags
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 *
 * Return: 1 if valid and successful, otherwise returns 0.
 *
 * increments the reference count upon success
 */
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
		struct rvt_sge *isge, struct ib_sge *sge, int acc)
{
	struct rvt_mregion *mr;
	unsigned n, m;
	size_t off;
	struct rvt_dev_info *dev = ib_to_rvt(pd->ibpd.device);

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr and dma.c).
	 */
	rcu_read_lock();
	if (sge->lkey == 0) {
		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		atomic_inc(&mr->refcount);
		rcu_read_unlock();

		isge->mr = mr;
		isge->vaddr = (void *)sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rcu_dereference(
		rkt->table[(sge->lkey >> (32 - dev->dparms.lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
		goto bail;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;
	atomic_inc(&mr->refcount);
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * page sizes are uniform power of 2 so no loop is necessary
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rvt_lkey_ok);
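/*
 * Illustrative note (not part of the original file): a worked example of the
 * uniform-page fast path above.  With mr->page_shift == 12 (4 KiB pages),
 * RVT_SEGSZ taken as 8 for illustration, and off == 0x6234:
 *
 *	entries_spanned_by_off = 0x6234 >> 12 = 6
 *	off                   -= 6 << 12      -> 0x234
 *	m = 6 / 8 = 0,  n = 6 % 8 = 6
 *
 * i.e. the SGE starts 0x234 bytes into segment 6 of the first map block,
 * with no per-segment loop needed.
 */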
/**
 * rvt_rkey_ok - check the IB virtual address, length, and RKEY
 * @qp: qp for validation
 * @sge: SGE state
 * @len: length of data
 * @vaddr: virtual address to place data
 * @rkey: rkey to check
 * @acc: access flags
 *
 * Return: 1 if successful, otherwise 0.
 *
 * increments the reference count upon success
 */
int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
		u32 len, u64 vaddr, u32 rkey, int acc)
{
	struct rvt_dev_info *dev = ib_to_rvt(qp->ibqp.device);
	struct rvt_lkey_table *rkt = &dev->lkey_table;
	struct rvt_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use RKEY == zero for kernel virtual addresses
	 * (see rvt_get_dma_mr and dma.c).
	 */
	rcu_read_lock();
	if (rkey == 0) {
		struct rvt_pd *pd = ibpd_to_rvtpd(qp->ibqp.pd);
		struct rvt_dev_info *rdi = ib_to_rvt(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(rdi->dma_mr);
		if (!mr)
			goto bail;
		atomic_inc(&mr->refcount);
		rcu_read_unlock();

		sge->mr = mr;
		sge->vaddr = (void *)vaddr;
		sge->length = len;
		sge->sge_length = len;
		sge->m = 0;
		sge->n = 0;
		goto ok;
	}

	mr = rcu_dereference(
		rkt->table[(rkey >> (32 - dev->dparms.lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
		goto bail;

	off = vaddr - mr->iova;
	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
		     (mr->access_flags & acc) == 0))
		goto bail;
	atomic_inc(&mr->refcount);
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * page sizes are uniform power of 2 so no loop is necessary
		 * entries_spanned_by_off is the number of times the loop below
		 * would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / RVT_SEGSZ;
		n = entries_spanned_by_off % RVT_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= RVT_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	sge->mr = mr;
	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
	sge->length = mr->map[m]->segs[n].length - off;
	sge->sge_length = len;
	sge->m = m;
	sge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}
EXPORT_SYMBOL(rvt_rkey_ok);
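/*
 * Illustrative note (not part of the original file): a driver handling an
 * incoming RDMA WRITE validates the target roughly like:
 *
 *	ok = rvt_rkey_ok(qp, &qp->r_sge.sge, wqe_len, reth_vaddr, reth_rkey,
 *			 IB_ACCESS_REMOTE_WRITE);
 *	if (!ok)
 *		respond with NAK (remote access error);
 *
 * where the reth_* values come from the packet's RETH header; the names
 * here are hypothetical.
 */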