/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <rdma/ib_verbs.h>
46 #include "usnic_log.h"
47 #include "usnic_uiom.h"
48 #include "usnic_uiom_interval_tree.h"
static struct workqueue_struct *usnic_uiom_wq;
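/*
 * Maximum number of page_list entries per chunk, chosen so a full chunk
 * (the struct usnic_uiom_chunk header plus its scatterlist array) fits
 * within a single page.
 */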
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list))	/\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			if (!PageDirty(page) && dirty)
				set_page_dirty_lock(page);
			put_page(page);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}
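/*
 * Pin the user pages backing [addr, addr + size), charge them against the
 * caller's RLIMIT_MEMLOCK, and collect them as scatterlist chunks on
 * uiomr->chunk_list for later IOMMU mapping.
 */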
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct usnic_uiom_reg *uiomr)
{
	struct list_head *chunk_list = &uiomr->chunk_list;
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;
	unsigned int gup_flags;
	struct mm_struct *mm;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
		return -EINVAL;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	uiomr->owning_mm = mm = current->mm;
	down_write(&mm->mmap_sem);

	locked = npages + current->mm->pinned_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
	gup_flags = FOLL_WRITE;
	gup_flags |= (writable) ? 0 : FOLL_FORCE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = get_user_pages_longterm(cur_base,
					min_t(unsigned long, npages,
					PAGE_SIZE / sizeof(struct page *)),
					gup_flags, page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(sizeof(*chunk) +
					sizeof(struct scatterlist) *
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0) {
		usnic_uiom_put_pages(chunk_list, 0);
	} else {
		mm->pinned_vm = locked;
		mmgrab(uiomr->owning_mm);
	}

	up_write(&mm->mmap_sem);
	free_page((unsigned long) page_list);
	return ret;
}
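/*
 * Tear down the IOMMU mappings covering each interval on @intervals,
 * one page at a time.
 */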
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	long unsigned va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}
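/*
 * Remove the registration's page ranges from the PD's interval tree, unmap
 * them from the IOMMU domain, and release the pinned pages, dirtying them
 * only if the region was writable and @dirty is set.
 */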
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}
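/*
 * Program the PD's IOMMU domain for each interval on @intervals, walking the
 * pinned-page chunks and coalescing physically contiguous pages into a
 * single iommu_map() call wherever possible.
 */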
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
									list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
							size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
						struct usnic_uiom_chunk,
						list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}
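/*
 * Register a user memory region with the PD: pin its pages, compute the page
 * ranges not already present, map them through the IOMMU, and record the
 * interval in the PD's interval tree.
 */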
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * Intel IOMMU map throws an error if a translation entry is
	 * changed from read to write.  This module may not unmap
	 * and then remap the entry after fixing the permission
	 * b/c this opens up a small window where hw DMA may page fault.
	 * Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
				   uiomr);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
	mmdrop(uiomr->owning_mm);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}
static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
	mmdrop(uiomr->owning_mm);
	kfree(uiomr);
}
static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
{
	return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}
static void usnic_uiom_release_defer(struct work_struct *work)
{
	struct usnic_uiom_reg *uiomr =
		container_of(work, struct usnic_uiom_reg, work);

	down_write(&uiomr->owning_mm->mmap_sem);
	uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
	up_write(&uiomr->owning_mm->mmap_sem);

	__usnic_uiom_release_tail(uiomr);
}
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
			    struct ib_ucontext *context)
{
	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	/*
	 * We may be called with the mm's mmap_sem already held. This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method. If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem. In that case
	 * we defer the vm_locked accounting to a workqueue.
	 */
	if (context->closing) {
		if (!down_write_trylock(&uiomr->owning_mm->mmap_sem)) {
			INIT_WORK(&uiomr->work, usnic_uiom_release_defer);
			queue_work(usnic_uiom_wq, &uiomr->work);
			return;
		}
	} else {
		down_write(&uiomr->owning_mm->mmap_sem);
	}

	uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
	up_write(&uiomr->owning_mm->mmap_sem);

	__usnic_uiom_release_tail(uiomr);
}
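/*
 * Allocate a protection domain backed by a fresh IOMMU domain on the PCI
 * bus and install the fault handler used for diagnostics.
 */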
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}
void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}
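/*
 * Attach @dev to the PD's IOMMU domain, verifying that the IOMMU provides
 * cache-coherent DMA, and track the device on the PD's device list.
 */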
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	return iommu_detach_device(pd->domain, dev);
}
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link) {
		devs[i++] = uiom_dev->dev;
	}

out:
	spin_unlock(&pd->lock);
	return devs;
}
void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}
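/*
 * One-time module setup: require an IOMMU on the PCI bus and create the
 * workqueue used to defer pinned-page accounting at release time.
 */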
int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled. USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	usnic_uiom_wq = create_workqueue(drv_name);
	if (!usnic_uiom_wq) {
		usnic_err("Unable to alloc wq for drv %s\n", drv_name);
		return -ENOMEM;
	}

	return 0;
}
void usnic_uiom_fini(void)
{
	flush_workqueue(usnic_uiom_wq);
	destroy_workqueue(usnic_uiom_wq);
}