/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <linux/interval_tree.h>
#include <linux/pagemap.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
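
/*
 * Initialize the fields shared by all ODP umem flavours. For non-implicit
 * umems this also sizes the page-aligned VA range, allocates the per-page
 * page_list/dma_list tracking arrays, and registers the interval notifier
 * that invalidates mappings when the VA range changes.
 */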
static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
				   const struct mmu_interval_notifier_ops *ops)
{
	int ret;

	umem_odp->umem.is_odp = 1;
	mutex_init(&umem_odp->umem_mutex);

	if (!umem_odp->is_implicit_odp) {
		size_t page_size = 1UL << umem_odp->page_shift;
		unsigned long start;
		unsigned long end;
		size_t pages;

		start = ALIGN_DOWN(umem_odp->umem.address, page_size);
		if (check_add_overflow(umem_odp->umem.address,
				       (unsigned long)umem_odp->umem.length,
				       &end))
			return -EOVERFLOW;
		end = ALIGN(end, page_size);
		if (unlikely(end < page_size))
			return -EOVERFLOW;

		pages = (end - start) >> umem_odp->page_shift;
		if (!pages)
			return -EINVAL;

		umem_odp->page_list = kvcalloc(
			pages, sizeof(*umem_odp->page_list), GFP_KERNEL);
		if (!umem_odp->page_list)
			return -ENOMEM;

		umem_odp->dma_list = kvcalloc(
			pages, sizeof(*umem_odp->dma_list), GFP_KERNEL);
		if (!umem_odp->dma_list) {
			ret = -ENOMEM;
			goto out_page_list;
		}

		ret = mmu_interval_notifier_insert(&umem_odp->notifier,
						   umem_odp->umem.owning_mm,
						   start, end - start, ops);
		if (ret)
			goto out_dma_list;
	}

	return 0;

out_dma_list:
	kvfree(umem_odp->dma_list);
out_page_list:
	kvfree(umem_odp->page_list);
	return ret;
}
/**
 * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem
 *
 * Implicit ODP umems do not have a VA range and do not have any page lists.
 * They exist only to hold the per_mm reference to help the driver create
 * children umems.
 *
 * @device: IB device to create UMEM
 * @access: ib_reg_mr access flags
 */
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
					       int access)
{
	struct ib_umem *umem;
	struct ib_umem_odp *umem_odp;
	int ret;

	if (access & IB_ACCESS_HUGETLB)
		return ERR_PTR(-EINVAL);

	umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
	if (!umem_odp)
		return ERR_PTR(-ENOMEM);
	umem = &umem_odp->umem;
	umem->ibdev = device;
	umem->writable = ib_access_writable(access);
	umem->owning_mm = current->mm;
	umem_odp->is_implicit_odp = 1;
	umem_odp->page_shift = PAGE_SHIFT;

	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	ret = ib_init_umem_odp(umem_odp, NULL);
	if (ret) {
		put_pid(umem_odp->tgid);
		kfree(umem_odp);
		return ERR_PTR(ret);
	}
	return umem_odp;
}
EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
/**
 * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit
 *                           parent ODP umem
 *
 * @root: The parent umem enclosing the child. This must be allocated using
 *        ib_umem_odp_alloc_implicit()
 * @addr: The starting userspace VA
 * @size: The length of the userspace VA
 */
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
			size_t size,
			const struct mmu_interval_notifier_ops *ops)
{
	/*
	 * Caller must ensure that root cannot be freed during the call to
	 * this function.
	 */
	struct ib_umem_odp *odp_data;
	struct ib_umem *umem;
	int ret;

	if (WARN_ON(!root->is_implicit_odp))
		return ERR_PTR(-EINVAL);

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data)
		return ERR_PTR(-ENOMEM);
	umem = &odp_data->umem;
	umem->ibdev = root->umem.ibdev;
	umem->length = size;
	umem->address = addr;
	umem->writable = root->umem.writable;
	umem->owning_mm = root->umem.owning_mm;
	odp_data->page_shift = PAGE_SHIFT;
	odp_data->notifier.ops = ops;

	/*
	 * An mmget must be held when registering a notifier; the owning_mm
	 * only has an mm_grab at this point.
	 */
	if (!mmget_not_zero(umem->owning_mm)) {
		ret = -EFAULT;
		goto out_free;
	}

	odp_data->tgid = get_pid(root->tgid);
	ret = ib_init_umem_odp(odp_data, ops);
	if (ret)
		goto out_tgid;
	mmput(umem->owning_mm);
	return odp_data;

out_tgid:
	put_pid(odp_data->tgid);
	mmput(umem->owning_mm);
out_free:
	kfree(odp_data);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_alloc_child);
/**
 * ib_umem_odp_get - Create a umem_odp for a userspace va
 *
 * @device: IB device struct to get UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 *
 * The driver should use this when the access flags indicate ODP memory. It
 * avoids pinning; instead, it stores the mm for future page fault handling
 * in conjunction with MMU notifiers.
 */
struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
				    unsigned long addr, size_t size, int access,
				    const struct mmu_interval_notifier_ops *ops)
{
	struct ib_umem_odp *umem_odp;
	struct mm_struct *mm;
	int ret;

	if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
		return ERR_PTR(-EINVAL);

	umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
	if (!umem_odp)
		return ERR_PTR(-ENOMEM);

	umem_odp->umem.ibdev = device;
	umem_odp->umem.length = size;
	umem_odp->umem.address = addr;
	umem_odp->umem.writable = ib_access_writable(access);
	umem_odp->umem.owning_mm = mm = current->mm;
	umem_odp->notifier.ops = ops;

	umem_odp->page_shift = PAGE_SHIFT;
#ifdef CONFIG_HUGETLB_PAGE
	if (access & IB_ACCESS_HUGETLB)
		umem_odp->page_shift = HPAGE_SHIFT;
#endif

	umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
	ret = ib_init_umem_odp(umem_odp, ops);
	if (ret)
		goto err_put_pid;
	return umem_odp;

err_put_pid:
	put_pid(umem_odp->tgid);
	kfree(umem_odp);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_get);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
{
	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	if (!umem_odp->is_implicit_odp) {
		mutex_lock(&umem_odp->umem_mutex);
		ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
					    ib_umem_end(umem_odp));
		mutex_unlock(&umem_odp->umem_mutex);
		mmu_interval_notifier_remove(&umem_odp->notifier);
		kvfree(umem_odp->dma_list);
		kvfree(umem_odp->page_list);
	}
	put_pid(umem_odp->tgid);
	kfree(umem_odp);
}
EXPORT_SYMBOL(ib_umem_odp_release);
/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem_odp: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem_odp->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem_odp *umem_odp,
		unsigned int page_index,
		struct page *page,
		u64 access_mask,
		unsigned long current_seq)
{
	struct ib_device *dev = umem_odp->umem.ibdev;
	dma_addr_t dma_addr;
	int ret = 0;

	if (mmu_interval_check_retry(&umem_odp->notifier, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem_odp->dma_list[page_index])) {
		dma_addr =
			ib_dma_map_page(dev, page, 0, BIT(umem_odp->page_shift),
					DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		umem_odp->dma_list[page_index] = dma_addr | access_mask;
		umem_odp->page_list[page_index] = page;
		umem_odp->npages++;
	} else if (umem_odp->page_list[page_index] == page) {
		umem_odp->dma_list[page_index] |= access_mask;
	} else {
		/*
		 * This is a race here where we could have done:
		 *
		 *         CPU0                             CPU1
		 *   get_user_pages()
		 *                                       invalidate()
		 *                                       page_fault()
		 *   mutex_lock(umem_mutex)
		 *    page from GUP != page in ODP
		 *
		 * It should be prevented by the retry test above as reading
		 * the seq number should be reliable under the
		 * umem_mutex. Thus something is really not working right if
		 * things get here.
		 */
		WARN(true,
		     "Got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		     umem_odp->page_list[page_index], page);
		ret = -EAGAIN;
	}

out:
	put_page(page);
	return ret;
}
/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem_odp->dma_list.
 *
 * Returns the number of pages mapped on success, and a negative error code
 * for failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that the userspace process is being
 * terminated and its mm was already destroyed.
 * @umem_odp: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual pages mapped is returned in
 *        the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem_odp->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq)
{
	struct task_struct *owning_process = NULL;
	struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
	struct page **local_page_list = NULL;
	u64 page_mask, off;
	int j, k, ret = 0, start_idx, npages = 0;
	unsigned int flags = 0, page_shift;
	phys_addr_t p = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem_odp) ||
	    user_virt + bcnt > ib_umem_end(umem_odp))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	page_shift = umem_odp->page_shift;
	page_mask = ~(BIT(page_shift) - 1);
	off = user_virt & (~page_mask);
	user_virt = user_virt & page_mask;
	bcnt += off; /* Charge for the first page offset as well. */

	/*
	 * owning_process is allowed to be NULL; this means the mm is somehow
	 * living beyond the lifetime of the originating process. Presumably
	 * mmget_not_zero will fail in this case.
	 */
	owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID);
	if (!owning_process || !mmget_not_zero(owning_mm)) {
		ret = -EINVAL;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem_odp)) >> page_shift;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages = min_t(size_t,
				ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
				PAGE_SIZE / sizeof(struct page *));

		mmap_read_lock(owning_mm);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list to be 0 before calling
		 * get_user_pages. However, this makes the code much more
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
				user_virt, gup_num_pages,
				flags, local_page_list, NULL, NULL);
		mmap_read_unlock(owning_mm);

		if (npages < 0) {
			if (npages != -EAGAIN)
				pr_warn("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
			else
				pr_debug("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
			break;
		}

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		mutex_lock(&umem_odp->umem_mutex);
		for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
			if (user_virt & ~page_mask) {
				p += PAGE_SIZE;
				if (page_to_phys(local_page_list[j]) != p) {
					ret = -EFAULT;
					break;
				}
				put_page(local_page_list[j]);
				continue;
			}

			ret = ib_umem_odp_map_dma_single_page(
					umem_odp, k, local_page_list[j],
					access_mask, current_seq);
			if (ret < 0) {
				if (ret != -EAGAIN)
					pr_warn("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
				else
					pr_debug("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
				break;
			}

			p = page_to_phys(local_page_list[j]);
			k++;
		}
		mutex_unlock(&umem_odp->umem_mutex);

		if (ret < 0) {
			/*
			 * Release pages, remembering that the first page
			 * to hit an error was already released by
			 * ib_umem_odp_map_dma_single_page().
			 */
			if (npages - (j + 1) > 0)
				release_pages(&local_page_list[j + 1],
					      npages - (j + 1));
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	if (owning_process)
		put_task_struct(owning_process);
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
				 u64 bound)
{
	int idx;
	u64 addr;
	struct ib_device *dev = umem_odp->umem.ibdev;

	lockdep_assert_held(&umem_odp->umem_mutex);

	virt = max_t(u64, virt, ib_umem_start(umem_odp));
	bound = min_t(u64, bound, ib_umem_end(umem_odp));
	/*
	 * Note that during the run of this function, the notifiers_count of
	 * the MR is > 0, preventing any racing faults from completion. We
	 * might be racing with other invalidations, so we must make sure we
	 * free each page only once.
	 */
	for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
		idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
		if (umem_odp->page_list[idx]) {
			struct page *page = umem_odp->page_list[idx];
			dma_addr_t dma = umem_odp->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr,
					  BIT(umem_odp->page_shift),
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			umem_odp->page_list[idx] = NULL;
			umem_odp->dma_list[idx] = 0;
			umem_odp->npages--;
		}
	}
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);