/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>
static void ib_umem_notifier_start_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem. */
	if (item->odp_data->mn_counters_active) {
		int notifiers_count = item->odp_data->notifiers_count++;

		if (notifiers_count == 0)
			/* Initialize the completion object for waiting on
			 * notifiers. Since notifier_count is zero, no one
			 * should be waiting right now. */
			reinit_completion(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}
static void ib_umem_notifier_end_account(struct ib_umem *item)
{
	mutex_lock(&item->odp_data->umem_mutex);

	/* Only update private counters for this umem if it has them.
	 * Otherwise skip it. All page faults will be delayed for this umem. */
	if (item->odp_data->mn_counters_active) {
		/*
		 * This sequence increase will notify the QP page fault that
		 * the page that is going to be mapped in the spte could have
		 * been freed.
		 */
		++item->odp_data->notifiers_seq;
		if (--item->odp_data->notifiers_count == 0)
			complete_all(&item->odp_data->notifier_completion);
	}
	mutex_unlock(&item->odp_data->umem_mutex);
}
/* Account for a new mmu notifier in an ib_ucontext. */
static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
{
	atomic_inc(&context->notifier_count);
}
/* Account for a terminating mmu notifier in an ib_ucontext.
 *
 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
 * the function takes the semaphore itself. */
static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
{
	int zero_notifiers = atomic_dec_and_test(&context->notifier_count);

	if (zero_notifiers &&
	    !list_empty(&context->no_private_counters)) {
		/* No currently running mmu notifiers. Now is the chance to
		 * add private accounting to all previously added umems. */
		struct ib_umem_odp *odp_data, *next;

		/* Prevent concurrent mmu notifiers from working on the
		 * no_private_counters list. */
		down_write(&context->umem_rwsem);

		/* Read the notifier_count again, with the umem_rwsem
		 * semaphore taken for write. */
		if (!atomic_read(&context->notifier_count)) {
			list_for_each_entry_safe(odp_data, next,
						 &context->no_private_counters,
						 no_private_counters) {
				mutex_lock(&odp_data->umem_mutex);
				odp_data->mn_counters_active = true;
				list_del(&odp_data->no_private_counters);
				complete_all(&odp_data->notifier_completion);
				mutex_unlock(&odp_data->umem_mutex);
			}
		}

		up_write(&context->umem_rwsem);
	}
}
static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
					       u64 end, void *cookie)
{
	/*
	 * Increase the number of notifiers running, to
	 * prevent any further fault handling on this MR.
	 */
	ib_umem_notifier_start_account(item);
	item->odp_data->dying = 1;
	/* Make sure that the fact the umem is dying is out before we release
	 * all pending page faults. */
	smp_wmb();
	complete_all(&item->odp_data->notifier_completion);
	item->context->invalidate_range(item, ib_umem_start(item),
					ib_umem_end(item));
	return 0;
}
static void ib_umem_notifier_release(struct mmu_notifier *mn,
				     struct mm_struct *mm)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
				      ULLONG_MAX,
				      ib_umem_notifier_release_trampoline,
				      NULL);
	up_read(&context->umem_rwsem);
}
static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
				      u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, start + PAGE_SIZE);
	ib_umem_notifier_end_account(item);
	return 0;
}
static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
				      address + PAGE_SIZE,
				      invalidate_page_trampoline, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}
static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
					     u64 end, void *cookie)
{
	ib_umem_notifier_start_account(item);
	item->context->invalidate_range(item, start, end);
	return 0;
}
static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
						    struct mm_struct *mm,
						    unsigned long start,
						    unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	ib_ucontext_notifier_start_account(context);
	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_start_trampoline, NULL);
	up_read(&context->umem_rwsem);
}
static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
					    u64 end, void *cookie)
{
	ib_umem_notifier_end_account(item);
	return 0;
}
static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
						  struct mm_struct *mm,
						  unsigned long start,
						  unsigned long end)
{
	struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

	if (!context->invalidate_range)
		return;

	down_read(&context->umem_rwsem);
	rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
				      end,
				      invalidate_range_end_trampoline, NULL);
	up_read(&context->umem_rwsem);
	ib_ucontext_notifier_end_account(context);
}
static const struct mmu_notifier_ops ib_umem_notifiers = {
	.release                    = ib_umem_notifier_release,
	.invalidate_page            = ib_umem_notifier_invalidate_page,
	.invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
	.invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
};
struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
				  unsigned long addr,
				  size_t size)
{
	struct ib_umem *umem;
	struct ib_umem_odp *odp_data;
	int pages = size >> PAGE_SHIFT;
	int ret;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	umem->context    = context;
	umem->length     = size;
	umem->address    = addr;
	umem->page_shift = PAGE_SHIFT;
	umem->writable   = 1;

	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
	if (!odp_data) {
		ret = -ENOMEM;
		goto out_umem;
	}
	odp_data->umem = umem;

	mutex_init(&odp_data->umem_mutex);
	init_completion(&odp_data->notifier_completion);

	odp_data->page_list = vzalloc(pages * sizeof(*odp_data->page_list));
	if (!odp_data->page_list) {
		ret = -ENOMEM;
		goto out_odp_data;
	}

	odp_data->dma_list = vzalloc(pages * sizeof(*odp_data->dma_list));
	if (!odp_data->dma_list) {
		ret = -ENOMEM;
		goto out_page_list;
	}

	down_write(&context->umem_rwsem);
	context->odp_mrs_count++;
	rbt_ib_umem_insert(&odp_data->interval_tree, &context->umem_tree);
	if (likely(!atomic_read(&context->notifier_count)))
		odp_data->mn_counters_active = true;
	else
		list_add(&odp_data->no_private_counters,
			 &context->no_private_counters);
	up_write(&context->umem_rwsem);

	umem->odp_data = odp_data;

	return umem;

out_page_list:
	vfree(odp_data->page_list);
out_odp_data:
	kfree(odp_data);
out_umem:
	kfree(umem);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);
int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
		    int access)
{
	int ret_val;
	struct pid *our_pid;
	struct mm_struct *mm = get_task_mm(current);

	if (!mm)
		return -EINVAL;

	if (access & IB_ACCESS_HUGETLB) {
		struct vm_area_struct *vma;
		struct hstate *h;

		down_read(&mm->mmap_sem);
		vma = find_vma(mm, ib_umem_start(umem));
		if (!vma || !is_vm_hugetlb_page(vma)) {
			up_read(&mm->mmap_sem);
			ret_val = -EINVAL;
			goto out_mm;
		}
		h = hstate_vma(vma);
		umem->page_shift = huge_page_shift(h);
		up_read(&mm->mmap_sem);
		umem->hugetlb = 1;
	} else {
		umem->hugetlb = 0;
	}

	/* Prevent creating ODP MRs in child processes */
	rcu_read_lock();
	our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
	rcu_read_unlock();
	put_pid(our_pid);
	if (context->tgid != our_pid) {
		ret_val = -EINVAL;
		goto out_mm;
	}

	umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
	if (!umem->odp_data) {
		ret_val = -ENOMEM;
		goto out_mm;
	}
	umem->odp_data->umem = umem;

	mutex_init(&umem->odp_data->umem_mutex);

	init_completion(&umem->odp_data->notifier_completion);

	if (ib_umem_num_pages(umem)) {
		umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *
					    sizeof(*umem->odp_data->page_list));
		if (!umem->odp_data->page_list) {
			ret_val = -ENOMEM;
			goto out_odp_data;
		}

		umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *
					   sizeof(*umem->odp_data->dma_list));
		if (!umem->odp_data->dma_list) {
			ret_val = -ENOMEM;
			goto out_page_list;
		}
	}

	/*
	 * When using MMU notifiers, we will get a
	 * notification before the "current" task (and MM) is
	 * destroyed. We use the umem_rwsem semaphore to synchronize.
	 */
	down_write(&context->umem_rwsem);
	context->odp_mrs_count++;
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	if (likely(!atomic_read(&context->notifier_count)) ||
	    context->odp_mrs_count == 1)
		umem->odp_data->mn_counters_active = true;
	else
		list_add(&umem->odp_data->no_private_counters,
			 &context->no_private_counters);
	downgrade_write(&context->umem_rwsem);

	if (context->odp_mrs_count == 1) {
		/*
		 * Note that at this point, no MMU notifier is running
		 * for this context!
		 */
		atomic_set(&context->notifier_count, 0);
		INIT_HLIST_NODE(&context->mn.hlist);
		context->mn.ops = &ib_umem_notifiers;
		/*
		 * Lock-dep detects a false positive for mmap_sem vs.
		 * umem_rwsem, due to not grasping downgrade_write correctly.
		 */
		lockdep_off();
		ret_val = mmu_notifier_register(&context->mn, mm);
		lockdep_on();
		if (ret_val) {
			pr_err("Failed to register mmu_notifier %d\n", ret_val);
			ret_val = -EBUSY;
			goto out_mutex;
		}
	}

	up_read(&context->umem_rwsem);

	/*
	 * Note that doing an mmput can cause a notifier for the relevant mm.
	 * If the notifier is called while we hold the umem_rwsem, this will
	 * cause a deadlock. Therefore, we release the reference only after we
	 * released the semaphore.
	 */
	mmput(mm);
	return 0;

out_mutex:
	up_read(&context->umem_rwsem);
	vfree(umem->odp_data->dma_list);
out_page_list:
	vfree(umem->odp_data->page_list);
out_odp_data:
	kfree(umem->odp_data);
out_mm:
	mmput(mm);
	return ret_val;
}
void ib_umem_odp_release(struct ib_umem *umem)
{
	struct ib_ucontext *context = umem->context;

	/*
	 * Ensure that no more pages are mapped in the umem.
	 *
	 * It is the driver's responsibility to ensure, before calling us,
	 * that the hardware will not attempt to access the MR any more.
	 */
	ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
				    ib_umem_end(umem));

	down_write(&context->umem_rwsem);
	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
		rbt_ib_umem_remove(&umem->odp_data->interval_tree,
				   &context->umem_tree);
	context->odp_mrs_count--;
	if (!umem->odp_data->mn_counters_active) {
		list_del(&umem->odp_data->no_private_counters);
		complete_all(&umem->odp_data->notifier_completion);
	}

	/*
	 * Downgrade the lock to a read lock. This ensures that the notifiers
	 * (who lock the mutex for reading) will be able to finish, and we
	 * will be able to eventually obtain the mmu notifiers SRCU. Note
	 * that since we are doing it atomically, no other user could register
	 * and unregister while we do the check.
	 */
	downgrade_write(&context->umem_rwsem);
	if (!context->odp_mrs_count) {
		struct task_struct *owning_process = NULL;
		struct mm_struct *owning_mm        = NULL;

		owning_process = get_pid_task(context->tgid,
					      PIDTYPE_PID);
		if (owning_process == NULL)
			/*
			 * The process is already dead; the notifiers were
			 * already removed.
			 */
			goto out;

		owning_mm = get_task_mm(owning_process);
		if (owning_mm == NULL)
			/*
			 * The process' mm is already dead; the notifiers
			 * were already removed.
			 */
			goto out_put_task;

		mmu_notifier_unregister(&context->mn, owning_mm);

		mmput(owning_mm);

out_put_task:
		put_task_struct(owning_process);
	}
out:
	up_read(&context->umem_rwsem);

	vfree(umem->odp_data->dma_list);
	vfree(umem->odp_data->page_list);
	kfree(umem->odp_data);
	kfree(umem);
}
/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem->odp_data->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
		struct ib_umem *umem,
		int page_index,
		struct page *page,
		u64 access_mask,
		unsigned long current_seq)
{
	struct ib_device *dev = umem->context->device;
	dma_addr_t dma_addr;
	int stored_page = 0;
	int remove_existing_mapping = 0;
	int ret = 0;

	/*
	 * Note: we avoid writing if seq is different from the initial seq, to
	 * handle case of a racing notifier. This check also allows us to bail
	 * early if we have a notifier running in parallel with us.
	 */
	if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
		ret = -EAGAIN;
		goto out;
	}
	if (!(umem->odp_data->dma_list[page_index])) {
		dma_addr = ib_dma_map_page(dev,
					   page,
					   0, BIT(umem->page_shift),
					   DMA_BIDIRECTIONAL);
		if (ib_dma_mapping_error(dev, dma_addr)) {
			ret = -EFAULT;
			goto out;
		}
		umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
		umem->odp_data->page_list[page_index] = page;
		stored_page = 1;
	} else if (umem->odp_data->page_list[page_index] == page) {
		umem->odp_data->dma_list[page_index] |= access_mask;
	} else {
		pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
		       umem->odp_data->page_list[page_index], page);
		/* Better remove the mapping now, to prevent any further
		 * damage. */
		remove_existing_mapping = 1;
	}

out:
	/* On Demand Paging - avoid pinning the page */
	if (umem->context->invalidate_range || !stored_page)
		put_page(page);

	if (remove_existing_mapping && umem->context->invalidate_range) {
		invalidate_page_trampoline(
			umem,
			ib_umem_start(umem) + (page_index >> umem->page_shift),
			ib_umem_start(umem) + ((page_index + 1) >>
					       umem->page_shift),
			NULL);
		ret = -EAGAIN;
	}

	return ret;
}
/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped on success, and a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that the userspace process is being
 * terminated and its mm was already destroyed.
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual pages mapped is returned in
 *        the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
			      u64 access_mask, unsigned long current_seq)
{
	struct task_struct *owning_process  = NULL;
	struct mm_struct   *owning_mm       = NULL;
	struct page       **local_page_list = NULL;
	u64 page_mask, off;
	int j, k, ret = 0, start_idx, npages = 0, page_shift;
	unsigned int flags = 0;
	phys_addr_t p = 0;

	if (access_mask == 0)
		return -EINVAL;

	if (user_virt < ib_umem_start(umem) ||
	    user_virt + bcnt > ib_umem_end(umem))
		return -EFAULT;

	local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
	if (!local_page_list)
		return -ENOMEM;

	page_shift = umem->page_shift;
	page_mask = ~(BIT(page_shift) - 1);
	off = user_virt & (~page_mask);
	user_virt = user_virt & page_mask;
	bcnt += off; /* Charge for the first page offset as well. */

	owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
	if (owning_process == NULL) {
		ret = -EINVAL;
		goto out_no_task;
	}

	owning_mm = get_task_mm(owning_process);
	if (owning_mm == NULL) {
		ret = -EINVAL;
		goto out_put_task;
	}

	if (access_mask & ODP_WRITE_ALLOWED_BIT)
		flags |= FOLL_WRITE;

	start_idx = (user_virt - ib_umem_start(umem)) >> page_shift;
	k = start_idx;

	while (bcnt > 0) {
		const size_t gup_num_pages = min_t(size_t,
				(bcnt + BIT(page_shift) - 1) >> page_shift,
				PAGE_SIZE / sizeof(struct page *));

		down_read(&owning_mm->mmap_sem);
		/*
		 * Note: this might result in redundant page getting. We can
		 * avoid this by checking dma_list to be 0 before calling
		 * get_user_pages. However, this makes the code much more
		 * complex (and doesn't gain us much performance in most use
		 * cases).
		 */
		npages = get_user_pages_remote(owning_process, owning_mm,
				user_virt, gup_num_pages,
				flags, local_page_list, NULL, NULL);
		up_read(&owning_mm->mmap_sem);

		if (npages < 0)
			break;

		bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
		mutex_lock(&umem->odp_data->umem_mutex);
		for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
			if (user_virt & ~page_mask) {
				p += PAGE_SIZE;
				if (page_to_phys(local_page_list[j]) != p) {
					ret = -EFAULT;
					break;
				}
				put_page(local_page_list[j]);
				continue;
			}

			ret = ib_umem_odp_map_dma_single_page(
					umem, k, local_page_list[j],
					access_mask, current_seq);
			if (ret < 0)
				break;

			p = page_to_phys(local_page_list[j]);
			k++;
		}
		mutex_unlock(&umem->odp_data->umem_mutex);

		if (ret < 0) {
			/* Release left over pages when handling errors. */
			for (++j; j < npages; ++j)
				put_page(local_page_list[j]);
			break;
		}
	}

	if (ret >= 0) {
		if (npages < 0 && k == start_idx)
			ret = npages;
		else
			ret = k - start_idx;
	}

	mmput(owning_mm);
out_put_task:
	put_task_struct(owning_process);
out_no_task:
	free_page((unsigned long)local_page_list);
	return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
				 u64 bound)
{
	int idx;
	u64 addr;
	struct ib_device *dev = umem->context->device;

	virt  = max_t(u64, virt,  ib_umem_start(umem));
	bound = min_t(u64, bound, ib_umem_end(umem));
	/* Note that during the run of this function, the
	 * notifiers_count of the MR is > 0, preventing any racing
	 * faults from completion. We might be racing with other
	 * invalidations, so we must make sure we free each page only
	 * once. */
	mutex_lock(&umem->odp_data->umem_mutex);
	for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
		idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
		if (umem->odp_data->page_list[idx]) {
			struct page *page = umem->odp_data->page_list[idx];
			dma_addr_t dma = umem->odp_data->dma_list[idx];
			dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

			WARN_ON(!dma_addr);

			ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
					  DMA_BIDIRECTIONAL);
			if (dma & ODP_WRITE_ALLOWED_BIT) {
				struct page *head_page = compound_head(page);
				/*
				 * set_page_dirty prefers being called with
				 * the page lock. However, MMU notifiers are
				 * called sometimes with and sometimes without
				 * the lock. We rely on the umem_mutex instead
				 * to prevent other mmu notifiers from
				 * continuing and allowing the page mapping to
				 * be removed.
				 */
				set_page_dirty(head_page);
			}
			/* on demand pinning support */
			if (!umem->context->invalidate_range)
				put_page(page);
			umem->odp_data->page_list[idx] = NULL;
			umem->odp_data->dma_list[idx] = 0;
		}
	}
	mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);