drivers/infiniband/core/umem_odp.c
/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/pid.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_umem_odp.h>

static void ib_umem_notifier_start_account(struct ib_umem *item)
{
        mutex_lock(&item->odp_data->umem_mutex);

        /* Only update private counters for this umem if it has them.
         * Otherwise skip it. All page faults will be delayed for this umem. */
        if (item->odp_data->mn_counters_active) {
                int notifiers_count = item->odp_data->notifiers_count++;

                if (notifiers_count == 0)
                        /* Initialize the completion object for waiting on
                         * notifiers. Since notifier_count is zero, no one
                         * should be waiting right now. */
                        reinit_completion(&item->odp_data->notifier_completion);
        }
        mutex_unlock(&item->odp_data->umem_mutex);
}

static void ib_umem_notifier_end_account(struct ib_umem *item)
{
        mutex_lock(&item->odp_data->umem_mutex);

        /* Only update private counters for this umem if it has them.
         * Otherwise skip it. All page faults will be delayed for this umem. */
        if (item->odp_data->mn_counters_active) {
                /*
                 * Incrementing the sequence number tells a concurrent page
                 * fault handler that the page it is about to map into the
                 * device page tables may have been freed.
                 */
                ++item->odp_data->notifiers_seq;
                if (--item->odp_data->notifiers_count == 0)
                        complete_all(&item->odp_data->notifier_completion);
        }
        mutex_unlock(&item->odp_data->umem_mutex);
}
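
/*
 * Illustrative sketch, not part of this file: the counters above pair with
 * the device page-fault path through the usual seq/count retry scheme. A
 * fault handler would do something like the following (the device
 * programming step is hypothetical):
 *
 *	unsigned long seq = READ_ONCE(umem->odp_data->notifiers_seq);
 *
 *	npages = ib_umem_odp_map_dma_pages(umem, va, len, access, seq);
 *	mutex_lock(&umem->odp_data->umem_mutex);
 *	if (!ib_umem_mmu_notifier_retry(umem, seq))
 *		program the device page tables from odp_data->dma_list;
 *	else
 *		bail out and refault; an invalidation ran concurrently.
 *	mutex_unlock(&umem->odp_data->umem_mutex);
 */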

/* Account for a new mmu notifier in an ib_ucontext. */
static void ib_ucontext_notifier_start_account(struct ib_ucontext *context)
{
        atomic_inc(&context->notifier_count);
}

/* Account for a terminating mmu notifier in an ib_ucontext.
 *
 * Must be called with the ib_ucontext->umem_rwsem semaphore unlocked, since
 * the function takes the semaphore itself. */
static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
{
        int zero_notifiers = atomic_dec_and_test(&context->notifier_count);

        if (zero_notifiers &&
            !list_empty(&context->no_private_counters)) {
                /* No currently running mmu notifiers. Now is the chance to
                 * add private accounting to all previously added umems. */
                struct ib_umem_odp *odp_data, *next;

                /* Prevent concurrent mmu notifiers from working on the
                 * no_private_counters list. */
                down_write(&context->umem_rwsem);

                /* Read the notifier_count again, with the umem_rwsem
                 * semaphore taken for write. */
                if (!atomic_read(&context->notifier_count)) {
                        list_for_each_entry_safe(odp_data, next,
                                                 &context->no_private_counters,
                                                 no_private_counters) {
                                mutex_lock(&odp_data->umem_mutex);
                                odp_data->mn_counters_active = true;
                                list_del(&odp_data->no_private_counters);
                                complete_all(&odp_data->notifier_completion);
                                mutex_unlock(&odp_data->umem_mutex);
                        }
                }

                up_write(&context->umem_rwsem);
        }
}

static int ib_umem_notifier_release_trampoline(struct ib_umem *item, u64 start,
                                               u64 end, void *cookie)
{
        /*
         * Increase the number of notifiers running, to
         * prevent any further fault handling on this MR.
         */
        ib_umem_notifier_start_account(item);
        item->odp_data->dying = 1;
        /* Make sure the fact that the umem is dying is visible before we
         * release all pending page faults. */
        smp_wmb();
        complete_all(&item->odp_data->notifier_completion);
        item->context->invalidate_range(item, ib_umem_start(item),
                                        ib_umem_end(item));
        return 0;
}

static void ib_umem_notifier_release(struct mmu_notifier *mn,
                                     struct mm_struct *mm)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        ib_ucontext_notifier_start_account(context);
        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, 0,
                                      ULLONG_MAX,
                                      ib_umem_notifier_release_trampoline,
                                      NULL);
        up_read(&context->umem_rwsem);
}

static int invalidate_page_trampoline(struct ib_umem *item, u64 start,
                                      u64 end, void *cookie)
{
        ib_umem_notifier_start_account(item);
        item->context->invalidate_range(item, start, start + PAGE_SIZE);
        ib_umem_notifier_end_account(item);
        return 0;
}

static void ib_umem_notifier_invalidate_page(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long address)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        ib_ucontext_notifier_start_account(context);
        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, address,
                                      address + PAGE_SIZE,
                                      invalidate_page_trampoline, NULL);
        up_read(&context->umem_rwsem);
        ib_ucontext_notifier_end_account(context);
}

static int invalidate_range_start_trampoline(struct ib_umem *item, u64 start,
                                             u64 end, void *cookie)
{
        ib_umem_notifier_start_account(item);
        item->context->invalidate_range(item, start, end);
        return 0;
}

static void ib_umem_notifier_invalidate_range_start(struct mmu_notifier *mn,
                                                    struct mm_struct *mm,
                                                    unsigned long start,
                                                    unsigned long end)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        ib_ucontext_notifier_start_account(context);
        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
                                      end,
                                      invalidate_range_start_trampoline, NULL);
        up_read(&context->umem_rwsem);
}

static int invalidate_range_end_trampoline(struct ib_umem *item, u64 start,
                                           u64 end, void *cookie)
{
        ib_umem_notifier_end_account(item);
        return 0;
}

static void ib_umem_notifier_invalidate_range_end(struct mmu_notifier *mn,
                                                  struct mm_struct *mm,
                                                  unsigned long start,
                                                  unsigned long end)
{
        struct ib_ucontext *context = container_of(mn, struct ib_ucontext, mn);

        if (!context->invalidate_range)
                return;

        down_read(&context->umem_rwsem);
        rbt_ib_umem_for_each_in_range(&context->umem_tree, start,
                                      end,
                                      invalidate_range_end_trampoline, NULL);
        up_read(&context->umem_rwsem);
        ib_ucontext_notifier_end_account(context);
}

static const struct mmu_notifier_ops ib_umem_notifiers = {
        .release                    = ib_umem_notifier_release,
        .invalidate_page            = ib_umem_notifier_invalidate_page,
        .invalidate_range_start     = ib_umem_notifier_invalidate_range_start,
        .invalidate_range_end       = ib_umem_notifier_invalidate_range_end,
};
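
/*
 * Illustrative sketch, not part of this file: the ->invalidate_range hook
 * that the trampolines above invoke is supplied by the driver. A minimal
 * implementation (the device-side zap helper is hypothetical) would unmap
 * the range from the device and then tear down the DMA mappings:
 *
 *	static void my_invalidate_range(struct ib_umem *umem,
 *					unsigned long start,
 *					unsigned long end)
 *	{
 *		my_device_zap_mappings(umem, start, end);
 *		ib_umem_odp_unmap_dma_pages(umem, start, end);
 *	}
 *
 * and would be installed as context->invalidate_range.
 */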

struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
                                  unsigned long addr,
                                  size_t size)
{
        struct ib_umem *umem;
        struct ib_umem_odp *odp_data;
        int pages = size >> PAGE_SHIFT;
        int ret;

        umem = kzalloc(sizeof(*umem), GFP_KERNEL);
        if (!umem)
                return ERR_PTR(-ENOMEM);

        umem->context = context;
        umem->length = size;
        umem->address = addr;
        umem->page_shift = PAGE_SHIFT;
        umem->writable = 1;

        odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
        if (!odp_data) {
                ret = -ENOMEM;
                goto out_umem;
        }
        odp_data->umem = umem;

        mutex_init(&odp_data->umem_mutex);
        init_completion(&odp_data->notifier_completion);

        odp_data->page_list = vzalloc(pages * sizeof(*odp_data->page_list));
        if (!odp_data->page_list) {
                ret = -ENOMEM;
                goto out_odp_data;
        }

        odp_data->dma_list = vzalloc(pages * sizeof(*odp_data->dma_list));
        if (!odp_data->dma_list) {
                ret = -ENOMEM;
                goto out_page_list;
        }

        down_write(&context->umem_rwsem);
        context->odp_mrs_count++;
        rbt_ib_umem_insert(&odp_data->interval_tree, &context->umem_tree);
        if (likely(!atomic_read(&context->notifier_count)))
                odp_data->mn_counters_active = true;
        else
                list_add(&odp_data->no_private_counters,
                         &context->no_private_counters);
        up_write(&context->umem_rwsem);

        umem->odp_data = odp_data;

        return umem;

out_page_list:
        vfree(odp_data->page_list);
out_odp_data:
        kfree(odp_data);
out_umem:
        kfree(umem);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_alloc_odp_umem);
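
/*
 * Illustrative note, not part of this file: ib_alloc_odp_umem() exists for
 * implicit ODP, where a driver lazily creates child umems that cover
 * fixed-size chunks of a huge parent registration. A hedged usage sketch
 * (the chunk constant is hypothetical):
 *
 *	umem = ib_alloc_odp_umem(context, va & ~(MY_CHUNK_SIZE - 1),
 *				 MY_CHUNK_SIZE);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 */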

int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
                    int access)
{
        int ret_val;
        struct pid *our_pid;
        struct mm_struct *mm = get_task_mm(current);

        if (!mm)
                return -EINVAL;

        if (access & IB_ACCESS_HUGETLB) {
                struct vm_area_struct *vma;
                struct hstate *h;

                down_read(&mm->mmap_sem);
                vma = find_vma(mm, ib_umem_start(umem));
                if (!vma || !is_vm_hugetlb_page(vma)) {
                        up_read(&mm->mmap_sem);
                        ret_val = -EINVAL;
                        goto out_mm;
                }
                h = hstate_vma(vma);
                umem->page_shift = huge_page_shift(h);
                up_read(&mm->mmap_sem);
                umem->hugetlb = 1;
        } else {
                umem->hugetlb = 0;
        }

        /* Prevent creating ODP MRs in child processes */
        rcu_read_lock();
        our_pid = get_task_pid(current->group_leader, PIDTYPE_PID);
        rcu_read_unlock();
        put_pid(our_pid);
        if (context->tgid != our_pid) {
                ret_val = -EINVAL;
                goto out_mm;
        }

        umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
        if (!umem->odp_data) {
                ret_val = -ENOMEM;
                goto out_mm;
        }
        umem->odp_data->umem = umem;

        mutex_init(&umem->odp_data->umem_mutex);

        init_completion(&umem->odp_data->notifier_completion);

        if (ib_umem_num_pages(umem)) {
                umem->odp_data->page_list =
                        vzalloc(ib_umem_num_pages(umem) *
                                sizeof(*umem->odp_data->page_list));
                if (!umem->odp_data->page_list) {
                        ret_val = -ENOMEM;
                        goto out_odp_data;
                }

                umem->odp_data->dma_list =
                        vzalloc(ib_umem_num_pages(umem) *
                                sizeof(*umem->odp_data->dma_list));
                if (!umem->odp_data->dma_list) {
                        ret_val = -ENOMEM;
                        goto out_page_list;
                }
        }

        /*
         * When using MMU notifiers, we will get a
         * notification before the "current" task (and MM) is
         * destroyed. We use the umem_rwsem semaphore to synchronize.
         */
        down_write(&context->umem_rwsem);
        context->odp_mrs_count++;
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_insert(&umem->odp_data->interval_tree,
                                   &context->umem_tree);
        if (likely(!atomic_read(&context->notifier_count)) ||
            context->odp_mrs_count == 1)
                umem->odp_data->mn_counters_active = true;
        else
                list_add(&umem->odp_data->no_private_counters,
                         &context->no_private_counters);
        downgrade_write(&context->umem_rwsem);

        if (context->odp_mrs_count == 1) {
                /*
                 * Note that at this point, no MMU notifier is running
                 * for this context!
                 */
                atomic_set(&context->notifier_count, 0);
                INIT_HLIST_NODE(&context->mn.hlist);
                context->mn.ops = &ib_umem_notifiers;
                /*
                 * Lockdep detects a false positive for mmap_sem vs.
                 * umem_rwsem, because it does not understand
                 * downgrade_write correctly.
                 */
                lockdep_off();
                ret_val = mmu_notifier_register(&context->mn, mm);
                lockdep_on();
                if (ret_val) {
                        pr_err("Failed to register mmu_notifier %d\n", ret_val);
                        ret_val = -EBUSY;
                        goto out_mutex;
                }
        }

        up_read(&context->umem_rwsem);

        /*
         * Note that doing an mmput can cause a notifier for the relevant mm.
         * If the notifier is called while we hold the umem_rwsem, this will
         * cause a deadlock. Therefore, we release the reference only after we
         * have released the semaphore.
         */
        mmput(mm);
        return 0;

out_mutex:
        up_read(&context->umem_rwsem);
        vfree(umem->odp_data->dma_list);
out_page_list:
        vfree(umem->odp_data->page_list);
out_odp_data:
        kfree(umem->odp_data);
out_mm:
        mmput(mm);
        return ret_val;
}
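
/*
 * Illustrative note, not part of this file: drivers normally reach
 * ib_umem_odp_get() through ib_umem_get(), which calls it when the
 * registration requests IB_ACCESS_ON_DEMAND. A hedged sketch:
 *
 *	umem = ib_umem_get(context, start, length,
 *			   IB_ACCESS_LOCAL_WRITE | IB_ACCESS_ON_DEMAND, 0);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *
 * No pages are pinned at this point; they are faulted in later via
 * ib_umem_odp_map_dma_pages().
 */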

void ib_umem_odp_release(struct ib_umem *umem)
{
        struct ib_ucontext *context = umem->context;

        /*
         * Ensure that no more pages are mapped in the umem.
         *
         * It is the driver's responsibility to ensure, before calling us,
         * that the hardware will not attempt to access the MR any more.
         */
        ib_umem_odp_unmap_dma_pages(umem, ib_umem_start(umem),
                                    ib_umem_end(umem));

        down_write(&context->umem_rwsem);
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_remove(&umem->odp_data->interval_tree,
                                   &context->umem_tree);
        context->odp_mrs_count--;
        if (!umem->odp_data->mn_counters_active) {
                list_del(&umem->odp_data->no_private_counters);
                complete_all(&umem->odp_data->notifier_completion);
        }

        /*
         * Downgrade the lock to a read lock. This ensures that the notifiers
         * (who lock the mutex for reading) will be able to finish, and we
         * will eventually be able to obtain the mmu notifiers SRCU. Note
         * that since we are doing it atomically, no other user could register
         * and unregister while we do the check.
         */
        downgrade_write(&context->umem_rwsem);
        if (!context->odp_mrs_count) {
                struct task_struct *owning_process = NULL;
                struct mm_struct *owning_mm = NULL;

                owning_process = get_pid_task(context->tgid,
                                              PIDTYPE_PID);
                if (owning_process == NULL)
                        /*
                         * The process is already dead, and its notifiers
                         * were already removed.
                         */
                        goto out;

                owning_mm = get_task_mm(owning_process);
                if (owning_mm == NULL)
                        /*
                         * The process' mm is already dead, and its
                         * notifiers were already removed.
                         */
                        goto out_put_task;
                mmu_notifier_unregister(&context->mn, owning_mm);

                mmput(owning_mm);

out_put_task:
                put_task_struct(owning_process);
        }
out:
        up_read(&context->umem_rwsem);

        vfree(umem->odp_data->dma_list);
        vfree(umem->odp_data->page_list);
        kfree(umem->odp_data);
        kfree(umem);
}

/*
 * Map for DMA and insert a single page into the on-demand paging page tables.
 *
 * @umem: the umem to insert the page to.
 * @page_index: index in the umem to add the page to.
 * @page: the page struct to map and add.
 * @access_mask: access permissions needed for this page.
 * @current_seq: sequence number for synchronization with invalidations.
 *               the sequence number is taken from
 *               umem->odp_data->notifiers_seq.
 *
 * The function returns -EFAULT if the DMA mapping operation fails. It returns
 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
 *
 * The page is released via put_page even if the operation failed. For
 * on-demand pinning, the page is released whenever it isn't stored in the
 * umem.
 */
static int ib_umem_odp_map_dma_single_page(
                struct ib_umem *umem,
                int page_index,
                struct page *page,
                u64 access_mask,
                unsigned long current_seq)
{
        struct ib_device *dev = umem->context->device;
        dma_addr_t dma_addr;
        int stored_page = 0;
        int remove_existing_mapping = 0;
        int ret = 0;

        /*
         * Note: we avoid writing if seq is different from the initial seq, to
         * handle the case of a racing notifier. This check also allows us to
         * bail out early if a notifier is running in parallel with us.
         */
        if (ib_umem_mmu_notifier_retry(umem, current_seq)) {
                ret = -EAGAIN;
                goto out;
        }
        if (!(umem->odp_data->dma_list[page_index])) {
                dma_addr = ib_dma_map_page(dev,
                                           page,
                                           0, BIT(umem->page_shift),
                                           DMA_BIDIRECTIONAL);
                if (ib_dma_mapping_error(dev, dma_addr)) {
                        ret = -EFAULT;
                        goto out;
                }
                umem->odp_data->dma_list[page_index] = dma_addr | access_mask;
                umem->odp_data->page_list[page_index] = page;
                umem->npages++;
                stored_page = 1;
        } else if (umem->odp_data->page_list[page_index] == page) {
                umem->odp_data->dma_list[page_index] |= access_mask;
        } else {
                pr_err("error: got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
                       umem->odp_data->page_list[page_index], page);
                /* Better remove the mapping now, to prevent any further
                 * damage. */
                remove_existing_mapping = 1;
        }

out:
        /* On Demand Paging - avoid pinning the page */
        if (umem->context->invalidate_range || !stored_page)
                put_page(page);

        if (remove_existing_mapping && umem->context->invalidate_range) {
                /* Convert the page index back to an address within the umem:
                 * shift left by page_shift, matching the idx computation in
                 * ib_umem_odp_unmap_dma_pages(). */
                invalidate_page_trampoline(
                        umem,
                        ib_umem_start(umem) +
                                (page_index << umem->page_shift),
                        ib_umem_start(umem) +
                                ((page_index + 1) << umem->page_shift),
                        NULL);
                ret = -EAGAIN;
        }

        return ret;
}

/**
 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
 *
 * Pins the range of pages passed in the argument, and maps them to
 * DMA addresses. The DMA addresses of the mapped pages are updated in
 * umem->odp_data->dma_list.
 *
 * Returns the number of pages mapped on success, or a negative error code
 * on failure.
 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
 * the function from completing its task.
 * An -ENOENT error code indicates that the userspace process is being
 * terminated and its mm was already destroyed.
 * @umem: the umem to map and pin
 * @user_virt: the address from which we need to map.
 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
 *        bigger due to alignment, and may also be smaller in case of an error
 *        pinning or mapping a page. The actual number of pages mapped is
 *        returned in the return value.
 * @access_mask: bit mask of the requested access permissions for the given
 *               range.
 * @current_seq: the MMU notifiers sequence value for synchronization with
 *               invalidations. The sequence number is read from
 *               umem->odp_data->notifiers_seq before calling this function.
 */
int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
                              u64 access_mask, unsigned long current_seq)
{
        struct task_struct *owning_process = NULL;
        struct mm_struct *owning_mm = NULL;
        struct page **local_page_list = NULL;
        u64 page_mask, off;
        int j, k, ret = 0, start_idx, npages = 0, page_shift;
        unsigned int flags = 0;
        phys_addr_t p = 0;

        if (access_mask == 0)
                return -EINVAL;

        if (user_virt < ib_umem_start(umem) ||
            user_virt + bcnt > ib_umem_end(umem))
                return -EFAULT;

        local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
        if (!local_page_list)
                return -ENOMEM;

        page_shift = umem->page_shift;
        page_mask = ~(BIT(page_shift) - 1);
        off = user_virt & (~page_mask);
        user_virt = user_virt & page_mask;
        bcnt += off; /* Charge for the first page offset as well. */

        owning_process = get_pid_task(umem->context->tgid, PIDTYPE_PID);
        if (owning_process == NULL) {
                ret = -EINVAL;
                goto out_no_task;
        }

        owning_mm = get_task_mm(owning_process);
        if (owning_mm == NULL) {
                ret = -ENOENT;
                goto out_put_task;
        }

        if (access_mask & ODP_WRITE_ALLOWED_BIT)
                flags |= FOLL_WRITE;

        start_idx = (user_virt - ib_umem_start(umem)) >> page_shift;
        k = start_idx;

        while (bcnt > 0) {
                const size_t gup_num_pages = min_t(size_t,
                                (bcnt + BIT(page_shift) - 1) >> page_shift,
                                PAGE_SIZE / sizeof(struct page *));

                down_read(&owning_mm->mmap_sem);
                /*
                 * Note: this might result in redundant page getting. We can
                 * avoid this by checking dma_list to be 0 before calling
                 * get_user_pages. However, this makes the code much more
                 * complex (and doesn't gain us much performance in most use
                 * cases).
                 */
                npages = get_user_pages_remote(owning_process, owning_mm,
                                               user_virt, gup_num_pages,
                                               flags, local_page_list,
                                               NULL, NULL);
                up_read(&owning_mm->mmap_sem);

                if (npages < 0)
                        break;

                bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
                mutex_lock(&umem->odp_data->umem_mutex);
                for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
                        if (user_virt & ~page_mask) {
                                p += PAGE_SIZE;
                                if (page_to_phys(local_page_list[j]) != p) {
                                        ret = -EFAULT;
                                        break;
                                }
                                put_page(local_page_list[j]);
                                continue;
                        }

                        ret = ib_umem_odp_map_dma_single_page(
                                        umem, k, local_page_list[j],
                                        access_mask, current_seq);
                        if (ret < 0)
                                break;

                        p = page_to_phys(local_page_list[j]);
                        k++;
                }
                mutex_unlock(&umem->odp_data->umem_mutex);

                if (ret < 0) {
                        /* Release left over pages when handling errors. */
                        for (++j; j < npages; ++j)
                                put_page(local_page_list[j]);
                        break;
                }
        }

        if (ret >= 0) {
                if (npages < 0 && k == start_idx)
                        ret = npages;
                else
                        ret = k - start_idx;
        }

        mmput(owning_mm);
out_put_task:
        put_task_struct(owning_process);
out_no_task:
        free_page((unsigned long)local_page_list);
        return ret;
}
EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
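
/*
 * Illustrative sketch, not part of this file: a driver page-fault handler
 * typically wraps this function in a seq/retry loop (the device-update
 * helper is hypothetical):
 *
 *	int ret;
 *
 *again:
 *	seq = READ_ONCE(umem->odp_data->notifiers_seq);
 *	npages = ib_umem_odp_map_dma_pages(umem, va, bcnt, access, seq);
 *	if (npages < 0)
 *		return npages;
 *	mutex_lock(&umem->odp_data->umem_mutex);
 *	if (ib_umem_mmu_notifier_retry(umem, seq))
 *		ret = -EAGAIN;
 *	else
 *		ret = my_device_update_ptes(umem, va, npages);
 *	mutex_unlock(&umem->odp_data->umem_mutex);
 *	if (ret == -EAGAIN)
 *		goto again;
 */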

void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
                                 u64 bound)
{
        int idx;
        u64 addr;
        struct ib_device *dev = umem->context->device;

        virt = max_t(u64, virt, ib_umem_start(umem));
        bound = min_t(u64, bound, ib_umem_end(umem));
        /* Note that during the run of this function, the
         * notifiers_count of the MR is > 0, preventing any racing
         * faults from completing. We might be racing with other
         * invalidations, so we must make sure we free each page only
         * once. */
        mutex_lock(&umem->odp_data->umem_mutex);
        for (addr = virt; addr < bound; addr += BIT(umem->page_shift)) {
                idx = (addr - ib_umem_start(umem)) >> umem->page_shift;
                if (umem->odp_data->page_list[idx]) {
                        struct page *page = umem->odp_data->page_list[idx];
                        dma_addr_t dma = umem->odp_data->dma_list[idx];
                        dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;

                        WARN_ON(!dma_addr);

                        /* Unmap with the same size that was used when
                         * mapping in ib_umem_odp_map_dma_single_page(). */
                        ib_dma_unmap_page(dev, dma_addr,
                                          BIT(umem->page_shift),
                                          DMA_BIDIRECTIONAL);
                        if (dma & ODP_WRITE_ALLOWED_BIT) {
                                struct page *head_page = compound_head(page);
                                /*
                                 * set_page_dirty prefers being called with
                                 * the page lock. However, MMU notifiers are
                                 * called sometimes with and sometimes without
                                 * the lock. We rely on the umem_mutex instead
                                 * to prevent other mmu notifiers from
                                 * continuing and allowing the page mapping to
                                 * be removed.
                                 */
                                set_page_dirty(head_page);
                        }
                        /* on demand pinning support */
                        if (!umem->context->invalidate_range)
                                put_page(page);
                        umem->odp_data->page_list[idx] = NULL;
                        umem->odp_data->dma_list[idx] = 0;
                        umem->npages--;
                }
        }
        mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);