drivers/infiniband/core/umem_odp.c
1 /*
2 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/types.h>
34 #include <linux/sched.h>
35 #include <linux/sched/mm.h>
36 #include <linux/sched/task.h>
37 #include <linux/pid.h>
38 #include <linux/slab.h>
39 #include <linux/export.h>
40 #include <linux/vmalloc.h>
41 #include <linux/hugetlb.h>
42 #include <linux/interval_tree.h>
43 #include <linux/pagemap.h>
44
45 #include <rdma/ib_verbs.h>
46 #include <rdma/ib_umem.h>
47 #include <rdma/ib_umem_odp.h>
48
49 #include "uverbs.h"
50
51 static inline int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
52 const struct mmu_interval_notifier_ops *ops)
53 {
54 int ret;
55
56 umem_odp->umem.is_odp = 1;
57 mutex_init(&umem_odp->umem_mutex);
58
59 if (!umem_odp->is_implicit_odp) {
60 size_t page_size = 1UL << umem_odp->page_shift;
61 unsigned long start;
62 unsigned long end;
63 size_t pages;
64
65 start = ALIGN_DOWN(umem_odp->umem.address, page_size);
66 if (check_add_overflow(umem_odp->umem.address,
67 (unsigned long)umem_odp->umem.length,
68 &end))
69 return -EOVERFLOW;
70 end = ALIGN(end, page_size);
71 if (unlikely(end < page_size))
72 return -EOVERFLOW;
73
74 pages = (end - start) >> umem_odp->page_shift;
75 if (!pages)
76 return -EINVAL;
77
78 umem_odp->page_list = kvcalloc(
79 pages, sizeof(*umem_odp->page_list), GFP_KERNEL);
80 if (!umem_odp->page_list)
81 return -ENOMEM;
82
83 umem_odp->dma_list = kvcalloc(
84 pages, sizeof(*umem_odp->dma_list), GFP_KERNEL);
85 if (!umem_odp->dma_list) {
86 ret = -ENOMEM;
87 goto out_page_list;
88 }
89
90 ret = mmu_interval_notifier_insert(&umem_odp->notifier,
91 umem_odp->umem.owning_mm,
92 start, end - start, ops);
93 if (ret)
94 goto out_dma_list;
95 }
96
97 return 0;
98
99 out_dma_list:
100 kvfree(umem_odp->dma_list);
101 out_page_list:
102 kvfree(umem_odp->page_list);
103 return ret;
104 }
105
106 /**
107 * ib_umem_odp_alloc_implicit - Allocate a parent implicit ODP umem
108 *
109 * Implicit ODP umems do not have a VA range and do not have any page lists.
110 * They exist only to hold the per_mm reference to help the driver create
111 * children umems.
112 *
113 * @device: IB device to create UMEM
114 * @access: ib_reg_mr access flags
115 */
116 struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
117 int access)
118 {
119 struct ib_umem *umem;
120 struct ib_umem_odp *umem_odp;
121 int ret;
122
123 if (access & IB_ACCESS_HUGETLB)
124 return ERR_PTR(-EINVAL);
125
126 umem_odp = kzalloc(sizeof(*umem_odp), GFP_KERNEL);
127 if (!umem_odp)
128 return ERR_PTR(-ENOMEM);
129 umem = &umem_odp->umem;
130 umem->ibdev = device;
131 umem->writable = ib_access_writable(access);
132 umem->owning_mm = current->mm;
133 umem_odp->is_implicit_odp = 1;
134 umem_odp->page_shift = PAGE_SHIFT;
135
136 umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
137 ret = ib_init_umem_odp(umem_odp, NULL);
138 if (ret) {
139 put_pid(umem_odp->tgid);
140 kfree(umem_odp);
141 return ERR_PTR(ret);
142 }
143 return umem_odp;
144 }
145 EXPORT_SYMBOL(ib_umem_odp_alloc_implicit);
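
/*
 * Illustrative sketch, not part of the original file: how a driver might
 * allocate an implicit ODP parent umem when userspace registers its whole
 * address space. The name my_drv_alloc_implicit_mr() is hypothetical; real
 * drivers (e.g. mlx5) wrap the result in their own MR structure and supply
 * their own mmu_interval_notifier_ops when creating child umems later.
 */
static struct ib_umem_odp *my_drv_alloc_implicit_mr(struct ib_device *ibdev,
						    int access_flags)
{
	struct ib_umem_odp *odp;

	/* The parent has no VA range and no page lists; it only holds the
	 * mm/tgid needed to create children on demand.
	 */
	odp = ib_umem_odp_alloc_implicit(ibdev, access_flags);
	if (IS_ERR(odp))
		return odp;

	/* Children covering faulted ranges are created later with
	 * ib_umem_odp_alloc_child().
	 */
	return odp;
}
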
146
147 /**
148 * ib_umem_odp_alloc_child - Allocate a child ODP umem under an implicit
149 * parent ODP umem
150 *
151 * @root: The parent umem enclosing the child. This must be allocated using
152  *        ib_umem_odp_alloc_implicit()
153 * @addr: The starting userspace VA
154 * @size: The length of the userspace VA
155 */
156 struct ib_umem_odp *
157 ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
158 size_t size,
159 const struct mmu_interval_notifier_ops *ops)
160 {
161 /*
162 * Caller must ensure that root cannot be freed during the call to
163  * ib_umem_odp_alloc_child().
164 */
165 struct ib_umem_odp *odp_data;
166 struct ib_umem *umem;
167 int ret;
168
169 if (WARN_ON(!root->is_implicit_odp))
170 return ERR_PTR(-EINVAL);
171
172 odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
173 if (!odp_data)
174 return ERR_PTR(-ENOMEM);
175 umem = &odp_data->umem;
176 umem->ibdev = root->umem.ibdev;
177 umem->length = size;
178 umem->address = addr;
179 umem->writable = root->umem.writable;
180 umem->owning_mm = root->umem.owning_mm;
181 odp_data->page_shift = PAGE_SHIFT;
182 odp_data->notifier.ops = ops;
183
184 /*
185  * An mmget must be held when registering a notifier; the owning_mm only
186  * has an mmgrab reference at this point.
187 */
188 if (!mmget_not_zero(umem->owning_mm)) {
189 ret = -EFAULT;
190 goto out_free;
191 }
192
193 odp_data->tgid = get_pid(root->tgid);
194 ret = ib_init_umem_odp(odp_data, ops);
195 if (ret)
196 goto out_tgid;
197 mmput(umem->owning_mm);
198 return odp_data;
199
200 out_tgid:
201 put_pid(odp_data->tgid);
202 mmput(umem->owning_mm);
203 out_free:
204 kfree(odp_data);
205 return ERR_PTR(ret);
206 }
207 EXPORT_SYMBOL(ib_umem_odp_alloc_child);
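
/*
 * Illustrative sketch, not part of the original file: creating a child umem
 * for a faulting VA range under an implicit parent. my_drv_notifier_ops is a
 * hypothetical mmu_interval_notifier_ops supplied by the driver; the caller
 * must guarantee the parent cannot be released for the duration of the call.
 */
static struct ib_umem_odp *
my_drv_get_child(struct ib_umem_odp *parent, unsigned long fault_addr,
		 size_t fault_len,
		 const struct mmu_interval_notifier_ops *my_drv_notifier_ops)
{
	/* Cover the faulting range at page granularity. */
	unsigned long start = ALIGN_DOWN(fault_addr, PAGE_SIZE);
	size_t len = ALIGN(fault_addr + fault_len, PAGE_SIZE) - start;

	return ib_umem_odp_alloc_child(parent, start, len, my_drv_notifier_ops);
}
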
208
209 /**
210 * ib_umem_odp_get - Create a umem_odp for a userspace va
211 *
212 * @device: IB device struct to get UMEM
213 * @addr: userspace virtual address to start at
214 * @size: length of region to pin
215 * @access: IB_ACCESS_xxx flags for memory being pinned
216 *
217  * The driver should use this function when the access flags indicate ODP
218  * memory. It avoids pinning; instead, it stores the mm for future page fault
219  * handling in conjunction with MMU notifiers.
220 */
221 struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device,
222 unsigned long addr, size_t size, int access,
223 const struct mmu_interval_notifier_ops *ops)
224 {
225 struct ib_umem_odp *umem_odp;
226 struct mm_struct *mm;
227 int ret;
228
229 if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND)))
230 return ERR_PTR(-EINVAL);
231
232 umem_odp = kzalloc(sizeof(struct ib_umem_odp), GFP_KERNEL);
233 if (!umem_odp)
234 return ERR_PTR(-ENOMEM);
235
236 umem_odp->umem.ibdev = device;
237 umem_odp->umem.length = size;
238 umem_odp->umem.address = addr;
239 umem_odp->umem.writable = ib_access_writable(access);
240 umem_odp->umem.owning_mm = mm = current->mm;
241 umem_odp->notifier.ops = ops;
242
243 umem_odp->page_shift = PAGE_SHIFT;
244 #ifdef CONFIG_HUGETLB_PAGE
245 if (access & IB_ACCESS_HUGETLB)
246 umem_odp->page_shift = HPAGE_SHIFT;
247 #endif
248
249 umem_odp->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
250 ret = ib_init_umem_odp(umem_odp, ops);
251 if (ret)
252 goto err_put_pid;
253 return umem_odp;
254
255 err_put_pid:
256 put_pid(umem_odp->tgid);
257 kfree(umem_odp);
258 return ERR_PTR(ret);
259 }
260 EXPORT_SYMBOL(ib_umem_odp_get);
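
/*
 * Illustrative sketch, not part of the original file: the reg_mr path of a
 * driver choosing ODP when userspace passed IB_ACCESS_ON_DEMAND.
 * my_drv_invalidate_ops is a hypothetical mmu_interval_notifier_ops whose
 * invalidate callback takes umem_mutex and calls ib_umem_odp_unmap_dma_pages()
 * (see the sketch after ib_umem_odp_unmap_dma_pages() below).
 */
static struct ib_umem_odp *
my_drv_reg_user_mr(struct ib_device *ibdev, unsigned long start, size_t length,
		   int access_flags,
		   const struct mmu_interval_notifier_ops *my_drv_invalidate_ops)
{
	if (!(access_flags & IB_ACCESS_ON_DEMAND))
		return ERR_PTR(-EOPNOTSUPP);	/* non-ODP path not shown */

	/* No pages are pinned here; faults are served later through
	 * ib_umem_odp_map_dma_pages().
	 */
	return ib_umem_odp_get(ibdev, start, length, access_flags,
			       my_drv_invalidate_ops);
}
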
261
262 void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
263 {
264 /*
265 * Ensure that no more pages are mapped in the umem.
266 *
267 * It is the driver's responsibility to ensure, before calling us,
268 * that the hardware will not attempt to access the MR any more.
269 */
270 if (!umem_odp->is_implicit_odp) {
271 mutex_lock(&umem_odp->umem_mutex);
272 ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),
273 ib_umem_end(umem_odp));
274 mutex_unlock(&umem_odp->umem_mutex);
275 mmu_interval_notifier_remove(&umem_odp->notifier);
276 kvfree(umem_odp->dma_list);
277 kvfree(umem_odp->page_list);
278 }
279 put_pid(umem_odp->tgid);
280 kfree(umem_odp);
281 }
282 EXPORT_SYMBOL(ib_umem_odp_release);
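
/*
 * Illustrative sketch, not part of the original file: teardown ordering on a
 * driver's dereg_mr path. struct my_drv_mr and my_drv_destroy_mkey() are
 * hypothetical; the latter stands in for whatever device-specific step
 * guarantees the HW will no longer access the MR, which must complete before
 * the umem is released.
 */
struct my_drv_mr {
	struct ib_umem_odp *umem_odp;
};

static void my_drv_destroy_mkey(struct my_drv_mr *mr);	/* HW fence, driver specific */

static void my_drv_dereg_mr(struct my_drv_mr *mr)
{
	/* 1. Fence the hardware so no further DMA targets this MR. */
	my_drv_destroy_mkey(mr);

	/* 2. Now it is safe to unmap remaining pages and free the umem. */
	ib_umem_odp_release(mr->umem_odp);
}
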
283
284 /*
285 * Map for DMA and insert a single page into the on-demand paging page tables.
286 *
287  * @umem_odp: the umem to insert the page into.
288 * @page_index: index in the umem to add the page to.
289 * @page: the page struct to map and add.
290 * @access_mask: access permissions needed for this page.
291  * @current_seq: sequence number for synchronization with invalidations;
292  *               it is obtained by the caller with mmu_interval_read_begin()
293  *               on umem_odp->notifier.
294 *
295 * The function returns -EFAULT if the DMA mapping operation fails. It returns
296 * -EAGAIN if a concurrent invalidation prevents us from updating the page.
297 *
298  * The page is released via put_page() in all cases: the umem keeps no page
299  * reference and relies on the MMU interval notifier for invalidation.
300 */
301 static int ib_umem_odp_map_dma_single_page(
302 struct ib_umem_odp *umem_odp,
303 unsigned int page_index,
304 struct page *page,
305 u64 access_mask,
306 unsigned long current_seq)
307 {
308 struct ib_device *dev = umem_odp->umem.ibdev;
309 dma_addr_t dma_addr;
310 int ret = 0;
311
312 if (mmu_interval_check_retry(&umem_odp->notifier, current_seq)) {
313 ret = -EAGAIN;
314 goto out;
315 }
316 if (!(umem_odp->dma_list[page_index])) {
317 dma_addr =
318 ib_dma_map_page(dev, page, 0, BIT(umem_odp->page_shift),
319 DMA_BIDIRECTIONAL);
320 if (ib_dma_mapping_error(dev, dma_addr)) {
321 ret = -EFAULT;
322 goto out;
323 }
324 umem_odp->dma_list[page_index] = dma_addr | access_mask;
325 umem_odp->page_list[page_index] = page;
326 umem_odp->npages++;
327 } else if (umem_odp->page_list[page_index] == page) {
328 umem_odp->dma_list[page_index] |= access_mask;
329 } else {
330 /*
331  * There is a race here where we could have done:
332 *
333 * CPU0 CPU1
334 * get_user_pages()
335 * invalidate()
336 * page_fault()
337 * mutex_lock(umem_mutex)
338 * page from GUP != page in ODP
339 *
340 * It should be prevented by the retry test above as reading
341 * the seq number should be reliable under the
342 * umem_mutex. Thus something is really not working right if
343 * things get here.
344 */
345 WARN(true,
346 "Got different pages in IB device and from get_user_pages. IB device page: %p, gup page: %p\n",
347 umem_odp->page_list[page_index], page);
348 ret = -EAGAIN;
349 }
350
351 out:
352 put_page(page);
353 return ret;
354 }
355
356 /**
357 * ib_umem_odp_map_dma_pages - Pin and DMA map userspace memory in an ODP MR.
358 *
359 * Pins the range of pages passed in the argument, and maps them to
360  * DMA addresses. The DMA addresses of the mapped pages are updated in
361 * umem_odp->dma_list.
362 *
363  * Returns the number of pages mapped on success, or a negative error code
364  * on failure.
365 * An -EAGAIN error code is returned when a concurrent mmu notifier prevents
366 * the function from completing its task.
367  * An -ENOENT error code indicates that the userspace process is being
368  * terminated and the mm has already been destroyed.
369 * @umem_odp: the umem to map and pin
370 * @user_virt: the address from which we need to map.
371 * @bcnt: the minimal number of bytes to pin and map. The mapping might be
372 * bigger due to alignment, and may also be smaller in case of an error
373  *        pinning or mapping a page. The actual number of pages mapped is
374  *        returned in the return value.
375 * @access_mask: bit mask of the requested access permissions for the given
376 * range.
377  * @current_seq: the MMU notifier sequence value for synchronization with
378  *        invalidations. The sequence number is obtained with
379  *        mmu_interval_read_begin() before calling this function.
380 */
381 int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
382 u64 bcnt, u64 access_mask,
383 unsigned long current_seq)
384 {
385 struct task_struct *owning_process = NULL;
386 struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
387 struct page **local_page_list = NULL;
388 u64 page_mask, off;
389 int j, k, ret = 0, start_idx, npages = 0;
390 unsigned int flags = 0, page_shift;
391 phys_addr_t p = 0;
392
393 if (access_mask == 0)
394 return -EINVAL;
395
396 if (user_virt < ib_umem_start(umem_odp) ||
397 user_virt + bcnt > ib_umem_end(umem_odp))
398 return -EFAULT;
399
400 local_page_list = (struct page **)__get_free_page(GFP_KERNEL);
401 if (!local_page_list)
402 return -ENOMEM;
403
404 page_shift = umem_odp->page_shift;
405 page_mask = ~(BIT(page_shift) - 1);
406 off = user_virt & (~page_mask);
407 user_virt = user_virt & page_mask;
408 bcnt += off; /* Charge for the first page offset as well. */
409
410 /*
411  * owning_process is allowed to be NULL; this means the mm is somehow
412  * living beyond the lifetime of the originating process. Presumably
413  * mmget_not_zero will fail in this case.
414 */
415 owning_process = get_pid_task(umem_odp->tgid, PIDTYPE_PID);
416 if (!owning_process || !mmget_not_zero(owning_mm)) {
417 ret = -EINVAL;
418 goto out_put_task;
419 }
420
421 if (access_mask & ODP_WRITE_ALLOWED_BIT)
422 flags |= FOLL_WRITE;
423
424 start_idx = (user_virt - ib_umem_start(umem_odp)) >> page_shift;
425 k = start_idx;
426
427 while (bcnt > 0) {
428 const size_t gup_num_pages = min_t(size_t,
429 ALIGN(bcnt, PAGE_SIZE) / PAGE_SIZE,
430 PAGE_SIZE / sizeof(struct page *));
431
432 mmap_read_lock(owning_mm);
433 /*
434  * Note: this might result in redundant page getting. We can
435  * avoid this by checking dma_list for 0 before calling
436  * get_user_pages. However, this would make the code much more
437  * complex (and doesn't gain us much performance in most use
438  * cases).
439 */
440 npages = get_user_pages_remote(owning_process, owning_mm,
441 user_virt, gup_num_pages,
442 flags, local_page_list, NULL, NULL);
443 mmap_read_unlock(owning_mm);
444
445 if (npages < 0) {
446 if (npages != -EAGAIN)
447 pr_warn("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
448 else
449 pr_debug("fail to get %zu user pages with error %d\n", gup_num_pages, npages);
450 break;
451 }
452
453 bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
454 mutex_lock(&umem_odp->umem_mutex);
455 for (j = 0; j < npages; j++, user_virt += PAGE_SIZE) {
456 if (user_virt & ~page_mask) {
457 p += PAGE_SIZE;
458 if (page_to_phys(local_page_list[j]) != p) {
459 ret = -EFAULT;
460 break;
461 }
462 put_page(local_page_list[j]);
463 continue;
464 }
465
466 ret = ib_umem_odp_map_dma_single_page(
467 umem_odp, k, local_page_list[j],
468 access_mask, current_seq);
469 if (ret < 0) {
470 if (ret != -EAGAIN)
471 pr_warn("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
472 else
473 pr_debug("ib_umem_odp_map_dma_single_page failed with error %d\n", ret);
474 break;
475 }
476
477 p = page_to_phys(local_page_list[j]);
478 k++;
479 }
480 mutex_unlock(&umem_odp->umem_mutex);
481
482 if (ret < 0) {
483 /*
484 * Release pages, remembering that the first page
485 * to hit an error was already released by
486 * ib_umem_odp_map_dma_single_page().
487 */
488 if (npages - (j + 1) > 0)
489 release_pages(&local_page_list[j+1],
490 npages - (j + 1));
491 break;
492 }
493 }
494
495 if (ret >= 0) {
496 if (npages < 0 && k == start_idx)
497 ret = npages;
498 else
499 ret = k - start_idx;
500 }
501
502 mmput(owning_mm);
503 out_put_task:
504 if (owning_process)
505 put_task_struct(owning_process);
506 free_page((unsigned long)local_page_list);
507 return ret;
508 }
509 EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
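
/*
 * Illustrative sketch, not part of the original file: how a driver page-fault
 * handler might drive ib_umem_odp_map_dma_pages() with the interval notifier
 * sequence protocol. my_drv_update_hw_translation() is hypothetical and stands
 * in for the device-specific step that pushes umem_odp->dma_list entries to
 * the HW page tables.
 */
static int my_drv_update_hw_translation(struct ib_umem_odp *umem_odp);	/* hypothetical */

static int my_drv_fault_range(struct ib_umem_odp *umem_odp, u64 io_virt,
			      u64 bcnt, bool write)
{
	u64 access_mask = ODP_READ_ALLOWED_BIT |
			  (write ? ODP_WRITE_ALLOWED_BIT : 0);
	unsigned long current_seq;
	int npages, ret;

	/* Sample the notifier sequence before faulting in the pages. */
	current_seq = mmu_interval_read_begin(&umem_odp->notifier);

	npages = ib_umem_odp_map_dma_pages(umem_odp, io_virt, bcnt, access_mask,
					   current_seq);
	if (npages < 0)
		return npages;	/* -EAGAIN means the caller should retry */

	mutex_lock(&umem_odp->umem_mutex);
	/*
	 * If an invalidation ran after mmu_interval_read_begin(), the mapping
	 * may already be stale; return -EAGAIN so the fault is retried.
	 */
	if (mmu_interval_read_retry(&umem_odp->notifier, current_seq)) {
		mutex_unlock(&umem_odp->umem_mutex);
		return -EAGAIN;
	}
	ret = my_drv_update_hw_translation(umem_odp);
	mutex_unlock(&umem_odp->umem_mutex);

	return ret ? ret : npages;
}
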
510
511 void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
512 u64 bound)
513 {
514 int idx;
515 u64 addr;
516 struct ib_device *dev = umem_odp->umem.ibdev;
517
518 lockdep_assert_held(&umem_odp->umem_mutex);
519
520 virt = max_t(u64, virt, ib_umem_start(umem_odp));
521 bound = min_t(u64, bound, ib_umem_end(umem_odp));
522  /* Note that while this function runs, racing page faults are
523   * prevented from completing: the caller holds umem_mutex and the
524   * interval notifier sequence forces them to retry. We might be
525   * racing with other invalidations, so we must make sure we free
526   * each page only once. */
527 for (addr = virt; addr < bound; addr += BIT(umem_odp->page_shift)) {
528 idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
529 if (umem_odp->page_list[idx]) {
530 struct page *page = umem_odp->page_list[idx];
531 dma_addr_t dma = umem_odp->dma_list[idx];
532 dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
533
534 WARN_ON(!dma_addr);
535
536 ib_dma_unmap_page(dev, dma_addr,
537 BIT(umem_odp->page_shift),
538 DMA_BIDIRECTIONAL);
539 if (dma & ODP_WRITE_ALLOWED_BIT) {
540 struct page *head_page = compound_head(page);
541 /*
542 * set_page_dirty prefers being called with
543 * the page lock. However, MMU notifiers are
544 * called sometimes with and sometimes without
545 * the lock. We rely on the umem_mutex instead
546 * to prevent other mmu notifiers from
547 * continuing and allowing the page mapping to
548 * be removed.
549 */
550 set_page_dirty(head_page);
551 }
552 umem_odp->page_list[idx] = NULL;
553 umem_odp->dma_list[idx] = 0;
554 umem_odp->npages--;
555 }
556 }
557 }
558 EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
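
/*
 * Illustrative sketch, not part of the original file: an
 * mmu_interval_notifier_ops invalidate callback of the kind the ops arguments
 * above expect. my_drv_invalidate_hw_range() is hypothetical; the rest follows
 * the locking rules of this file: umem_mutex is held across the sequence
 * update and the call to ib_umem_odp_unmap_dma_pages().
 */
static void my_drv_invalidate_hw_range(struct ib_umem_odp *umem_odp,
				       unsigned long start,
				       unsigned long end);	/* hypothetical HW fence */

static bool my_drv_invalidate(struct mmu_interval_notifier *mni,
			      const struct mmu_notifier_range *range,
			      unsigned long cur_seq)
{
	struct ib_umem_odp *umem_odp =
		container_of(mni, struct ib_umem_odp, notifier);
	unsigned long start, end;

	if (!mmu_notifier_range_blockable(range))
		return false;

	mutex_lock(&umem_odp->umem_mutex);
	mmu_interval_set_seq(mni, cur_seq);

	start = max_t(u64, ib_umem_start(umem_odp), range->start);
	end = min_t(u64, ib_umem_end(umem_odp), range->end);

	/* Stop HW access to the range before dropping the DMA mappings. */
	my_drv_invalidate_hw_range(umem_odp, start, end);
	ib_umem_odp_unmap_dma_pages(umem_odp, start, end);

	mutex_unlock(&umem_odp->umem_mutex);
	return true;
}

static const struct mmu_interval_notifier_ops my_drv_mn_ops = {
	.invalidate = my_drv_invalidate,
};
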