// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/mm.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

static const struct mmu_notifier_ops hmm_mmu_notifier_ops;

/**
 * hmm_get_or_create - register HMM against an mm (HMM internal)
 *
 * @mm: mm struct to attach to
 * Returns: an HMM object, either by referencing the existing
 * (per-process) object, or by creating a new one.
 *
 * This is not intended to be used directly by device drivers. If mm already
 * has an HMM struct then it gets a reference on it and returns it. Otherwise
 * it allocates an HMM struct, initializes it, associates it with the mm and
 * returns it.
 */
static struct hmm *hmm_get_or_create(struct mm_struct *mm)
{
	struct hmm *hmm;

	lockdep_assert_held_write(&mm->mmap_sem);

	/* Abuse the page_table_lock to also protect mm->hmm. */
	spin_lock(&mm->page_table_lock);
	hmm = mm->hmm;
	if (mm->hmm && kref_get_unless_zero(&mm->hmm->kref))
		goto out_unlock;
	spin_unlock(&mm->page_table_lock);

	hmm = kmalloc(sizeof(*hmm), GFP_KERNEL);
	if (!hmm)
		return NULL;
	init_waitqueue_head(&hmm->wq);
	INIT_LIST_HEAD(&hmm->mirrors);
	init_rwsem(&hmm->mirrors_sem);
	hmm->mmu_notifier.ops = NULL;
	INIT_LIST_HEAD(&hmm->ranges);
	spin_lock_init(&hmm->ranges_lock);
	kref_init(&hmm->kref);
	hmm->notifiers = 0;
	hmm->mm = mm;

	hmm->mmu_notifier.ops = &hmm_mmu_notifier_ops;
	if (__mmu_notifier_register(&hmm->mmu_notifier, mm)) {
		kfree(hmm);
		return NULL;
	}

	mmgrab(hmm->mm);

	/*
	 * We hold the exclusive mmap_sem here so we know that mm->hmm is
	 * still NULL or has a zero kref, and is safe to update.
	 */
	spin_lock(&mm->page_table_lock);
	mm->hmm = hmm;

out_unlock:
	spin_unlock(&mm->page_table_lock);
	return hmm;
}

static void hmm_free_rcu(struct rcu_head *rcu)
{
	struct hmm *hmm = container_of(rcu, struct hmm, rcu);

	mmdrop(hmm->mm);
	kfree(hmm);
}

static void hmm_free(struct kref *kref)
{
	struct hmm *hmm = container_of(kref, struct hmm, kref);

	spin_lock(&hmm->mm->page_table_lock);
	if (hmm->mm->hmm == hmm)
		hmm->mm->hmm = NULL;
	spin_unlock(&hmm->mm->page_table_lock);

	mmu_notifier_unregister_no_release(&hmm->mmu_notifier, hmm->mm);
	mmu_notifier_call_srcu(&hmm->rcu, hmm_free_rcu);
}

static inline void hmm_put(struct hmm *hmm)
{
	kref_put(&hmm->kref, hmm_free);
}

static void hmm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;

	/* Bail out if hmm is in the process of being freed */
	if (!kref_get_unless_zero(&hmm->kref))
		return;

	/*
	 * Since hmm_range_register() holds the mmget() lock, hmm_release() is
	 * prevented as long as a range exists.
	 */
	WARN_ON(!list_empty_careful(&hmm->ranges));

	down_read(&hmm->mirrors_sem);
	list_for_each_entry(mirror, &hmm->mirrors, list) {
		/*
		 * Note: The driver is not allowed to trigger
		 * hmm_mirror_unregister() from this thread.
		 */
		if (mirror->ops->release)
			mirror->ops->release(mirror);
	}
	up_read(&hmm->mirrors_sem);

	hmm_put(hmm);
}

static void notifiers_decrement(struct hmm *hmm)
{
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers--;
	if (!hmm->notifiers) {
		struct hmm_range *range;

		list_for_each_entry(range, &hmm->ranges, list) {
			if (range->valid)
				continue;
			range->valid = true;
		}
		wake_up_all(&hmm->wq);
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);
}

static int hmm_invalidate_range_start(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);
	struct hmm_mirror *mirror;
	struct hmm_update update;
	struct hmm_range *range;
	unsigned long flags;
	int ret = 0;

	if (!kref_get_unless_zero(&hmm->kref))
		return 0;

	update.start = nrange->start;
	update.end = nrange->end;
	update.event = HMM_UPDATE_INVALIDATE;
	update.blockable = mmu_notifier_range_blockable(nrange);

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	hmm->notifiers++;
	list_for_each_entry(range, &hmm->ranges, list) {
		if (update.end < range->start || update.start >= range->end)
			continue;

		range->valid = false;
	}
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	if (mmu_notifier_range_blockable(nrange))
		down_read(&hmm->mirrors_sem);
	else if (!down_read_trylock(&hmm->mirrors_sem)) {
		ret = -EAGAIN;
		goto out;
	}

	list_for_each_entry(mirror, &hmm->mirrors, list) {
		int rc;

		rc = mirror->ops->sync_cpu_device_pagetables(mirror, &update);
		if (rc) {
			if (WARN_ON(update.blockable || rc != -EAGAIN))
				continue;
			ret = -EAGAIN;
			break;
		}
	}
	up_read(&hmm->mirrors_sem);

out:
	if (ret)
		notifiers_decrement(hmm);
	hmm_put(hmm);
	return ret;
}

static void hmm_invalidate_range_end(struct mmu_notifier *mn,
			const struct mmu_notifier_range *nrange)
{
	struct hmm *hmm = container_of(mn, struct hmm, mmu_notifier);

	if (!kref_get_unless_zero(&hmm->kref))
		return;

	notifiers_decrement(hmm);
	hmm_put(hmm);
}

static const struct mmu_notifier_ops hmm_mmu_notifier_ops = {
	.release		= hmm_release,
	.invalidate_range_start	= hmm_invalidate_range_start,
	.invalidate_range_end	= hmm_invalidate_range_end,
};

/*
 * hmm_mirror_register() - register a mirror against an mm
 *
 * @mirror: new mirror struct to register
 * @mm: mm to register against
 * Return: 0 on success, -ENOMEM if no memory, -EINVAL if invalid arguments
 *
 * To start mirroring a process address space, the device driver must register
 * an HMM mirror struct.
 */
int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm)
{
	lockdep_assert_held_write(&mm->mmap_sem);

	/* Sanity check */
	if (!mm || !mirror || !mirror->ops)
		return -EINVAL;

	mirror->hmm = hmm_get_or_create(mm);
	if (!mirror->hmm)
		return -ENOMEM;

	down_write(&mirror->hmm->mirrors_sem);
	list_add(&mirror->list, &mirror->hmm->mirrors);
	up_write(&mirror->hmm->mirrors_sem);

	return 0;
}
EXPORT_SYMBOL(hmm_mirror_register);
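
/*
 * Illustrative sketch (not part of this file's API): a driver typically
 * embeds the mirror in its own per-process state and registers it while
 * holding mmap_sem for write. The driver_mirror type and driver_sync()
 * callback below are hypothetical names.
 *
 *	struct driver_mirror {
 *		struct hmm_mirror mirror;
 *		...
 *	};
 *
 *	static const struct hmm_mirror_ops driver_mirror_ops = {
 *		.sync_cpu_device_pagetables = driver_sync,
 *	};
 *
 *	down_write(&mm->mmap_sem);
 *	dm->mirror.ops = &driver_mirror_ops;
 *	ret = hmm_mirror_register(&dm->mirror, mm);
 *	up_write(&mm->mmap_sem);
 */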

/*
 * hmm_mirror_unregister() - unregister a mirror
 *
 * @mirror: mirror struct to unregister
 *
 * Stop mirroring a process address space, and cleanup.
 */
void hmm_mirror_unregister(struct hmm_mirror *mirror)
{
	struct hmm *hmm = mirror->hmm;

	down_write(&hmm->mirrors_sem);
	list_del(&mirror->list);
	up_write(&hmm->mirrors_sem);
	hmm_put(hmm);
}
EXPORT_SYMBOL(hmm_mirror_unregister);

struct hmm_vma_walk {
	struct hmm_range	*range;
	struct dev_pagemap	*pgmap;
	unsigned long		last;
	bool			fault;
	bool			block;
};

static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
			    bool write_fault, uint64_t *pfn)
{
	unsigned int flags = FAULT_FLAG_REMOTE;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	vm_fault_t ret;

	flags |= hmm_vma_walk->block ? 0 : FAULT_FLAG_ALLOW_RETRY;
	flags |= write_fault ? FAULT_FLAG_WRITE : 0;
	ret = handle_mm_fault(vma, addr, flags);
	if (ret & VM_FAULT_RETRY) {
		/* Note, handle_mm_fault did up_read(&mm->mmap_sem) */
		return -EAGAIN;
	}
	if (ret & VM_FAULT_ERROR) {
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	}

	return -EBUSY;
}
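
/*
 * A note on the VM_FAULT_RETRY handling above (core mm behavior, not
 * specific to HMM): when FAULT_FLAG_ALLOW_RETRY is set, handle_mm_fault()
 * may release mmap_sem (for instance to wait on page I/O) and return
 * VM_FAULT_RETRY, which is why -EAGAIN is returned with the lock already
 * dropped. When the flag is not set (hmm_vma_walk->block == true),
 * handle_mm_fault() returns with mmap_sem still held.
 */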

static int hmm_pfns_bad(unsigned long addr,
			unsigned long end,
			struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i;

	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, i++)
		pfns[i] = range->values[HMM_PFN_ERROR];

	return 0;
}

/*
 * hmm_vma_walk_hole() - handle a range lacking valid pmd or pte(s)
 * @start: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @fault: should we fault or not?
 * @write_fault: write fault?
 * @walk: mm_walk structure
 * Return: 0 on success, -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
			      bool fault, bool write_fault,
			      struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	uint64_t *pfns = range->pfns;
	unsigned long i, page_size;

	hmm_vma_walk->last = addr;
	page_size = hmm_range_page_size(range);
	i = (addr - range->start) >> range->page_shift;

	for (; addr < end; addr += page_size, i++) {
		pfns[i] = range->values[HMM_PFN_NONE];
		if (fault || write_fault) {
			int ret;

			ret = hmm_vma_do_fault(walk, addr, write_fault,
					       &pfns[i]);
			if (ret != -EBUSY)
				return ret;
		}
	}

	return (fault || write_fault) ? -EBUSY : 0;
}

static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				      uint64_t pfns, uint64_t cpu_flags,
				      bool *fault, bool *write_fault)
{
	struct hmm_range *range = hmm_vma_walk->range;

	if (!hmm_vma_walk->fault)
		return;

	/*
	 * So we not only consider the individual per page request we also
	 * consider the default flags requested for the range. The API can
	 * be used in 2 fashions. The first one where the HMM user coalesces
	 * multiple page faults into one request and sets flags per pfn for
	 * each of those faults. The second one where the HMM user wants to
	 * pre-fault a range with specific flags. For the latter one it is a
	 * waste to have the user pre-fill the pfn array with a default flags
	 * value.
	 */
	pfns = (pfns & range->pfn_flags_mask) | range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfns & range->flags[HMM_PFN_VALID]))
		return;
	/* If this is device memory then only fault if explicitly requested */
	if ((cpu_flags & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
		/* Do we fault on device memory? */
		if (pfns & range->flags[HMM_PFN_DEVICE_PRIVATE]) {
			*write_fault = pfns & range->flags[HMM_PFN_WRITE];
			*fault = true;
		}
		return;
	}

	/* If CPU page table is not valid then we need to fault */
	*fault = !(cpu_flags & range->flags[HMM_PFN_VALID]);
	/* Need to write fault? */
	if ((pfns & range->flags[HMM_PFN_WRITE]) &&
	    !(cpu_flags & range->flags[HMM_PFN_WRITE])) {
		*write_fault = true;
		*fault = true;
	}
}
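
/*
 * Illustrative sketch of the two fashions described above. To pre-fault an
 * entire range as writable without pre-filling the pfn array, a caller would
 * set:
 *
 *	range.default_flags = range.flags[HMM_PFN_VALID] |
 *			      range.flags[HMM_PFN_WRITE];
 *	range.pfn_flags_mask = 0;
 *
 * For per-page behavior instead: set default_flags to 0, set pfn_flags_mask
 * to ~0ULL, and pre-fill range.pfns[] with the desired flags for each
 * individual fault.
 */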

static void hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				 const uint64_t *pfns, unsigned long npages,
				 uint64_t cpu_flags, bool *fault,
				 bool *write_fault)
{
	unsigned long i;

	if (!hmm_vma_walk->fault) {
		*fault = *write_fault = false;
		return;
	}

	*fault = *write_fault = false;
	for (i = 0; i < npages; ++i) {
		hmm_pte_need_fault(hmm_vma_walk, pfns[i], cpu_flags,
				   fault, write_fault);
		if ((*write_fault))
			return;
	}
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	bool fault, write_fault;
	unsigned long i, npages;
	uint64_t *pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	pfns = &range->pfns[i];
	hmm_range_need_fault(hmm_vma_walk, pfns, npages,
			     0, &fault, &write_fault);
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return pmd_write(pmd) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static inline uint64_t pud_to_hmm_pfn_flags(struct hmm_range *range, pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return pud_write(pud) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}
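
/*
 * The range->flags[] and range->values[] tables consulted by the helpers
 * above are supplied by the driver when it sets up the range. A sketch with
 * hypothetical bit assignments (modeled on drivers of this era):
 *
 *	static const uint64_t driver_hmm_flags[HMM_PFN_FLAG_MAX] = {
 *		[HMM_PFN_VALID]		 = (1ULL << 63),
 *		[HMM_PFN_WRITE]		 = (1ULL << 62),
 *		[HMM_PFN_DEVICE_PRIVATE] = (1ULL << 61),
 *	};
 *	static const uint64_t driver_hmm_values[HMM_PFN_VALUE_MAX] = {
 *		[HMM_PFN_ERROR]   = 0xfffffffffffffffeULL,
 *		[HMM_PFN_NONE]    = 0,
 *		[HMM_PFN_SPECIAL] = 0xfffffffffffffffcULL,
 *	};
 *
 *	range.flags = driver_hmm_flags;
 *	range.values = driver_hmm_values;
 */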

static int hmm_vma_handle_pmd(struct mm_walk *walk,
			      unsigned long addr,
			      unsigned long end,
			      uint64_t *pfns,
			      pmd_t pmd)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	bool fault, write_fault;
	uint64_t cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	hmm_range_need_fault(hmm_vma_walk, pfns, npages, cpu_flags,
			     &fault, &write_fault);

	if (pmd_protnone(pmd) || fault || write_fault)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	pfn = pmd_pfn(pmd) + pte_index(addr);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
		if (pmd_devmap(pmd)) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
		}
		pfns[i] = hmm_device_entry_from_pfn(range, pfn) | cpu_flags;
	}
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	hmm_vma_walk->last = end;
	return 0;
#else
	/* If THP is not enabled then we should never reach this code! */
	return -EINVAL;
#endif
}

static inline uint64_t pte_to_hmm_pfn_flags(struct hmm_range *range, pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_WRITE] :
				range->flags[HMM_PFN_VALID];
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      uint64_t *pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	bool fault, write_fault;
	uint64_t cpu_flags;
	pte_t pte = *ptep;
	uint64_t orig_pfn = *pfn;

	*pfn = range->values[HMM_PFN_NONE];
	fault = write_fault = false;

	if (pte_none(pte)) {
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0,
				   &fault, &write_fault);
		if (fault || write_fault)
			goto fault;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		if (!non_swap_entry(entry)) {
			if (fault || write_fault)
				goto fault;
			return 0;
		}

		/*
		 * This is a special swap entry, ignore migration, use
		 * device and report anything else as error.
		 */
		if (is_device_private_entry(entry)) {
			cpu_flags = range->flags[HMM_PFN_VALID] |
				range->flags[HMM_PFN_DEVICE_PRIVATE];
			cpu_flags |= is_write_device_private_entry(entry) ?
				range->flags[HMM_PFN_WRITE] : 0;
			hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
					   &fault, &write_fault);
			if (fault || write_fault)
				goto fault;
			*pfn = hmm_device_entry_from_pfn(range,
					swp_offset(entry));
			*pfn |= cpu_flags;
			return 0;
		}

		if (is_migration_entry(entry)) {
			if (fault || write_fault) {
				pte_unmap(ptep);
				hmm_vma_walk->last = addr;
				migration_entry_wait(vma->vm_mm,
						     pmdp, addr);
				return -EBUSY;
			}
			return 0;
		}

		/* Report error for everything else */
		*pfn = range->values[HMM_PFN_ERROR];
		return -EFAULT;
	} else {
		cpu_flags = pte_to_hmm_pfn_flags(range, pte);
		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
				   &fault, &write_fault);
	}

	if (fault || write_fault)
		goto fault;

	if (pte_devmap(pte)) {
		hmm_vma_walk->pgmap = get_dev_pagemap(pte_pfn(pte),
					      hmm_vma_walk->pgmap);
		if (unlikely(!hmm_vma_walk->pgmap))
			return -EBUSY;
	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
		*pfn = range->values[HMM_PFN_SPECIAL];
		return -EFAULT;
	}

	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;
	return 0;

fault:
	if (hmm_vma_walk->pgmap) {
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	uint64_t *pfns = range->pfns;
	unsigned long addr = start, i;
	pte_t *ptep;
	pmd_t pmd;


again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, walk);

	if (pmd_huge(pmd) && (range->vma->vm_flags & VM_HUGETLB))
		return hmm_pfns_bad(start, end, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		bool fault, write_fault;
		unsigned long npages;
		uint64_t *pfns;

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     0, &fault, &write_fault);
		if (fault || write_fault) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(vma->vm_mm, pmdp);
			return -EBUSY;
		}
		return 0;
	} else if (!pmd_present(pmd))
		return hmm_pfns_bad(start, end, walk);

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take pmd_lock here, even if some other thread
		 * is splitting the huge pmd we will get that event through
		 * the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		i = (addr - range->start) >> PAGE_SHIFT;
		return hmm_vma_handle_pmd(walk, addr, end, &pfns[i], pmd);
	}

	/*
	 * We have handled all the valid cases above, ie either none,
	 * migration, huge or transparent huge. At this point it is either a
	 * valid pmd entry pointing to a pte directory or a bad pmd that will
	 * not recover.
	 */
	if (pmd_bad(pmd))
		return hmm_pfns_bad(start, end, walk);

	ptep = pte_offset_map(pmdp, addr);
	i = (addr - range->start) >> PAGE_SHIFT;
	for (; addr < end; addr += PAGE_SIZE, ptep++, i++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, &pfns[i]);
		if (r) {
			/* hmm_vma_handle_pte() did unmap pte directory */
			hmm_vma_walk->last = addr;
			return r;
		}
	}
	if (hmm_vma_walk->pgmap) {
		/*
		 * We do put_dev_pagemap() here and not in hmm_vma_handle_pte()
		 * so that we can leverage the get_dev_pagemap() optimization
		 * which will not re-take a reference on a pgmap if we already
		 * have one.
		 */
		put_dev_pagemap(hmm_vma_walk->pgmap);
		hmm_vma_walk->pgmap = NULL;
	}
	pte_unmap(ptep - 1);

	hmm_vma_walk->last = addr;
	return 0;
}

static int hmm_vma_walk_pud(pud_t *pudp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start, next;
	pmd_t *pmdp;
	pud_t pud;
	int ret;

again:
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return hmm_vma_walk_hole(start, end, walk);

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		uint64_t *pfns, cpu_flags;
		bool fault, write_fault;

		if (!pud_present(pud))
			return hmm_vma_walk_hole(start, end, walk);

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		pfns = &range->pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		hmm_range_need_fault(hmm_vma_walk, pfns, npages,
				     cpu_flags, &fault, &write_fault);
		if (fault || write_fault)
			return hmm_vma_walk_hole_(addr, end, fault,
						  write_fault, walk);

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn) {
			hmm_vma_walk->pgmap = get_dev_pagemap(pfn,
					      hmm_vma_walk->pgmap);
			if (unlikely(!hmm_vma_walk->pgmap))
				return -EBUSY;
			pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				  cpu_flags;
		}
		if (hmm_vma_walk->pgmap) {
			put_dev_pagemap(hmm_vma_walk->pgmap);
			hmm_vma_walk->pgmap = NULL;
		}
		hmm_vma_walk->last = end;
		return 0;
	}

	split_huge_pud(walk->vma, pudp, addr);
	if (pud_none(*pudp))
		goto again;

	pmdp = pmd_offset(pudp, addr);
	do {
		next = pmd_addr_end(addr, end);
		ret = hmm_vma_walk_pmd(pmdp, addr, next, walk);
		if (ret)
			return ret;
	} while (pmdp++, addr = next, addr != end);

	return 0;
}

static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long addr = start, i, pfn, mask, size, pfn_inc;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	struct hstate *h = hstate_vma(vma);
	uint64_t orig_pfn, cpu_flags;
	bool fault, write_fault;
	spinlock_t *ptl;
	pte_t entry;
	int ret = 0;

	size = 1UL << huge_page_shift(h);
	mask = size - 1;
	if (range->page_shift != PAGE_SHIFT) {
		/* Make sure we are looking at a full page. */
		if (start & mask)
			return -EINVAL;
		if (end < (start + size))
			return -EINVAL;
		pfn_inc = size >> PAGE_SHIFT;
	} else {
		pfn_inc = 1;
		size = PAGE_SIZE;
	}


	ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> range->page_shift;
	orig_pfn = range->pfns[i];
	range->pfns[i] = range->values[HMM_PFN_NONE];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry);
	fault = write_fault = false;
	hmm_pte_need_fault(hmm_vma_walk, orig_pfn, cpu_flags,
			   &fault, &write_fault);
	if (fault || write_fault) {
		ret = -ENOENT;
		goto unlock;
	}

	pfn = pte_pfn(entry) + ((start & mask) >> range->page_shift);
	for (; addr < end; addr += size, i++, pfn += pfn_inc)
		range->pfns[i] = hmm_device_entry_from_pfn(range, pfn) |
				 cpu_flags;
	hmm_vma_walk->last = end;

unlock:
	spin_unlock(ptl);

	if (ret == -ENOENT)
		return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);

	return ret;
#else /* CONFIG_HUGETLB_PAGE */
	return -EINVAL;
#endif
}

static void hmm_pfns_clear(struct hmm_range *range,
			   uint64_t *pfns,
			   unsigned long addr,
			   unsigned long end)
{
	for (; addr < end; addr += PAGE_SIZE, pfns++)
		*pfns = range->values[HMM_PFN_NONE];
}

/*
 * hmm_range_register() - start tracking changes to CPU page table over a range
 * @range: range
 * @mirror: the HMM mirror for the mm of the virtual address range
 * @start: start virtual address (inclusive)
 * @end: end virtual address (exclusive)
 * @page_shift: expected page shift for the range
 * Returns 0 on success, -EFAULT if the address space is no longer valid
 *
 * Track updates to the CPU page table, see include/linux/hmm.h
 */
int hmm_range_register(struct hmm_range *range,
		       struct hmm_mirror *mirror,
		       unsigned long start,
		       unsigned long end,
		       unsigned page_shift)
{
	unsigned long mask = ((1UL << page_shift) - 1UL);
	struct hmm *hmm = mirror->hmm;
	unsigned long flags;

	range->valid = false;
	range->hmm = NULL;

	if ((start & mask) || (end & mask))
		return -EINVAL;
	if (start >= end)
		return -EINVAL;

	range->page_shift = page_shift;
	range->start = start;
	range->end = end;

	/* Prevent hmm_release() from running while the range is valid */
	if (!mmget_not_zero(hmm->mm))
		return -EFAULT;

	/* Initialize range to track CPU page table updates. */
	spin_lock_irqsave(&hmm->ranges_lock, flags);

	range->hmm = hmm;
	kref_get(&hmm->kref);
	list_add(&range->list, &hmm->ranges);

	/*
	 * If there are any concurrent notifiers we have to wait for them for
	 * the range to be valid (see hmm_range_wait_until_valid()).
	 */
	if (!hmm->notifiers)
		range->valid = true;
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	return 0;
}
EXPORT_SYMBOL(hmm_range_register);
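
/*
 * Illustrative registration lifecycle (a sketch assuming the
 * hmm_range_wait_until_valid() helper and the HMM_RANGE_DEFAULT_TIMEOUT
 * constant declared in include/linux/hmm.h at this revision):
 *
 *	ret = hmm_range_register(&range, mirror, start, end, PAGE_SHIFT);
 *	if (ret)
 *		return ret;
 *	if (!hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT)) {
 *		hmm_range_unregister(&range);
 *		return -EBUSY;
 *	}
 *	down_read(&mm->mmap_sem);
 *	// ... hmm_range_snapshot() or hmm_range_fault() ...
 *	up_read(&mm->mmap_sem);
 *	hmm_range_unregister(&range);
 */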

/*
 * hmm_range_unregister() - stop tracking changes to CPU page table over a range
 * @range: range
 *
 * Range struct is used to track updates to the CPU page table after a call to
 * hmm_range_register(). See include/linux/hmm.h for how to use it.
 */
void hmm_range_unregister(struct hmm_range *range)
{
	struct hmm *hmm = range->hmm;
	unsigned long flags;

	spin_lock_irqsave(&hmm->ranges_lock, flags);
	list_del_init(&range->list);
	spin_unlock_irqrestore(&hmm->ranges_lock, flags);

	/* Drop reference taken by hmm_range_register() */
	mmput(hmm->mm);
	hmm_put(hmm);

	/*
	 * The range is now invalid and the ref on the hmm is dropped, so
	 * poison the pointer. Leave other fields in place, for the caller's
	 * use.
	 */
	range->valid = false;
	memset(&range->hmm, POISON_INUSE, sizeof(range->hmm));
}
EXPORT_SYMBOL(hmm_range_unregister);

/*
 * hmm_range_snapshot() - snapshot CPU page table for a range
 * @range: range
 * Return: -EINVAL if invalid argument, -ENOMEM out of memory, -EPERM invalid
 *          permission (for instance asking for write and range is read only),
 *          -EBUSY if you need to retry, -EFAULT invalid (ie either no valid
 *          vma or it is illegal to access that range), number of valid pages
 *          in range->pfns[] (from range start address).
 *
 * This snapshots the CPU page table for a range of virtual addresses.
 * Snapshot validity is tracked by the range struct. See include/linux/hmm.h
 * for an example of how to use it.
 */
long hmm_range_snapshot(struct hmm_range *range)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;

	lockdep_assert_held(&hmm->mm->mmap_sem);
	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EBUSY;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
				    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = false;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		walk_page_range(start, end, &mm_walk);
		start = end;
	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_snapshot);
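
/*
 * Illustrative consumption of a snapshot (driver-side sketch; the loop body
 * is hypothetical):
 *
 *	ret = hmm_range_snapshot(&range);
 *	if (ret < 0)
 *		goto out;	// -EBUSY means retry once range is valid again
 *	npages = (range.end - range.start) >> PAGE_SHIFT;
 *	for (i = 0; i < npages; i++) {
 *		if (!(range.pfns[i] & range.flags[HMM_PFN_VALID]))
 *			continue;	// hole or fault needed
 *		page = hmm_device_entry_to_page(&range, range.pfns[i]);
 *		// ... mirror this page into the device page table ...
 *	}
 */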

/*
 * hmm_range_fault() - try to fault some address in a virtual address range
 * @range: range being faulted
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *          mmap_sem)
 * Return: number of valid pages in range->pfns[] (from range start
 *          address). This may be zero. If the return value is negative,
 *          then one of the following values may be returned:
 *
 *           -EINVAL: invalid arguments or mm or virtual address are in an
 *                    invalid vma (for instance device file vma).
 *           -ENOMEM: Out of memory.
 *           -EPERM:  Invalid permission (for instance asking for write and
 *                    range is read only).
 *           -EAGAIN: If you need to retry and mmap_sem was dropped. This can
 *                    only happen if the block argument is false.
 *           -EBUSY:  If the range is being invalidated and you should wait
 *                    for invalidation to finish.
 *           -EFAULT: Invalid (ie either no valid vma or it is illegal to
 *                    access that range), number of valid pages in
 *                    range->pfns[] (from range start address).
 *
 * This is similar to a regular CPU page fault except that it will not trigger
 * any memory migration if the memory being faulted is not accessible by CPUs
 * and the caller does not ask for migration.
 *
 * On error, for one virtual address in the range, the function will mark the
 * corresponding HMM pfn entry with an error flag.
 */
long hmm_range_fault(struct hmm_range *range, bool block)
{
	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
	unsigned long start = range->start, end;
	struct hmm_vma_walk hmm_vma_walk;
	struct hmm *hmm = range->hmm;
	struct vm_area_struct *vma;
	struct mm_walk mm_walk;
	int ret;

	lockdep_assert_held(&hmm->mm->mmap_sem);

	do {
		/* If range is no longer valid force retry. */
		if (!range->valid)
			return -EBUSY;

		vma = find_vma(hmm->mm, start);
		if (vma == NULL || (vma->vm_flags & device_vma))
			return -EFAULT;

		if (is_vm_hugetlb_page(vma)) {
			if (huge_page_shift(hstate_vma(vma)) !=
			    range->page_shift &&
			    range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		} else {
			if (range->page_shift != PAGE_SHIFT)
				return -EINVAL;
		}

		if (!(vma->vm_flags & VM_READ)) {
			/*
			 * If the vma does not allow read access, then assume
			 * that it does not allow write access either. HMM
			 * does not support architectures that allow write
			 * without read.
			 */
			hmm_pfns_clear(range, range->pfns,
				range->start, range->end);
			return -EPERM;
		}

		range->vma = vma;
		hmm_vma_walk.pgmap = NULL;
		hmm_vma_walk.last = start;
		hmm_vma_walk.fault = true;
		hmm_vma_walk.block = block;
		hmm_vma_walk.range = range;
		mm_walk.private = &hmm_vma_walk;
		end = min(range->end, vma->vm_end);

		mm_walk.vma = vma;
		mm_walk.mm = vma->vm_mm;
		mm_walk.pte_entry = NULL;
		mm_walk.test_walk = NULL;
		mm_walk.hugetlb_entry = NULL;
		mm_walk.pud_entry = hmm_vma_walk_pud;
		mm_walk.pmd_entry = hmm_vma_walk_pmd;
		mm_walk.pte_hole = hmm_vma_walk_hole;
		mm_walk.hugetlb_entry = hmm_vma_walk_hugetlb_entry;

		do {
			ret = walk_page_range(start, end, &mm_walk);
			start = hmm_vma_walk.last;

			/* Keep trying while the range is valid. */
		} while (ret == -EBUSY && range->valid);

		if (ret) {
			unsigned long i;

			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
			hmm_pfns_clear(range, &range->pfns[i],
				hmm_vma_walk.last, range->end);
			return ret;
		}
		start = end;

	} while (start < range->end);

	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
}
EXPORT_SYMBOL(hmm_range_fault);
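
/*
 * Illustrative retry pattern, following the usage documented in
 * Documentation/vm/hmm.rst for this API generation (take_lock()/
 * release_lock() and driver_update_device() are hypothetical driver hooks):
 *
 * again:
 *	down_read(&mm->mmap_sem);
 *	ret = hmm_range_fault(&range, true);
 *	if (ret < 0) {
 *		up_read(&mm->mmap_sem);
 *		if (ret == -EBUSY)
 *			goto again;	// range was invalidated, wait + retry
 *		hmm_range_unregister(&range);
 *		return ret;
 *	}
 *	take_lock(driver->update);
 *	if (!hmm_range_valid(&range)) {
 *		release_lock(driver->update);
 *		up_read(&mm->mmap_sem);
 *		goto again;
 *	}
 *	driver_update_device(&range);
 *	release_lock(driver->update);
 *	up_read(&mm->mmap_sem);
 */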

/**
 * hmm_range_dma_map() - hmm_range_fault() and dma map page all in one.
 * @range: range being faulted
 * @device: device against which to dma map pages
 * @daddrs: dma address of mapped pages
 * @block: allow blocking on fault (if true it sleeps and does not drop
 *          mmap_sem)
 * Return: number of pages mapped on success, -EAGAIN if mmap_sem has been
 *          dropped and you need to try again, some other error value otherwise
 *
 * Note same usage pattern as hmm_range_fault().
 */
long hmm_range_dma_map(struct hmm_range *range,
		       struct device *device,
		       dma_addr_t *daddrs,
		       bool block)
{
	unsigned long i, npages, mapped;
	long ret;

	ret = hmm_range_fault(range, block);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0, mapped = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		/*
		 * FIXME need to update DMA API to provide invalid DMA address
		 * value instead of a function to test dma address value. This
		 * would remove a lot of dumb code duplicated across many
		 * arches.
		 *
		 * For now setting it to 0 here is good enough as the pfns[]
		 * value is what is used to check what is valid and what isn't.
		 */
		daddrs[i] = 0;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* Check if range is being invalidated */
		if (!range->valid) {
			ret = -EBUSY;
			goto unmap;
		}

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		daddrs[i] = dma_map_page(device, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(device, daddrs[i])) {
			ret = -EFAULT;
			goto unmap;
		}

		mapped++;
	}

	return mapped;

unmap:
	for (npages = i, i = 0; (i < npages) && mapped; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		if (dma_mapping_error(device, daddrs[i]))
			continue;

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE])
			dir = DMA_BIDIRECTIONAL;

		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		mapped--;
	}

	return ret;
}
EXPORT_SYMBOL(hmm_range_dma_map);
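
/*
 * Minimal pairing sketch (assumptions: daddrs[] is sized to the range's
 * page count; the vma argument of hmm_range_dma_unmap() may be NULL since
 * it is unused below):
 *
 *	long n = hmm_range_dma_map(&range, dev, daddrs, true);
 *	if (n < 0)
 *		goto err;
 *	// ... program the device using the non-zero daddrs[i] entries ...
 *	hmm_range_dma_unmap(&range, NULL, dev, daddrs, device_wrote);
 */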

/**
 * hmm_range_dma_unmap() - unmap a range that was mapped with hmm_range_dma_map()
 * @range: range being unmapped
 * @vma: the vma against which the range was mapped (optional)
 * @device: device against which dma map was done
 * @daddrs: dma address of mapped pages
 * @dirty: dirty page if it had the write flag set
 * Return: number of pages unmapped on success, -EINVAL otherwise
 *
 * Note that the caller MUST abide by the mmu notifier or use the HMM mirror
 * and abide by the sync_cpu_device_pagetables() callback so that it is safe
 * here to call set_page_dirty(). The caller must also take appropriate locks
 * to prevent a concurrent mmu notifier or sync_cpu_device_pagetables() from
 * making progress.
 */
long hmm_range_dma_unmap(struct hmm_range *range,
			 struct vm_area_struct *vma,
			 struct device *device,
			 dma_addr_t *daddrs,
			 bool dirty)
{
	unsigned long i, npages;
	long cpages = 0;

	/* Sanity check. */
	if (range->end <= range->start)
		return -EINVAL;
	if (!daddrs)
		return -EINVAL;
	if (!range->pfns)
		return -EINVAL;

	npages = (range->end - range->start) >> PAGE_SHIFT;
	for (i = 0; i < npages; ++i) {
		enum dma_data_direction dir = DMA_TO_DEVICE;
		struct page *page;

		page = hmm_device_entry_to_page(range, range->pfns[i]);
		if (page == NULL)
			continue;

		/* If it is read and write then map bi-directional. */
		if (range->pfns[i] & range->flags[HMM_PFN_WRITE]) {
			dir = DMA_BIDIRECTIONAL;

			/*
			 * See comments in function description on why it is
			 * safe here to call set_page_dirty()
			 */
			if (dirty)
				set_page_dirty(page);
		}

		/* Unmap and clear pfns/dma address */
		dma_unmap_page(device, daddrs[i], PAGE_SIZE, dir);
		range->pfns[i] = range->values[HMM_PFN_NONE];
		/* FIXME see comments in hmm_range_dma_map() */
		daddrs[i] = 0;
		cpages++;
	}

	return cpages;
}
EXPORT_SYMBOL(hmm_range_dma_unmap);