mm/hmm.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags, NULL) &
		    VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * Consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used
	 * two ways: in the first, the HMM user coalesces multiple page
	 * faults into one request and sets flags per pfn for those faults;
	 * in the second, the HMM user wants to pre-fault a range with
	 * specific flags. For the latter it would be a waste to have the
	 * user pre-fill the pfn array with a default flags value.
	 */
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Do we need to write fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If the CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}

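/*
 * Illustrative sketch (not part of this file): how a caller might configure
 * struct hmm_range for the two hmm_pte_need_fault() modes described above.
 * The field and flag names are the real hmm.h ones; the surrounding driver
 * context is assumed.
 *
 *	// Per-pfn mode: each hmm_pfns[] entry carries its own request bits.
 *	range.default_flags = 0;
 *	range.pfn_flags_mask = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *	range.hmm_pfns[i] = HMM_PFN_REQ_FAULT;	// fault page i only
 *
 *	// Pre-fault mode: one setting covers the whole range, so the caller
 *	// need not pre-fill the array.
 *	range.default_flags = HMM_PFN_REQ_FAULT | HMM_PFN_REQ_WRITE;
 *	range.pfn_flags_mask = 0;
 *
 * Leaving both fields zero yields a pure snapshot: the walk reports the
 * current CPU page table state without faulting anything.
 */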

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}

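/*
 * Worked example (values assume x86-64 with 4K base pages): a huge pmd maps
 * 2MB, so pmd_to_hmm_pfn_flags() below passes PMD_SHIFT - PAGE_SHIFT =
 * 21 - 12 = 9, encoding order 9 (a 512-page mapping) in the bits above
 * HMM_PFN_ORDER_SHIFT.
 */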
static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		       unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
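/*
 * Worked example for hmm_vma_handle_pmd()'s pfn arithmetic (assuming 4K
 * pages): if a huge pmd maps 0x200000-0x400000 and the walk enters at
 * addr = 0x205000, then (addr & ~PMD_MASK) >> PAGE_SHIFT = 5, so the first
 * entry written is pmd_pfn(pmd) + 5, the page five 4K steps into the huge
 * page, and subsequent entries increment from there.
 */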

static inline bool hmm_is_device_private_entry(struct hmm_range *range,
					       swp_entry_t entry)
{
	return is_device_private_entry(entry) &&
		device_private_entry_to_page(entry)->pgmap->owner ==
		range->dev_private_owner;
}

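/*
 * Illustrative note (driver-side sketch, not code from this file): a driver
 * that wants its own device-private pages reported by the owner check above
 * sets range.dev_private_owner to the same cookie it stored in its pagemap:
 *
 *	pgmap->owner = drvdata;			// at pagemap creation
 *	range.dev_private_owner = drvdata;	// before hmm_range_fault()
 *
 * Device-private entries belonging to any other owner are not reported; if
 * faulting was requested for them they are faulted instead, which migrates
 * the data back to system memory.
 */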

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = *ptep;
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (hmm_is_device_private_entry(range, entry)) {
			cpu_flags = HMM_PFN_VALID;
			if (is_write_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = device_private_entry_to_pfn(entry) |
					cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd lock here: even if some other
		 * thread is splitting the huge pmd, we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value, check again that it is a
		 * transparent huge or device mapping, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. none, migration,
	 * huge or transparent huge. At this point the pmd is either a valid
	 * entry pointing to a pte directory or a bad pmd that will not
	 * recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
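
/*
 * Illustrative sketch (driver-side code, not part of this file): the retry
 * protocol hmm_range_fault() expects, following Documentation/vm/hmm.rst.
 * driver_update_device_page_table(), driver->update_lock, and the mm pointer
 * stand in for the caller's own device page-table update and the lock that
 * serializes it against the driver's mmu interval notifier callback.
 *
 *	int driver_populate_range(struct driver *driver, struct hmm_range *range)
 *	{
 *		int ret;
 *
 *	again:
 *		range->notifier_seq = mmu_interval_read_begin(range->notifier);
 *		mmap_read_lock(mm);
 *		ret = hmm_range_fault(range);
 *		mmap_read_unlock(mm);
 *		if (ret) {
 *			if (ret == -EBUSY)
 *				goto again;
 *			return ret;
 *		}
 *
 *		mutex_lock(&driver->update_lock);
 *		if (mmu_interval_read_retry(range->notifier,
 *					    range->notifier_seq)) {
 *			mutex_unlock(&driver->update_lock);
 *			goto again;
 *		}
 *		driver_update_device_page_table(driver, range);
 *		mutex_unlock(&driver->update_lock);
 *		return 0;
 *	}
 */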