// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2013 Red Hat Inc.
 *
 * Authors: Jérôme Glisse <jglisse@redhat.com>
 */
/*
 * Refer to include/linux/hmm.h for information about heterogeneous memory
 * management or HMM for short.
 */
#include <linux/pagewalk.h>
#include <linux/hmm.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/hugetlb.h>
#include <linux/memremap.h>
#include <linux/sched/mm.h>
#include <linux/jump_label.h>
#include <linux/dma-mapping.h>
#include <linux/mmu_notifier.h>
#include <linux/memory_hotplug.h>

struct hmm_vma_walk {
	struct hmm_range	*range;
	unsigned long		last;
};

enum {
	HMM_NEED_FAULT = 1 << 0,
	HMM_NEED_WRITE_FAULT = 1 << 1,
	HMM_NEED_ALL_BITS = HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT,
};

static int hmm_pfns_fill(unsigned long addr, unsigned long end,
			 struct hmm_range *range, unsigned long cpu_flags)
{
	unsigned long i = (addr - range->start) >> PAGE_SHIFT;

	for (; addr < end; addr += PAGE_SIZE, i++)
		range->hmm_pfns[i] = cpu_flags;
	return 0;
}

/*
 * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
 * @addr: range virtual start address (inclusive)
 * @end: range virtual end address (exclusive)
 * @required_fault: HMM_NEED_* flags
 * @walk: mm_walk structure
 * Return: -EBUSY after page fault, or page fault error
 *
 * This function will be called whenever pmd_none() or pte_none() returns true,
 * or whenever there is no page directory covering the virtual address range.
 */
static int hmm_vma_fault(unsigned long addr, unsigned long end,
			 unsigned int required_fault, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct vm_area_struct *vma = walk->vma;
	unsigned int fault_flags = FAULT_FLAG_REMOTE;

	WARN_ON_ONCE(!required_fault);
	hmm_vma_walk->last = addr;

	if (required_fault & HMM_NEED_WRITE_FAULT) {
		if (!(vma->vm_flags & VM_WRITE))
			return -EPERM;
		fault_flags |= FAULT_FLAG_WRITE;
	}

	for (; addr < end; addr += PAGE_SIZE)
		if (handle_mm_fault(vma, addr, fault_flags) & VM_FAULT_ERROR)
			return -EFAULT;
	return -EBUSY;
}

static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
				       unsigned long pfn_req_flags,
				       unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;

	/*
	 * Consider not only the individual per-page request but also the
	 * default flags requested for the whole range. The API can be used
	 * two ways: either the HMM user coalesces multiple page faults into
	 * one request and sets flags per pfn for those faults, or the HMM
	 * user wants to pre-fault a range with specific flags. For the latter
	 * it would be a waste to have the user pre-fill the pfn array with a
	 * default flags value. (See the usage sketch after this function.)
	 */
	pfn_req_flags &= range->pfn_flags_mask;
	pfn_req_flags |= range->default_flags;

	/* We aren't asked to do anything ... */
	if (!(pfn_req_flags & HMM_PFN_REQ_FAULT))
		return 0;

	/* Need to write fault? */
	if ((pfn_req_flags & HMM_PFN_REQ_WRITE) &&
	    !(cpu_flags & HMM_PFN_WRITE))
		return HMM_NEED_FAULT | HMM_NEED_WRITE_FAULT;

	/* If CPU page table is not valid then we need to fault */
	if (!(cpu_flags & HMM_PFN_VALID))
		return HMM_NEED_FAULT;
	return 0;
}
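
/*
 * A minimal sketch of the two ways callers typically set up default_flags and
 * pfn_flags_mask before calling hmm_range_fault(), following the pattern in
 * Documentation/vm/hmm.rst (the index name below is hypothetical):
 *
 *	// Fault the whole range with at least read permission:
 *	range->default_flags = HMM_PFN_REQ_FAULT;
 *	range->pfn_flags_mask = 0;
 *
 *	// Same, but additionally request write access for one page:
 *	range->default_flags = HMM_PFN_REQ_FAULT;
 *	range->pfn_flags_mask = HMM_PFN_REQ_WRITE;
 *	range->hmm_pfns[index_of_write] = HMM_PFN_REQ_WRITE;
 *
 * In the second form only the per-pfn bits allowed by pfn_flags_mask are
 * honoured; everything else comes from default_flags.
 */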

static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
		     const unsigned long hmm_pfns[], unsigned long npages,
		     unsigned long cpu_flags)
{
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault = 0;
	unsigned long i;

	/*
	 * If the default flags do not request to fault pages, and the mask does
	 * not allow for individual pages to be faulted, then
	 * hmm_pte_need_fault() will always return 0.
	 */
	if (!((range->default_flags | range->pfn_flags_mask) &
	      HMM_PFN_REQ_FAULT))
		return 0;

	for (i = 0; i < npages; ++i) {
		required_fault |= hmm_pte_need_fault(hmm_vma_walk, hmm_pfns[i],
						     cpu_flags);
		if (required_fault == HMM_NEED_ALL_BITS)
			return required_fault;
	}
	return required_fault;
}

static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
			     __always_unused int depth, struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long i, npages;
	unsigned long *hmm_pfns;

	i = (addr - range->start) >> PAGE_SHIFT;
	npages = (end - addr) >> PAGE_SHIFT;
	hmm_pfns = &range->hmm_pfns[i];
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0);
	if (!walk->vma) {
		if (required_fault)
			return -EFAULT;
		return hmm_pfns_fill(addr, end, range, HMM_PFN_ERROR);
	}
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);
	return hmm_pfns_fill(addr, end, range, 0);
}

static inline unsigned long hmm_pfn_flags_order(unsigned long order)
{
	return order << HMM_PFN_ORDER_SHIFT;
}

static inline unsigned long pmd_to_hmm_pfn_flags(struct hmm_range *range,
						 pmd_t pmd)
{
	if (pmd_protnone(pmd))
		return 0;
	return (pmd_write(pmd) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PMD_SHIFT - PAGE_SHIFT);
}
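
/*
 * The order encoded above is what hmm_pfn_to_map_order() (see
 * include/linux/hmm.h) returns to the caller. A minimal sketch, with
 * illustrative variable names only, of how a driver might use it when
 * mirroring the CPU mapping into its own page tables:
 *
 *	unsigned long hmm_pfn = range->hmm_pfns[i];
 *	unsigned int order = hmm_pfn_to_map_order(hmm_pfn);
 *
 *	// order describes the size of the CPU mapping backing this address,
 *	// e.g. PMD_SHIFT - PAGE_SHIFT (9 on x86-64, i.e. 2MB) for entries
 *	// filled by hmm_vma_handle_pmd() below, so the driver can choose a
 *	// matching large device-side mapping instead of 4K entries.
 */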

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, unsigned long hmm_pfns[],
			      pmd_t pmd)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long pfn, npages, i;
	unsigned int required_fault;
	unsigned long cpu_flags;

	npages = (end - addr) >> PAGE_SHIFT;
	cpu_flags = pmd_to_hmm_pfn_flags(range, pmd);
	required_fault =
		hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, cpu_flags);
	if (required_fault)
		return hmm_vma_fault(addr, end, required_fault, walk);

	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++)
		hmm_pfns[i] = pfn | cpu_flags;
	return 0;
}
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
/* stub to allow the code below to compile */
int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
		unsigned long end, unsigned long hmm_pfns[], pmd_t pmd);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool hmm_is_device_private_entry(struct hmm_range *range,
					       swp_entry_t entry)
{
	return is_device_private_entry(entry) &&
		device_private_entry_to_page(entry)->pgmap->owner ==
		range->dev_private_owner;
}

static inline unsigned long pte_to_hmm_pfn_flags(struct hmm_range *range,
						 pte_t pte)
{
	if (pte_none(pte) || !pte_present(pte) || pte_protnone(pte))
		return 0;
	return pte_write(pte) ? (HMM_PFN_VALID | HMM_PFN_WRITE) : HMM_PFN_VALID;
}

static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
			      unsigned long end, pmd_t *pmdp, pte_t *ptep,
			      unsigned long *hmm_pfn)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned int required_fault;
	unsigned long cpu_flags;
	pte_t pte = *ptep;
	uint64_t pfn_req_flags = *hmm_pfn;

	if (pte_none(pte)) {
		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (required_fault)
			goto fault;
		*hmm_pfn = 0;
		return 0;
	}

	if (!pte_present(pte)) {
		swp_entry_t entry = pte_to_swp_entry(pte);

		/*
		 * Never fault in device private pages, but just report
		 * the PFN even if not present.
		 */
		if (hmm_is_device_private_entry(range, entry)) {
			cpu_flags = HMM_PFN_VALID;
			if (is_write_device_private_entry(entry))
				cpu_flags |= HMM_PFN_WRITE;
			*hmm_pfn = device_private_entry_to_pfn(entry) |
					cpu_flags;
			return 0;
		}

		required_fault =
			hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0);
		if (!required_fault) {
			*hmm_pfn = 0;
			return 0;
		}

		if (!non_swap_entry(entry))
			goto fault;

		if (is_migration_entry(entry)) {
			pte_unmap(ptep);
			hmm_vma_walk->last = addr;
			migration_entry_wait(walk->mm, pmdp, addr);
			return -EBUSY;
		}

		/* Report error for everything else */
		pte_unmap(ptep);
		return -EFAULT;
	}

	cpu_flags = pte_to_hmm_pfn_flags(range, pte);
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault)
		goto fault;

	/*
	 * Since each architecture defines a struct page for the zero page, just
	 * fall through and treat it like a normal page.
	 */
	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
		if (hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, 0)) {
			pte_unmap(ptep);
			return -EFAULT;
		}
		*hmm_pfn = HMM_PFN_ERROR;
		return 0;
	}

	*hmm_pfn = pte_pfn(pte) | cpu_flags;
	return 0;

fault:
	pte_unmap(ptep);
	/* Fault any virtual address we were asked to fault */
	return hmm_vma_fault(addr, end, required_fault, walk);
}

static int hmm_vma_walk_pmd(pmd_t *pmdp,
			    unsigned long start,
			    unsigned long end,
			    struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long *hmm_pfns =
		&range->hmm_pfns[(start - range->start) >> PAGE_SHIFT];
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long addr = start;
	pte_t *ptep;
	pmd_t pmd;

again:
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return hmm_vma_walk_hole(start, end, -1, walk);

	if (thp_migration_supported() && is_pmd_migration_entry(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0)) {
			hmm_vma_walk->last = addr;
			pmd_migration_entry_wait(walk->mm, pmdp);
			return -EBUSY;
		}
		return hmm_pfns_fill(start, end, range, 0);
	}

	if (!pmd_present(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
		/*
		 * No need to take the pmd_lock here: even if some other
		 * thread is splitting the huge pmd, we will get that event
		 * through the mmu_notifier callback.
		 *
		 * So just read the pmd value and check again that it is a
		 * transparent huge or device mapping one, and compute the
		 * corresponding pfn values.
		 */
		pmd = pmd_read_atomic(pmdp);
		barrier();
		if (!pmd_devmap(pmd) && !pmd_trans_huge(pmd))
			goto again;

		return hmm_vma_handle_pmd(walk, addr, end, hmm_pfns, pmd);
	}

	/*
	 * We have handled all the valid cases above, i.e. either none,
	 * migration, huge or transparent huge. At this point it is either a
	 * valid pmd entry pointing to a pte directory or a bad pmd that will
	 * not recover.
	 */
	if (pmd_bad(pmd)) {
		if (hmm_range_need_fault(hmm_vma_walk, hmm_pfns, npages, 0))
			return -EFAULT;
		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
	}

	ptep = pte_offset_map(pmdp, addr);
	for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) {
		int r;

		r = hmm_vma_handle_pte(walk, addr, end, pmdp, ptep, hmm_pfns);
		if (r) {
			/* hmm_vma_handle_pte() did pte_unmap() */
			return r;
		}
	}
	pte_unmap(ptep - 1);
	return 0;
}

#if defined(CONFIG_ARCH_HAS_PTE_DEVMAP) && \
    defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline unsigned long pud_to_hmm_pfn_flags(struct hmm_range *range,
						 pud_t pud)
{
	if (!pud_present(pud))
		return 0;
	return (pud_write(pud) ? (HMM_PFN_VALID | HMM_PFN_WRITE) :
				 HMM_PFN_VALID) |
	       hmm_pfn_flags_order(PUD_SHIFT - PAGE_SHIFT);
}

static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
		struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	unsigned long addr = start;
	pud_t pud;
	int ret = 0;
	spinlock_t *ptl = pud_trans_huge_lock(pudp, walk->vma);

	if (!ptl)
		return 0;

	/* Normally we don't want to split the huge page */
	walk->action = ACTION_CONTINUE;

	pud = READ_ONCE(*pudp);
	if (pud_none(pud)) {
		spin_unlock(ptl);
		return hmm_vma_walk_hole(start, end, -1, walk);
	}

	if (pud_huge(pud) && pud_devmap(pud)) {
		unsigned long i, npages, pfn;
		unsigned int required_fault;
		unsigned long *hmm_pfns;
		unsigned long cpu_flags;

		if (!pud_present(pud)) {
			spin_unlock(ptl);
			return hmm_vma_walk_hole(start, end, -1, walk);
		}

		i = (addr - range->start) >> PAGE_SHIFT;
		npages = (end - addr) >> PAGE_SHIFT;
		hmm_pfns = &range->hmm_pfns[i];

		cpu_flags = pud_to_hmm_pfn_flags(range, pud);
		required_fault = hmm_range_need_fault(hmm_vma_walk, hmm_pfns,
						      npages, cpu_flags);
		if (required_fault) {
			spin_unlock(ptl);
			return hmm_vma_fault(addr, end, required_fault, walk);
		}

		pfn = pud_pfn(pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
		for (i = 0; i < npages; ++i, ++pfn)
			hmm_pfns[i] = pfn | cpu_flags;
		goto out_unlock;
	}

	/* Ask for the PUD to be split */
	walk->action = ACTION_SUBTREE;

out_unlock:
	spin_unlock(ptl);
	return ret;
}
#else
#define hmm_vma_walk_pud	NULL
#endif

#ifdef CONFIG_HUGETLB_PAGE
static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
				      unsigned long start, unsigned long end,
				      struct mm_walk *walk)
{
	unsigned long addr = start, i, pfn;
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;
	unsigned int required_fault;
	unsigned long pfn_req_flags;
	unsigned long cpu_flags;
	spinlock_t *ptl;
	pte_t entry;

	ptl = huge_pte_lock(hstate_vma(vma), walk->mm, pte);
	entry = huge_ptep_get(pte);

	i = (start - range->start) >> PAGE_SHIFT;
	pfn_req_flags = range->hmm_pfns[i];
	cpu_flags = pte_to_hmm_pfn_flags(range, entry) |
		    hmm_pfn_flags_order(huge_page_order(hstate_vma(vma)));
	required_fault =
		hmm_pte_need_fault(hmm_vma_walk, pfn_req_flags, cpu_flags);
	if (required_fault) {
		spin_unlock(ptl);
		return hmm_vma_fault(addr, end, required_fault, walk);
	}

	pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
	for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
		range->hmm_pfns[i] = pfn | cpu_flags;

	spin_unlock(ptl);
	return 0;
}
#else
#define hmm_vma_walk_hugetlb_entry NULL
#endif /* CONFIG_HUGETLB_PAGE */

static int hmm_vma_walk_test(unsigned long start, unsigned long end,
			     struct mm_walk *walk)
{
	struct hmm_vma_walk *hmm_vma_walk = walk->private;
	struct hmm_range *range = hmm_vma_walk->range;
	struct vm_area_struct *vma = walk->vma;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP)) &&
	    vma->vm_flags & VM_READ)
		return 0;

	/*
	 * vma ranges that don't have struct page backing them or map I/O
	 * devices directly cannot be handled by hmm_range_fault().
	 *
	 * If the vma does not allow read access, then assume that it does not
	 * allow write access either. HMM does not support architectures that
	 * allow write without read.
	 *
	 * If a fault is requested for an unsupported range then it is a hard
	 * failure.
	 */
	if (hmm_range_need_fault(hmm_vma_walk,
				 range->hmm_pfns +
					 ((start - range->start) >> PAGE_SHIFT),
				 (end - start) >> PAGE_SHIFT, 0))
		return -EFAULT;

	hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);

	/* Skip this vma and continue processing the next vma. */
	return 1;
}

static const struct mm_walk_ops hmm_walk_ops = {
	.pud_entry	= hmm_vma_walk_pud,
	.pmd_entry	= hmm_vma_walk_pmd,
	.pte_hole	= hmm_vma_walk_hole,
	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
	.test_walk	= hmm_vma_walk_test,
};

/**
 * hmm_range_fault - try to fault some address in a virtual address range
 * @range: argument structure
 *
 * Returns 0 on success or one of the following error codes:
 *
 * -EINVAL:	Invalid arguments or mm or virtual address is in an invalid vma
 *		(e.g., device file vma).
 * -ENOMEM:	Out of memory.
 * -EPERM:	Invalid permission (e.g., asking for write and range is read
 *		only).
 * -EBUSY:	The range has been invalidated and the caller needs to wait for
 *		the invalidation to finish.
 * -EFAULT:	A page was requested to be valid and could not be made valid,
 *		i.e. it has no backing VMA or it is illegal to access it.
 *
 * This is similar to get_user_pages(), except that it can read the page tables
 * without mutating them (i.e. without causing faults).
 */
int hmm_range_fault(struct hmm_range *range)
{
	struct hmm_vma_walk hmm_vma_walk = {
		.range = range,
		.last = range->start,
	};
	struct mm_struct *mm = range->notifier->mm;
	int ret;

	mmap_assert_locked(mm);

	do {
		/* If range is no longer valid force retry. */
		if (mmu_interval_check_retry(range->notifier,
					     range->notifier_seq))
			return -EBUSY;
		ret = walk_page_range(mm, hmm_vma_walk.last, range->end,
				      &hmm_walk_ops, &hmm_vma_walk);
		/*
		 * When -EBUSY is returned the loop restarts with
		 * hmm_vma_walk.last set to an address that has not been stored
		 * in pfns. All entries < last in the pfn array are set to their
		 * output, and all >= are still at their input values.
		 */
	} while (ret == -EBUSY);
	return ret;
}
EXPORT_SYMBOL(hmm_range_fault);
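
/*
 * A minimal usage sketch (not part of this file's API): how a driver typically
 * drives hmm_range_fault() together with a mmu_interval_notifier, following
 * the pattern in Documentation/vm/hmm.rst. The names driver_lock,
 * driver_update_device_page_tables() and NPAGES are hypothetical.
 *
 *	int driver_populate_range(struct mmu_interval_notifier *ni,
 *				  struct mm_struct *mm,
 *				  unsigned long start, unsigned long end)
 *	{
 *		unsigned long hmm_pfns[NPAGES];
 *		struct hmm_range range = {
 *			.notifier = ni,
 *			.start = start,
 *			.end = end,
 *			.hmm_pfns = hmm_pfns,
 *			.default_flags = HMM_PFN_REQ_FAULT,
 *		};
 *		int ret;
 *
 *	again:
 *		range.notifier_seq = mmu_interval_read_begin(ni);
 *		mmap_read_lock(mm);
 *		ret = hmm_range_fault(&range);
 *		mmap_read_unlock(mm);
 *		if (ret) {
 *			if (ret == -EBUSY)
 *				goto again;
 *			return ret;
 *		}
 *
 *		mutex_lock(&driver_lock);
 *		if (mmu_interval_read_retry(ni, range.notifier_seq)) {
 *			mutex_unlock(&driver_lock);
 *			goto again;
 *		}
 *		driver_update_device_page_tables(&range);
 *		mutex_unlock(&driver_lock);
 *		return 0;
 *	}
 */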