1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * fs/dax.c - Direct Access filesystem code
4 * Copyright (c) 2013-2014 Intel Corporation
5 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
6 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
7 */
8
9 #include <linux/atomic.h>
10 #include <linux/blkdev.h>
11 #include <linux/buffer_head.h>
12 #include <linux/dax.h>
13 #include <linux/fs.h>
14 #include <linux/genhd.h>
15 #include <linux/highmem.h>
16 #include <linux/memcontrol.h>
17 #include <linux/mm.h>
18 #include <linux/mutex.h>
19 #include <linux/pagevec.h>
20 #include <linux/sched.h>
21 #include <linux/sched/signal.h>
22 #include <linux/uio.h>
23 #include <linux/vmstat.h>
24 #include <linux/pfn_t.h>
25 #include <linux/sizes.h>
26 #include <linux/mmu_notifier.h>
27 #include <linux/iomap.h>
28 #include <asm/pgalloc.h>
29
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/fs_dax.h>
32
33 static inline unsigned int pe_order(enum page_entry_size pe_size)
34 {
35 if (pe_size == PE_SIZE_PTE)
36 return PAGE_SHIFT - PAGE_SHIFT;
37 if (pe_size == PE_SIZE_PMD)
38 return PMD_SHIFT - PAGE_SHIFT;
39 if (pe_size == PE_SIZE_PUD)
40 return PUD_SHIFT - PAGE_SHIFT;
41 return ~0;
42 }
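/*
 * Illustrative values (assuming x86-64 with 4KiB pages): pe_order() maps
 * PE_SIZE_PTE to order 0 (a single page), PE_SIZE_PMD to order 9
 * (PMD_SHIFT 21 - PAGE_SHIFT 12, i.e. 512 pages / 2MiB) and PE_SIZE_PUD
 * to order 18 (1GiB). The exact numbers depend on the architecture.
 */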
43
44 /* We choose 4096 entries - same as per-zone page wait tables */
45 #define DAX_WAIT_TABLE_BITS 12
46 #define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)
47
48 /* The 'colour' (ie low bits) within a PMD of a page offset. */
49 #define PG_PMD_COLOUR ((PMD_SIZE >> PAGE_SHIFT) - 1)
50 #define PG_PMD_NR (PMD_SIZE >> PAGE_SHIFT)
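/*
 * For example, with 4KiB pages and 2MiB PMDs, PG_PMD_NR is 512 and
 * PG_PMD_COLOUR is 511: a PMD entry spans 512 consecutive page offsets
 * and the low 9 bits of an index give the page's position within it.
 */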
51
52 /* The order of a PMD entry */
53 #define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
54
55 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
56
57 static int __init init_dax_wait_table(void)
58 {
59 int i;
60
61 for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
62 init_waitqueue_head(wait_table + i);
63 return 0;
64 }
65 fs_initcall(init_dax_wait_table);
66
67 /*
68 * DAX pagecache entries use XArray value entries so they can't be mistaken
69 * for pages. We use one bit for locking, one bit for the entry size (PMD)
70 * and two more to tell us if the entry is a zero page or an empty entry that
71 * is just used for locking. In total four special bits.
72 *
73 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
74 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
75 * block allocation.
76 */
77 #define DAX_SHIFT (4)
78 #define DAX_LOCKED (1UL << 0)
79 #define DAX_PMD (1UL << 1)
80 #define DAX_ZERO_PAGE (1UL << 2)
81 #define DAX_EMPTY (1UL << 3)
82
83 static unsigned long dax_to_pfn(void *entry)
84 {
85 return xa_to_value(entry) >> DAX_SHIFT;
86 }
87
88 static void *dax_make_entry(pfn_t pfn, unsigned long flags)
89 {
90 return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
91 }
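/*
 * Worked example (illustrative only): for pfn 0x1234 with the DAX_PMD
 * flag, dax_make_entry() stores (0x1234 << DAX_SHIFT) | DAX_PMD ==
 * 0x12342 as an XArray value entry, and dax_to_pfn() recovers 0x1234 by
 * shifting xa_to_value() back down by DAX_SHIFT. The low four bits are
 * reserved for the LOCKED, PMD, ZERO_PAGE and EMPTY flags above.
 */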
92
93 static bool dax_is_locked(void *entry)
94 {
95 return xa_to_value(entry) & DAX_LOCKED;
96 }
97
98 static unsigned int dax_entry_order(void *entry)
99 {
100 if (xa_to_value(entry) & DAX_PMD)
101 return PMD_ORDER;
102 return 0;
103 }
104
105 static unsigned long dax_is_pmd_entry(void *entry)
106 {
107 return xa_to_value(entry) & DAX_PMD;
108 }
109
110 static bool dax_is_pte_entry(void *entry)
111 {
112 return !(xa_to_value(entry) & DAX_PMD);
113 }
114
115 static int dax_is_zero_entry(void *entry)
116 {
117 return xa_to_value(entry) & DAX_ZERO_PAGE;
118 }
119
120 static int dax_is_empty_entry(void *entry)
121 {
122 return xa_to_value(entry) & DAX_EMPTY;
123 }
124
125 /*
126 * true if the entry that was found is of a smaller order than the entry
127 * we were looking for
128 */
129 static bool dax_is_conflict(void *entry)
130 {
131 return entry == XA_RETRY_ENTRY;
132 }
133
134 /*
135 * DAX page cache entry locking
136 */
137 struct exceptional_entry_key {
138 struct xarray *xa;
139 pgoff_t entry_start;
140 };
141
142 struct wait_exceptional_entry_queue {
143 wait_queue_entry_t wait;
144 struct exceptional_entry_key key;
145 };
146
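/*
 * Note (illustrative): there is no per-entry wait queue. The (xarray,
 * index) pair is hashed into the global wait_table above, so unrelated
 * entries may share a queue; wake_exceptional_entry_func() below filters
 * spurious wakeups by comparing the exceptional_entry_key.
 */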
147 static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
148 void *entry, struct exceptional_entry_key *key)
149 {
150 unsigned long hash;
151 unsigned long index = xas->xa_index;
152
153 /*
154 * If 'entry' is a PMD, align the 'index' that we use for the wait
155 * queue to the start of that PMD. This ensures that all offsets in
156 * the range covered by the PMD map to the same bit lock.
157 */
158 if (dax_is_pmd_entry(entry))
159 index &= ~PG_PMD_COLOUR;
160 key->xa = xas->xa;
161 key->entry_start = index;
162
163 hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
164 return wait_table + hash;
165 }
166
167 static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
168 unsigned int mode, int sync, void *keyp)
169 {
170 struct exceptional_entry_key *key = keyp;
171 struct wait_exceptional_entry_queue *ewait =
172 container_of(wait, struct wait_exceptional_entry_queue, wait);
173
174 if (key->xa != ewait->key.xa ||
175 key->entry_start != ewait->key.entry_start)
176 return 0;
177 return autoremove_wake_function(wait, mode, sync, NULL);
178 }
179
180 /*
181 * @entry may no longer be the entry at the index in the mapping.
182 * The important information it's conveying is whether the entry at
183 * this index used to be a PMD entry.
184 */
185 static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
186 {
187 struct exceptional_entry_key key;
188 wait_queue_head_t *wq;
189
190 wq = dax_entry_waitqueue(xas, entry, &key);
191
192 /*
193 * Checking for locked entry and prepare_to_wait_exclusive() happens
194 * under the i_pages lock, ditto for entry handling in our callers.
195 * So at this point all tasks that could have seen our entry locked
196 * must be in the waitqueue and the following check will see them.
197 */
198 if (waitqueue_active(wq))
199 __wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
200 }
201
202 /*
203 * Look up entry in page cache, wait for it to become unlocked if it
204 * is a DAX entry and return it. The caller must subsequently call
205 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
206 * if it did. The entry returned may have a larger order than @order.
207 * If @order is larger than the order of the entry found in i_pages, this
208 * function returns a dax_is_conflict entry.
209 *
210 * Must be called with the i_pages lock held.
211 */
212 static void *get_unlocked_entry(struct xa_state *xas, unsigned int order)
213 {
214 void *entry;
215 struct wait_exceptional_entry_queue ewait;
216 wait_queue_head_t *wq;
217
218 init_wait(&ewait.wait);
219 ewait.wait.func = wake_exceptional_entry_func;
220
221 for (;;) {
222 entry = xas_find_conflict(xas);
223 if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
224 return entry;
225 if (dax_entry_order(entry) < order)
226 return XA_RETRY_ENTRY;
227 if (!dax_is_locked(entry))
228 return entry;
229
230 wq = dax_entry_waitqueue(xas, entry, &ewait.key);
231 prepare_to_wait_exclusive(wq, &ewait.wait,
232 TASK_UNINTERRUPTIBLE);
233 xas_unlock_irq(xas);
234 xas_reset(xas);
235 schedule();
236 finish_wait(wq, &ewait.wait);
237 xas_lock_irq(xas);
238 }
239 }
240
241 /*
242 * The only thing keeping the address space around is the i_pages lock
243  * (it's cycled in clear_inode() after removing the entries from i_pages).
244 * After we call xas_unlock_irq(), we cannot touch xas->xa.
245 */
246 static void wait_entry_unlocked(struct xa_state *xas, void *entry)
247 {
248 struct wait_exceptional_entry_queue ewait;
249 wait_queue_head_t *wq;
250
251 init_wait(&ewait.wait);
252 ewait.wait.func = wake_exceptional_entry_func;
253
254 wq = dax_entry_waitqueue(xas, entry, &ewait.key);
255 /*
256 * Unlike get_unlocked_entry() there is no guarantee that this
257 * path ever successfully retrieves an unlocked entry before an
258 * inode dies. Perform a non-exclusive wait in case this path
259 * never successfully performs its own wake up.
260 */
261 prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
262 xas_unlock_irq(xas);
263 schedule();
264 finish_wait(wq, &ewait.wait);
265 }
266
267 static void put_unlocked_entry(struct xa_state *xas, void *entry)
268 {
269 /* If we were the only waiter woken, wake the next one */
270 if (entry && !dax_is_conflict(entry))
271 dax_wake_entry(xas, entry, false);
272 }
273
274 /*
275 * We used the xa_state to get the entry, but then we locked the entry and
276 * dropped the xa_lock, so we know the xa_state is stale and must be reset
277 * before use.
278 */
279 static void dax_unlock_entry(struct xa_state *xas, void *entry)
280 {
281 void *old;
282
283 BUG_ON(dax_is_locked(entry));
284 xas_reset(xas);
285 xas_lock_irq(xas);
286 old = xas_store(xas, entry);
287 xas_unlock_irq(xas);
288 BUG_ON(!dax_is_locked(old));
289 dax_wake_entry(xas, entry, false);
290 }
291
292 /*
293 * Return: The entry stored at this location before it was locked.
294 */
295 static void *dax_lock_entry(struct xa_state *xas, void *entry)
296 {
297 unsigned long v = xa_to_value(entry);
298 return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
299 }
300
301 static unsigned long dax_entry_size(void *entry)
302 {
303 if (dax_is_zero_entry(entry))
304 return 0;
305 else if (dax_is_empty_entry(entry))
306 return 0;
307 else if (dax_is_pmd_entry(entry))
308 return PMD_SIZE;
309 else
310 return PAGE_SIZE;
311 }
312
313 static unsigned long dax_end_pfn(void *entry)
314 {
315 return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
316 }
317
318 /*
319 * Iterate through all mapped pfns represented by an entry, i.e. skip
320 * 'empty' and 'zero' entries.
321 */
322 #define for_each_mapped_pfn(entry, pfn) \
323 for (pfn = dax_to_pfn(entry); \
324 pfn < dax_end_pfn(entry); pfn++)
325
326 /*
327 * TODO: for reflink+dax we need a way to associate a single page with
328 * multiple address_space instances at different linear_page_index()
329 * offsets.
330 */
331 static void dax_associate_entry(void *entry, struct address_space *mapping,
332 struct vm_area_struct *vma, unsigned long address)
333 {
334 unsigned long size = dax_entry_size(entry), pfn, index;
335 int i = 0;
336
337 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
338 return;
339
340 index = linear_page_index(vma, address & ~(size - 1));
341 for_each_mapped_pfn(entry, pfn) {
342 struct page *page = pfn_to_page(pfn);
343
344 WARN_ON_ONCE(page->mapping);
345 page->mapping = mapping;
346 page->index = index + i++;
347 }
348 }
349
350 static void dax_disassociate_entry(void *entry, struct address_space *mapping,
351 bool trunc)
352 {
353 unsigned long pfn;
354
355 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
356 return;
357
358 for_each_mapped_pfn(entry, pfn) {
359 struct page *page = pfn_to_page(pfn);
360
361 WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
362 WARN_ON_ONCE(page->mapping && page->mapping != mapping);
363 page->mapping = NULL;
364 page->index = 0;
365 }
366 }
367
368 static struct page *dax_busy_page(void *entry)
369 {
370 unsigned long pfn;
371
372 for_each_mapped_pfn(entry, pfn) {
373 struct page *page = pfn_to_page(pfn);
374
375 if (page_ref_count(page) > 1)
376 return page;
377 }
378 return NULL;
379 }
380
381 /*
382  * dax_lock_page - Lock the DAX entry corresponding to a page
383 * @page: The page whose entry we want to lock
384 *
385 * Context: Process context.
386 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
387 * not be locked.
388 */
389 dax_entry_t dax_lock_page(struct page *page)
390 {
391 XA_STATE(xas, NULL, 0);
392 void *entry;
393
394 /* Ensure page->mapping isn't freed while we look at it */
395 rcu_read_lock();
396 for (;;) {
397 struct address_space *mapping = READ_ONCE(page->mapping);
398
399 entry = NULL;
400 if (!mapping || !dax_mapping(mapping))
401 break;
402
403 /*
404 * In the device-dax case there's no need to lock, a
405 * struct dev_pagemap pin is sufficient to keep the
406 * inode alive, and we assume we have dev_pagemap pin
407 * otherwise we would not have a valid pfn_to_page()
408 * translation.
409 */
410 entry = (void *)~0UL;
411 if (S_ISCHR(mapping->host->i_mode))
412 break;
413
414 xas.xa = &mapping->i_pages;
415 xas_lock_irq(&xas);
416 if (mapping != page->mapping) {
417 xas_unlock_irq(&xas);
418 continue;
419 }
420 xas_set(&xas, page->index);
421 entry = xas_load(&xas);
422 if (dax_is_locked(entry)) {
423 rcu_read_unlock();
424 wait_entry_unlocked(&xas, entry);
425 rcu_read_lock();
426 continue;
427 }
428 dax_lock_entry(&xas, entry);
429 xas_unlock_irq(&xas);
430 break;
431 }
432 rcu_read_unlock();
433 return (dax_entry_t)entry;
434 }
435
436 void dax_unlock_page(struct page *page, dax_entry_t cookie)
437 {
438 struct address_space *mapping = page->mapping;
439 XA_STATE(xas, &mapping->i_pages, page->index);
440
441 if (S_ISCHR(mapping->host->i_mode))
442 return;
443
444 dax_unlock_entry(&xas, (void *)cookie);
445 }
446
447 /*
448 * Find page cache entry at given index. If it is a DAX entry, return it
449 * with the entry locked. If the page cache doesn't contain an entry at
450 * that index, add a locked empty entry.
451 *
452 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
453 * either return that locked entry or will return VM_FAULT_FALLBACK.
454 * This will happen if there are any PTE entries within the PMD range
455 * that we are requesting.
456 *
457 * We always favor PTE entries over PMD entries. There isn't a flow where we
458 * evict PTE entries in order to 'upgrade' them to a PMD entry. A PMD
459 * insertion will fail if it finds any PTE entries already in the tree, and a
460 * PTE insertion will cause an existing PMD entry to be unmapped and
461 * downgraded to PTE entries. This happens for both PMD zero pages as
462 * well as PMD empty entries.
463 *
464 * The exception to this downgrade path is for PMD entries that have
465 * real storage backing them. We will leave these real PMD entries in
466 * the tree, and PTE writes will simply dirty the entire PMD entry.
467 *
468 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
469 * persistent memory the benefit is doubtful. We can add that later if we can
470 * show it helps.
471 *
472 * On error, this function does not return an ERR_PTR. Instead it returns
473 * a VM_FAULT code, encoded as an xarray internal entry. The ERR_PTR values
474 * overlap with xarray value entries.
475 */
476 static void *grab_mapping_entry(struct xa_state *xas,
477 struct address_space *mapping, unsigned int order)
478 {
479 unsigned long index = xas->xa_index;
480 bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
481 void *entry;
482
483 retry:
484 xas_lock_irq(xas);
485 entry = get_unlocked_entry(xas, order);
486
487 if (entry) {
488 if (dax_is_conflict(entry))
489 goto fallback;
490 if (!xa_is_value(entry)) {
491 xas_set_err(xas, EIO);
492 goto out_unlock;
493 }
494
495 if (order == 0) {
496 if (dax_is_pmd_entry(entry) &&
497 (dax_is_zero_entry(entry) ||
498 dax_is_empty_entry(entry))) {
499 pmd_downgrade = true;
500 }
501 }
502 }
503
504 if (pmd_downgrade) {
505 /*
506 * Make sure 'entry' remains valid while we drop
507 * the i_pages lock.
508 */
509 dax_lock_entry(xas, entry);
510
511 /*
512                  * Besides huge zero pages, the only other things that get
513                  * downgraded are empty entries, which don't need to be
514                  * unmapped.
515 */
516 if (dax_is_zero_entry(entry)) {
517 xas_unlock_irq(xas);
518 unmap_mapping_pages(mapping,
519 xas->xa_index & ~PG_PMD_COLOUR,
520 PG_PMD_NR, false);
521 xas_reset(xas);
522 xas_lock_irq(xas);
523 }
524
525 dax_disassociate_entry(entry, mapping, false);
526 xas_store(xas, NULL); /* undo the PMD join */
527 dax_wake_entry(xas, entry, true);
528 mapping->nrexceptional--;
529 entry = NULL;
530 xas_set(xas, index);
531 }
532
533 if (entry) {
534 dax_lock_entry(xas, entry);
535 } else {
536 unsigned long flags = DAX_EMPTY;
537
538 if (order > 0)
539 flags |= DAX_PMD;
540 entry = dax_make_entry(pfn_to_pfn_t(0), flags);
541 dax_lock_entry(xas, entry);
542 if (xas_error(xas))
543 goto out_unlock;
544 mapping->nrexceptional++;
545 }
546
547 out_unlock:
548 xas_unlock_irq(xas);
549 if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
550 goto retry;
551 if (xas->xa_node == XA_ERROR(-ENOMEM))
552 return xa_mk_internal(VM_FAULT_OOM);
553 if (xas_error(xas))
554 return xa_mk_internal(VM_FAULT_SIGBUS);
555 return entry;
556 fallback:
557 xas_unlock_irq(xas);
558 return xa_mk_internal(VM_FAULT_FALLBACK);
559 }
560
561 /**
562 * dax_layout_busy_page - find first pinned page in @mapping
563 * @mapping: address space to scan for a page with ref count > 1
564 *
565 * DAX requires ZONE_DEVICE mapped pages. These pages are never
566 * 'onlined' to the page allocator so they are considered idle when
567 * page->count == 1. A filesystem uses this interface to determine if
568 * any page in the mapping is busy, i.e. for DMA, or other
569 * get_user_pages() usages.
570 *
571 * It is expected that the filesystem is holding locks to block the
572 * establishment of new mappings in this address_space. I.e. it expects
573 * to be able to run unmap_mapping_range() and subsequently not race
574 * mapping_mapped() becoming true.
575 */
576 struct page *dax_layout_busy_page(struct address_space *mapping)
577 {
578 XA_STATE(xas, &mapping->i_pages, 0);
579 void *entry;
580 unsigned int scanned = 0;
581 struct page *page = NULL;
582
583 /*
584 * In the 'limited' case get_user_pages() for dax is disabled.
585 */
586 if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
587 return NULL;
588
589 if (!dax_mapping(mapping) || !mapping_mapped(mapping))
590 return NULL;
591
592 /*
593 * If we race get_user_pages_fast() here either we'll see the
594 * elevated page count in the iteration and wait, or
595 * get_user_pages_fast() will see that the page it took a reference
596 * against is no longer mapped in the page tables and bail to the
597 * get_user_pages() slow path. The slow path is protected by
598 * pte_lock() and pmd_lock(). New references are not taken without
599 * holding those locks, and unmap_mapping_range() will not zero the
600 * pte or pmd without holding the respective lock, so we are
601 * guaranteed to either see new references or prevent new
602 * references from being established.
603 */
604 unmap_mapping_range(mapping, 0, 0, 0);
605
606 xas_lock_irq(&xas);
607 xas_for_each(&xas, entry, ULONG_MAX) {
608 if (WARN_ON_ONCE(!xa_is_value(entry)))
609 continue;
610 if (unlikely(dax_is_locked(entry)))
611 entry = get_unlocked_entry(&xas, 0);
612 if (entry)
613 page = dax_busy_page(entry);
614 put_unlocked_entry(&xas, entry);
615 if (page)
616 break;
617 if (++scanned % XA_CHECK_SCHED)
618 continue;
619
620 xas_pause(&xas);
621 xas_unlock_irq(&xas);
622 cond_resched();
623 xas_lock_irq(&xas);
624 }
625 xas_unlock_irq(&xas);
626 return page;
627 }
628 EXPORT_SYMBOL_GPL(dax_layout_busy_page);
629
630 static int __dax_invalidate_entry(struct address_space *mapping,
631 pgoff_t index, bool trunc)
632 {
633 XA_STATE(xas, &mapping->i_pages, index);
634 int ret = 0;
635 void *entry;
636
637 xas_lock_irq(&xas);
638 entry = get_unlocked_entry(&xas, 0);
639 if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
640 goto out;
641 if (!trunc &&
642 (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
643 xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
644 goto out;
645 dax_disassociate_entry(entry, mapping, trunc);
646 xas_store(&xas, NULL);
647 mapping->nrexceptional--;
648 ret = 1;
649 out:
650 put_unlocked_entry(&xas, entry);
651 xas_unlock_irq(&xas);
652 return ret;
653 }
654
655 /*
656 * Delete DAX entry at @index from @mapping. Wait for it
657 * to be unlocked before deleting it.
658 */
659 int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
660 {
661 int ret = __dax_invalidate_entry(mapping, index, true);
662
663 /*
664          * This gets called from the truncate / punch_hole path. As such, the caller
665 * must hold locks protecting against concurrent modifications of the
666 * page cache (usually fs-private i_mmap_sem for writing). Since the
667 * caller has seen a DAX entry for this index, we better find it
668 * at that index as well...
669 */
670 WARN_ON_ONCE(!ret);
671 return ret;
672 }
673
674 /*
675 * Invalidate DAX entry if it is clean.
676 */
677 int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
678 pgoff_t index)
679 {
680 return __dax_invalidate_entry(mapping, index, false);
681 }
682
683 static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
684 sector_t sector, size_t size, struct page *to,
685 unsigned long vaddr)
686 {
687 void *vto, *kaddr;
688 pgoff_t pgoff;
689 long rc;
690 int id;
691
692 rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
693 if (rc)
694 return rc;
695
696 id = dax_read_lock();
697 rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
698 if (rc < 0) {
699 dax_read_unlock(id);
700 return rc;
701 }
702 vto = kmap_atomic(to);
703 copy_user_page(vto, (void __force *)kaddr, vaddr, to);
704 kunmap_atomic(vto);
705 dax_read_unlock(id);
706 return 0;
707 }
708
709 /*
710 * By this point grab_mapping_entry() has ensured that we have a locked entry
711 * of the appropriate size so we don't have to worry about downgrading PMDs to
712 * PTEs. If we happen to be trying to insert a PTE and there is a PMD
713 * already in the tree, we will skip the insertion and just dirty the PMD as
714 * appropriate.
715 */
716 static void *dax_insert_entry(struct xa_state *xas,
717 struct address_space *mapping, struct vm_fault *vmf,
718 void *entry, pfn_t pfn, unsigned long flags, bool dirty)
719 {
720 void *new_entry = dax_make_entry(pfn, flags);
721
722 if (dirty)
723 __mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
724
725 if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
726 unsigned long index = xas->xa_index;
727 /* we are replacing a zero page with block mapping */
728 if (dax_is_pmd_entry(entry))
729 unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
730 PG_PMD_NR, false);
731 else /* pte entry */
732 unmap_mapping_pages(mapping, index, 1, false);
733 }
734
735 xas_reset(xas);
736 xas_lock_irq(xas);
737 if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
738 void *old;
739
740 dax_disassociate_entry(entry, mapping, false);
741 dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
742 /*
743 * Only swap our new entry into the page cache if the current
744 * entry is a zero page or an empty entry. If a normal PTE or
745 * PMD entry is already in the cache, we leave it alone. This
746 * means that if we are trying to insert a PTE and the
747 * existing entry is a PMD, we will just leave the PMD in the
748 * tree and dirty it if necessary.
749 */
750 old = dax_lock_entry(xas, new_entry);
751 WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
752 DAX_LOCKED));
753 entry = new_entry;
754 } else {
755 xas_load(xas); /* Walk the xa_state */
756 }
757
758 if (dirty)
759 xas_set_mark(xas, PAGECACHE_TAG_DIRTY);
760
761 xas_unlock_irq(xas);
762 return entry;
763 }
764
765 static inline
766 unsigned long pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
767 {
768 unsigned long address;
769
770 address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
771 VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
772 return address;
773 }
774
775 /* Walk all mappings of a given index of a file and writeprotect them */
776 static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
777 unsigned long pfn)
778 {
779 struct vm_area_struct *vma;
780 pte_t pte, *ptep = NULL;
781 pmd_t *pmdp = NULL;
782 spinlock_t *ptl;
783
784 i_mmap_lock_read(mapping);
785 vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
786 struct mmu_notifier_range range;
787 unsigned long address;
788
789 cond_resched();
790
791 if (!(vma->vm_flags & VM_SHARED))
792 continue;
793
794 address = pgoff_address(index, vma);
795
796 /*
797 * Note because we provide range to follow_pte_pmd it will
798 * call mmu_notifier_invalidate_range_start() on our behalf
799 * before taking any lock.
800 */
801 if (follow_pte_pmd(vma->vm_mm, address, &range,
802 &ptep, &pmdp, &ptl))
803 continue;
804
805 /*
806 * No need to call mmu_notifier_invalidate_range() as we are
807 * downgrading page table protection not changing it to point
808 * to a new page.
809 *
810 * See Documentation/vm/mmu_notifier.rst
811 */
812 if (pmdp) {
813 #ifdef CONFIG_FS_DAX_PMD
814 pmd_t pmd;
815
816 if (pfn != pmd_pfn(*pmdp))
817 goto unlock_pmd;
818 if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
819 goto unlock_pmd;
820
821 flush_cache_page(vma, address, pfn);
822 pmd = pmdp_invalidate(vma, address, pmdp);
823 pmd = pmd_wrprotect(pmd);
824 pmd = pmd_mkclean(pmd);
825 set_pmd_at(vma->vm_mm, address, pmdp, pmd);
826 unlock_pmd:
827 #endif
828 spin_unlock(ptl);
829 } else {
830 if (pfn != pte_pfn(*ptep))
831 goto unlock_pte;
832 if (!pte_dirty(*ptep) && !pte_write(*ptep))
833 goto unlock_pte;
834
835 flush_cache_page(vma, address, pfn);
836 pte = ptep_clear_flush(vma, address, ptep);
837 pte = pte_wrprotect(pte);
838 pte = pte_mkclean(pte);
839 set_pte_at(vma->vm_mm, address, ptep, pte);
840 unlock_pte:
841 pte_unmap_unlock(ptep, ptl);
842 }
843
844 mmu_notifier_invalidate_range_end(&range);
845 }
846 i_mmap_unlock_read(mapping);
847 }
848
849 static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
850 struct address_space *mapping, void *entry)
851 {
852 unsigned long pfn, index, count;
853 long ret = 0;
854
855 /*
856 * A page got tagged dirty in DAX mapping? Something is seriously
857 * wrong.
858 */
859 if (WARN_ON(!xa_is_value(entry)))
860 return -EIO;
861
862 if (unlikely(dax_is_locked(entry))) {
863 void *old_entry = entry;
864
865 entry = get_unlocked_entry(xas, 0);
866
867 /* Entry got punched out / reallocated? */
868 if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
869 goto put_unlocked;
870 /*
871 * Entry got reallocated elsewhere? No need to writeback.
872 * We have to compare pfns as we must not bail out due to
873 * difference in lockbit or entry type.
874 */
875 if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
876 goto put_unlocked;
877 if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
878 dax_is_zero_entry(entry))) {
879 ret = -EIO;
880 goto put_unlocked;
881 }
882
883 /* Another fsync thread may have already done this entry */
884 if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
885 goto put_unlocked;
886 }
887
888 /* Lock the entry to serialize with page faults */
889 dax_lock_entry(xas, entry);
890
891 /*
892 * We can clear the tag now but we have to be careful so that concurrent
893 * dax_writeback_one() calls for the same index cannot finish before we
894 * actually flush the caches. This is achieved as the calls will look
895 * at the entry only under the i_pages lock and once they do that
896 * they will see the entry locked and wait for it to unlock.
897 */
898 xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
899 xas_unlock_irq(xas);
900
901 /*
902 * If dax_writeback_mapping_range() was given a wbc->range_start
903 * in the middle of a PMD, the 'index' we use needs to be
904 * aligned to the start of the PMD.
905 * This allows us to flush for PMD_SIZE and not have to worry about
906 * partial PMD writebacks.
907 */
908 pfn = dax_to_pfn(entry);
909 count = 1UL << dax_entry_order(entry);
910 index = xas->xa_index & ~(count - 1);
911
912 dax_entry_mkclean(mapping, index, pfn);
913 dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
914 /*
915 * After we have flushed the cache, we can clear the dirty tag. There
916 * cannot be new dirty data in the pfn after the flush has completed as
917 * the pfn mappings are writeprotected and fault waits for mapping
918 * entry lock.
919 */
920 xas_reset(xas);
921 xas_lock_irq(xas);
922 xas_store(xas, entry);
923 xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
924 dax_wake_entry(xas, entry, false);
925
926 trace_dax_writeback_one(mapping->host, index, count);
927 return ret;
928
929 put_unlocked:
930 put_unlocked_entry(xas, entry);
931 return ret;
932 }
933
934 /*
935 * Flush the mapping to the persistent domain within the byte range of [start,
936 * end]. This is required by data integrity operations to ensure file data is
937 * on persistent storage prior to completion of the operation.
938 */
939 int dax_writeback_mapping_range(struct address_space *mapping,
940 struct block_device *bdev, struct writeback_control *wbc)
941 {
942 XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
943 struct inode *inode = mapping->host;
944 pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
945 struct dax_device *dax_dev;
946 void *entry;
947 int ret = 0;
948 unsigned int scanned = 0;
949
950 if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
951 return -EIO;
952
953 if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
954 return 0;
955
956 dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
957 if (!dax_dev)
958 return -EIO;
959
960 trace_dax_writeback_range(inode, xas.xa_index, end_index);
961
962 tag_pages_for_writeback(mapping, xas.xa_index, end_index);
963
964 xas_lock_irq(&xas);
965 xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
966 ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
967 if (ret < 0) {
968 mapping_set_error(mapping, ret);
969 break;
970 }
971 if (++scanned % XA_CHECK_SCHED)
972 continue;
973
974 xas_pause(&xas);
975 xas_unlock_irq(&xas);
976 cond_resched();
977 xas_lock_irq(&xas);
978 }
979 xas_unlock_irq(&xas);
980 put_dax(dax_dev);
981 trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
982 return ret;
983 }
984 EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
985
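/*
 * Illustrative note: iomap->addr is the disk byte address where the
 * mapped extent starts and iomap->offset is the file offset it covers,
 * so the expression below translates the page containing @pos into a
 * disk byte address; the final >> 9 converts that to a 512-byte sector
 * suitable for bdev_dax_pgoff().
 */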
986 static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
987 {
988 return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
989 }
990
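/*
 * Illustrative note: besides translating @pos to a pfn, the checks below
 * verify that the mapping is at least @size bytes long, that the pfn is
 * naturally aligned to @size (e.g. 2MiB aligned for a PMD fault), and
 * that anything larger than a single page is devmap-backed so it can be
 * inserted as a huge mapping.
 */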
991 static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
992 pfn_t *pfnp)
993 {
994 const sector_t sector = dax_iomap_sector(iomap, pos);
995 pgoff_t pgoff;
996 int id, rc;
997 long length;
998
999 rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
1000 if (rc)
1001 return rc;
1002 id = dax_read_lock();
1003 length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
1004 NULL, pfnp);
1005 if (length < 0) {
1006 rc = length;
1007 goto out;
1008 }
1009 rc = -EINVAL;
1010 if (PFN_PHYS(length) < size)
1011 goto out;
1012 if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
1013 goto out;
1014 /* For larger pages we need devmap */
1015 if (length > 1 && !pfn_t_devmap(*pfnp))
1016 goto out;
1017 rc = 0;
1018 out:
1019 dax_read_unlock(id);
1020 return rc;
1021 }
1022
1023 /*
1024 * The user has performed a load from a hole in the file. Allocating a new
1025 * page in the file would cause excessive storage usage for workloads with
1026 * sparse files. Instead we insert a read-only mapping of the 4k zero page.
1027 * If this page is ever written to we will re-fault and change the mapping to
1028 * point to real DAX storage instead.
1029 */
1030 static vm_fault_t dax_load_hole(struct xa_state *xas,
1031 struct address_space *mapping, void **entry,
1032 struct vm_fault *vmf)
1033 {
1034 struct inode *inode = mapping->host;
1035 unsigned long vaddr = vmf->address;
1036 pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
1037 vm_fault_t ret;
1038
1039 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1040 DAX_ZERO_PAGE, false);
1041
1042 ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
1043 trace_dax_load_hole(inode, vmf, ret);
1044 return ret;
1045 }
1046
1047 static bool dax_range_is_aligned(struct block_device *bdev,
1048 unsigned int offset, unsigned int length)
1049 {
1050 unsigned short sector_size = bdev_logical_block_size(bdev);
1051
1052 if (!IS_ALIGNED(offset, sector_size))
1053 return false;
1054 if (!IS_ALIGNED(length, sector_size))
1055 return false;
1056
1057 return true;
1058 }
1059
1060 int __dax_zero_page_range(struct block_device *bdev,
1061 struct dax_device *dax_dev, sector_t sector,
1062 unsigned int offset, unsigned int size)
1063 {
1064 if (dax_range_is_aligned(bdev, offset, size)) {
1065 sector_t start_sector = sector + (offset >> 9);
1066
1067 return blkdev_issue_zeroout(bdev, start_sector,
1068 size >> 9, GFP_NOFS, 0);
1069 } else {
1070 pgoff_t pgoff;
1071 long rc, id;
1072 void *kaddr;
1073
1074 rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
1075 if (rc)
1076 return rc;
1077
1078 id = dax_read_lock();
1079 rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
1080 if (rc < 0) {
1081 dax_read_unlock(id);
1082 return rc;
1083 }
1084 memset(kaddr + offset, 0, size);
1085 dax_flush(dax_dev, kaddr + offset, size);
1086 dax_read_unlock(id);
1087 }
1088 return 0;
1089 }
1090 EXPORT_SYMBOL_GPL(__dax_zero_page_range);
1091
1092 static loff_t
1093 dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
1094 struct iomap *iomap, struct iomap *srcmap)
1095 {
1096 struct block_device *bdev = iomap->bdev;
1097 struct dax_device *dax_dev = iomap->dax_dev;
1098 struct iov_iter *iter = data;
1099 loff_t end = pos + length, done = 0;
1100 ssize_t ret = 0;
1101 size_t xfer;
1102 int id;
1103
1104 if (iov_iter_rw(iter) == READ) {
1105 end = min(end, i_size_read(inode));
1106 if (pos >= end)
1107 return 0;
1108
1109 if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
1110 return iov_iter_zero(min(length, end - pos), iter);
1111 }
1112
1113 if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
1114 return -EIO;
1115
1116 /*
1117          * A write can allocate a block for an area which has a hole page mapped
1118 * into page tables. We have to tear down these mappings so that data
1119 * written by write(2) is visible in mmap.
1120 */
1121 if (iomap->flags & IOMAP_F_NEW) {
1122 invalidate_inode_pages2_range(inode->i_mapping,
1123 pos >> PAGE_SHIFT,
1124 (end - 1) >> PAGE_SHIFT);
1125 }
1126
1127 id = dax_read_lock();
1128 while (pos < end) {
1129 unsigned offset = pos & (PAGE_SIZE - 1);
1130 const size_t size = ALIGN(length + offset, PAGE_SIZE);
1131 const sector_t sector = dax_iomap_sector(iomap, pos);
1132 ssize_t map_len;
1133 pgoff_t pgoff;
1134 void *kaddr;
1135
1136 if (fatal_signal_pending(current)) {
1137 ret = -EINTR;
1138 break;
1139 }
1140
1141 ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
1142 if (ret)
1143 break;
1144
1145 map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
1146 &kaddr, NULL);
1147 if (map_len < 0) {
1148 ret = map_len;
1149 break;
1150 }
1151
1152 map_len = PFN_PHYS(map_len);
1153 kaddr += offset;
1154 map_len -= offset;
1155 if (map_len > end - pos)
1156 map_len = end - pos;
1157
1158 /*
1159 * The userspace address for the memory copy has already been
1160 * validated via access_ok() in either vfs_read() or
1161 * vfs_write(), depending on which operation we are doing.
1162 */
1163 if (iov_iter_rw(iter) == WRITE)
1164 xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
1165 map_len, iter);
1166 else
1167 xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
1168 map_len, iter);
1169
1170 pos += xfer;
1171 length -= xfer;
1172 done += xfer;
1173
1174 if (xfer == 0)
1175 ret = -EFAULT;
1176 if (xfer < map_len)
1177 break;
1178 }
1179 dax_read_unlock(id);
1180
1181 return done ? done : ret;
1182 }
1183
1184 /**
1185 * dax_iomap_rw - Perform I/O to a DAX file
1186 * @iocb: The control block for this I/O
1187 * @iter: The addresses to do I/O from or to
1188 * @ops: iomap ops passed from the file system
1189 *
1190 * This function performs read and write operations to directly mapped
1191  * persistent memory. The caller needs to take care of read/write exclusion
1192 * and evicting any page cache pages in the region under I/O.
1193 */
1194 ssize_t
1195 dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
1196 const struct iomap_ops *ops)
1197 {
1198 struct address_space *mapping = iocb->ki_filp->f_mapping;
1199 struct inode *inode = mapping->host;
1200 loff_t pos = iocb->ki_pos, ret = 0, done = 0;
1201 unsigned flags = 0;
1202
1203 if (iov_iter_rw(iter) == WRITE) {
1204 lockdep_assert_held_write(&inode->i_rwsem);
1205 flags |= IOMAP_WRITE;
1206 } else {
1207 lockdep_assert_held(&inode->i_rwsem);
1208 }
1209
1210 while (iov_iter_count(iter)) {
1211 ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
1212 iter, dax_iomap_actor);
1213 if (ret <= 0)
1214 break;
1215 pos += ret;
1216 done += ret;
1217 }
1218
1219 iocb->ki_pos += done;
1220 return done ? done : ret;
1221 }
1222 EXPORT_SYMBOL_GPL(dax_iomap_rw);
1223
1224 static vm_fault_t dax_fault_return(int error)
1225 {
1226 if (error == 0)
1227 return VM_FAULT_NOPAGE;
1228 return vmf_error(error);
1229 }
1230
1231 /*
1232 * MAP_SYNC on a dax mapping guarantees dirty metadata is
1233 * flushed on write-faults (non-cow), but not read-faults.
1234 */
1235 static bool dax_fault_is_synchronous(unsigned long flags,
1236 struct vm_area_struct *vma, struct iomap *iomap)
1237 {
1238 return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
1239 && (iomap->flags & IOMAP_F_DIRTY);
1240 }
1241
1242 static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
1243 int *iomap_errp, const struct iomap_ops *ops)
1244 {
1245 struct vm_area_struct *vma = vmf->vma;
1246 struct address_space *mapping = vma->vm_file->f_mapping;
1247 XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
1248 struct inode *inode = mapping->host;
1249 unsigned long vaddr = vmf->address;
1250 loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
1251 struct iomap iomap = { .type = IOMAP_HOLE };
1252 struct iomap srcmap = { .type = IOMAP_HOLE };
1253 unsigned flags = IOMAP_FAULT;
1254 int error, major = 0;
1255 bool write = vmf->flags & FAULT_FLAG_WRITE;
1256 bool sync;
1257 vm_fault_t ret = 0;
1258 void *entry;
1259 pfn_t pfn;
1260
1261 trace_dax_pte_fault(inode, vmf, ret);
1262 /*
1263 * Check whether offset isn't beyond end of file now. Caller is supposed
1264 * to hold locks serializing us with truncate / punch hole so this is
1265 * a reliable test.
1266 */
1267 if (pos >= i_size_read(inode)) {
1268 ret = VM_FAULT_SIGBUS;
1269 goto out;
1270 }
1271
1272 if (write && !vmf->cow_page)
1273 flags |= IOMAP_WRITE;
1274
1275 entry = grab_mapping_entry(&xas, mapping, 0);
1276 if (xa_is_internal(entry)) {
1277 ret = xa_to_internal(entry);
1278 goto out;
1279 }
1280
1281 /*
1282 * It is possible, particularly with mixed reads & writes to private
1283 * mappings, that we have raced with a PMD fault that overlaps with
1284 * the PTE we need to set up. If so just return and the fault will be
1285 * retried.
1286 */
1287 if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
1288 ret = VM_FAULT_NOPAGE;
1289 goto unlock_entry;
1290 }
1291
1292 /*
1293          * Note that we don't bother to use iomap_apply here: DAX requires
1294          * the file system block size to be equal to the page size, which means
1295 * that we never have to deal with more than a single extent here.
1296 */
1297 error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap, &srcmap);
1298 if (iomap_errp)
1299 *iomap_errp = error;
1300 if (error) {
1301 ret = dax_fault_return(error);
1302 goto unlock_entry;
1303 }
1304 if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
1305 error = -EIO; /* fs corruption? */
1306 goto error_finish_iomap;
1307 }
1308
1309 if (vmf->cow_page) {
1310 sector_t sector = dax_iomap_sector(&iomap, pos);
1311
1312 switch (iomap.type) {
1313 case IOMAP_HOLE:
1314 case IOMAP_UNWRITTEN:
1315 clear_user_highpage(vmf->cow_page, vaddr);
1316 break;
1317 case IOMAP_MAPPED:
1318 error = copy_user_dax(iomap.bdev, iomap.dax_dev,
1319 sector, PAGE_SIZE, vmf->cow_page, vaddr);
1320 break;
1321 default:
1322 WARN_ON_ONCE(1);
1323 error = -EIO;
1324 break;
1325 }
1326
1327 if (error)
1328 goto error_finish_iomap;
1329
1330 __SetPageUptodate(vmf->cow_page);
1331 ret = finish_fault(vmf);
1332 if (!ret)
1333 ret = VM_FAULT_DONE_COW;
1334 goto finish_iomap;
1335 }
1336
1337 sync = dax_fault_is_synchronous(flags, vma, &iomap);
1338
1339 switch (iomap.type) {
1340 case IOMAP_MAPPED:
1341 if (iomap.flags & IOMAP_F_NEW) {
1342 count_vm_event(PGMAJFAULT);
1343 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
1344 major = VM_FAULT_MAJOR;
1345 }
1346 error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
1347 if (error < 0)
1348 goto error_finish_iomap;
1349
1350 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1351 0, write && !sync);
1352
1353 /*
1354 * If we are doing synchronous page fault and inode needs fsync,
1355 * we can insert PTE into page tables only after that happens.
1356 * Skip insertion for now and return the pfn so that caller can
1357 * insert it after fsync is done.
1358 */
1359 if (sync) {
1360 if (WARN_ON_ONCE(!pfnp)) {
1361 error = -EIO;
1362 goto error_finish_iomap;
1363 }
1364 *pfnp = pfn;
1365 ret = VM_FAULT_NEEDDSYNC | major;
1366 goto finish_iomap;
1367 }
1368 trace_dax_insert_mapping(inode, vmf, entry);
1369 if (write)
1370 ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
1371 else
1372 ret = vmf_insert_mixed(vma, vaddr, pfn);
1373
1374 goto finish_iomap;
1375 case IOMAP_UNWRITTEN:
1376 case IOMAP_HOLE:
1377 if (!write) {
1378 ret = dax_load_hole(&xas, mapping, &entry, vmf);
1379 goto finish_iomap;
1380 }
1381 /*FALLTHRU*/
1382 default:
1383 WARN_ON_ONCE(1);
1384 error = -EIO;
1385 break;
1386 }
1387
1388 error_finish_iomap:
1389 ret = dax_fault_return(error);
1390 finish_iomap:
1391 if (ops->iomap_end) {
1392 int copied = PAGE_SIZE;
1393
1394 if (ret & VM_FAULT_ERROR)
1395 copied = 0;
1396 /*
1397                  * The fault is done by now and there's no way back (another
1398                  * thread may already be happily using the PTE we have installed).
1399 * Just ignore error from ->iomap_end since we cannot do much
1400 * with it.
1401 */
1402 ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
1403 }
1404 unlock_entry:
1405 dax_unlock_entry(&xas, entry);
1406 out:
1407 trace_dax_pte_fault_done(inode, vmf, ret);
1408 return ret | major;
1409 }
1410
1411 #ifdef CONFIG_FS_DAX_PMD
1412 static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
1413 struct iomap *iomap, void **entry)
1414 {
1415 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1416 unsigned long pmd_addr = vmf->address & PMD_MASK;
1417 struct vm_area_struct *vma = vmf->vma;
1418 struct inode *inode = mapping->host;
1419 pgtable_t pgtable = NULL;
1420 struct page *zero_page;
1421 spinlock_t *ptl;
1422 pmd_t pmd_entry;
1423 pfn_t pfn;
1424
1425 zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);
1426
1427 if (unlikely(!zero_page))
1428 goto fallback;
1429
1430 pfn = page_to_pfn_t(zero_page);
1431 *entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
1432 DAX_PMD | DAX_ZERO_PAGE, false);
1433
1434 if (arch_needs_pgtable_deposit()) {
1435 pgtable = pte_alloc_one(vma->vm_mm);
1436 if (!pgtable)
1437 return VM_FAULT_OOM;
1438 }
1439
1440 ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
1441 if (!pmd_none(*(vmf->pmd))) {
1442 spin_unlock(ptl);
1443 goto fallback;
1444 }
1445
1446 if (pgtable) {
1447 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
1448 mm_inc_nr_ptes(vma->vm_mm);
1449 }
1450 pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
1451 pmd_entry = pmd_mkhuge(pmd_entry);
1452 set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
1453 spin_unlock(ptl);
1454 trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
1455 return VM_FAULT_NOPAGE;
1456
1457 fallback:
1458 if (pgtable)
1459 pte_free(vma->vm_mm, pgtable);
1460 trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
1461 return VM_FAULT_FALLBACK;
1462 }
1463
1464 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1465 const struct iomap_ops *ops)
1466 {
1467 struct vm_area_struct *vma = vmf->vma;
1468 struct address_space *mapping = vma->vm_file->f_mapping;
1469 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
1470 unsigned long pmd_addr = vmf->address & PMD_MASK;
1471 bool write = vmf->flags & FAULT_FLAG_WRITE;
1472 bool sync;
1473 unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
1474 struct inode *inode = mapping->host;
1475 vm_fault_t result = VM_FAULT_FALLBACK;
1476 struct iomap iomap = { .type = IOMAP_HOLE };
1477 struct iomap srcmap = { .type = IOMAP_HOLE };
1478 pgoff_t max_pgoff;
1479 void *entry;
1480 loff_t pos;
1481 int error;
1482 pfn_t pfn;
1483
1484 /*
1485 * Check whether offset isn't beyond end of file now. Caller is
1486 * supposed to hold locks serializing us with truncate / punch hole so
1487 * this is a reliable test.
1488 */
1489 max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1490
1491 trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);
1492
1493 /*
1494 * Make sure that the faulting address's PMD offset (color) matches
1495 * the PMD offset from the start of the file. This is necessary so
1496 * that a PMD range in the page table overlaps exactly with a PMD
1497 * range in the page cache.
1498 */
1499 if ((vmf->pgoff & PG_PMD_COLOUR) !=
1500 ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
1501 goto fallback;
1502
1503 /* Fall back to PTEs if we're going to COW */
1504 if (write && !(vma->vm_flags & VM_SHARED))
1505 goto fallback;
1506
1507 /* If the PMD would extend outside the VMA */
1508 if (pmd_addr < vma->vm_start)
1509 goto fallback;
1510 if ((pmd_addr + PMD_SIZE) > vma->vm_end)
1511 goto fallback;
1512
1513 if (xas.xa_index >= max_pgoff) {
1514 result = VM_FAULT_SIGBUS;
1515 goto out;
1516 }
1517
1518 /* If the PMD would extend beyond the file size */
1519 if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
1520 goto fallback;
1521
1522 /*
1523 * grab_mapping_entry() will make sure we get an empty PMD entry,
1524 * a zero PMD entry or a DAX PMD. If it can't (because a PTE
1525 * entry is already in the array, for instance), it will return
1526 * VM_FAULT_FALLBACK.
1527 */
1528 entry = grab_mapping_entry(&xas, mapping, PMD_ORDER);
1529 if (xa_is_internal(entry)) {
1530 result = xa_to_internal(entry);
1531 goto fallback;
1532 }
1533
1534 /*
1535 * It is possible, particularly with mixed reads & writes to private
1536 * mappings, that we have raced with a PTE fault that overlaps with
1537 * the PMD we need to set up. If so just return and the fault will be
1538 * retried.
1539 */
1540 if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
1541 !pmd_devmap(*vmf->pmd)) {
1542 result = 0;
1543 goto unlock_entry;
1544 }
1545
1546 /*
1547 * Note that we don't use iomap_apply here. We aren't doing I/O, only
1548 * setting up a mapping, so really we're using iomap_begin() as a way
1549 * to look up our filesystem block.
1550 */
1551 pos = (loff_t)xas.xa_index << PAGE_SHIFT;
1552 error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap,
1553 &srcmap);
1554 if (error)
1555 goto unlock_entry;
1556
1557 if (iomap.offset + iomap.length < pos + PMD_SIZE)
1558 goto finish_iomap;
1559
1560 sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);
1561
1562 switch (iomap.type) {
1563 case IOMAP_MAPPED:
1564 error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
1565 if (error < 0)
1566 goto finish_iomap;
1567
1568 entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
1569 DAX_PMD, write && !sync);
1570
1571 /*
1572 * If we are doing synchronous page fault and inode needs fsync,
1573 * we can insert PMD into page tables only after that happens.
1574 * Skip insertion for now and return the pfn so that caller can
1575 * insert it after fsync is done.
1576 */
1577 if (sync) {
1578 if (WARN_ON_ONCE(!pfnp))
1579 goto finish_iomap;
1580 *pfnp = pfn;
1581 result = VM_FAULT_NEEDDSYNC;
1582 goto finish_iomap;
1583 }
1584
1585 trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
1586 result = vmf_insert_pfn_pmd(vmf, pfn, write);
1587 break;
1588 case IOMAP_UNWRITTEN:
1589 case IOMAP_HOLE:
1590 if (WARN_ON_ONCE(write))
1591 break;
1592 result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
1593 break;
1594 default:
1595 WARN_ON_ONCE(1);
1596 break;
1597 }
1598
1599 finish_iomap:
1600 if (ops->iomap_end) {
1601 int copied = PMD_SIZE;
1602
1603 if (result == VM_FAULT_FALLBACK)
1604 copied = 0;
1605 /*
1606                  * The fault is done by now and there's no way back (another
1607                  * thread may already be happily using the PMD we have installed).
1608 * Just ignore error from ->iomap_end since we cannot do much
1609 * with it.
1610 */
1611 ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
1612 &iomap);
1613 }
1614 unlock_entry:
1615 dax_unlock_entry(&xas, entry);
1616 fallback:
1617 if (result == VM_FAULT_FALLBACK) {
1618 split_huge_pmd(vma, vmf->pmd, vmf->address);
1619 count_vm_event(THP_FAULT_FALLBACK);
1620 }
1621 out:
1622 trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
1623 return result;
1624 }
1625 #else
1626 static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
1627 const struct iomap_ops *ops)
1628 {
1629 return VM_FAULT_FALLBACK;
1630 }
1631 #endif /* CONFIG_FS_DAX_PMD */
1632
1633 /**
1634 * dax_iomap_fault - handle a page fault on a DAX file
1635 * @vmf: The description of the fault
1636 * @pe_size: Size of the page to fault in
1637 * @pfnp: PFN to insert for synchronous faults if fsync is required
1638 * @iomap_errp: Storage for detailed error code in case of error
1639 * @ops: Iomap ops passed from the file system
1640 *
1641 * When a page fault occurs, filesystems may call this helper in
1642 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
1643 * has done all the necessary locking for page fault to proceed
1644 * successfully.
1645 */
1646 vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
1647 pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
1648 {
1649 switch (pe_size) {
1650 case PE_SIZE_PTE:
1651 return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
1652 case PE_SIZE_PMD:
1653 return dax_iomap_pmd_fault(vmf, pfnp, ops);
1654 default:
1655 return VM_FAULT_FALLBACK;
1656 }
1657 }
1658 EXPORT_SYMBOL_GPL(dax_iomap_fault);
1659
1660 /*
1661 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
1662 * @vmf: The description of the fault
1663 * @pfn: PFN to insert
1664 * @order: Order of entry to insert.
1665 *
1666 * This function inserts a writeable PTE or PMD entry into the page tables
1667 * for an mmaped DAX file. It also marks the page cache entry as dirty.
1668 */
1669 static vm_fault_t
1670 dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
1671 {
1672 struct address_space *mapping = vmf->vma->vm_file->f_mapping;
1673 XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
1674 void *entry;
1675 vm_fault_t ret;
1676
1677 xas_lock_irq(&xas);
1678 entry = get_unlocked_entry(&xas, order);
1679 /* Did we race with someone splitting entry or so? */
1680 if (!entry || dax_is_conflict(entry) ||
1681 (order == 0 && !dax_is_pte_entry(entry))) {
1682 put_unlocked_entry(&xas, entry);
1683 xas_unlock_irq(&xas);
1684 trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
1685 VM_FAULT_NOPAGE);
1686 return VM_FAULT_NOPAGE;
1687 }
1688 xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
1689 dax_lock_entry(&xas, entry);
1690 xas_unlock_irq(&xas);
1691 if (order == 0)
1692 ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
1693 #ifdef CONFIG_FS_DAX_PMD
1694 else if (order == PMD_ORDER)
1695 ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
1696 #endif
1697 else
1698 ret = VM_FAULT_FALLBACK;
1699 dax_unlock_entry(&xas, entry);
1700 trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
1701 return ret;
1702 }
1703
1704 /**
1705 * dax_finish_sync_fault - finish synchronous page fault
1706 * @vmf: The description of the fault
1707 * @pe_size: Size of entry to be inserted
1708 * @pfn: PFN to insert
1709 *
1710 * This function ensures that the file range touched by the page fault is
1711  * stored persistently on the media and handles insertion of the appropriate
1712  * page table entry.
1713 */
1714 vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
1715 enum page_entry_size pe_size, pfn_t pfn)
1716 {
1717 int err;
1718 loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
1719 unsigned int order = pe_order(pe_size);
1720 size_t len = PAGE_SIZE << order;
1721
1722 err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
1723 if (err)
1724 return VM_FAULT_SIGBUS;
1725 return dax_insert_pfn_mkwrite(vmf, pfn, order);
1726 }
1727 EXPORT_SYMBOL_GPL(dax_finish_sync_fault);