// SPDX-License-Identifier: GPL-2.0-only
/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include <asm/pgalloc.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>
static inline unsigned int pe_order(enum page_entry_size pe_size)
{
	if (pe_size == PE_SIZE_PTE)
		return PAGE_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	if (pe_size == PE_SIZE_PUD)
		return PUD_SHIFT - PAGE_SHIFT;
	return ~0;
}
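/*
 * Illustrative values (not part of the original source): on x86-64 with 4K
 * pages, pe_order() maps PE_SIZE_PTE -> 0, PE_SIZE_PMD -> 9 and
 * PE_SIZE_PUD -> 18.
 */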
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)

/* The order of a PMD entry */
#define PMD_ORDER	(PMD_SHIFT - PAGE_SHIFT)
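/*
 * Example values (illustration only, not from the original source): with 4K
 * pages and 2M PMDs (x86-64), PG_PMD_NR is 512, PG_PMD_COLOUR is 0x1ff and
 * PMD_ORDER is 9, so a PMD entry covers 512 consecutive page-cache indices.
 */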
static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);
/*
 * DAX pagecache entries use XArray value entries so they can't be mistaken
 * for pages.  We use one bit for locking, one bit for the entry size (PMD)
 * and two more to tell us if the entry is a zero page or an empty entry that
 * is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define DAX_SHIFT	(4)
#define DAX_LOCKED	(1UL << 0)
#define DAX_PMD		(1UL << 1)
#define DAX_ZERO_PAGE	(1UL << 2)
#define DAX_EMPTY	(1UL << 3)
static unsigned long dax_to_pfn(void *entry)
{
	return xa_to_value(entry) >> DAX_SHIFT;
}

static void *dax_make_entry(pfn_t pfn, unsigned long flags)
{
	return xa_mk_value(flags | (pfn_t_to_pfn(pfn) << DAX_SHIFT));
}
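/*
 * Worked example (illustration only, not from the original source): for
 * pfn 0x1234 and flags DAX_PMD, the stored XArray value encodes
 * (0x1234 << DAX_SHIFT) | DAX_PMD, and dax_to_pfn() recovers 0x1234 by
 * shifting the four flag bits back out.
 */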
static bool dax_is_locked(void *entry)
{
	return xa_to_value(entry) & DAX_LOCKED;
}

static unsigned int dax_entry_order(void *entry)
{
	if (xa_to_value(entry) & DAX_PMD)
		return PMD_ORDER;
	return 0;
}

static unsigned long dax_is_pmd_entry(void *entry)
{
	return xa_to_value(entry) & DAX_PMD;
}

static bool dax_is_pte_entry(void *entry)
{
	return !(xa_to_value(entry) & DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return xa_to_value(entry) & DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return xa_to_value(entry) & DAX_EMPTY;
}
/*
 * DAX page cache entry locking
 */
struct exceptional_entry_key {
	struct xarray *xa;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};
static wait_queue_head_t *dax_entry_waitqueue(struct xa_state *xas,
		void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;
	unsigned long index = xas->xa_index;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;
	key->xa = xas->xa;
	key->entry_start = index;

	hash = hash_long((unsigned long)xas->xa ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
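/*
 * Note: the wait key records both the xarray and the (PMD-aligned) index.
 * All mappings share the global wait_table above, so
 * wake_exceptional_entry_func() below filters wakeups on both fields to
 * avoid waking waiters of an unrelated mapping that hashed to the same
 * queue.
 */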
static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->xa != ewait->key.xa ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}
/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_entry(struct xa_state *xas, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(xas, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}
/*
 * Look up entry in page cache, wait for it to become unlocked if it
 * is a DAX entry and return it.  The caller must subsequently call
 * put_unlocked_entry() if it did not lock the entry or dax_unlock_entry()
 * if it did.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_entry(struct xa_state *xas)
{
	void *entry;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = xas_find_conflict(xas);
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)) ||
				!dax_is_locked(entry))
			return entry;

		wq = dax_entry_waitqueue(xas, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xas_unlock_irq(xas);
		xas_reset(xas);
		schedule();
		finish_wait(wq, &ewait.wait);
		xas_lock_irq(xas);
	}
}
/*
 * The only thing keeping the address space around is the i_pages lock
 * (it's cycled in clear_inode() after removing the entries from i_pages).
 * After we call xas_unlock_irq(), we cannot touch xas->xa.
 */
static void wait_entry_unlocked(struct xa_state *xas, void *entry)
{
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	wq = dax_entry_waitqueue(xas, entry, &ewait.key);
	/*
	 * Unlike get_unlocked_entry() there is no guarantee that this
	 * path ever successfully retrieves an unlocked entry before an
	 * inode dies.  Perform a non-exclusive wait in case this path
	 * never successfully performs its own wake up.
	 */
	prepare_to_wait(wq, &ewait.wait, TASK_UNINTERRUPTIBLE);
	xas_unlock_irq(xas);
	schedule();
	finish_wait(wq, &ewait.wait);
}
static void put_unlocked_entry(struct xa_state *xas, void *entry)
{
	/* If we were the only waiter woken, wake the next one */
	if (entry)
		dax_wake_entry(xas, entry, false);
}
/*
 * We used the xa_state to get the entry, but then we locked the entry and
 * dropped the xa_lock, so we know the xa_state is stale and must be reset
 * before use.
 */
static void dax_unlock_entry(struct xa_state *xas, void *entry)
{
	void *old;

	BUG_ON(dax_is_locked(entry));
	xas_reset(xas);
	xas_lock_irq(xas);
	old = xas_store(xas, entry);
	xas_unlock_irq(xas);
	BUG_ON(!dax_is_locked(old));
	dax_wake_entry(xas, entry, false);
}
/*
 * Return: The entry stored at this location before it was locked.
 */
static void *dax_lock_entry(struct xa_state *xas, void *entry)
{
	unsigned long v = xa_to_value(entry);

	return xas_store(xas, xa_mk_value(v | DAX_LOCKED));
}
static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_end_pfn(void *entry)
{
	return dax_to_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}
/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_to_pfn(entry); \
			pfn < dax_end_pfn(entry); pfn++)
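/*
 * Note: zero and empty entries have dax_entry_size() == 0, so for them
 * dax_end_pfn() equals dax_to_pfn() and the loop body never runs; a PMD
 * entry iterates over all PG_PMD_NR pfns that it maps.
 */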
/*
 * TODO: for reflink+dax we need a way to associate a single page with
 * multiple address_space instances at different linear_page_index()
 * offsets.
 */
static void dax_associate_entry(void *entry, struct address_space *mapping,
		struct vm_area_struct *vma, unsigned long address)
{
	unsigned long size = dax_entry_size(entry), pfn, index;
	int i = 0;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	index = linear_page_index(vma, address & ~(size - 1));
	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
		page->index = index + i++;
	}
}
static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
		page->index = 0;
	}
}
static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}
/*
 * dax_lock_page - Lock the DAX entry corresponding to a page
 * @page: The page whose entry we want to lock
 *
 * Context: Process context.
 * Return: A cookie to pass to dax_unlock_page() or 0 if the entry could
 * not be locked.
 */
dax_entry_t dax_lock_page(struct page *page)
{
	XA_STATE(xas, NULL, 0);
	void *entry;

	/* Ensure page->mapping isn't freed while we look at it */
	rcu_read_lock();
	for (;;) {
		struct address_space *mapping = READ_ONCE(page->mapping);

		entry = NULL;
		if (!mapping || !dax_mapping(mapping))
			break;

		/*
		 * In the device-dax case there's no need to lock, a
		 * struct dev_pagemap pin is sufficient to keep the
		 * inode alive, and we assume we have dev_pagemap pin
		 * otherwise we would not have a valid pfn_to_page()
		 * translation.
		 */
		entry = (void *)~0UL;
		if (S_ISCHR(mapping->host->i_mode))
			break;

		xas.xa = &mapping->i_pages;
		xas_lock_irq(&xas);
		if (mapping != page->mapping) {
			xas_unlock_irq(&xas);
			continue;
		}
		xas_set(&xas, page->index);
		entry = xas_load(&xas);
		if (dax_is_locked(entry)) {
			rcu_read_unlock();
			wait_entry_unlocked(&xas, entry);
			rcu_read_lock();
			continue;
		}
		dax_lock_entry(&xas, entry);
		xas_unlock_irq(&xas);
		break;
	}
	rcu_read_unlock();
	return (dax_entry_t)entry;
}
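/*
 * Note: the returned cookie is simply the locked XArray value entry cast to
 * dax_entry_t; 0 means the entry could not be locked, and device-dax
 * (S_ISCHR) inodes return the dummy ~0UL cookie since no locking is needed
 * there.
 */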
void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
	struct address_space *mapping = page->mapping;
	XA_STATE(xas, &mapping->i_pages, page->index);

	if (S_ISCHR(mapping->host->i_mode))
		return;

	dax_unlock_entry(&xas, (void *)cookie);
}
/*
 * Find page cache entry at given index. If it is a DAX entry, return it
 * with the entry locked. If the page cache doesn't contain an entry at
 * that index, add a locked empty entry.
 *
 * When requesting an entry with size DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return VM_FAULT_FALLBACK.
 * This will happen if there are any PTE entries within the PMD range
 * that we are requesting.
 *
 * We always favor PTE entries over PMD entries. There isn't a flow where we
 * evict PTE entries in order to 'upgrade' them to a PMD entry.  A PMD
 * insertion will fail if it finds any PTE entries already in the tree, and a
 * PTE insertion will cause an existing PMD entry to be unmapped and
 * downgraded to PTE entries.  This happens for both PMD zero pages as
 * well as PMD empty entries.
 *
 * The exception to this downgrade path is for PMD entries that have
 * real storage backing them.  We will leave these real PMD entries in
 * the tree, and PTE writes will simply dirty the entire PMD entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags.  For
 * persistent memory the benefit is doubtful.  We can add that later if we can
 * observe a benefit.
 *
 * On error, this function does not return an ERR_PTR.  Instead it returns
 * a VM_FAULT code, encoded as an xarray internal entry.  The ERR_PTR values
 * overlap with xarray value entries.
 */
static void *grab_mapping_entry(struct xa_state *xas,
		struct address_space *mapping, unsigned long size_flag)
{
	unsigned long index = xas->xa_index;
	bool pmd_downgrade = false; /* splitting PMD entry into PTE entries? */
	void *entry;

retry:
	xas_lock_irq(xas);
	entry = get_unlocked_entry(xas);

	if (entry) {
		if (!xa_is_value(entry)) {
			xas_set_err(xas, EIO);
			goto out_unlock;
		}

		if (size_flag & DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_entry(xas, entry);
				goto fallback;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	if (pmd_downgrade) {
		/*
		 * Make sure 'entry' remains valid while we drop
		 * the i_pages lock.
		 */
		dax_lock_entry(xas, entry);

		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (dax_is_zero_entry(entry)) {
			xas_unlock_irq(xas);
			unmap_mapping_pages(mapping,
					xas->xa_index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
			xas_reset(xas);
			xas_lock_irq(xas);
		}

		dax_disassociate_entry(entry, mapping, false);
		xas_store(xas, NULL);	/* undo the PMD join */
		dax_wake_entry(xas, entry, true);
		mapping->nrexceptional--;
		entry = NULL;
		xas_set(xas, index);
	}

	if (entry) {
		dax_lock_entry(xas, entry);
	} else {
		entry = dax_make_entry(pfn_to_pfn_t(0), size_flag | DAX_EMPTY);
		dax_lock_entry(xas, entry);
		if (xas_error(xas))
			goto out_unlock;
		mapping->nrexceptional++;
	}

out_unlock:
	xas_unlock_irq(xas);
	if (xas_nomem(xas, mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM))
		goto retry;
	if (xas->xa_node == XA_ERROR(-ENOMEM))
		return xa_mk_internal(VM_FAULT_OOM);
	if (xas_error(xas))
		return xa_mk_internal(VM_FAULT_SIGBUS);
	return entry;
fallback:
	xas_unlock_irq(xas);
	return xa_mk_internal(VM_FAULT_FALLBACK);
}
/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	XA_STATE(xas, &mapping->i_pages, 0);
	void *entry;
	unsigned int scanned = 0;
	struct page *page = NULL;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the iteration and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 1);

	xas_lock_irq(&xas);
	xas_for_each(&xas, entry, ULONG_MAX) {
		if (WARN_ON_ONCE(!xa_is_value(entry)))
			continue;
		if (unlikely(dax_is_locked(entry)))
			entry = get_unlocked_entry(&xas);
		page = dax_busy_page(entry);
		put_unlocked_entry(&xas, entry);
		if (page)
			break;
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
static int __dax_invalidate_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	XA_STATE(xas, &mapping->i_pages, index);
	int ret = 0;
	void *entry;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);
	if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
		goto out;
	if (!trunc &&
	    (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY) ||
	     xas_get_mark(&xas, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	xas_store(&xas, NULL);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_entry(&xas, entry);
	xas_unlock_irq(&xas);
	return ret;
}
/*
 * Delete DAX entry at @index from @mapping.  Wait for it
 * to be unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * page cache (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen a DAX entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}
/*
 * Invalidate DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_entry(mapping, index, false);
}
static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, NULL);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}
/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_entry(struct xa_state *xas,
		struct address_space *mapping, struct vm_fault *vmf,
		void *entry, pfn_t pfn, unsigned long flags, bool dirty)
{
	void *new_entry = dax_make_entry(pfn, flags);

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & DAX_ZERO_PAGE)) {
		unsigned long index = xas->xa_index;
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, index, 1, false);
	}

	xas_reset(xas);
	xas_lock_irq(xas);
	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		void *old;

		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping, vmf->vma, vmf->address);
		/*
		 * Only swap our new entry into the page cache if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the cache, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		old = dax_lock_entry(xas, new_entry);
		WARN_ON_ONCE(old != xa_mk_value(xa_to_value(entry) |
					DAX_LOCKED));
		entry = new_entry;
	} else {
		xas_load(xas);	/* Walk the xa_state */
	}

	if (dirty)
		xas_set_mark(xas, PAGECACHE_TAG_DIRTY);

	xas_unlock_irq(xas);
	return entry;
}
static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}
/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_entry_mkclean(struct address_space *mapping, pgoff_t index,
		unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		struct mmu_notifier_range range;
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide range to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &range,
				   &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_invalidate(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(&range);
	}
	i_mmap_unlock_read(mapping);
}
static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
		struct address_space *mapping, void *entry)
{
	unsigned long pfn, index, count;
	long ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong if we're trying to flush the same.
	 */
	if (WARN_ON(!xa_is_value(entry)))
		return -EIO;

	if (unlikely(dax_is_locked(entry))) {
		void *old_entry = entry;

		entry = get_unlocked_entry(xas);

		/* Entry got punched out / reallocated? */
		if (!entry || WARN_ON_ONCE(!xa_is_value(entry)))
			goto put_unlocked;
		/*
		 * Entry got reallocated elsewhere? No need to writeback.
		 * We have to compare pfns as we must not bail out due to
		 * difference in lockbit or entry type.
		 */
		if (dax_to_pfn(old_entry) != dax_to_pfn(entry))
			goto put_unlocked;
		if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
					dax_is_zero_entry(entry))) {
			ret = -EIO;
			goto put_unlocked;
		}

		/* Another fsync thread may have already done this entry */
		if (!xas_get_mark(xas, PAGECACHE_TAG_TOWRITE))
			goto put_unlocked;
	}

	/* Lock the entry to serialize with page faults */
	dax_lock_entry(xas, entry);

	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	xas_clear_mark(xas, PAGECACHE_TAG_TOWRITE);
	xas_unlock_irq(xas);

	/*
	 * If dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we use needs to be
	 * aligned to the start of the PMD.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_to_pfn(entry);
	count = 1UL << dax_entry_order(entry);
	index = xas->xa_index & ~(count - 1);

	dax_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), count * PAGE_SIZE);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xas_reset(xas);
	xas_lock_irq(xas);
	xas_store(xas, entry);
	xas_clear_mark(xas, PAGECACHE_TAG_DIRTY);
	dax_wake_entry(xas, entry, false);

	trace_dax_writeback_one(mapping->host, index, count);
	return ret;

 put_unlocked:
	put_unlocked_entry(xas, entry);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
	struct inode *inode = mapping->host;
	pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
	struct dax_device *dax_dev;
	void *entry;
	int ret = 0;
	unsigned int scanned = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	trace_dax_writeback_range(inode, xas.xa_index, end_index);

	tag_pages_for_writeback(mapping, xas.xa_index, end_index);

	xas_lock_irq(&xas);
	xas_for_each_marked(&xas, entry, end_index, PAGECACHE_TAG_TOWRITE) {
		ret = dax_writeback_one(&xas, dax_dev, mapping, entry);
		if (ret < 0) {
			mapping_set_error(mapping, ret);
			break;
		}
		if (++scanned % XA_CHECK_SCHED)
			continue;

		xas_pause(&xas);
		xas_unlock_irq(&xas);
		cond_resched();
		xas_lock_irq(&xas);
	}
	xas_unlock_irq(&xas);
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
	return ret;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}
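/*
 * Note: the shift by 9 converts a byte offset within the iomap extent into
 * 512-byte disk sectors, which is the unit bdev_dax_pgoff() expects below.
 */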
static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}
/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct xa_state *xas,
		struct address_space *mapping, void **entry,
		struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	pfn_t pfn = pfn_to_pfn_t(my_zero_pfn(vaddr));
	vm_fault_t ret;

	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_ZERO_PAGE, false);

	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}
static bool dax_range_is_aligned(struct block_device *bdev,
				unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}
int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr, NULL);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, NULL);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}
/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_write(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	return vmf_error(error);
}
/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}
static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, vmf->pgoff);
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(&xas, mapping, 0);
	if (xa_is_internal(entry)) {
		ret = xa_to_internal(entry);
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX required
	 * the file system block size to be equal the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(&xas, mapping, &entry, vmf);
			goto finish_iomap;
		}
		/* fallthrough */
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	ret = dax_fault_return(error);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	dax_unlock_entry(&xas, entry);
 out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}
#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,
		struct iomap *iomap, void **entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct vm_area_struct *vma = vmf->vma;
	struct inode *inode = mapping->host;
	pgtable_t pgtable = NULL;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	*entry = dax_insert_entry(xas, mapping, vmf, *entry, pfn,
			DAX_PMD | DAX_ZERO_PAGE, false);

	if (arch_needs_pgtable_deposit()) {
		pgtable = pte_alloc_one(vma->vm_mm);
		if (!pgtable)
			return VM_FAULT_OOM;
	}

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	if (pgtable) {
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		mm_inc_nr_ptes(vma->vm_mm);
	}
	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, *entry);
	return VM_FAULT_NOPAGE;

fallback:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, *entry);
	return VM_FAULT_FALLBACK;
}
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, PMD_ORDER);
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the page cache.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (xas.xa_index >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((xas.xa_index | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get an empty PMD entry,
	 * a zero PMD entry or a DAX PMD.  If it can't (because a PTE
	 * entry is already in the array, for instance), it will return
	 * VM_FAULT_FALLBACK.
	 */
	entry = grab_mapping_entry(&xas, mapping, DAX_PMD);
	if (xa_is_internal(entry)) {
		result = xa_to_internal(entry);
		goto fallback;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)xas.xa_index << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_entry(&xas, mapping, vmf, entry, pfn,
						DAX_PMD, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vmf, pfn, write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(&xas, vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	dax_unlock_entry(&xas, entry);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */
/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
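/*
 * Example usage (illustrative sketch only, not part of this file): a
 * filesystem's ->fault handler typically wraps this helper roughly as
 *
 *	static vm_fault_t foo_dax_fault(struct vm_fault *vmf)
 *	{
 *		return dax_iomap_fault(vmf, PE_SIZE_PTE, NULL, NULL,
 *				       &foo_iomap_ops);
 *	}
 *
 * where "foo_iomap_ops" stands for the filesystem's own iomap_ops (a
 * hypothetical name); the caller handles any locking around the call.
 */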
/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pfn: PFN to insert
 * @order: Order of entry to insert.
 *
 * This function inserts a writeable PTE or PMD entry into the page tables
 * for an mmaped DAX file.  It also marks the page cache entry as dirty.
 */
static vm_fault_t
dax_insert_pfn_mkwrite(struct vm_fault *vmf, pfn_t pfn, unsigned int order)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	XA_STATE_ORDER(xas, &mapping->i_pages, vmf->pgoff, order);
	void *entry;
	vm_fault_t ret;

	xas_lock_irq(&xas);
	entry = get_unlocked_entry(&xas);
	/* Did we race with someone splitting entry or so? */
	if (!entry ||
	    (order == 0 && !dax_is_pte_entry(entry)) ||
	    (order == PMD_ORDER && !dax_is_pmd_entry(entry))) {
		put_unlocked_entry(&xas, entry);
		xas_unlock_irq(&xas);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	xas_set_mark(&xas, PAGECACHE_TAG_DIRTY);
	dax_lock_entry(&xas, entry);
	xas_unlock_irq(&xas);
	if (order == 0)
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
#ifdef CONFIG_FS_DAX_PMD
	else if (order == PMD_ORDER)
		ret = vmf_insert_pfn_pmd(vmf, pfn, FAULT_FLAG_WRITE);
#endif
	else
		ret = VM_FAULT_FALLBACK;
	dax_unlock_entry(&xas, entry);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}
/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	unsigned int order = pe_order(pe_size);
	size_t len = PAGE_SIZE << order;

	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pfn, order);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);