/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

/* The 'colour' (ie low bits) within a PMD of a page offset. */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
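
/*
 * Worked example (illustrative note, not from the original source): with 4K
 * pages and 2MiB PMDs (PAGE_SHIFT == 12), PG_PMD_NR is 512 pages per PMD and
 * PG_PMD_COLOUR is 0x1ff, so "index & ~PG_PMD_COLOUR" rounds a page offset
 * down to the first page of its PMD, e.g. index 0x213 -> 0x200.
 */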
static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);
/*
 * We use the lowest available bit in the exceptional entry for locking, one
 * bit for the entry size (PMD) and two more to tell us if the entry is a
 * zero page or an empty entry that is just used for locking.  In total four
 * special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
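
/*
 * Worked example (illustrative note, not from the original source; it assumes
 * RADIX_TREE_EXCEPTIONAL_SHIFT == 2, its usual value): the flag bits are then
 * LOCK = 0x4, PMD = 0x8, ZERO_PAGE = 0x10, EMPTY = 0x20 and the pfn starts at
 * bit 6.  dax_radix_locked_entry(0x1234, RADIX_DAX_PMD) below thus produces
 * (0x1234 << 6) | 0x8 | 0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY == 0x48d0e, and
 * dax_radix_pfn() recovers 0x1234 by shifting right by RADIX_DAX_SHIFT (6).
 */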
static unsigned long dax_radix_pfn(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}
static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}
/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};
static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~PG_PMD_COLOUR;

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
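
/*
 * Illustrative note (not from the original source): because the index is
 * rounded down to the PMD start first, a waiter on page offset 0x203 and a
 * waker on offset 0x2f0 of the same 2MiB entry both compute
 * hash_long((unsigned long)mapping ^ 0x200, DAX_WAIT_TABLE_BITS) and so pick
 * the same one of the 4096 wait queues; wake-ups are not missed anywhere in
 * the PMD range.
 */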
static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}
/*
 * @entry may no longer be the entry at the index in the mapping.
 * The important information it's conveying is whether the entry at
 * this index used to be a PMD entry.
 */
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under the i_pages lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}
/*
 * Check whether the given slot is locked.  Must be called with the i_pages
 * lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}
/*
 * Mark the given slot as locked.  Must be called with the i_pages lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}
/*
 * Mark the given slot as unlocked.  Must be called with the i_pages lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->i_pages.xa_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->i_pages, slot, (void *)entry);
	return (void *)entry;
}
/*
 * Lookup entry in radix tree, wait for it to become unlocked if it is
 * exceptional entry and return it.  The caller must call
 * put_unlocked_mapping_entry() when he decided not to lock the entry or
 * put_locked_mapping_entry() when he locked the entry and now wants to
 * unlock it.
 *
 * Must be called with the i_pages lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->i_pages, index, NULL,
					    &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		xa_unlock_irq(&mapping->i_pages);
		schedule();
		finish_wait(wq, &ewait.wait);
		xa_lock_irq(&mapping->i_pages);
	}
}
static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	xa_lock_irq(&mapping->i_pages);
	entry = __radix_tree_lookup(&mapping->i_pages, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		xa_unlock_irq(&mapping->i_pages);
		return;
	}
	unlock_slot(mapping, slot);
	xa_unlock_irq(&mapping->i_pages);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	dax_unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
static unsigned long dax_entry_size(void *entry)
{
	if (dax_is_zero_entry(entry))
		return 0;
	else if (dax_is_empty_entry(entry))
		return 0;
	else if (dax_is_pmd_entry(entry))
		return PMD_SIZE;
	else
		return PAGE_SIZE;
}

static unsigned long dax_radix_end_pfn(void *entry)
{
	return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
}

/*
 * Iterate through all mapped pfns represented by an entry, i.e. skip
 * 'empty' and 'zero' entries.
 */
#define for_each_mapped_pfn(entry, pfn) \
	for (pfn = dax_radix_pfn(entry); \
			pfn < dax_radix_end_pfn(entry); pfn++)
static void dax_associate_entry(void *entry, struct address_space *mapping)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(page->mapping);
		page->mapping = mapping;
	}
}

static void dax_disassociate_entry(void *entry, struct address_space *mapping,
		bool trunc)
{
	unsigned long pfn;

	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
		page->mapping = NULL;
	}
}

static struct page *dax_busy_page(void *entry)
{
	unsigned long pfn;

	for_each_mapped_pfn(entry, pfn) {
		struct page *page = pfn_to_page(pfn);

		if (page_ref_count(page) > 1)
			return page;
	}
	return NULL;
}
/*
 * Find radix tree entry at given index. If it points to an exceptional entry,
 * return it with the radix tree entry locked. If the radix tree doesn't
 * contain given index, create an empty exceptional entry for the index and
 * return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * the i_pages lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		xa_unlock_irq(&mapping->i_pages);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
							PG_PMD_NR, false);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		xa_lock_irq(&mapping->i_pages);

		if (!entry) {
			/*
			 * We needed to drop the i_pages lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->i_pages, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				xa_unlock_irq(&mapping->i_pages);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			dax_disassociate_entry(entry, mapping, false);
			radix_tree_delete(&mapping->i_pages, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->i_pages, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			xa_unlock_irq(&mapping->i_pages);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		xa_unlock_irq(&mapping->i_pages);
		return entry;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	xa_unlock_irq(&mapping->i_pages);
	return entry;
}
/**
 * dax_layout_busy_page - find first pinned page in @mapping
 * @mapping: address space to scan for a page with ref count > 1
 *
 * DAX requires ZONE_DEVICE mapped pages. These pages are never
 * 'onlined' to the page allocator so they are considered idle when
 * page->count == 1. A filesystem uses this interface to determine if
 * any page in the mapping is busy, i.e. for DMA, or other
 * get_user_pages() usages.
 *
 * It is expected that the filesystem is holding locks to block the
 * establishment of new mappings in this address_space. I.e. it expects
 * to be able to run unmap_mapping_range() and subsequently not race
 * mapping_mapped() becoming true.
 */
struct page *dax_layout_busy_page(struct address_space *mapping)
{
	pgoff_t	indices[PAGEVEC_SIZE];
	struct page *page = NULL;
	struct pagevec pvec;
	pgoff_t	index, end;
	unsigned i;

	/*
	 * In the 'limited' case get_user_pages() for dax is disabled.
	 */
	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
		return NULL;

	if (!dax_mapping(mapping) || !mapping_mapped(mapping))
		return NULL;

	pagevec_init(&pvec);
	index = 0;
	end = -1;

	/*
	 * If we race get_user_pages_fast() here either we'll see the
	 * elevated page count in the pagevec_lookup and wait, or
	 * get_user_pages_fast() will see that the page it took a reference
	 * against is no longer mapped in the page tables and bail to the
	 * get_user_pages() slow path.  The slow path is protected by
	 * pte_lock() and pmd_lock(). New references are not taken without
	 * holding those locks, and unmap_mapping_range() will not zero the
	 * pte or pmd without holding the respective lock, so we are
	 * guaranteed to either see new references or prevent new
	 * references from being established.
	 */
	unmap_mapping_range(mapping, 0, 0, 1);

	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
				min(end - index, (pgoff_t)PAGEVEC_SIZE),
				indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *pvec_ent = pvec.pages[i];
			void *entry;

			index = indices[i];
			if (index >= end)
				break;

			if (WARN_ON_ONCE(
			     !radix_tree_exceptional_entry(pvec_ent)))
				continue;

			xa_lock_irq(&mapping->i_pages);
			entry = get_unlocked_mapping_entry(mapping, index, NULL);
			if (entry)
				page = dax_busy_page(entry);
			put_unlocked_mapping_entry(mapping, index, entry);
			xa_unlock_irq(&mapping->i_pages);
			if (page)
				break;
		}

		/*
		 * We don't expect normal struct page entries to exist in our
		 * tree, but we keep these pagevec calls so that this code is
		 * consistent with the common pattern for handling pagevecs
		 * throughout the kernel.
		 */
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;

		if (page)
			break;
	}
	return page;
}
EXPORT_SYMBOL_GPL(dax_layout_busy_page);
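
/*
 * Illustrative sketch (not part of the original file): a filesystem that
 * needs to break DAX layouts before a truncate typically loops on this
 * helper, roughly in the spirit of xfs_break_dax_layouts():
 *
 *	struct page *page;
 *
 *	while ((page = dax_layout_busy_page(inode->i_mapping))) {
 *		...wait for page_ref_count(page) to drop back to 1, then
 *		   retry under the filesystem's own mapping locks...
 *	}
 *
 * The wait primitive and the locking are filesystem specific and are only
 * hinted at here.
 */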
static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *pages = &mapping->i_pages;

	xa_lock_irq(pages);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(pages, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	dax_disassociate_entry(entry, mapping, trunc);
	radix_tree_delete(pages, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	xa_unlock_irq(pages);
	return ret;
}
/*
 * Delete exceptional DAX entry at @index from @mapping.  Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}
static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}
/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, pfn_t pfn_t,
				      unsigned long flags, bool dirty)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	unsigned long pfn = pfn_t_to_pfn(pfn_t);
	pgoff_t index = vmf->pgoff;
	void *new_entry;

	if (dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
					PG_PMD_NR, false);
		else /* pte entry */
			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
	}

	xa_lock_irq(pages);
	new_entry = dax_radix_locked_entry(pfn, flags);
	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
		dax_disassociate_entry(entry, mapping, false);
		dax_associate_entry(new_entry, mapping);
	}

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(pages, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(pages, node, slot,
				     new_entry, NULL);
		entry = new_entry;
	}

	if (dirty)
		radix_tree_tag_set(pages, index, PAGECACHE_TAG_DIRTY);

	xa_unlock_irq(pages);
	return entry;
}
static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}
/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		/*
		 * No need to call mmu_notifier_invalidate_range() as we are
		 * downgrading page table protection not changing it to point
		 * to a new page.
		 *
		 * See Documentation/vm/mmu_notifier.rst
		 */
		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
unlock_pmd:
#endif
			spin_unlock(ptl);
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}
static int dax_writeback_one(struct dax_device *dax_dev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *pages = &mapping->i_pages;
	void *entry2, **slot;
	unsigned long pfn;
	long ret = 0;
	size_t size;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong if we try to writeback an exceptional entry that isn't DAX.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	xa_lock_irq(pages);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare pfns as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(pages, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under the i_pages lock and once they do that
	 * they will see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_TOWRITE);
	xa_unlock_irq(pages);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the pfn we pull from 'entry'.
	 * This allows us to flush for PMD_SIZE and not have to worry about
	 * partial PMD writebacks.
	 */
	pfn = dax_radix_pfn(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	dax_mapping_entry_mkclean(mapping, index, pfn);
	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	xa_lock_irq(pages);
	radix_tree_tag_clear(pages, index, PAGECACHE_TAG_DIRTY);
	xa_unlock_irq(pages);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
	put_locked_mapping_entry(mapping, index);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	xa_unlock_irq(pages);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(dax_dev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return (iomap->addr + (pos & PAGE_MASK) - iomap->offset) >> 9;
}
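
/*
 * Worked example (illustrative note, not from the original source; the ">> 9"
 * assumes 512-byte sectors): for an iomap with addr == 0x10000 and
 * offset == 0x2000, a fault at file position 0x3a10 is first page-aligned to
 * 0x3000, giving byte address 0x10000 + 0x3000 - 0x2000 = 0x11000, i.e.
 * sector 0x88.
 */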
static int dax_iomap_pfn(struct iomap *iomap, loff_t pos, size_t size,
			 pfn_t *pfnp)
{
	const sector_t sector = dax_iomap_sector(iomap, pos);
	pgoff_t pgoff;
	int id, rc;
	long length;

	rc = bdev_dax_pgoff(iomap->bdev, sector, size, &pgoff);
	if (rc)
		return rc;
	id = dax_read_lock();
	length = dax_direct_access(iomap->dax_dev, pgoff, PHYS_PFN(size),
				   NULL, pfnp);
	if (length < 0) {
		rc = length;
		goto out;
	}
	rc = -EINVAL;
	if (PFN_PHYS(length) < size)
		goto out;
	if (pfn_t_to_pfn(*pfnp) & (PHYS_PFN(size)-1))
		goto out;
	/* For larger pages we need devmap */
	if (length > 1 && !pfn_t_devmap(*pfnp))
		goto out;
	rc = 0;
out:
	dax_read_unlock(id);
	return rc;
}
/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static vm_fault_t dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	pfn_t pfn;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	pfn = page_to_pfn_t(zero_page);
	dax_insert_mapping_entry(mapping, vmf, entry, pfn, RADIX_DAX_ZERO_PAGE,
			false);
	ret = vmf_insert_mixed(vmf->vma, vaddr, pfn);
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}
int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	size_t xfer;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		/*
		 * The userspace address for the memory copy has already been
		 * validated via access_ok() in either vfs_read() or
		 * vfs_write(), depending on which operation we are doing.
		 */
		if (iov_iter_rw(iter) == WRITE)
			xfer = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			xfer = dax_copy_to_iter(dax_dev, pgoff, kaddr,
					map_len, iter);

		pos += xfer;
		length -= xfer;
		done += xfer;

		if (xfer == 0)
			ret = -EFAULT;
		if (xfer < map_len)
			break;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}
/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
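
/*
 * Illustrative sketch (not part of this file): a filesystem ->read_iter for a
 * DAX file typically just wraps dax_iomap_rw() with the inode lock and its
 * own iomap ops, roughly as ext4/xfs do.  "example_iomap_ops" is a
 * hypothetical placeholder.
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb,
 *			struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *
 *		file_accessed(iocb->ki_filp);
 *		return ret;
 *	}
 */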
static vm_fault_t dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}
/*
 * MAP_SYNC on a dax mapping guarantees dirty metadata is
 * flushed on write-faults (non-cow), but not read-faults.
 */
static bool dax_fault_is_synchronous(unsigned long flags,
		struct vm_area_struct *vma, struct iomap *iomap)
{
	return (flags & IOMAP_WRITE) && (vma->vm_flags & VM_SYNC)
		&& (iomap->flags & IOMAP_F_DIRTY);
}
static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       int *iomap_errp, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	vm_fault_t ret = 0;
	void *entry;
	pfn_t pfn;

	trace_dax_pte_fault(inode, vmf, ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if (write && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX required
	 * the file system block size to be equal the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (iomap_errp)
		*iomap_errp = error;
	if (error) {
		ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	if (vmf->cow_page) {
		sector_t sector = dax_iomap_sector(&iomap, pos);

		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		ret = finish_fault(vmf);
		if (!ret)
			ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	sync = dax_fault_is_synchronous(flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_iomap_pfn(&iomap, pos, PAGE_SIZE, &pfn);
		if (error < 0)
			goto error_finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						 0, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PTE into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp)) {
				error = -EIO;
				goto error_finish_iomap;
			}
			*pfnp = pfn;
			ret = VM_FAULT_NEEDDSYNC | major;
			goto finish_iomap;
		}
		trace_dax_insert_mapping(inode, vmf, entry);
		if (write)
			ret = vmf_insert_mixed_mkwrite(vma, vaddr, pfn);
		else
			ret = vmf_insert_mixed(vma, vaddr, pfn);

		goto finish_iomap;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!write) {
			ret = dax_load_hole(mapping, entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	ret = dax_fault_return(error);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff);
 out:
	trace_dax_pte_fault_done(inode, vmf, ret);
	return ret | major;
}
#ifdef CONFIG_FS_DAX_PMD
static vm_fault_t dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	pfn_t pfn;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	pfn = page_to_pfn_t(zero_page);
	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	bool sync;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	vm_fault_t result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;
	pfn_t pfn;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the radix tree.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff >= max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) >= max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
	 * is already in the tree, for instance), it will return -EEXIST and
	 * we just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	sync = dax_fault_is_synchronous(iomap_flags, vma, &iomap);

	switch (iomap.type) {
	case IOMAP_MAPPED:
		error = dax_iomap_pfn(&iomap, pos, PMD_SIZE, &pfn);
		if (error < 0)
			goto finish_iomap;

		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
						RADIX_DAX_PMD, write && !sync);

		/*
		 * If we are doing synchronous page fault and inode needs fsync,
		 * we can insert PMD into page tables only after that happens.
		 * Skip insertion for now and return the pfn so that caller can
		 * insert it after fsync is done.
		 */
		if (sync) {
			if (WARN_ON_ONCE(!pfnp))
				goto finish_iomap;
			*pfnp = pfn;
			result = VM_FAULT_NEEDDSYNC;
			goto finish_iomap;
		}

		trace_dax_pmd_insert_mapping(inode, vmf, PMD_SIZE, pfn, entry);
		result = vmf_insert_pfn_pmd(vma, vmf->address, vmf->pmd, pfn,
					    write);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */
/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @pfnp: PFN to insert for synchronous faults if fsync is required
 * @iomap_errp: Storage for detailed error code in case of error
 * @ops: Iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    pfn_t *pfnp, int *iomap_errp, const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, pfnp, iomap_errp, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, pfnp, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
/*
 * dax_insert_pfn_mkwrite - insert PTE or PMD entry into page tables
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function inserts writeable PTE or PMD entry into page tables for mmaped
 * DAX file.  It takes care of marking corresponding radix tree entry as dirty
 * as well.
 */
static vm_fault_t dax_insert_pfn_mkwrite(struct vm_fault *vmf,
				  enum page_entry_size pe_size,
				  pfn_t pfn)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;
	vm_fault_t ret;

	xa_lock_irq(&mapping->i_pages);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Did we race with someone splitting entry or so? */
	if (!entry ||
	    (pe_size == PE_SIZE_PTE && !dax_is_pte_entry(entry)) ||
	    (pe_size == PE_SIZE_PMD && !dax_is_pmd_entry(entry))) {
		put_unlocked_mapping_entry(mapping, index, entry);
		xa_unlock_irq(&mapping->i_pages);
		trace_dax_insert_pfn_mkwrite_no_entry(mapping->host, vmf,
						      VM_FAULT_NOPAGE);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->i_pages, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	xa_unlock_irq(&mapping->i_pages);
	switch (pe_size) {
	case PE_SIZE_PTE:
		ret = vmf_insert_mixed_mkwrite(vmf->vma, vmf->address, pfn);
		break;
#ifdef CONFIG_FS_DAX_PMD
	case PE_SIZE_PMD:
		ret = vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, true);
		break;
#endif
	default:
		ret = VM_FAULT_FALLBACK;
	}
	put_locked_mapping_entry(mapping, index);
	trace_dax_insert_pfn_mkwrite(mapping->host, vmf, ret);
	return ret;
}
/**
 * dax_finish_sync_fault - finish synchronous page fault
 * @vmf: The description of the fault
 * @pe_size: Size of entry to be inserted
 * @pfn: PFN to insert
 *
 * This function ensures that the file range touched by the page fault is
 * stored persistently on the media and handles inserting of appropriate page
 * table entry.
 */
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
		enum page_entry_size pe_size, pfn_t pfn)
{
	int err;
	loff_t start = ((loff_t)vmf->pgoff) << PAGE_SHIFT;
	size_t len = 0;

	if (pe_size == PE_SIZE_PTE)
		len = PAGE_SIZE;
	else if (pe_size == PE_SIZE_PMD)
		len = PMD_SIZE;
	else
		WARN_ON_ONCE(1);
	err = vfs_fsync_range(vmf->vma->vm_file, start, start + len - 1, 1);
	if (err)
		return VM_FAULT_SIGBUS;
	return dax_insert_pfn_mkwrite(vmf, pe_size, pfn);
}
EXPORT_SYMBOL_GPL(dax_finish_sync_fault);
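
/*
 * Illustrative sketch (not part of this file): how a filesystem's
 * ->huge_fault handler might wire dax_iomap_fault() and
 * dax_finish_sync_fault() together for MAP_SYNC mappings.
 * "example_iomap_ops" and the locking shown are hypothetical placeholders;
 * real filesystems (ext4, xfs) wrap these calls in their own journalling
 * and locking.
 *
 *	static vm_fault_t example_dax_huge_fault(struct vm_fault *vmf,
 *			enum page_entry_size pe_size)
 *	{
 *		struct inode *inode = file_inode(vmf->vma->vm_file);
 *		bool write = vmf->flags & FAULT_FLAG_WRITE;
 *		pfn_t pfn;
 *		vm_fault_t ret;
 *
 *		if (write)
 *			file_update_time(vmf->vma->vm_file);
 *		down_read(&inode->i_rwsem);
 *		ret = dax_iomap_fault(vmf, pe_size, &pfn, NULL,
 *				&example_iomap_ops);
 *		up_read(&inode->i_rwsem);
 *
 *		if (ret & VM_FAULT_NEEDDSYNC)
 *			ret = dax_finish_sync_fault(vmf, pe_size, pfn);
 *		return ret;
 *	}
 */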