/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);
/*
 * We use the lowest available bit in an exceptional entry for locking, one bit
 * for the entry size (PMD) and two more to tell us if the entry is a zero page
 * or an empty entry that is just used for locking.  In total four special bits.
 *
 * If the PMD bit isn't set the entry has size PAGE_SIZE, and if the ZERO_PAGE
 * and EMPTY bits aren't set the entry is a normal DAX entry with a filesystem
 * block allocation.
 */
#define RADIX_DAX_SHIFT		(RADIX_TREE_EXCEPTIONAL_SHIFT + 4)
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)
#define RADIX_DAX_PMD		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_EMPTY		(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))
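
/*
 * Sketch of the encoding, derived from the helpers below: an entry packs the
 * sector number above RADIX_DAX_SHIFT and keeps the four flag bits defined
 * above just on top of the radix tree's exceptional-entry bits.  For example,
 * a locked PMD entry for sector S looks like:
 *
 *	RADIX_TREE_EXCEPTIONAL_ENTRY | RADIX_DAX_PMD | RADIX_DAX_ENTRY_LOCK |
 *		((unsigned long)S << RADIX_DAX_SHIFT)
 */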
static unsigned long dax_radix_sector(void *entry)
{
	return (unsigned long)entry >> RADIX_DAX_SHIFT;
}

static void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
{
	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
			((unsigned long)sector << RADIX_DAX_SHIFT) |
			RADIX_DAX_ENTRY_LOCK);
}

static unsigned int dax_radix_order(void *entry)
{
	if ((unsigned long)entry & RADIX_DAX_PMD)
		return PMD_SHIFT - PAGE_SHIFT;
	return 0;
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}
/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};
static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
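
/*
 * Note that two different entries may hash to the same waitqueue; this is
 * why wake_exceptional_entry_func() below re-checks the (mapping, index)
 * key before actually waking a waiter.
 */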
static int wake_exceptional_entry_func(wait_queue_entry_t *wait,
		unsigned int mode, int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}
/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
static void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}
/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}
/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}
/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}
/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry and return it.  The caller must call
 * put_unlocked_mapping_entry() when it decides not to lock the entry, or
 * put_locked_mapping_entry() when it has locked the entry and now wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
		pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}
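
/*
 * In other words: look up the entry, and if it is locked, drop tree_lock,
 * sleep on the hashed waitqueue until the holder unlocks it, then re-take
 * tree_lock and retry the lookup.  The caller therefore always gets back
 * control with tree_lock held and a fresh view of the slot.
 */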
static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	dax_unlock_mapping_entry(mapping, index);
}
/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
/*
 * Find radix tree entry at given index. If it points to an exceptional entry,
 * return it with the radix tree entry locked. If the radix tree doesn't
 * contain given index, create an empty exceptional entry for the index and
 * return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * see a clear benefit.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}
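
/*
 * Return convention for grab_mapping_entry(): on success the locked radix
 * tree entry is returned and the caller must eventually drop it with
 * put_locked_mapping_entry(); on failure an ERR_PTR() is returned
 * (e.g. -EEXIST when a PMD entry was requested but PTE entries already
 * exist in the range).
 */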
static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
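
/*
 * With @trunc false the entry is only removed if it is clean (neither the
 * DIRTY nor the TOWRITE tag is set), so data still pending fsync/writeback
 * is never silently dropped; the truncate path passes @trunc == true and
 * removes the entry unconditionally.
 */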
/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to become unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen an exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}
/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}
static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}
/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_range(mapping,
					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
					PMD_SIZE, 0);
		else /* pte entry */
			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
					PAGE_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
		entry = new_entry;
	}

	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);

	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}
static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}
/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end,
				   &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}
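
/*
 * Write-protecting and cleaning the mappings above is what lets
 * dax_writeback_one() clear the radix tree dirty tag safely: any new store
 * to the page must first go through a write fault, which re-dirties the
 * radix tree entry.
 */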
static int dax_writeback_one(struct block_device *bdev,
		struct dax_device *dax_dev, struct address_space *mapping,
		pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *entry2, **slot, *kaddr;
	long ret = 0, id;
	sector_t sector;
	pgoff_t pgoff;
	size_t size;
	pfn_t pfn;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	sector = dax_radix_sector(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	id = dax_read_lock();
	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (ret)
		goto dax_unlock;

	/*
	 * dax_direct_access() may sleep, so cannot hold tree_lock over
	 * its invocation.
	 */
	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
	if (ret < 0)
		goto dax_unlock;

	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
		ret = -EIO;
		goto dax_unlock;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
	dax_flush(dax_dev, pgoff, kaddr, size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
 dax_unlock:
	dax_read_unlock(id);
	put_locked_mapping_entry(mapping, index);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, dax_dev, mapping,
					indices[i], pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, void *entry,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	void *ret, *kaddr;
	pgoff_t pgoff;
	int id, rc;
	pfn_t pfn;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	trace_dax_insert_mapping(mapping->host, vmf, ret);
	if (vmf->flags & FAULT_FLAG_WRITE)
		return vm_insert_mixed_mkwrite(vma, vaddr, pfn);
	else
		return vm_insert_mixed(vma, vaddr, pfn);
}
/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	int ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	void *entry2;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0,
			RADIX_DAX_ZERO_PAGE);
	if (IS_ERR(entry2)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	vm_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page));
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}
int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, pgoff, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
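
/*
 * Summary of the two paths above: a range aligned to the device's logical
 * block size is zeroed via blkdev_issue_zeroout(), while a sub-sector range
 * is zeroed through dax_direct_access() with memset(), followed by
 * dax_flush() to push the zeroes out to the persistent domain.
 */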
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}
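
/*
 * Worked example for dax_iomap_sector(), assuming 4K pages: with
 * iomap->offset == 0, iomap->blkno == 100 and pos == 8192,
 * (pos & PAGE_MASK) - iomap->offset == 8192 bytes == 16 512-byte sectors,
 * so the returned sector is 116.  The >> 9 converts a byte offset into
 * 512-byte sector units.
 */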
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			map_len = copy_to_iter(kaddr, map_len, iter);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}
/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}
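
/*
 * dax_fault_return() maps the errno coming back from the iomap/dax helpers
 * onto VM_FAULT_* codes: 0 means the fault was fully handled (NOPAGE),
 * -ENOMEM becomes OOM, and anything else is reported as SIGBUS.
 */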
static int dax_iomap_pte_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	trace_dax_pte_fault(inode, vmf, vmf_ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		vmf_ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		vmf_ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX required
	 * the file system block size to be equal the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error) {
		vmf_ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
				sector, PAGE_SIZE, entry, vmf->vma, vmf);
		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	vmf_ret = dax_fault_return(error) | major;
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff);
 out:
	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
	return vmf_ret;
}
#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
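
/*
 * Example: on x86-64 with 4K pages and 2MiB PMDs, PG_PMD_COLOUR is
 * (2MiB / 4KiB) - 1 = 511, so (index & ~PG_PMD_COLOUR) rounds a page index
 * down to the start of its PMD and (pgoff | PG_PMD_COLOUR) gives the last
 * index covered by that PMD.
 */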
static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
		loff_t pos, void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	const sector_t sector = dax_iomap_sector(iomap, pos);
	struct dax_device *dax_dev = iomap->dax_dev;
	struct block_device *bdev = iomap->bdev;
	struct inode *inode = mapping->host;
	const size_t size = PMD_SIZE;
	void *ret = NULL, *kaddr;
	long length = 0;
	pgoff_t pgoff;
	pfn_t pfn;
	int id;

	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
		goto fallback;

	id = dax_read_lock();
	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (length < 0)
		goto unlock_fallback;
	length = PFN_PHYS(length);

	if (length < size)
		goto unlock_fallback;
	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
		goto unlock_fallback;
	if (!pfn_t_devmap(pfn))
		goto unlock_fallback;
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		goto fallback;

	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, vmf->flags & FAULT_FLAG_WRITE);

unlock_fallback:
	dax_read_unlock(id);
fallback:
	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
	return VM_FAULT_FALLBACK;
}
static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	ret = dax_insert_mapping_entry(mapping, vmf, entry, 0,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE);
	if (IS_ERR(ret))
		goto fallback;

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}
static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the radix tree.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff > max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
	 * is already in the tree, for instance), it will return -EEXIST and
	 * we just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
	    !pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vmf, &iomap, pos, entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */
/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @pe_size: Size of the page to fault in
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);