/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);
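/*
 * Added commentary: the wait table is a fixed-size hash of waitqueues
 * rather than a per-entry queue.  Hash collisions are harmless because
 * wake_exceptional_entry_func() below rechecks the (mapping, entry_start)
 * key before waking a sleeper, so a collision only costs a spurious
 * wakeup check.
 */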
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}
static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}
static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}
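/*
 * Illustration (sketch, not from this file): the flag tests above assume
 * the radix tree entry encoding from the DAX header, roughly of the form
 *
 *	entry = (sector << shift) | RADIX_DAX_PMD? | RADIX_DAX_HZP?
 *		| RADIX_DAX_EMPTY? | RADIX_DAX_ENTRY_LOCK?
 *
 * i.e. the low bits carry the entry type and lock state while the high
 * bits carry the block mapping.  The exact bit positions live in the DAX
 * header alongside dax_radix_locked_entry() and dax_radix_sector().
 */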
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}
/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};
static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}
/*
 * Check whether the given slot is locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}
/*
 * Mark the given slot as locked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}
/*
 * Mark the given slot as unlocked.  The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}
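/*
 * Typical use of the slot helpers above (sketch, mirroring the callers
 * later in this file such as dax_pfn_mkwrite()): look an entry up and lock
 * it under tree_lock, drop the lock while working on it, then unlock the
 * entry and wake any waiters:
 *
 *	spin_lock_irq(&mapping->tree_lock);
 *	entry = get_unlocked_mapping_entry(mapping, index, &slot);
 *	entry = lock_slot(mapping, slot);
 *	spin_unlock_irq(&mapping->tree_lock);
 *	... work on the entry ...
 *	put_locked_mapping_entry(mapping, index, entry);
 */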
/*
 * Look up the entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry, and return it.  The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and wants to
 * unlock it again.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}
static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}
/*
 * Called when we are done with the radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}
/*
 * Find the radix tree entry at the given index.  If it points to a page,
 * return with the page locked.  If it points to an exceptional entry, return
 * with the radix tree entry locked.  If the radix tree doesn't contain the
 * given index, create an empty exceptional entry for the index and return
 * with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries.  There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags.  For
 * persistent memory the benefit is doubtful.  We can add that later if we can
 * measure a real benefit from it.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (!radix_tree_exceptional_entry(entry) ||
			    dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (radix_tree_exceptional_entry(entry) &&
			    dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index, entry);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}
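/*
 * Usage sketch (see the PTE and PMD fault handlers below): callers pass 0
 * for a PTE-sized entry or RADIX_DAX_PMD for a 2MiB entry, and must treat
 * an ERR_PTR(-EEXIST) result as the signal to fall back to PTE faults:
 *
 *	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
 *	if (IS_ERR(entry))
 *		goto fallback;		(as in dax_iomap_pmd_fault())
 */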
/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}
static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Delete the exceptional DAX entry at @index from @mapping.  Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from the truncate / punch_hole path.  As such, the
	 * caller must hold locks protecting against concurrent modifications
	 * of the radix tree (usually fs-private i_mmap_sem for writing).
	 * Since the caller has seen an exceptional entry for this index, we
	 * better find it at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}
/*
 * Invalidate an exceptional DAX entry if it is easily possible.  This
 * handles DAX entries for invalidate_inode_pages(), so we evict the entry
 * only if we can do so without blocking.
 */
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = 0;
	void *entry, **slot;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry) ||
	    slot_locked(mapping, slot))
		goto out;
	if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	    radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	spin_unlock_irq(&mapping->tree_lock);
	if (ret)
		dax_wake_mapping_entry_waiter(mapping, index, entry, true);
	return ret;
}
/*
 * Invalidate an exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void **entry,
			 struct vm_fault *vmf)
{
	struct page *page;
	int ret;

	/* Hole page already exists? Return it...  */
	if (!radix_tree_exceptional_entry(*entry)) {
		page = *entry;
		goto out;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
 out:
	vmf->page = page;
	ret = finish_fault(vmf);
	vmf->page = NULL;
	*entry = page;
	if (!ret) {
		/* Grab reference for PTE that is now referencing the page */
		get_page(page);
		return VM_FAULT_NOPAGE;
	}
	return ret;
}
static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
		struct page *to, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}
/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
		/* replacing huge zero page with PMD block mapping */
		unmap_mapping_range(mapping,
			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = __radix_tree_insert(page_tree, index,
				dax_radix_order(new_entry), new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need hole page anymore, it has been replaced with
		 * locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}
static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}
/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;
	bool changed;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);
		changed = false;
		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			changed = true;
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			changed = true;
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		if (changed)
			mmu_notifier_invalidate_page(vma->vm_mm, address);
	}
	i_mmap_unlock_read(mapping);
}
static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	struct blk_dax_ctl dax;
	void *entry2, **slot;
	int ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || !radix_tree_exceptional_entry(entry2))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	dax.sector = dax_radix_sector(entry);
	dax.size = PAGE_SIZE << dax_radix_order(entry);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0) {
		put_locked_mapping_entry(mapping, index, entry);
		return ret;
	}

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
	wb_cache_pmem(dax.addr, dax.size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	put_locked_mapping_entry(mapping, index, entry);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, sector_t sector, size_t size,
		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *ret;
	void *entry = *entryp;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, dax.pfn);
}
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry)) {
		if (entry)
			put_unlocked_mapping_entry(mapping, index, entry);
		spin_unlock_irq(&mapping->tree_lock);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	/*
	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
	 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
	 * the fault in either case.
	 */
	finish_mkwrite_fault(vmf);
	put_locked_mapping_entry(mapping, index, entry);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
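/*
 * Usage sketch (illustrative; names are not from this file): a filesystem
 * typically wires this up as the ->pfn_mkwrite handler in the
 * vm_operations_struct it installs for DAX files,
 *
 *	static const struct vm_operations_struct xyz_dax_vm_ops = {
 *		.fault		= xyz_dax_fault,
 *		.pfn_mkwrite	= dax_pfn_mkwrite,
 *	};
 *
 * so the radix tree entry gets dirtied on the first write to an
 * already-mapped read-only DAX pfn.
 */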
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector		= sector,
		.size		= PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				length >> 9, GFP_NOFS, true);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}
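/*
 * Worked example: iomap->blkno is in 512-byte units, so for a mapping with
 * iomap->offset = 0 and iomap->blkno = 1000, pos = 8192 (the third 4k page)
 * yields sector 1000 + (8192 >> 9) = 1016.
 */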
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		struct blk_dax_ctl dax = { 0 };
		ssize_t map_len;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		dax.sector = dax_iomap_sector(iomap, pos);
		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
		map_len = dax_map_atomic(iomap->bdev, &dax);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		dax.addr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
		else
			map_len = copy_to_iter(dax.addr, map_len, iter);
		dax_unmap_atomic(iomap->bdev, &dax);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}

	return done ? done : ret;
}
/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE)
		flags |= IOMAP_WRITE;

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
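/*
 * Usage sketch (illustrative; names and locking are not from this file):
 * a filesystem's ->read_iter() or ->write_iter() for DAX inodes typically
 * takes whatever inode lock it needs and then delegates here, e.g.
 *
 *	static ssize_t xyz_dax_read_iter(struct kiocb *iocb, struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &xyz_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */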
static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}
/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in their fault
 * or mkwrite handler for DAX files. Assumes the caller has done all the
 * necessary locking for the page fault to proceed successfully.
 */
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			struct iomap_ops *ops)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error)
		return dax_fault_return(error);
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		vmf_ret = dax_fault_return(-EIO);	/* fs corruption? */
		goto finish_iomap;
	}

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
					vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_unlock_entry;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto unlock_entry;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, sector,
				PAGE_SIZE, &entry, vma, vmf);
		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, &entry, vmf);
			goto unlock_entry;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_unlock_entry:
	vmf_ret = dax_fault_return(error) | major;
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
	return vmf_ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
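/*
 * Worked example: with 4k pages and 2MiB PMDs, PG_PMD_COLOUR is 511 (0x1ff).
 * A page offset is PMD-aligned iff (pgoff & PG_PMD_COLOUR) == 0, and
 * (pgoff | PG_PMD_COLOUR) gives the last page offset of the PMD containing
 * pgoff, which is the form used in the file-size check below.
 */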
static int dax_pmd_insert_mapping(struct vm_area_struct *vma, pmd_t *pmd,
		struct vm_fault *vmf, unsigned long address,
		struct iomap *iomap, loff_t pos, bool write, void **entryp)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct block_device *bdev = iomap->bdev;
	struct blk_dax_ctl dax = {
		.sector = dax_iomap_sector(iomap, pos),
		.size = PMD_SIZE,
	};
	long length = dax_map_atomic(bdev, &dax);
	void *ret;

	if (length < 0) /* dax_map_atomic() failed */
		return VM_FAULT_FALLBACK;
	if (length < PMD_SIZE)
		goto unmap_fallback;
	if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
		goto unmap_fallback;
	if (!pfn_t_devmap(dax.pfn))
		goto unmap_fallback;

	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		return VM_FAULT_FALLBACK;
	*entryp = ret;

	return vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);

 unmap_fallback:
	dax_unmap_atomic(bdev, &dax);
	return VM_FAULT_FALLBACK;
}
static int dax_pmd_load_hole(struct vm_area_struct *vma, pmd_t *pmd,
		struct vm_fault *vmf, unsigned long address,
		struct iomap *iomap, void **entryp)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = address & PMD_MASK;
	struct page *zero_page;
	spinlock_t *ptl;
	pmd_t pmd_entry;
	void *ret;

	zero_page = mm_get_huge_zero_page(vma->vm_mm);

	if (unlikely(!zero_page))
		return VM_FAULT_FALLBACK;

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
			RADIX_DAX_PMD | RADIX_DAX_HZP);
	if (IS_ERR(ret))
		return VM_FAULT_FALLBACK;
	*entryp = ret;

	ptl = pmd_lock(vma->vm_mm, pmd);
	if (!pmd_none(*pmd)) {
		spin_unlock(ptl);
		return VM_FAULT_FALLBACK;
	}

	pmd_entry = mk_pmd(zero_page, vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vma->vm_mm, pmd_addr, pmd, pmd_entry);
	spin_unlock(ptl);
	return VM_FAULT_NOPAGE;
}
int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, struct iomap_ops *ops)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	struct vm_fault vmf;
	void *entry;
	loff_t pos;
	int error;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	if (pgoff > max_pgoff)
		return VM_FAULT_SIGBUS;

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto fallback;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	/*
	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
	 * the tree, for instance), it will return -EEXIST and we just fall
	 * back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto finish_iomap;

	vmf.pgoff = pgoff;
	vmf.flags = flags;
	vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vma, pmd, &vmf, address,
				&iomap, pos, write, &entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			goto unlock_entry;
		result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
				&entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff, entry);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, pmd, address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
	return result;
}
EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
#endif /* CONFIG_FS_DAX_PMD */