/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/iomap.h>
/*
 * We use the lowest available bit in an exceptional entry for locking and the
 * other two bits to determine the entry type. In total, 3 special bits.
 */
#define RADIX_DAX_SHIFT	(RADIX_TREE_EXCEPTIONAL_SHIFT + 3)
#define RADIX_DAX_PTE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_PMD	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_TYPE_MASK	(RADIX_DAX_PTE | RADIX_DAX_PMD)
#define RADIX_DAX_TYPE(entry)	((unsigned long)entry & RADIX_DAX_TYPE_MASK)
#define RADIX_DAX_SECTOR(entry)	(((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \
		RADIX_TREE_EXCEPTIONAL_ENTRY))
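
/*
 * Illustrative sketch (not part of the original code): how the macros above
 * encode and decode a PTE-sized entry for a given sector. The variable names
 * are hypothetical.
 *
 *	void *entry = RADIX_DAX_ENTRY(sector, false);
 *
 *	// The low bits carry RADIX_TREE_EXCEPTIONAL_ENTRY, the (initially
 *	// clear) lock bit and RADIX_DAX_PTE; the sector sits in the bits
 *	// above RADIX_DAX_SHIFT.
 *	WARN_ON(RADIX_DAX_TYPE(entry) != RADIX_DAX_PTE);
 *	WARN_ON(RADIX_DAX_SECTOR(entry) != sector);
 */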
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);
static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
					      pgoff_t index)
{
	unsigned long hash = hash_long((unsigned long)mapping ^ index,
				       DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = ERR_PTR(rc);
		blk_queue_exit(q);
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
			     const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}
static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

static sector_t to_sector(const struct buffer_head *bh,
			  const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc = 0;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = ERR_PTR(-EIO),
	};
	unsigned blkbits = inode->i_blkbits;
	sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
								>> blkbits;

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
				/*
				 * We allow uninitialized buffers for writes
				 * beyond EOF as those cannot race with faults
				 */
				WARN_ON_ONCE(
					(buffer_new(bh) && block < file_blks) ||
					(rw == WRITE && buffer_unwritten(bh)));
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				dax.addr += first;
				size = map_len - first;
			}
			/*
			 * pos + size is one past the last offset for IO,
			 * so pos + size can overflow loff_t at extreme offsets.
			 * Cast to u64 to catch this and get the true minimum.
			 */
			max = min_t(u64, pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}
/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes. For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t pos = iocb->ki_pos;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_lock(inode);

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_unlock(inode);

	if (end_io) {
		int err;

		err = end_io(iocb, pos, retval, bh.b_private);
		if (err)
			retval = err;
	}

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
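
/*
 * Illustrative sketch (not part of the original code): how a block-based
 * filesystem might wire dax_do_io() into its ->direct_IO method. The names
 * myfs_direct_IO and myfs_get_block are hypothetical placeholders for the
 * filesystem's own callbacks.
 *
 *	static ssize_t myfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		if (!IS_DAX(inode))
 *			return -EINVAL;	// take the regular DIO path instead
 *		return dax_do_io(iocb, inode, iter, myfs_get_block,
 *				 NULL, DIO_LOCKING);
 *	}
 */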
/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t index;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->index != ewait->key.index)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}
/*
 * Check whether the given slot is locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}
/*
 * Look up an entry in the radix tree and, if it is an exceptional entry, wait
 * for it to become unlocked before returning it. The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and wants to
 * unlock it again.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;
	ewait.key.mapping = mapping;
	ewait.key.index = index;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					    &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}
/*
 * Find radix tree entry at given index. If it points to a page, return with
 * the page locked. If it points to the exceptional entry, return with the
 * radix tree entry locked. If the radix tree doesn't contain given index,
 * create empty exceptional entry for the index and return with it locked.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry) {
		int err;

		spin_unlock_irq(&mapping->tree_lock);
		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err)
			return ERR_PTR(err);
		entry = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
			       RADIX_DAX_ENTRY_LOCK);
		spin_lock_irq(&mapping->tree_lock);
		err = radix_tree_insert(&mapping->page_tree, index, entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/* Someone already created the entry? */
			if (err == -EEXIST)
				goto restart;
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
				   pgoff_t index, bool wake_all)
{
	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq)) {
		struct exceptional_entry_key key;

		key.mapping = mapping;
		key.index = index;
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
	}
}
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, false);
}
static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}

/*
 * Called when we are done with radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, false);
}
/*
 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	radix_tree_delete(&mapping->page_tree, index);
	mapping->nrexceptional--;
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, true);

	return 1;
}
/*
 * The user has performed a load from a hole in the file. Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files. We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct page *page;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(entry)) {
		vmf->page = entry;
		return VM_FAULT_LOCKED;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page) {
		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
		return VM_FAULT_OOM;
	}
	vmf->page = page;
	return VM_FAULT_LOCKED;
}
static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
			 struct page *to, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}
#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))

static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) |
		       RADIX_DAX_ENTRY_LOCK);
	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = radix_tree_insert(page_tree, index, new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else {
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, NULL, &slot);
		WARN_ON_ONCE(ret != entry);
		radix_tree_replace_slot(slot, new_entry);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need hole page anymore, it has been replaced with
		 * locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}
static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int type = RADIX_DAX_TYPE(entry);
	struct radix_tree_node *node;
	struct blk_dax_ctl dax;
	void **slot;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked. These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;

	/* another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto unlock;

	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
		ret = -EIO;
		goto unlock;
	}

	dax.sector = RADIX_DAX_SECTOR(entry);
	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	return ret;

 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index, pmd_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;
	void *entry;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;
	pmd_index = DAX_PMD_INDEX(start_index);

	rcu_read_lock();
	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
	rcu_read_unlock();

	/* see if the start of our range is covered by a PMD entry */
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
		start_index = pmd_index;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
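
/*
 * Illustrative sketch (not part of the original code): a filesystem that
 * supports DAX typically routes its ->writepages to this helper so that
 * fsync/msync flush CPU caches for dirty DAX mappings. The name
 * myfs_writepages is a hypothetical placeholder.
 *
 *	static int myfs_writepages(struct address_space *mapping,
 *				   struct writeback_control *wbc)
 *	{
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 *	}
 */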
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, sector_t sector, size_t size,
		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *ret;
	void *entry = *entryp;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, dax.pfn);
}
/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	void *entry;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;

	entry = grab_mapping_entry(mapping, vmf->pgoff);
	if (IS_ERR(entry)) {
		error = PTR_ERR(entry);
		goto out;
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_entry;

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_dax(bh.b_bdev, to_sector(&bh, inode),
					bh.b_size, new_page, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_entry;
		if (!radix_tree_exceptional_entry(entry)) {
			vmf->page = entry;
			return VM_FAULT_LOCKED;
		}
		vmf->entry = entry;
		return VM_FAULT_DAX_LOCKED;
	}

	if (!buffer_mapped(&bh)) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_entry;
		} else {
			return dax_load_hole(mapping, entry, vmf);
		}
	}

	/* Filesystem should not return unwritten buffers to us! */
	WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	error = dax_insert_mapping(mapping, bh.b_bdev, to_sector(&bh, inode),
			bh.b_size, &entry, vma, vmf);
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL_GPL(dax_fault);
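
/*
 * Illustrative sketch (not part of the original code): a filesystem fault
 * handler would take its own lock that serializes against truncate and then
 * call dax_fault() with its get_block callback. myfs_dax_fault, myfs_dax_sem,
 * MYFS_I() and myfs_get_block are hypothetical placeholders.
 *
 *	static int myfs_dax_fault(struct vm_area_struct *vma,
 *				  struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vma->vm_file);
 *		int ret;
 *
 *		down_read(&MYFS_I(inode)->myfs_dax_sem);
 *		ret = dax_fault(vma, vmf, myfs_get_block);
 *		up_read(&MYFS_I(inode)->myfs_dax_sem);
 *		return ret;
 *	}
 */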
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * The 'colour' (i.e. low bits) within a PMD of a page offset. This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

static void __dax_dbg(struct buffer_head *bh, unsigned long address,
		const char *reason, const char *fn)
{
	if (bh) {
		char bname[BDEVNAME_SIZE];
		bdevname(bh->b_bdev, bname);
		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
			"length %zd fallback: %s\n", fn, current->comm,
			address, bname, bh->b_state, (u64)bh->b_blocknr,
			bh->b_size, reason);
	} else {
		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
			current->comm, address, reason);
	}
}

#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")
/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int result = 0;
	bool alloc = false;

	/* dax pmd mappings require pfn_t_devmap() */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED)) {
		split_huge_pmd(vma, pmd, address);
		dax_pmd_dbg(NULL, address, "cow write");
		return VM_FAULT_FALLBACK;
	}
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start) {
		dax_pmd_dbg(NULL, address, "vma start unaligned");
		return VM_FAULT_FALLBACK;
	}
	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
		dax_pmd_dbg(NULL, address, "vma end unaligned");
		return VM_FAULT_FALLBACK;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(NULL, address,
				"offset + huge page size > file size");
		return VM_FAULT_FALLBACK;
	}

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;

	if (get_block(inode, block, &bh, 0) != 0)
		return VM_FAULT_SIGBUS;

	if (!buffer_mapped(&bh) && write) {
		if (get_block(inode, block, &bh, 1) != 0)
			return VM_FAULT_SIGBUS;
		alloc = true;
		WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	}

	bdev = bh.b_bdev;

	if (bh.b_size < PMD_SIZE) {
		dax_pmd_dbg(&bh, address, "allocated block too small");
		return VM_FAULT_FALLBACK;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (alloc) {
		loff_t lstart = pgoff << PAGE_SHIFT;
		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

		truncate_pagecache_range(inode, lstart, lend);
	}

	if (!write && !buffer_mapped(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = mm_get_huge_zero_page(vma->vm_mm);

		if (unlikely(!zero_page)) {
			dax_pmd_dbg(&bh, address, "no zero page");
			goto fallback;
		}

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			dax_pmd_dbg(&bh, address, "pmd already present");
			goto fallback;
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
				__func__, current->comm, address,
				(unsigned long long) to_sector(&bh, inode));

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			dax_pmd_dbg(&bh, address, "dax-error fallback");
			goto fallback;
		}
		if (length < PMD_SIZE) {
			dax_pmd_dbg(&bh, address, "dax-length too small");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}
		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
			dax_pmd_dbg(&bh, address, "pfn unaligned");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (!pfn_t_devmap(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			dax_pmd_dbg(&bh, address, "pfn not in memmap");
			goto fallback;
		}
		dax_unmap_atomic(bdev, &dax);

		/*
		 * For PTE faults we insert a radix tree entry for reads, and
		 * leave it clean. Then on the first write we dirty the radix
		 * tree entry via the dax_pfn_mkwrite() path. This sequence
		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
		 * call into get_block() to translate the pgoff to a sector in
		 * order to be able to create a new radix tree entry.
		 *
		 * The PMD path doesn't have an equivalent to
		 * dax_pfn_mkwrite(), though, so for a read followed by a
		 * write we traverse all the way through dax_pmd_fault()
		 * twice. This means we can just skip inserting a radix tree
		 * entry completely on the initial read and just wait until
		 * the write to insert a dirty entry.
		 */
		if (write) {
			/*
			 * We should insert radix-tree entry and dirty it here.
			 * For now this is broken...
			 */
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
				__func__, current->comm, address,
				pfn_t_to_pfn(dax.pfn),
				(unsigned long long) dax.sector);
		result |= vmf_insert_pfn_pmd(vma, address, pmd,
				dax.pfn, write);
	}

 out:
	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	put_unlocked_mapping_entry(mapping, index, entry);
out:
	spin_unlock_irq(&mapping->tree_lock);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
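
/*
 * Illustrative sketch (not part of the original code): how the fault helpers
 * exported above might be wired into a filesystem's vm_operations_struct and
 * ->mmap. All myfs_* names are hypothetical wrappers around dax_fault(),
 * dax_pmd_fault() and dax_pfn_mkwrite().
 *
 *	static const struct vm_operations_struct myfs_dax_vm_ops = {
 *		.fault		= myfs_dax_fault,
 *		.pmd_fault	= myfs_dax_pmd_fault,
 *		.page_mkwrite	= myfs_dax_fault,
 *		.pfn_mkwrite	= myfs_dax_pfn_mkwrite,
 *	};
 *
 *	static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		file_accessed(file);
 *		vma->vm_ops = &myfs_dax_vm_ops;
 *		vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
 *		return 0;
 *	}
 */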
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector		= sector,
		.size		= PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				length >> 9, GFP_NOFS, true);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file. This is intended for hole-punch operations. If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	if (WARN_ON_ONCE((offset + length) > PAGE_SIZE))
		return -EINVAL;

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0 || !buffer_written(&bh))
		return err;

	return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
			offset, length);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
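
/*
 * Illustrative sketch (not part of the original code): a filesystem shrinking
 * a DAX file would zero the tail of the last remaining page before updating
 * i_size, typically from its truncate/setattr path. myfs_get_block is a
 * hypothetical placeholder.
 *
 *	static int myfs_dax_truncate(struct inode *inode, loff_t newsize)
 *	{
 *		int error;
 *
 *		error = dax_truncate_page(inode, newsize, myfs_get_block);
 *		if (error)
 *			return error;
 *		truncate_setsize(inode, newsize);
 *		return 0;
 *	}
 */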
#ifdef CONFIG_FS_IOMAP
static loff_t
iomap_dax_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		struct blk_dax_ctl dax = { 0 };
		ssize_t map_len;

		dax.sector = iomap->blkno +
			(((pos & PAGE_MASK) - iomap->offset) >> 9);
		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
		map_len = dax_map_atomic(iomap->bdev, &dax);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		dax.addr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
		else
			map_len = copy_to_iter(dax.addr, map_len, iter);
		dax_unmap_atomic(iomap->bdev, &dax);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}

	return done ? done : ret;
}

/**
 * iomap_dax_rw - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @iter: The addresses to do I/O from or to
 * @ops: iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory. The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE)
		flags |= IOMAP_WRITE;

	/*
	 * Yes, even DAX files can have page cache attached to them: A zeroed
	 * page is inserted into the pagecache when we have to serve a write
	 * fault on a hole. It should never be dirtied and can simply be
	 * dropped from the pagecache once we get real data for the page.
	 *
	 * XXX: This is racy against mmap, and there's nothing we can do about
	 * it. We'll eventually need to shift this down even further so that
	 * we can check if we allocated blocks over a hole first.
	 */
	if (mapping->nrpages) {
		ret = invalidate_inode_pages2_range(mapping,
				pos >> PAGE_SHIFT,
				(pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
		WARN_ON_ONCE(ret);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, iomap_dax_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(iomap_dax_rw);
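
/*
 * Illustrative sketch (not part of the original code): an iomap-based
 * filesystem would call iomap_dax_rw() from its ->read_iter/->write_iter once
 * it holds the inode lock for the required exclusion. myfs_iomap_ops is a
 * hypothetical iomap_ops instance provided by the filesystem.
 *
 *	static ssize_t myfs_dax_write_iter(struct kiocb *iocb,
 *					   struct iov_iter *from)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock(inode);
 *		ret = generic_write_checks(iocb, from);
 *		if (ret > 0)
 *			ret = iomap_dax_rw(iocb, from, &myfs_iomap_ops);
 *		inode_unlock(inode);
 *
 *		if (ret > 0)
 *			ret = generic_write_sync(iocb, ret);
 *		return ret;
 *	}
 */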
/**
 * iomap_dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in their fault
 * or mkwrite handler for DAX files. Assumes the caller has done all the
 * necessary locking for the page fault to proceed successfully.
 */
int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			struct iomap_ops *ops)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = 0;
	int error, major = 0;
	void *entry;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	entry = grab_mapping_entry(mapping, vmf->pgoff);
	if (IS_ERR(entry)) {
		error = PTR_ERR(entry);
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	/*
	 * Note that we don't bother to use iomap_apply here: DAX requires
	 * the file system block size to be equal to the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error)
		goto unlock_entry;
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;		/* fs corruption? */
		goto unlock_entry;
	}

	sector = iomap.blkno + (((pos & PAGE_MASK) - iomap.offset) >> 9);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
					vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto unlock_entry;
		if (!radix_tree_exceptional_entry(entry)) {
			vmf->page = entry;
			return VM_FAULT_LOCKED;
		}
		vmf->entry = entry;
		return VM_FAULT_DAX_LOCKED;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, sector,
				PAGE_SIZE, &entry, vma, vmf);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE))
			return dax_load_hole(mapping, entry, vmf);
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if (error < 0 && error != -EBUSY)
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL_GPL(iomap_dax_fault);
#endif /* CONFIG_FS_IOMAP */
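
/*
 * Illustrative sketch (not part of the original code): a fault handler built
 * on the iomap interface calls iomap_dax_fault() instead of dax_fault(),
 * again under a lock that serializes against truncate. myfs_iomap_ops,
 * MYFS_I() and the other myfs_* names are hypothetical placeholders.
 *
 *	static int myfs_iomap_dax_fault(struct vm_area_struct *vma,
 *					struct vm_fault *vmf)
 *	{
 *		struct inode *inode = file_inode(vma->vm_file);
 *		int ret;
 *
 *		down_read(&MYFS_I(inode)->myfs_dax_sem);
 *		ret = iomap_dax_fault(vma, vmf, &myfs_iomap_ops);
 *		up_read(&MYFS_I(inode)->myfs_dax_sem);
 *		return ret;
 *	}
 */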