/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */
#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
/*
 * We use the lowest available bit in an exceptional entry for locking, and
 * the other two bits to determine the entry type. In total, three special
 * bits.
 */
#define RADIX_DAX_SHIFT	(RADIX_TREE_EXCEPTIONAL_SHIFT + 3)
#define RADIX_DAX_PTE (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 1))
#define RADIX_DAX_PMD (1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
#define RADIX_DAX_TYPE_MASK (RADIX_DAX_PTE | RADIX_DAX_PMD)
#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_TYPE_MASK)
#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
		RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE) | \
		RADIX_TREE_EXCEPTIONAL_ENTRY))
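
/*
 * Illustrative sketch (not part of the original file): how a PTE-sized
 * entry round-trips through the macros above. The sector value 0x100 is
 * arbitrary; the lock, type and exceptional bits all sit below
 * RADIX_DAX_SHIFT, so the right shift recovers the sector exactly.
 *
 *	void *entry = RADIX_DAX_ENTRY(0x100, false);
 *
 *	WARN_ON(RADIX_DAX_TYPE(entry) != RADIX_DAX_PTE);
 *	WARN_ON(RADIX_DAX_SECTOR(entry) != 0x100);
 */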
/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);
static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
					      pgoff_t index)
{
	unsigned long hash = hash_long((unsigned long)mapping ^ index,
				       DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = (void __pmem *) ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = (void __pmem *) ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}
static void dax_unmap_atomic(struct block_device *bdev,
			     const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}
static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}
/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}
static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc;
	long map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};
	unsigned blkbits = inode->i_blkbits;
	sector_t file_blks = (i_size_read(inode) + (1 << blkbits) - 1)
								>> blkbits;

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		size_t len;
		if (pos == max) {
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh,
					       rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
				/*
				 * We allow uninitialized buffers for writes
				 * beyond EOF as those cannot race with faults
				 */
				WARN_ON_ONCE(
					(buffer_new(bh) && block < file_blks) ||
					(rw == WRITE && buffer_unwritten(bh)));
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				dax.addr += first;
				size = map_len - first;
			}
			/*
			 * pos + size is one past the last offset for IO,
			 * so pos + size can overflow loff_t at extreme offsets.
			 * Cast to u64 to catch this and get the true minimum.
			 */
			max = min_t(u64, pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}
/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t pos = iocb->ki_pos;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_lock(inode);

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		inode_unlock(inode);

	if (end_io) {
		int err;

		err = end_io(iocb, pos, retval, bh.b_private);
		if (err)
			retval = err;
	}

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	unsigned long index;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};
static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->index != ewait->key.index)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}
/*
 * Check whether the given slot is locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}
/*
 * Mark the given slot as locked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}
/*
 * Mark the given slot as unlocked. The function must be called with
 * mapping->tree_lock held.
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(slot, (void *)entry);
	return (void *)entry;
}
/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry, and return it. The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and wants to
 * unlock it again.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *ret, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;
	ewait.key.mapping = mapping;
	ewait.key.index = index;

	for (;;) {
		ret = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!ret || !radix_tree_exceptional_entry(ret) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return ret;
		}
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}
/*
 * Find the radix tree entry at the given index. If it points to a page,
 * return with the page locked. If it points to an exceptional entry, return
 * with the radix tree entry locked. If the radix tree doesn't contain the
 * given index, create an empty exceptional entry for the index and return
 * with it locked.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it improves performance.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *ret, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	ret = get_unlocked_mapping_entry(mapping, index, &slot);
	/* No entry for given index? Make sure radix tree is big enough. */
	if (!ret) {
		int err;

		spin_unlock_irq(&mapping->tree_lock);
		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err)
			return ERR_PTR(err);
		ret = (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY |
			       RADIX_DAX_ENTRY_LOCK);
		spin_lock_irq(&mapping->tree_lock);
		err = radix_tree_insert(&mapping->page_tree, index, ret);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/* Someone already created the entry? */
			if (err == -EEXIST)
				goto restart;
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return ret;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(ret)) {
		struct page *page = ret;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	ret = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
				   pgoff_t index, bool wake_all)
{
	wait_queue_head_t *wq = dax_entry_waitqueue(mapping, index);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq)) {
		struct exceptional_entry_key key;

		key.mapping = mapping;
		key.index = index;
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
	}
}
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *ret, **slot;

	spin_lock_irq(&mapping->tree_lock);
	ret = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!ret || !radix_tree_exceptional_entry(ret) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, false);
}
static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}
/*
 * Called when we are done with the radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, false);
}
/*
 * Delete the exceptional DAX entry at @index from @mapping. Wait for the
 * radix tree entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	void *entry;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen an exceptional entry for this index, we better find
	 * it at that index as well...
	 */
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;
	}
	radix_tree_delete(&mapping->page_tree, index);
	mapping->nrexceptional--;
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, true);

	return 1;
}
/*
 * The user has performed a load from a hole in the file. Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files. We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct page *page;

	/* Hole page already exists? Return it... */
	if (!radix_tree_exceptional_entry(entry)) {
		vmf->page = entry;
		return VM_FAULT_LOCKED;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page) {
		put_locked_mapping_entry(mapping, vmf->pgoff, entry);
		return VM_FAULT_OOM;
	}
	vmf->page = page;
	return VM_FAULT_LOCKED;
}
static int copy_user_bh(struct page *to, struct inode *inode,
		struct buffer_head *bh, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	struct block_device *bdev = bh->b_bdev;
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}
#define DAX_PMD_INDEX(page_index) (page_index & (PMD_MASK >> PAGE_SHIFT))
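
/*
 * Illustrative note (not part of the original file): DAX_PMD_INDEX()
 * rounds a page index down to the first index of the PMD-sized region
 * containing it. With 4K pages and 2M PMDs, for example:
 *
 *	BUILD_BUG_ON(DAX_PMD_INDEX(513) != 512);
 */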
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = (void *)((unsigned long)RADIX_DAX_ENTRY(sector, false) |
		       RADIX_DAX_ENTRY_LOCK);
	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = radix_tree_insert(page_tree, index, new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else {
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, NULL, &slot);
		WARN_ON_ONCE(ret != entry);
		radix_tree_replace_slot(slot, new_entry);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need the hole page anymore, it has been replaced
		 * with a locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}
static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int type = RADIX_DAX_TYPE(entry);
	struct radix_tree_node *node;
	struct blk_dax_ctl dax;
	void **slot;
	int ret = 0;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;

	/* another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto unlock;

	if (WARN_ON_ONCE(type != RADIX_DAX_PTE && type != RADIX_DAX_PMD)) {
		ret = -EIO;
		goto unlock;
	}

	dax.sector = RADIX_DAX_SECTOR(entry);
	dax.size = (type == RADIX_DAX_PMD ? PMD_SIZE : PAGE_SIZE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0)
		return ret;

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	wb_cache_pmem(dax.addr, dax.size);

	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	return ret;

 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index, pmd_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;
	void *entry;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;
	pmd_index = DAX_PMD_INDEX(start_index);

	rcu_read_lock();
	entry = radix_tree_lookup(&mapping->page_tree, pmd_index);
	rcu_read_unlock();

	/* see if the start of our range is covered by a PMD entry */
	if (entry && RADIX_DAX_TYPE(entry) == RADIX_DAX_PMD)
		start_index = pmd_index;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
static int dax_insert_mapping(struct address_space *mapping,
		struct buffer_head *bh, void **entryp,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, mapping->host),
		.size = bh->b_size,
	};
	void *ret;
	void *entry = *entryp;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, dax.pfn);
}
/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	void *entry;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;

	entry = grab_mapping_entry(mapping, vmf->pgoff);
	if (IS_ERR(entry)) {
		error = PTR_ERR(entry);
		goto out;
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_entry;

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, inode, &bh, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_entry;
		if (!radix_tree_exceptional_entry(entry)) {
			vmf->page = entry;
			return VM_FAULT_LOCKED;
		}
		vmf->entry = entry;
		return VM_FAULT_DAX_LOCKED;
	}

	if (!buffer_mapped(&bh)) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_entry;
		} else {
			return dax_load_hole(mapping, entry, vmf);
		}
	}

	/* Filesystem should not return unwritten buffers to us! */
	WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	error = dax_insert_mapping(mapping, &bh, &entry, vma, vmf);
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;
}
EXPORT_SYMBOL(__dax_fault);
/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
 * The 'colour' (i.e. low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
static void __dax_dbg(struct buffer_head *bh, unsigned long address,
		const char *reason, const char *fn)
{
	if (bh) {
		char bname[BDEVNAME_SIZE];
		bdevname(bh->b_bdev, bname);
		pr_debug("%s: %s addr: %lx dev %s state %lx start %lld "
			"length %zd fallback: %s\n", fn, current->comm,
			address, bname, bh->b_state, (u64)bh->b_blocknr,
			bh->b_size, reason);
	} else {
		pr_debug("%s: %s addr: %lx fallback: %s\n", fn,
			current->comm, address, reason);
	}
}

#define dax_pmd_dbg(bh, address, reason)	__dax_dbg(bh, address, reason, "dax_pmd")
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int result = 0;
	bool alloc = false;

	/* dax pmd mappings require pfn_t_devmap() */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED)) {
		split_huge_pmd(vma, pmd, address);
		dax_pmd_dbg(NULL, address, "cow write");
		return VM_FAULT_FALLBACK;
	}
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start) {
		dax_pmd_dbg(NULL, address, "vma start unaligned");
		return VM_FAULT_FALLBACK;
	}
	if ((pmd_addr + PMD_SIZE) > vma->vm_end) {
		dax_pmd_dbg(NULL, address, "vma end unaligned");
		return VM_FAULT_FALLBACK;
	}

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size) {
		dax_pmd_dbg(NULL, address,
				"offset + huge page size > file size");
		return VM_FAULT_FALLBACK;
	}

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;

	if (get_block(inode, block, &bh, 0) != 0)
		return VM_FAULT_SIGBUS;

	if (!buffer_mapped(&bh) && write) {
		if (get_block(inode, block, &bh, 1) != 0)
			return VM_FAULT_SIGBUS;
		alloc = true;
		WARN_ON_ONCE(buffer_unwritten(&bh) || buffer_new(&bh));
	}

	bdev = bh.b_bdev;

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE) {
		dax_pmd_dbg(&bh, address, "allocated block too small");
		return VM_FAULT_FALLBACK;
	}

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (alloc) {
		loff_t lstart = pgoff << PAGE_SHIFT;
		loff_t lend = lstart + PMD_SIZE - 1; /* inclusive */

		truncate_pagecache_range(inode, lstart, lend);
	}

	if (!write && !buffer_mapped(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page)) {
			dax_pmd_dbg(&bh, address, "no zero page");
			goto fallback;
		}

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			dax_pmd_dbg(&bh, address, "pmd already present");
			goto fallback;
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: <zero> sect: %llx\n",
				__func__, current->comm, address,
				(unsigned long long) to_sector(&bh, inode));

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			dax_pmd_dbg(&bh, address, "dax-error fallback");
			goto fallback;
		}
		if (length < PMD_SIZE) {
			dax_pmd_dbg(&bh, address, "dax-length too small");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}
		if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR) {
			dax_pmd_dbg(&bh, address, "pfn unaligned");
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (!pfn_t_devmap(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			dax_pmd_dbg(&bh, address, "pfn not in memmap");
			goto fallback;
		}
		dax_unmap_atomic(bdev, &dax);

		/*
		 * For PTE faults we insert a radix tree entry for reads, and
		 * leave it clean.  Then on the first write we dirty the radix
		 * tree entry via the dax_pfn_mkwrite() path.  This sequence
		 * allows the dax_pfn_mkwrite() call to be simpler and avoid a
		 * call into get_block() to translate the pgoff to a sector in
		 * order to be able to create a new radix tree entry.
		 *
		 * The PMD path doesn't have an equivalent to
		 * dax_pfn_mkwrite(), though, so for a read followed by a
		 * write we traverse all the way through __dax_pmd_fault()
		 * twice.  This means we can just skip inserting a radix tree
		 * entry completely on the initial read and just wait until
		 * the write to insert a dirty entry.
		 */
		if (write) {
			/*
			 * We should insert a radix-tree entry and dirty it
			 * here.  For now this is broken...
			 */
		}

		dev_dbg(part_to_dev(bdev->bd_part),
				"%s: %s addr: %lx pfn: %lx sect: %llx\n",
				__func__, current->comm, address,
				pfn_t_to_pfn(dax.pfn),
				(unsigned long long) dax.sector);
		result |= vmf_insert_pfn_pmd(vma, address, pmd,
				dax.pfn, write);
	}

 out:
	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);
/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @address: The faulting virtual address
 * @pmd: The PMD entry to install a huge mapping in
 * @flags: The fault flags (FAULT_FLAG_*)
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	put_unlocked_mapping_entry(mapping, index, entry);
out:
	spin_unlock_irq(&mapping->tree_lock);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector		= sector,
		.size		= PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				length >> 9, GFP_NOFS, true);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		wmb_pmem();
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (PAGE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_bdev = inode->i_sb->s_bdev;
	bh.b_size = PAGE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0 || !buffer_written(&bh))
		return err;

	return __dax_zero_page_range(bh.b_bdev, to_sector(&bh, inode),
			offset, length);
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_ALIGN(from) - from;

	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);