/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/sizes.h>

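/*
 * Descriptive note (not in the original source): dax_map_atomic() and
 * dax_unmap_atomic() bracket access to the persistent memory backing a
 * block device.  dax_map_atomic() takes a queue reference via
 * blk_queue_enter() so the device cannot be torn down while its memory
 * is in use, translates dax->sector into a kernel virtual address and
 * pfn via bdev_direct_access(), and returns the number of contiguous
 * bytes available at dax->addr (or a negative errno).  Every successful
 * map must be paired with dax_unmap_atomic() to drop the reference.
 */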
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = (void __pmem *) ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = (void __pmem *) ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

/*
 * dax_clear_blocks() is called from within transaction context from XFS,
 * and hence this means the stack from this point must follow GFP_NOFS
 * semantics for all operations.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct blk_dax_ctl dax = {
		.sector = block << (inode->i_blkbits - 9),
		.size = _size,
	};

	might_sleep();
	do {
		long count, sz;

		count = dax_map_atomic(bdev, &dax);
		if (count < 0)
			return count;
		sz = min_t(long, count, SZ_128K);
		clear_pmem(dax.addr, sz);
		dax.size -= sz;
		dax.sector += sz / 512;
		dax_unmap_atomic(bdev, &dax);
		cond_resched();
	} while (dax.size);

	wmb_pmem();
	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);

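/*
 * Worked example of the sector arithmetic above (editorial note):
 * sectors are 512 bytes, hence the "- 9" shift and the "/ 512".  With
 * 4K filesystem blocks (i_blkbits == 12), block N starts at sector
 * N << 3, and each 128K chunk cleared advances dax.sector by
 * SZ_128K / 512 = 256 sectors.
 */
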
/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
		loff_t pos, loff_t end)
{
	loff_t final = end - pos + first; /* The final byte of the buffer */

	if (first > 0)
		clear_pmem(addr, first);
	if (final < size)
		clear_pmem(addr + final, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

static sector_t to_sector(const struct buffer_head *bh,
		const struct inode *inode)
{
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);

	return sector;
}

static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	loff_t pos = start, max = start, bh_max = start;
	bool hole = false, need_wmb = false;
	struct block_device *bdev = NULL;
	int rw = iov_iter_rw(iter), rc = 0;
	long len, map_len = 0;
	struct blk_dax_ctl dax = {
		.addr = (void __pmem *) ERR_PTR(-EIO),
	};

	if (rw == READ)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			long page = pos >> PAGE_SHIFT;
			sector_t block = page << (PAGE_SHIFT - blkbits);
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				rc = get_block(inode, block, bh, rw == WRITE);
				if (rc)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
				bdev = bh->b_bdev;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = rw == READ && !buffer_written(bh);
			if (hole) {
				size = bh->b_size - first;
			} else {
				dax_unmap_atomic(bdev, &dax);
				dax.sector = to_sector(bh, inode);
				dax.size = bh->b_size;
				map_len = dax_map_atomic(bdev, &dax);
				if (map_len < 0) {
					rc = map_len;
					break;
				}
				if (buffer_unwritten(bh) || buffer_new(bh)) {
					dax_new_buf(dax.addr, map_len, first,
							pos, end);
					need_wmb = true;
				}
				dax.addr += first;
				size = map_len - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE) {
			len = copy_from_iter_pmem(dax.addr, max - pos, iter);
			need_wmb = true;
		} else if (!hole)
			len = copy_to_iter((void __force *) dax.addr, max - pos,
					iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len) {
			rc = -EFAULT;
			break;
		}

		pos += len;
		if (!IS_ERR(dax.addr))
			dax.addr += len;
	}

	if (need_wmb)
		wmb_pmem();
	dax_unmap_atomic(bdev, &dax);

	return (pos == start) ? rc : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	if (!(flags & DIO_SKIP_DIO_COUNT))
		inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);

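/*
 * Illustrative caller (a sketch; the "example_*" names are hypothetical
 * and not part of this file): a filesystem's ->direct_IO method for a
 * DAX inode can forward straight to dax_do_io():
 *
 *	static ssize_t example_direct_IO(struct kiocb *iocb,
 *			struct iov_iter *iter, loff_t offset)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *
 *		return dax_do_io(iocb, inode, iter, offset,
 *				example_get_block, NULL, DIO_LOCKING);
 *	}
 */
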
/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;

	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

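/*
 * Descriptive note (not in the original source): copy one page's worth
 * of data from the backing device into @to, for servicing a COW fault
 * (vmf->cow_page).  On mapping failure the errno is recovered from
 * dax.addr, which dax_map_atomic() left as an ERR_PTR().
 */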
static int copy_user_bh(struct page *to, struct inode *inode,
		struct buffer_head *bh, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	struct block_device *bdev = bh->b_bdev;
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

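/*
 * Descriptive note (not in the original source): install a PTE for the
 * pfn backing this block.  Freshly allocated or unwritten blocks are
 * zeroed first (their contents are undefined), then the pfn is mapped
 * into the page tables with vm_insert_mixed().
 */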
static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	struct address_space *mapping = inode->i_mapping;
	struct block_device *bdev = bh->b_bdev;
	struct blk_dax_ctl dax = {
		.sector = to_sector(bh, inode),
		.size = bh->b_size,
	};
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	if (dax_map_atomic(bdev, &dax) < 0) {
		error = PTR_ERR(dax.addr);
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh)) {
		clear_pmem(dax.addr, PAGE_SIZE);
		wmb_pmem();
	}
	dax_unmap_atomic(bdev, &dax);

	error = vm_insert_mixed(vma, vaddr, dax.pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten blocks
 *	to written so the data written to them is exposed.  This is required
 *	by write faults for filesystems that will return unwritten extent
 *	mappings from @get_block, but it is optional for reads as
 *	dax_insert_mapping() will always zero unwritten blocks.  If the fs
 *	does not support unwritten extents, it should pass NULL.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;

		if (buffer_written(&bh))
			error = copy_user_bh(new_page, inode, &bh, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh)) {
		if (complete_unwritten)
			complete_unwritten(&bh, !error);
		else
			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
	}

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);

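/*
 * Illustrative wiring (a sketch; the "example_*" names are hypothetical
 * and not part of this file): a filesystem typically points its
 * vm_operations_struct at a thin wrapper around dax_fault():
 *
 *	static int example_filemap_fault(struct vm_area_struct *vma,
 *			struct vm_fault *vmf)
 *	{
 *		return dax_fault(vma, vmf, example_get_block, NULL);
 *	}
 *
 *	static const struct vm_operations_struct example_vm_ops = {
 *		.fault	= example_filemap_fault,
 *	};
 */
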
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below function.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
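/*
 * Worked example (editorial note): with 4K pages and 2M PMDs (x86_64),
 * PG_PMD_COLOUR is 511, so (pgoff | PG_PMD_COLOUR) is the index of the
 * last page the PMD would cover, and (dax.pfn & PG_PMD_COLOUR) tests
 * whether a pfn is aligned to the start of a 2M extent.
 */
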
int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
		pmd_t *pmd, unsigned int flags, get_block_t get_block,
		dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct buffer_head bh;
	unsigned blkbits = inode->i_blkbits;
	unsigned long pmd_addr = address & PMD_MASK;
	bool write = flags & FAULT_FLAG_WRITE;
	struct block_device *bdev;
	pgoff_t size, pgoff;
	sector_t block;
	int result = 0;

	/* dax pmd mappings are broken wrt gup and fork */
	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
		return VM_FAULT_FALLBACK;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		return VM_FAULT_FALLBACK;
	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		return VM_FAULT_FALLBACK;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		return VM_FAULT_FALLBACK;

	pgoff = linear_page_index(vma, pmd_addr);
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size)
		return VM_FAULT_SIGBUS;
	/* If the PMD would cover blocks out of the file */
	if ((pgoff | PG_PMD_COLOUR) >= size)
		return VM_FAULT_FALLBACK;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);

	bh.b_size = PMD_SIZE;
	if (get_block(inode, block, &bh, write) != 0)
		return VM_FAULT_SIGBUS;
	bdev = bh.b_bdev;
	i_mmap_lock_read(mapping);

	/*
	 * If the filesystem isn't willing to tell us the length of a hole,
	 * just fall back to PTEs.  Calling get_block 512 times in a loop
	 * would be silly.
	 */
	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
		goto fallback;

	/*
	 * If we allocated new storage, make sure no process has any
	 * zero pages covering this hole
	 */
	if (buffer_new(&bh)) {
		i_mmap_unlock_read(mapping);
		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
		i_mmap_lock_read(mapping);
	}

	/*
	 * If a truncate happened while we were allocating blocks, we may
	 * leave blocks allocated to the file that are beyond EOF.  We can't
	 * take i_mutex here, so just leave them hanging; they'll be freed
	 * when the file is deleted.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= size) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}
	if ((pgoff | PG_PMD_COLOUR) >= size)
		goto fallback;

	if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
		spinlock_t *ptl;
		pmd_t entry;
		struct page *zero_page = get_huge_zero_page();

		if (unlikely(!zero_page))
			goto fallback;

		ptl = pmd_lock(vma->vm_mm, pmd);
		if (!pmd_none(*pmd)) {
			spin_unlock(ptl);
			goto fallback;
		}

		entry = mk_pmd(zero_page, vma->vm_page_prot);
		entry = pmd_mkhuge(entry);
		set_pmd_at(vma->vm_mm, pmd_addr, pmd, entry);
		result = VM_FAULT_NOPAGE;
		spin_unlock(ptl);
	} else {
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PMD_SIZE,
		};
		long length = dax_map_atomic(bdev, &dax);

		if (length < 0) {
			result = VM_FAULT_SIGBUS;
			goto out;
		}
		if ((length < PMD_SIZE) || (dax.pfn & PG_PMD_COLOUR)) {
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		/*
		 * TODO: teach vmf_insert_pfn_pmd() to support
		 * 'pte_special' for pmds
		 */
		if (pfn_valid(dax.pfn)) {
			dax_unmap_atomic(bdev, &dax);
			goto fallback;
		}

		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
			clear_pmem(dax.addr, PMD_SIZE);
			wmb_pmem();
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			result |= VM_FAULT_MAJOR;
		}
		dax_unmap_atomic(bdev, &dax);

		result |= vmf_insert_pfn_pmd(vma, address, pmd, dax.pfn, write);
	}

 out:
	i_mmap_unlock_read(mapping);

	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));

	return result;

 fallback:
	count_vm_event(THP_FAULT_FALLBACK);
	result = VM_FAULT_FALLBACK;
	goto out;
}
EXPORT_SYMBOL_GPL(__dax_pmd_fault);

/**
 * dax_pmd_fault - handle a PMD fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * When a page fault occurs, filesystems may call this helper in their
 * pmd_fault handler for DAX files.
 */
int dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			pmd_t *pmd, unsigned int flags, get_block_t get_block,
			dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_pmd_fault(vma, address, pmd, flags, get_block,
				complete_unwritten);
	if (flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_pmd_fault);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

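/*
 * Usage note (editorial, not from the original source): this helper is
 * intended as a filesystem's ->pfn_mkwrite handler in its
 * vm_operations_struct.  A DAX pfn mapping carries no struct page to
 * dirty, so all that is done here is taking the superblock freeze
 * protection and updating the file timestamps.
 */
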
/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		struct block_device *bdev = bh.b_bdev;
		struct blk_dax_ctl dax = {
			.sector = to_sector(&bh, inode),
			.size = PAGE_CACHE_SIZE,
		};

		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		wmb_pmem();
		dax_unmap_atomic(bdev, &dax);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;

	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);