1 | #ifndef _LINUX_DAX_H | |
2 | #define _LINUX_DAX_H | |
3 | ||
4 | #include <linux/fs.h> | |
5 | #include <linux/mm.h> | |
6 | #include <linux/radix-tree.h> | |
7 | #include <asm/pgtable.h> | |
8 | ||
9 | struct iomap_ops; | |
10 | ||
11 | /* We use lowest available exceptional entry bit for locking */ | |
12 | #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) | |
13 | ||
14 | ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter, | |
15 | struct iomap_ops *ops); | |
16 | ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, | |
17 | get_block_t, dio_iodone_t, int flags); | |
18 | int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); | |
19 | int dax_truncate_page(struct inode *, loff_t from, get_block_t); | |
20 | int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf, | |
21 | struct iomap_ops *ops); | |
22 | int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); | |
23 | int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); | |
24 | void dax_wake_mapping_entry_waiter(struct address_space *mapping, | |
25 | pgoff_t index, bool wake_all); | |
26 | ||
27 | #ifdef CONFIG_FS_DAX | |
28 | struct page *read_dax_sector(struct block_device *bdev, sector_t n); | |
29 | void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index); | |
30 | int __dax_zero_page_range(struct block_device *bdev, sector_t sector, | |
31 | unsigned int offset, unsigned int length); | |
32 | #else | |
/*
 * CONFIG_FS_DAX=n stub: without FS DAX support there is no DAX sector
 * to read, so report the device as nonexistent.  ERR_PTR-encoded so
 * callers can use IS_ERR()/PTR_ERR() uniformly with the real variant.
 */
static inline struct page *read_dax_sector(struct block_device *bdev,
		sector_t n)
{
	return ERR_PTR(-ENXIO);
}
/* Shouldn't ever be called when dax is disabled. */
static inline void dax_unlock_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	/*
	 * A locked DAX radix-tree entry can only have been created by the
	 * CONFIG_FS_DAX code paths; reaching this stub means the caller
	 * holds a lock that cannot exist, so crash loudly rather than
	 * silently continue with corrupted locking state.
	 */
	BUG();
}
/*
 * CONFIG_FS_DAX=n stub: zeroing via the DAX path is unavailable, so
 * fail with -ENXIO and let the caller fall back to buffered zeroing.
 */
static inline int __dax_zero_page_range(struct block_device *bdev,
		sector_t sector, unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
49 | #endif | |
50 | ||
51 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) | |
52 | int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, | |
53 | unsigned int flags, get_block_t); | |
54 | #else | |
/*
 * CONFIG_TRANSPARENT_HUGEPAGE=n stub: PMD-sized DAX mappings are
 * impossible, so tell the fault handler to fall back to PTE-sized
 * (single page) faults instead of failing the fault outright.
 */
static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, unsigned int flags, get_block_t gb)
{
	return VM_FAULT_FALLBACK;
}
60 | #endif | |
61 | int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); | |
62 | #define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) | |
63 | ||
/*
 * vma_is_dax - does this VMA map a file on a DAX-enabled inode?
 * @vma: the virtual memory area to test
 *
 * Anonymous VMAs (vm_file == NULL) are never DAX; for file-backed VMAs
 * we check the S_DAX flag on the backing inode via IS_DAX().
 */
static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}
68 | ||
/*
 * dax_mapping - does this address_space belong to a DAX-enabled inode?
 * @mapping: the address_space to test
 *
 * The host inode may be NULL for special mappings (e.g. swapper space),
 * so guard the IS_DAX() check against that.
 */
static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}
73 | ||
74 | struct writeback_control; | |
75 | int dax_writeback_mapping_range(struct address_space *mapping, | |
76 | struct block_device *bdev, struct writeback_control *wbc); | |
77 | #endif |