#ifndef _LINUX_DAX_H
#define _LINUX_DAX_H

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <asm/pgtable.h>

struct iomap_ops;

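/*
 * DAX keeps per-page state for persistent-memory mappings as exceptional
 * entries in the mapping's radix tree rather than with struct pages;
 * dirty entries are tagged there so they can be written back later.
 * The bit below locks an individual entry, serializing e.g. concurrent
 * faults, or a fault against truncate, on the same index.
 */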
/* We use lowest available exceptional entry bit for locking */
#define RADIX_DAX_ENTRY_LOCK	(1 << RADIX_TREE_EXCEPTIONAL_SHIFT)

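/*
 * Two read/write entry points: iomap_dax_rw() is the newer iomap-based
 * path, dax_do_io() the older get_block_t-based one.  As a rough,
 * hypothetical sketch of the latter, a filesystem ->direct_IO method
 * might do something like this when the inode is DAX (ext2_get_block
 * and DIO_LOCKING are only illustrative choices):
 *
 *	if (IS_DAX(inode))
 *		return dax_do_io(iocb, inode, iter, ext2_get_block,
 *				 NULL, DIO_LOCKING);
 */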
ssize_t iomap_dax_rw(struct kiocb *iocb, struct iov_iter *iter,
		struct iomap_ops *ops);
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *,
		get_block_t, dio_iodone_t, int flags);
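/*
 * Helpers for zeroing DAX blocks directly in persistent memory:
 * dax_truncate_page() zeroes the partial page beyond the new EOF on
 * truncate (the DAX analogue of block_truncate_page()), while
 * dax_zero_page_range() zeroes a sub-page range starting at @from.
 */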
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
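/*
 * Page fault handlers: iomap_dax_fault() for iomap-based filesystems,
 * dax_fault() for the get_block_t-based path.  A hypothetical sketch of
 * a filesystem ->fault handler dispatching to the latter (example_fault
 * and ext2_get_block are only illustrative names):
 *
 *	static int example_fault(struct vm_area_struct *vma,
 *				 struct vm_fault *vmf)
 *	{
 *		if (IS_DAX(file_inode(vma->vm_file)))
 *			return dax_fault(vma, vmf, ext2_get_block);
 *		return filemap_fault(vma, vmf);
 *	}
 */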
int iomap_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			struct iomap_ops *ops);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t);
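/*
 * Radix tree entry manipulation: dax_delete_mapping_entry() removes a
 * DAX exceptional entry (used on the truncate/invalidate path), and
 * dax_wake_mapping_entry_waiter() wakes threads waiting on a locked
 * entry at the given index.
 */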
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, bool wake_all);

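/*
 * With CONFIG_FS_DAX the following are implemented by the DAX code in
 * fs/dax.c; without it the stubs below return an error, or BUG(), since
 * they should never be reached when DAX is compiled out.
 */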
#ifdef CONFIG_FS_DAX
struct page *read_dax_sector(struct block_device *bdev, sector_t n);
void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index);
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length);
#else
static inline struct page *read_dax_sector(struct block_device *bdev,
		sector_t n)
{
	return ERR_PTR(-ENXIO);
}
/* Shouldn't ever be called when dax is disabled. */
static inline void dax_unlock_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	BUG();
}
static inline int __dax_zero_page_range(struct block_device *bdev,
		sector_t sector, unsigned int offset, unsigned int length)
{
	return -ENXIO;
}
#endif

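/*
 * PMD-sized DAX faults are only possible with transparent huge pages;
 * without CONFIG_TRANSPARENT_HUGEPAGE the stub makes the caller fall
 * back to PTE-sized faults via VM_FAULT_FALLBACK.
 */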
#if defined(CONFIG_TRANSPARENT_HUGEPAGE)
int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
		unsigned int flags, get_block_t);
#else
static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, unsigned int flags, get_block_t gb)
{
	return VM_FAULT_FALLBACK;
}
#endif
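/*
 * dax_pfn_mkwrite() handles a write fault on an already-mapped,
 * read-only DAX pfn; dax_mkwrite() is simply an alias for dax_fault(),
 * since a DAX write fault goes through the same handler as a read fault.
 */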
int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *);
#define dax_mkwrite(vma, vmf, gb)	dax_fault(vma, vmf, gb)

static inline bool vma_is_dax(struct vm_area_struct *vma)
{
	return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}

static inline bool dax_mapping(struct address_space *mapping)
{
	return mapping->host && IS_DAX(mapping->host);
}
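/*
 * dax_writeback_mapping_range() flushes dirty DAX entries (tagged in
 * the mapping's radix tree) out to the persistent-memory block device,
 * so fsync/msync actually make data durable.  A hypothetical sketch of
 * a filesystem ->writepages method deferring to it:
 *
 *	if (dax_mapping(mapping))
 *		return dax_writeback_mapping_range(mapping,
 *				mapping->host->i_sb->s_bdev, wbc);
 */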
struct writeback_control;
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc);
#endif