Commit | Line | Data |
---|---|---|
c94c2acf MW |
1 | #ifndef _LINUX_DAX_H |
2 | #define _LINUX_DAX_H | |
3 | ||
4 | #include <linux/fs.h> | |
5 | #include <linux/mm.h> | |
4f622938 | 6 | #include <linux/radix-tree.h> |
c94c2acf MW |
7 | #include <asm/pgtable.h> |
8 | ||
e804315d JK |
9 | /* We use lowest available exceptional entry bit for locking */ |
10 | #define RADIX_DAX_ENTRY_LOCK (1 << RADIX_TREE_EXCEPTIONAL_SHIFT) | |
11 | ||
c8b8e32d | 12 | ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, |
c94c2acf | 13 | get_block_t, dio_iodone_t, int flags); |
c94c2acf MW |
14 | int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t); |
15 | int dax_truncate_page(struct inode *, loff_t from, get_block_t); | |
02fbd139 JK |
16 | int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); |
17 | int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t); | |
ac401cc7 JK |
18 | int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); |
19 | void dax_wake_mapping_entry_waiter(struct address_space *mapping, | |
20 | pgoff_t index, bool wake_all); | |
d1a5f2b4 DW |
21 | |
22 | #ifdef CONFIG_FS_DAX | |
23 | struct page *read_dax_sector(struct block_device *bdev, sector_t n); | |
bc2466e4 | 24 | void dax_unlock_mapping_entry(struct address_space *mapping, pgoff_t index); |
679c8bd3 CH |
25 | int __dax_zero_page_range(struct block_device *bdev, sector_t sector, |
26 | unsigned int offset, unsigned int length); | |
d1a5f2b4 DW |
27 | #else |
28 | static inline struct page *read_dax_sector(struct block_device *bdev, | |
29 | sector_t n) | |
30 | { | |
31 | return ERR_PTR(-ENXIO); | |
32 | } | |
bc2466e4 JK |
/*
 * Shouldn't ever be called when dax is disabled: with CONFIG_FS_DAX off
 * no radix-tree entry can have been locked by the DAX code in the first
 * place, so reaching this stub is a programming error — hence BUG().
 */
static inline void dax_unlock_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	BUG();
}
679c8bd3 CH |
39 | static inline int __dax_zero_page_range(struct block_device *bdev, |
40 | sector_t sector, unsigned int offset, unsigned int length) | |
41 | { | |
42 | return -ENXIO; | |
43 | } | |
d1a5f2b4 DW |
44 | #endif |
45 | ||
348e967a | 46 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) |
844f35db | 47 | int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, |
02fbd139 | 48 | unsigned int flags, get_block_t); |
844f35db | 49 | int __dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *, |
02fbd139 | 50 | unsigned int flags, get_block_t); |
844f35db MW |
51 | #else |
52 | static inline int dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr, | |
02fbd139 | 53 | pmd_t *pmd, unsigned int flags, get_block_t gb) |
844f35db MW |
54 | { |
55 | return VM_FAULT_FALLBACK; | |
56 | } | |
57 | #define __dax_pmd_fault dax_pmd_fault | |
58 | #endif | |
c94c2acf | 59 | int dax_pfn_mkwrite(struct vm_area_struct *, struct vm_fault *); |
02fbd139 JK |
60 | #define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb) |
61 | #define __dax_mkwrite(vma, vmf, gb) __dax_fault(vma, vmf, gb) | |
c94c2acf | 62 | |
4897c765 MW |
63 | static inline bool vma_is_dax(struct vm_area_struct *vma) |
64 | { | |
65 | return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); | |
66 | } | |
f9fe48be RZ |
67 | |
68 | static inline bool dax_mapping(struct address_space *mapping) | |
69 | { | |
70 | return mapping->host && IS_DAX(mapping->host); | |
71 | } | |
7f6d5b52 RZ |
72 | |
73 | struct writeback_control; | |
74 | int dax_writeback_mapping_range(struct address_space *mapping, | |
75 | struct block_device *bdev, struct writeback_control *wbc); | |
c94c2acf | 76 | #endif |