.. | .. |
---|
5 | 5 | #include <linux/fs.h> |
---|
6 | 6 | #include <linux/mm.h> |
---|
7 | 7 | #include <linux/radix-tree.h> |
---|
8 | | -#include <asm/pgtable.h> |
---|
| 8 | + |
---|
| 9 | +/* Flag for synchronous flush */ |
---|
| 10 | +#define DAXDEV_F_SYNC (1UL << 0) |
---|
| 11 | + |
---|
| 12 | +typedef unsigned long dax_entry_t; |
---|
9 | 13 | |
---|
10 | 14 | struct iomap_ops; |
---|
| 15 | +struct iomap; |
---|
11 | 16 | struct dax_device; |
---|
12 | 17 | struct dax_operations { |
---|
13 | 18 | /* |
---|
.. | .. |
---|
17 | 22 | */ |
---|
18 | 23 | long (*direct_access)(struct dax_device *, pgoff_t, long, |
---|
19 | 24 | void **, pfn_t *); |
---|
| 25 | + /* |
---|
| 26 | + * Validate whether this device is usable as an fsdax backing |
---|
| 27 | + * device. |
---|
| 28 | + */ |
---|
| 29 | + bool (*dax_supported)(struct dax_device *, struct block_device *, int, |
---|
| 30 | + sector_t, sector_t); |
---|
20 | 31 | /* copy_from_iter: required operation for fs-dax direct-i/o */ |
---|
21 | 32 | size_t (*copy_from_iter)(struct dax_device *, pgoff_t, void *, size_t, |
---|
22 | 33 | struct iov_iter *); |
---|
23 | 34 | /* copy_to_iter: required operation for fs-dax direct-i/o */ |
---|
24 | 35 | size_t (*copy_to_iter)(struct dax_device *, pgoff_t, void *, size_t, |
---|
25 | 36 | struct iov_iter *); |
---|
| 37 | + /* zero_page_range: required operation. Zero page range */ |
---|
| 38 | + int (*zero_page_range)(struct dax_device *, pgoff_t, size_t); |
---|
26 | 39 | }; |
---|
27 | 40 | |
---|
28 | 41 | extern struct attribute_group dax_attribute_group; |
---|
.. | .. |
---|
30 | 43 | #if IS_ENABLED(CONFIG_DAX) |
---|
31 | 44 | struct dax_device *dax_get_by_host(const char *host); |
---|
32 | 45 | struct dax_device *alloc_dax(void *private, const char *host, |
---|
33 | | - const struct dax_operations *ops); |
---|
| 46 | + const struct dax_operations *ops, unsigned long flags); |
---|
34 | 47 | void put_dax(struct dax_device *dax_dev); |
---|
35 | 48 | void kill_dax(struct dax_device *dax_dev); |
---|
36 | 49 | void dax_write_cache(struct dax_device *dax_dev, bool wc); |
---|
37 | 50 | bool dax_write_cache_enabled(struct dax_device *dax_dev); |
---|
bool __dax_synchronous(struct dax_device *dax_dev);
/*
 * Wrapper for __dax_synchronous().  NOTE(review): presumably reports the
 * DAXDEV_F_SYNC flag handed to alloc_dax() — confirm in the dax core.
 */
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return __dax_synchronous(dax_dev);
}
void __set_dax_synchronous(struct dax_device *dax_dev);
/* Wrapper for __set_dax_synchronous(): mark the device synchronous. */
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
	__set_dax_synchronous(dax_dev);
}
---|
bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev,
		int blocksize, sector_t start, sector_t len);
/*
 * Check if given mapping is supported by the file / underlying device.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	/* Mappings that did not set VM_SYNC are always supported. */
	if (!(vma->vm_flags & VM_SYNC))
		return true;
	/* VM_SYNC requires the backing file to be in DAX mode... */
	if (!IS_DAX(file_inode(vma->vm_file)))
		return false;
	/* ...and the dax device to report synchronous operation. */
	return dax_synchronous(dax_dev);
}
---|
38 | 75 | #else |
---|
/* CONFIG_DAX disabled: no dax_device can be looked up by host name. */
static inline struct dax_device *dax_get_by_host(const char *host)
{
	return NULL;
}
---|
43 | 80 | static inline struct dax_device *alloc_dax(void *private, const char *host, |
---|
44 | | - const struct dax_operations *ops) |
---|
| 81 | + const struct dax_operations *ops, unsigned long flags) |
---|
45 | 82 | { |
---|
46 | 83 | /* |
---|
47 | 84 | * Callers should check IS_ENABLED(CONFIG_DAX) to know if this |
---|
.. | .. |
---|
62 | 99 | { |
---|
63 | 100 | return false; |
---|
64 | 101 | } |
---|
/* CONFIG_DAX disabled: no dax_device state exists; report synchronous. */
static inline bool dax_synchronous(struct dax_device *dax_dev)
{
	return true;
}
/* No-op: there is no device flag to set without CONFIG_DAX. */
static inline void set_dax_synchronous(struct dax_device *dax_dev)
{
}
/* Without CONFIG_DAX no device can back fsdax. */
static inline bool dax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t len)
{
	return false;
}
/*
 * Without CONFIG_DAX only mappings that did not set VM_SYNC are
 * supported.
 */
static inline bool daxdev_mapping_supported(struct vm_area_struct *vma,
		struct dax_device *dax_dev)
{
	return !(vma->vm_flags & VM_SYNC);
}
---|
65 | 120 | #endif |
---|
66 | 121 | |
---|
67 | 122 | struct writeback_control; |
---|
.. | .. |
---|
73 | 128 | return __bdev_dax_supported(bdev, blocksize); |
---|
74 | 129 | } |
---|
75 | 130 | |
---|
76 | | -static inline struct dax_device *fs_dax_get_by_host(const char *host) |
---|
bool __generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors);
/*
 * Forwards to __generic_fsdax_supported().  NOTE(review): the signature
 * matches struct dax_operations::dax_supported — presumably intended as
 * the default implementation of that callback; confirm against users.
 */
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return __generic_fsdax_supported(dax_dev, bdev, blocksize, start,
			sectors);
}
---|
80 | 141 | |
---|
81 | 142 | static inline void fs_put_dax(struct dax_device *dax_dev) |
---|
.. | .. |
---|
85 | 146 | |
---|
86 | 147 | struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev); |
---|
87 | 148 | int dax_writeback_mapping_range(struct address_space *mapping, |
---|
88 | | - struct block_device *bdev, struct writeback_control *wbc); |
---|
| 149 | + struct dax_device *dax_dev, struct writeback_control *wbc); |
---|
89 | 150 | |
---|
90 | 151 | struct page *dax_layout_busy_page(struct address_space *mapping); |
---|
91 | | -bool dax_lock_mapping_entry(struct page *page); |
---|
92 | | -void dax_unlock_mapping_entry(struct page *page); |
---|
| 152 | +struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end); |
---|
| 153 | +dax_entry_t dax_lock_page(struct page *page); |
---|
| 154 | +void dax_unlock_page(struct page *page, dax_entry_t cookie); |
---|
93 | 155 | #else |
---|
94 | 156 | static inline bool bdev_dax_supported(struct block_device *bdev, |
---|
95 | 157 | int blocksize) |
---|
.. | .. |
---|
97 | 159 | return false; |
---|
98 | 160 | } |
---|
99 | 161 | |
---|
100 | | -static inline struct dax_device *fs_dax_get_by_host(const char *host) |
---|
/* No CONFIG_FS_DAX: no block device can pass the generic fsdax checks. */
static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
		struct block_device *bdev, int blocksize, sector_t start,
		sector_t sectors)
{
	return false;
}
---|
104 | 168 | |
---|
105 | 169 | static inline void fs_put_dax(struct dax_device *dax_dev) |
---|
.. | .. |
---|
116 | 180 | return NULL; |
---|
117 | 181 | } |
---|
118 | 182 | |
---|
| 183 | +static inline struct page *dax_layout_busy_page_range(struct address_space *mapping, pgoff_t start, pgoff_t nr_pages) |
---|
| 184 | +{ |
---|
| 185 | + return NULL; |
---|
| 186 | +} |
---|
| 187 | + |
---|
/* No CONFIG_FS_DAX: writeback of a DAX mapping is not supported. */
static inline int dax_writeback_mapping_range(struct address_space *mapping,
		struct dax_device *dax_dev, struct writeback_control *wbc)
{
	return -EOPNOTSUPP;
}
---|
124 | 193 | |
---|
/*
 * !CONFIG_FS_DAX stubs.  dax_lock_page() returns an opaque cookie
 * (dax_entry_t): non-zero (~0UL) when the page's host inode is in DAX
 * mode, 0 otherwise.  dax_unlock_page() takes the cookie back and does
 * nothing.  NOTE(review): page->mapping is dereferenced unconditionally —
 * callers presumably guarantee a valid mapping; confirm at call sites.
 */
static inline dax_entry_t dax_lock_page(struct page *page)
{
	if (IS_DAX(page->mapping->host))
		return ~0UL;
	return 0;
}

static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
{
}
---|
135 | 204 | #endif |
---|
136 | 205 | |
---|
#if IS_ENABLED(CONFIG_DAX)
int dax_read_lock(void);
void dax_read_unlock(int id);
#else
/* CONFIG_DAX disabled: the read lock degenerates to a no-op token. */
static inline int dax_read_lock(void)
{
	return 0;
}

static inline void dax_read_unlock(int id)
{
}
#endif /* CONFIG_DAX */
---|
139 | 219 | bool dax_alive(struct dax_device *dax_dev); |
---|
140 | 220 | void *dax_get_private(struct dax_device *dax_dev); |
---|
141 | 221 | long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, |
---|
.. | .. |
---|
144 | 224 | size_t bytes, struct iov_iter *i); |
---|
145 | 225 | size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, |
---|
146 | 226 | size_t bytes, struct iov_iter *i); |
---|
| 227 | +int dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff, |
---|
| 228 | + size_t nr_pages); |
---|
147 | 229 | void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); |
---|
148 | 230 | |
---|
149 | 231 | ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, |
---|
.. | .. |
---|
155 | 237 | int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); |
---|
156 | 238 | int dax_invalidate_mapping_entry_sync(struct address_space *mapping, |
---|
157 | 239 | pgoff_t index); |
---|
158 | | - |
---|
159 | | -#ifdef CONFIG_FS_DAX |
---|
160 | | -int __dax_zero_page_range(struct block_device *bdev, |
---|
161 | | - struct dax_device *dax_dev, sector_t sector, |
---|
162 | | - unsigned int offset, unsigned int length); |
---|
163 | | -#else |
---|
164 | | -static inline int __dax_zero_page_range(struct block_device *bdev, |
---|
165 | | - struct dax_device *dax_dev, sector_t sector, |
---|
166 | | - unsigned int offset, unsigned int length) |
---|
167 | | -{ |
---|
168 | | - return -ENXIO; |
---|
169 | | -} |
---|
170 | | -#endif |
---|
171 | | - |
---|
| 240 | +s64 dax_iomap_zero(loff_t pos, u64 length, struct iomap *iomap); |
---|
172 | 241 | static inline bool dax_mapping(struct address_space *mapping) |
---|
173 | 242 | { |
---|
174 | 243 | return mapping->host && IS_DAX(mapping->host); |
---|
175 | 244 | } |
---|
176 | 245 | |
---|
#ifdef CONFIG_DEV_DAX_HMEM_DEVICES
void hmem_register_device(int target_nid, struct resource *r);
#else
/* hmem devices not configured: registration is a no-op. */
static inline void hmem_register_device(int target_nid, struct resource *r)
{
}
#endif
---|
| 253 | + |
---|
177 | 254 | #endif |
---|