.. | .. |
---|
20 | 20 | #include <linux/uio.h> |
---|
21 | 21 | #include <linux/fs.h> |
---|
22 | 22 | |
---|
23 | | -static const struct file_operations fuse_direct_io_file_operations; |
---|
| 23 | +static struct page **fuse_pages_alloc(unsigned int npages, gfp_t flags, |
---|
| 24 | + struct fuse_page_desc **desc) |
---|
| 25 | +{ |
---|
| 26 | + struct page **pages; |
---|
24 | 27 | |
---|
25 | | -static int fuse_send_open(struct fuse_conn *fc, u64 nodeid, struct file *file, |
---|
| 28 | + pages = kzalloc(npages * (sizeof(struct page *) + |
---|
| 29 | + sizeof(struct fuse_page_desc)), flags); |
---|
| 30 | + *desc = (void *) (pages + npages); |
---|
| 31 | + |
---|
| 32 | + return pages; |
---|
| 33 | +} |
---|
| 34 | + |
---|
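The new fuse_pages_alloc() helper above packs the page-pointer array and the matching fuse_page_desc array into a single kzalloc(): the descriptors sit directly behind the npages pointers, which is why *desc is set to (void *)(pages + npages) and why a single kfree() of the pages pointer later releases both arrays. A minimal stand-alone sketch of the same layout trick (illustrative only; the demo_* names are not part of the patch):

#include <linux/slab.h>

struct demo_desc {
	unsigned int length;
	unsigned int offset;
};

/* One allocation: n pointers followed immediately by n descriptors. */
static void **demo_alloc(unsigned int n, struct demo_desc **descs)
{
	void **ptrs = kzalloc(n * (sizeof(void *) + sizeof(struct demo_desc)),
			      GFP_KERNEL);

	if (ptrs)
		*descs = (struct demo_desc *)(ptrs + n);
	return ptrs;
}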
| 35 | +static int fuse_send_open(struct fuse_mount *fm, u64 nodeid, struct file *file, |
---|
26 | 36 | int opcode, struct fuse_open_out *outargp) |
---|
27 | 37 | { |
---|
28 | 38 | struct fuse_open_in inarg; |
---|
.. | .. |
---|
30 | 40 | |
---|
31 | 41 | memset(&inarg, 0, sizeof(inarg)); |
---|
32 | 42 | inarg.flags = file->f_flags & ~(O_CREAT | O_EXCL | O_NOCTTY); |
---|
33 | | - if (!fc->atomic_o_trunc) |
---|
| 43 | + if (!fm->fc->atomic_o_trunc) |
---|
34 | 44 | inarg.flags &= ~O_TRUNC; |
---|
35 | | - args.in.h.opcode = opcode; |
---|
36 | | - args.in.h.nodeid = nodeid; |
---|
37 | | - args.in.numargs = 1; |
---|
38 | | - args.in.args[0].size = sizeof(inarg); |
---|
39 | | - args.in.args[0].value = &inarg; |
---|
40 | | - args.out.numargs = 1; |
---|
41 | | - args.out.args[0].size = sizeof(*outargp); |
---|
42 | | - args.out.args[0].value = outargp; |
---|
| 45 | + args.opcode = opcode; |
---|
| 46 | + args.nodeid = nodeid; |
---|
| 47 | + args.in_numargs = 1; |
---|
| 48 | + args.in_args[0].size = sizeof(inarg); |
---|
| 49 | + args.in_args[0].value = &inarg; |
---|
| 50 | + args.out_numargs = 1; |
---|
| 51 | + args.out_args[0].size = sizeof(*outargp); |
---|
| 52 | + args.out_args[0].value = outargp; |
---|
43 | 53 | |
---|
44 | | - return fuse_simple_request(fc, &args); |
---|
| 54 | + return fuse_simple_request(fm, &args); |
---|
45 | 55 | } |
---|
46 | 56 | |
---|
47 | | -struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) |
---|
| 57 | +struct fuse_release_args { |
---|
| 58 | + struct fuse_args args; |
---|
| 59 | + struct fuse_release_in inarg; |
---|
| 60 | + struct inode *inode; |
---|
| 61 | +}; |
---|
| 62 | + |
---|
| 63 | +struct fuse_file *fuse_file_alloc(struct fuse_mount *fm) |
---|
48 | 64 | { |
---|
49 | 65 | struct fuse_file *ff; |
---|
50 | 66 | |
---|
51 | | - ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL); |
---|
| 67 | + ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL_ACCOUNT); |
---|
52 | 68 | if (unlikely(!ff)) |
---|
53 | 69 | return NULL; |
---|
54 | 70 | |
---|
55 | | - ff->fc = fc; |
---|
56 | | - ff->reserved_req = fuse_request_alloc(0); |
---|
57 | | - if (unlikely(!ff->reserved_req)) { |
---|
| 71 | + ff->fm = fm; |
---|
| 72 | + ff->release_args = kzalloc(sizeof(*ff->release_args), |
---|
| 73 | + GFP_KERNEL_ACCOUNT); |
---|
| 74 | + if (!ff->release_args) { |
---|
58 | 75 | kfree(ff); |
---|
59 | 76 | return NULL; |
---|
60 | 77 | } |
---|
61 | 78 | |
---|
62 | 79 | INIT_LIST_HEAD(&ff->write_entry); |
---|
| 80 | + mutex_init(&ff->readdir.lock); |
---|
63 | 81 | refcount_set(&ff->count, 1); |
---|
64 | 82 | RB_CLEAR_NODE(&ff->polled_node); |
---|
65 | 83 | init_waitqueue_head(&ff->poll_wait); |
---|
66 | 84 | |
---|
67 | | - spin_lock(&fc->lock); |
---|
68 | | - ff->kh = ++fc->khctr; |
---|
69 | | - spin_unlock(&fc->lock); |
---|
| 85 | + ff->kh = atomic64_inc_return(&fm->fc->khctr); |
---|
70 | 86 | |
---|
71 | 87 | return ff; |
---|
72 | 88 | } |
---|
73 | 89 | |
---|
74 | 90 | void fuse_file_free(struct fuse_file *ff) |
---|
75 | 91 | { |
---|
76 | | - fuse_request_free(ff->reserved_req); |
---|
| 92 | + kfree(ff->release_args); |
---|
| 93 | + mutex_destroy(&ff->readdir.lock); |
---|
77 | 94 | kfree(ff); |
---|
78 | 95 | } |
---|
79 | 96 | |
---|
.. | .. |
---|
83 | 100 | return ff; |
---|
84 | 101 | } |
---|
85 | 102 | |
---|
86 | | -static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) |
---|
| 103 | +static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args, |
---|
| 104 | + int error) |
---|
87 | 105 | { |
---|
88 | | - iput(req->misc.release.inode); |
---|
| 106 | + struct fuse_release_args *ra = container_of(args, typeof(*ra), args); |
---|
| 107 | + |
---|
| 108 | + iput(ra->inode); |
---|
| 109 | + kfree(ra); |
---|
89 | 110 | } |
---|
90 | 111 | |
---|
91 | 112 | static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir) |
---|
92 | 113 | { |
---|
93 | 114 | if (refcount_dec_and_test(&ff->count)) { |
---|
94 | | - struct fuse_req *req = ff->reserved_req; |
---|
| 115 | + struct fuse_args *args = &ff->release_args->args; |
---|
95 | 116 | |
---|
96 | | - if (ff->fc->no_open && !isdir) { |
---|
97 | | - /* |
---|
98 | | - * Drop the release request when client does not |
---|
99 | | - * implement 'open' |
---|
100 | | - */ |
---|
101 | | - __clear_bit(FR_BACKGROUND, &req->flags); |
---|
102 | | - iput(req->misc.release.inode); |
---|
103 | | - fuse_put_request(ff->fc, req); |
---|
| 117 | + if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) { |
---|
| 118 | + /* Do nothing when client does not implement 'open' */ |
---|
| 119 | + fuse_release_end(ff->fm, args, 0); |
---|
104 | 120 | } else if (sync) { |
---|
105 | | - __set_bit(FR_FORCE, &req->flags); |
---|
106 | | - __clear_bit(FR_BACKGROUND, &req->flags); |
---|
107 | | - fuse_request_send(ff->fc, req); |
---|
108 | | - iput(req->misc.release.inode); |
---|
109 | | - fuse_put_request(ff->fc, req); |
---|
| 121 | + fuse_simple_request(ff->fm, args); |
---|
| 122 | + fuse_release_end(ff->fm, args, 0); |
---|
110 | 123 | } else { |
---|
111 | | - req->end = fuse_release_end; |
---|
112 | | - __set_bit(FR_BACKGROUND, &req->flags); |
---|
113 | | - fuse_request_send_background(ff->fc, req); |
---|
| 124 | + args->end = fuse_release_end; |
---|
| 125 | + if (fuse_simple_background(ff->fm, args, |
---|
| 126 | + GFP_KERNEL | __GFP_NOFAIL)) |
---|
| 127 | + fuse_release_end(ff->fm, args, -ENOTCONN); |
---|
114 | 128 | } |
---|
115 | 129 | kfree(ff); |
---|
116 | 130 | } |
---|
117 | 131 | } |
---|
118 | 132 | |
---|
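The release path above now carries its state in a heap-allocated struct fuse_release_args that embeds the generic struct fuse_args; the ->end callback (fuse_release_end) recovers the wrapper with container_of() and is also invoked directly on the no_open/no_opendir, synchronous and error paths, so the inode reference and the allocation are dropped exactly once. A hedged, generic sketch of that embed-and-recover pattern (demo_* names are hypothetical; fuse types as declared in fuse_i.h):

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/slab.h>

struct demo_release {
	struct fuse_args args;	/* must be embedded, not pointed to */
	struct inode *inode;
};

static void demo_release_end(struct fuse_mount *fm, struct fuse_args *args,
			     int error)
{
	/* Recover the containing structure from the embedded member. */
	struct demo_release *dr = container_of(args, struct demo_release, args);

	iput(dr->inode);	/* iput(NULL) is a no-op */
	kfree(dr);
}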
119 | | -int fuse_do_open(struct fuse_conn *fc, u64 nodeid, struct file *file, |
---|
| 133 | +int fuse_do_open(struct fuse_mount *fm, u64 nodeid, struct file *file, |
---|
120 | 134 | bool isdir) |
---|
121 | 135 | { |
---|
| 136 | + struct fuse_conn *fc = fm->fc; |
---|
122 | 137 | struct fuse_file *ff; |
---|
123 | 138 | int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; |
---|
124 | 139 | |
---|
125 | | - ff = fuse_file_alloc(fc); |
---|
| 140 | + ff = fuse_file_alloc(fm); |
---|
126 | 141 | if (!ff) |
---|
127 | 142 | return -ENOMEM; |
---|
128 | 143 | |
---|
129 | 144 | ff->fh = 0; |
---|
130 | | - ff->open_flags = FOPEN_KEEP_CACHE; /* Default for no-open */ |
---|
131 | | - if (!fc->no_open || isdir) { |
---|
| 145 | + /* Default for no-open */ |
---|
| 146 | + ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0); |
---|
| 147 | + if (isdir ? !fc->no_opendir : !fc->no_open) { |
---|
132 | 148 | struct fuse_open_out outarg; |
---|
133 | 149 | int err; |
---|
134 | 150 | |
---|
135 | | - err = fuse_send_open(fc, nodeid, file, opcode, &outarg); |
---|
| 151 | + err = fuse_send_open(fm, nodeid, file, opcode, &outarg); |
---|
136 | 152 | if (!err) { |
---|
137 | 153 | ff->fh = outarg.fh; |
---|
138 | 154 | ff->open_flags = outarg.open_flags; |
---|
139 | | - |
---|
140 | | - } else if (err != -ENOSYS || isdir) { |
---|
| 155 | + fuse_passthrough_setup(fc, ff, &outarg); |
---|
| 156 | + } else if (err != -ENOSYS) { |
---|
141 | 157 | fuse_file_free(ff); |
---|
142 | 158 | return err; |
---|
143 | 159 | } else { |
---|
144 | | - fc->no_open = 1; |
---|
| 160 | + if (isdir) |
---|
| 161 | + fc->no_opendir = 1; |
---|
| 162 | + else |
---|
| 163 | + fc->no_open = 1; |
---|
145 | 164 | } |
---|
146 | 165 | } |
---|
147 | 166 | |
---|
.. | .. |
---|
158 | 177 | static void fuse_link_write_file(struct file *file) |
---|
159 | 178 | { |
---|
160 | 179 | struct inode *inode = file_inode(file); |
---|
161 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
162 | 180 | struct fuse_inode *fi = get_fuse_inode(inode); |
---|
163 | 181 | struct fuse_file *ff = file->private_data; |
---|
164 | 182 | /* |
---|
165 | 183 | * file may be written through mmap, so chain it onto the |
---|
166 | 184 | * inode's write_file list |
---|
167 | 185 | */ |
---|
168 | | - spin_lock(&fc->lock); |
---|
| 186 | + spin_lock(&fi->lock); |
---|
169 | 187 | if (list_empty(&ff->write_entry)) |
---|
170 | 188 | list_add(&ff->write_entry, &fi->write_files); |
---|
171 | | - spin_unlock(&fc->lock); |
---|
| 189 | + spin_unlock(&fi->lock); |
---|
172 | 190 | } |
---|
173 | 191 | |
---|
174 | 192 | void fuse_finish_open(struct inode *inode, struct file *file) |
---|
.. | .. |
---|
176 | 194 | struct fuse_file *ff = file->private_data; |
---|
177 | 195 | struct fuse_conn *fc = get_fuse_conn(inode); |
---|
178 | 196 | |
---|
179 | | - if (ff->open_flags & FOPEN_DIRECT_IO) |
---|
180 | | - file->f_op = &fuse_direct_io_file_operations; |
---|
181 | 197 | if (ff->open_flags & FOPEN_STREAM) |
---|
182 | 198 | stream_open(inode, file); |
---|
183 | 199 | else if (ff->open_flags & FOPEN_NONSEEKABLE) |
---|
.. | .. |
---|
186 | 202 | if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) { |
---|
187 | 203 | struct fuse_inode *fi = get_fuse_inode(inode); |
---|
188 | 204 | |
---|
189 | | - spin_lock(&fc->lock); |
---|
190 | | - fi->attr_version = ++fc->attr_version; |
---|
| 205 | + spin_lock(&fi->lock); |
---|
| 206 | + fi->attr_version = atomic64_inc_return(&fc->attr_version); |
---|
191 | 207 | i_size_write(inode, 0); |
---|
192 | | - spin_unlock(&fc->lock); |
---|
193 | | - truncate_pagecache(inode, 0); |
---|
| 208 | + spin_unlock(&fi->lock); |
---|
194 | 209 | fuse_invalidate_attr(inode); |
---|
195 | 210 | if (fc->writeback_cache) |
---|
196 | 211 | file_update_time(file); |
---|
197 | | - } else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) { |
---|
198 | | - invalidate_inode_pages2(inode->i_mapping); |
---|
199 | 212 | } |
---|
200 | | - |
---|
201 | 213 | if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache) |
---|
202 | 214 | fuse_link_write_file(file); |
---|
203 | 215 | } |
---|
204 | 216 | |
---|
205 | 217 | int fuse_open_common(struct inode *inode, struct file *file, bool isdir) |
---|
206 | 218 | { |
---|
207 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
| 219 | + struct fuse_mount *fm = get_fuse_mount(inode); |
---|
| 220 | + struct fuse_conn *fc = fm->fc; |
---|
208 | 221 | int err; |
---|
209 | 222 | bool is_wb_truncate = (file->f_flags & O_TRUNC) && |
---|
210 | 223 | fc->atomic_o_trunc && |
---|
211 | 224 | fc->writeback_cache; |
---|
| 225 | + bool dax_truncate = (file->f_flags & O_TRUNC) && |
---|
| 226 | + fc->atomic_o_trunc && FUSE_IS_DAX(inode); |
---|
212 | 227 | |
---|
213 | 228 | if (fuse_is_bad(inode)) |
---|
214 | 229 | return -EIO; |
---|
.. | .. |
---|
217 | 232 | if (err) |
---|
218 | 233 | return err; |
---|
219 | 234 | |
---|
220 | | - if (is_wb_truncate) { |
---|
| 235 | + if (is_wb_truncate || dax_truncate) |
---|
221 | 236 | inode_lock(inode); |
---|
222 | | - fuse_set_nowrite(inode); |
---|
| 237 | + |
---|
| 238 | + if (dax_truncate) { |
---|
| 239 | + down_write(&get_fuse_inode(inode)->i_mmap_sem); |
---|
| 240 | + err = fuse_dax_break_layouts(inode, 0, 0); |
---|
| 241 | + if (err) |
---|
| 242 | + goto out_inode_unlock; |
---|
223 | 243 | } |
---|
224 | 244 | |
---|
225 | | - err = fuse_do_open(fc, get_node_id(inode), file, isdir); |
---|
| 245 | + if (is_wb_truncate || dax_truncate) |
---|
| 246 | + fuse_set_nowrite(inode); |
---|
226 | 247 | |
---|
| 248 | + err = fuse_do_open(fm, get_node_id(inode), file, isdir); |
---|
227 | 249 | if (!err) |
---|
228 | 250 | fuse_finish_open(inode, file); |
---|
229 | 251 | |
---|
230 | | - if (is_wb_truncate) { |
---|
| 252 | + if (is_wb_truncate || dax_truncate) |
---|
231 | 253 | fuse_release_nowrite(inode); |
---|
232 | | - inode_unlock(inode); |
---|
| 254 | + if (!err) { |
---|
| 255 | + struct fuse_file *ff = file->private_data; |
---|
| 256 | + |
---|
| 257 | + if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) |
---|
| 258 | + truncate_pagecache(inode, 0); |
---|
| 259 | + else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) |
---|
| 260 | + invalidate_inode_pages2(inode->i_mapping); |
---|
233 | 261 | } |
---|
| 262 | + if (dax_truncate) |
---|
| 263 | + up_write(&get_fuse_inode(inode)->i_mmap_sem); |
---|
| 264 | + |
---|
| 265 | +out_inode_unlock: |
---|
| 266 | + if (is_wb_truncate || dax_truncate) |
---|
| 267 | + inode_unlock(inode); |
---|
234 | 268 | |
---|
235 | 269 | return err; |
---|
236 | 270 | } |
---|
237 | 271 | |
---|
238 | | -static void fuse_prepare_release(struct fuse_file *ff, int flags, int opcode) |
---|
| 272 | +static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, |
---|
| 273 | + int flags, int opcode) |
---|
239 | 274 | { |
---|
240 | | - struct fuse_conn *fc = ff->fc; |
---|
241 | | - struct fuse_req *req = ff->reserved_req; |
---|
242 | | - struct fuse_release_in *inarg = &req->misc.release.in; |
---|
| 275 | + struct fuse_conn *fc = ff->fm->fc; |
---|
| 276 | + struct fuse_release_args *ra = ff->release_args; |
---|
243 | 277 | |
---|
| 278 | + /* Inode is NULL on error path of fuse_create_open() */ |
---|
| 279 | + if (likely(fi)) { |
---|
| 280 | + spin_lock(&fi->lock); |
---|
| 281 | + list_del(&ff->write_entry); |
---|
| 282 | + spin_unlock(&fi->lock); |
---|
| 283 | + } |
---|
244 | 284 | spin_lock(&fc->lock); |
---|
245 | | - list_del(&ff->write_entry); |
---|
246 | 285 | if (!RB_EMPTY_NODE(&ff->polled_node)) |
---|
247 | 286 | rb_erase(&ff->polled_node, &fc->polled_files); |
---|
248 | 287 | spin_unlock(&fc->lock); |
---|
249 | 288 | |
---|
250 | 289 | wake_up_interruptible_all(&ff->poll_wait); |
---|
251 | 290 | |
---|
252 | | - inarg->fh = ff->fh; |
---|
253 | | - inarg->flags = flags; |
---|
254 | | - req->in.h.opcode = opcode; |
---|
255 | | - req->in.h.nodeid = ff->nodeid; |
---|
256 | | - req->in.numargs = 1; |
---|
257 | | - req->in.args[0].size = sizeof(struct fuse_release_in); |
---|
258 | | - req->in.args[0].value = inarg; |
---|
| 291 | + ra->inarg.fh = ff->fh; |
---|
| 292 | + ra->inarg.flags = flags; |
---|
| 293 | + ra->args.in_numargs = 1; |
---|
| 294 | + ra->args.in_args[0].size = sizeof(struct fuse_release_in); |
---|
| 295 | + ra->args.in_args[0].value = &ra->inarg; |
---|
| 296 | + ra->args.opcode = opcode; |
---|
| 297 | + ra->args.nodeid = ff->nodeid; |
---|
| 298 | + ra->args.force = true; |
---|
| 299 | + ra->args.nocreds = true; |
---|
259 | 300 | } |
---|
260 | 301 | |
---|
261 | 302 | void fuse_release_common(struct file *file, bool isdir) |
---|
262 | 303 | { |
---|
| 304 | + struct fuse_inode *fi = get_fuse_inode(file_inode(file)); |
---|
263 | 305 | struct fuse_file *ff = file->private_data; |
---|
264 | | - struct fuse_req *req = ff->reserved_req; |
---|
| 306 | + struct fuse_release_args *ra = ff->release_args; |
---|
265 | 307 | int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE; |
---|
266 | 308 | |
---|
267 | | - fuse_prepare_release(ff, file->f_flags, opcode); |
---|
| 309 | + fuse_passthrough_release(&ff->passthrough); |
---|
| 310 | + |
---|
| 311 | + fuse_prepare_release(fi, ff, file->f_flags, opcode); |
---|
268 | 312 | |
---|
269 | 313 | if (ff->flock) { |
---|
270 | | - struct fuse_release_in *inarg = &req->misc.release.in; |
---|
271 | | - inarg->release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; |
---|
272 | | - inarg->lock_owner = fuse_lock_owner_id(ff->fc, |
---|
273 | | - (fl_owner_t) file); |
---|
| 314 | + ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; |
---|
| 315 | + ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, |
---|
| 316 | + (fl_owner_t) file); |
---|
274 | 317 | } |
---|
275 | 318 | /* Hold inode until release is finished */ |
---|
276 | | - req->misc.release.inode = igrab(file_inode(file)); |
---|
| 319 | + ra->inode = igrab(file_inode(file)); |
---|
277 | 320 | |
---|
278 | 321 | /* |
---|
279 | 322 | * Normally this will send the RELEASE request, however if |
---|
.. | .. |
---|
284 | 327 | * synchronous RELEASE is allowed (and desirable) in this case |
---|
285 | 328 | * because the server can be trusted not to screw up. |
---|
286 | 329 | */ |
---|
287 | | - fuse_file_put(ff, ff->fc->destroy_req != NULL, isdir); |
---|
| 330 | + fuse_file_put(ff, ff->fm->fc->destroy, isdir); |
---|
288 | 331 | } |
---|
289 | 332 | |
---|
290 | 333 | static int fuse_open(struct inode *inode, struct file *file) |
---|
.. | .. |
---|
306 | 349 | return 0; |
---|
307 | 350 | } |
---|
308 | 351 | |
---|
309 | | -void fuse_sync_release(struct fuse_file *ff, int flags) |
---|
| 352 | +void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, int flags) |
---|
310 | 353 | { |
---|
311 | 354 | WARN_ON(refcount_read(&ff->count) > 1); |
---|
312 | | - fuse_prepare_release(ff, flags, FUSE_RELEASE); |
---|
| 355 | + fuse_prepare_release(fi, ff, flags, FUSE_RELEASE); |
---|
313 | 356 | /* |
---|
314 | 357 | * iput(NULL) is a no-op and since the refcount is 1 and everything's |
---|
315 | 358 | * synchronous, we are fine with not doing igrab() here" |
---|
.. | .. |
---|
340 | 383 | return (u64) v0 + ((u64) v1 << 32); |
---|
341 | 384 | } |
---|
342 | 385 | |
---|
| 386 | +struct fuse_writepage_args { |
---|
| 387 | + struct fuse_io_args ia; |
---|
| 388 | + struct rb_node writepages_entry; |
---|
| 389 | + struct list_head queue_entry; |
---|
| 390 | + struct fuse_writepage_args *next; |
---|
| 391 | + struct inode *inode; |
---|
| 392 | +}; |
---|
| 393 | + |
---|
| 394 | +static struct fuse_writepage_args *fuse_find_writeback(struct fuse_inode *fi, |
---|
| 395 | + pgoff_t idx_from, pgoff_t idx_to) |
---|
| 396 | +{ |
---|
| 397 | + struct rb_node *n; |
---|
| 398 | + |
---|
| 399 | + n = fi->writepages.rb_node; |
---|
| 400 | + |
---|
| 401 | + while (n) { |
---|
| 402 | + struct fuse_writepage_args *wpa; |
---|
| 403 | + pgoff_t curr_index; |
---|
| 404 | + |
---|
| 405 | + wpa = rb_entry(n, struct fuse_writepage_args, writepages_entry); |
---|
| 406 | + WARN_ON(get_fuse_inode(wpa->inode) != fi); |
---|
| 407 | + curr_index = wpa->ia.write.in.offset >> PAGE_SHIFT; |
---|
| 408 | + if (idx_from >= curr_index + wpa->ia.ap.num_pages) |
---|
| 409 | + n = n->rb_right; |
---|
| 410 | + else if (idx_to < curr_index) |
---|
| 411 | + n = n->rb_left; |
---|
| 412 | + else |
---|
| 413 | + return wpa; |
---|
| 414 | + } |
---|
| 415 | + return NULL; |
---|
| 416 | +} |
---|
| 417 | + |
---|
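fuse_find_writeback() above replaces the old linear list walk with an rb-tree lookup keyed by page index: at each node it compares the query range [idx_from, idx_to] against the range covered by the queued write, [curr_index, curr_index + num_pages), and descends right or left until the two ranges overlap. The overlap test it relies on can be written on its own as follows (illustrative helper, not part of the patch):

#include <linux/pagemap.h>

/*
 * A queued writeback covering [curr_index, curr_index + num_pages) overlaps
 * the query range [idx_from, idx_to] iff neither range lies wholly below
 * the other.
 */
static bool demo_ranges_overlap(pgoff_t curr_index, unsigned int num_pages,
				pgoff_t idx_from, pgoff_t idx_to)
{
	return idx_from < curr_index + num_pages && idx_to >= curr_index;
}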
343 | 418 | /* |
---|
344 | 419 | * Check if any page in a range is under writeback |
---|
345 | 420 | * |
---|
.. | .. |
---|
349 | 424 | static bool fuse_range_is_writeback(struct inode *inode, pgoff_t idx_from, |
---|
350 | 425 | pgoff_t idx_to) |
---|
351 | 426 | { |
---|
352 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
353 | 427 | struct fuse_inode *fi = get_fuse_inode(inode); |
---|
354 | | - struct fuse_req *req; |
---|
355 | | - bool found = false; |
---|
| 428 | + bool found; |
---|
356 | 429 | |
---|
357 | | - spin_lock(&fc->lock); |
---|
358 | | - list_for_each_entry(req, &fi->writepages, writepages_entry) { |
---|
359 | | - pgoff_t curr_index; |
---|
360 | | - |
---|
361 | | - BUG_ON(req->inode != inode); |
---|
362 | | - curr_index = req->misc.write.in.offset >> PAGE_SHIFT; |
---|
363 | | - if (idx_from < curr_index + req->num_pages && |
---|
364 | | - curr_index <= idx_to) { |
---|
365 | | - found = true; |
---|
366 | | - break; |
---|
367 | | - } |
---|
368 | | - } |
---|
369 | | - spin_unlock(&fc->lock); |
---|
| 430 | + spin_lock(&fi->lock); |
---|
| 431 | + found = fuse_find_writeback(fi, idx_from, idx_to); |
---|
| 432 | + spin_unlock(&fi->lock); |
---|
370 | 433 | |
---|
371 | 434 | return found; |
---|
372 | 435 | } |
---|
.. | .. |
---|
382 | 445 | * Since fuse doesn't rely on the VM writeback tracking, this has to |
---|
383 | 446 | * use some other means. |
---|
384 | 447 | */ |
---|
385 | | -static int fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index) |
---|
| 448 | +static void fuse_wait_on_page_writeback(struct inode *inode, pgoff_t index) |
---|
386 | 449 | { |
---|
387 | 450 | struct fuse_inode *fi = get_fuse_inode(inode); |
---|
388 | 451 | |
---|
389 | 452 | wait_event(fi->page_waitq, !fuse_page_is_writeback(inode, index)); |
---|
390 | | - return 0; |
---|
391 | 453 | } |
---|
392 | 454 | |
---|
393 | 455 | /* |
---|
.. | .. |
---|
408 | 470 | static int fuse_flush(struct file *file, fl_owner_t id) |
---|
409 | 471 | { |
---|
410 | 472 | struct inode *inode = file_inode(file); |
---|
411 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
| 473 | + struct fuse_mount *fm = get_fuse_mount(inode); |
---|
412 | 474 | struct fuse_file *ff = file->private_data; |
---|
413 | | - struct fuse_req *req; |
---|
414 | 475 | struct fuse_flush_in inarg; |
---|
| 476 | + FUSE_ARGS(args); |
---|
415 | 477 | int err; |
---|
416 | 478 | |
---|
417 | 479 | if (fuse_is_bad(inode)) |
---|
418 | 480 | return -EIO; |
---|
419 | | - |
---|
420 | | - if (fc->no_flush) |
---|
421 | | - return 0; |
---|
422 | 481 | |
---|
423 | 482 | err = write_inode_now(inode, 1); |
---|
424 | 483 | if (err) |
---|
.. | .. |
---|
432 | 491 | if (err) |
---|
433 | 492 | return err; |
---|
434 | 493 | |
---|
435 | | - req = fuse_get_req_nofail_nopages(fc, file); |
---|
| 494 | + err = 0; |
---|
| 495 | + if (fm->fc->no_flush) |
---|
| 496 | + goto inval_attr_out; |
---|
| 497 | + |
---|
436 | 498 | memset(&inarg, 0, sizeof(inarg)); |
---|
437 | 499 | inarg.fh = ff->fh; |
---|
438 | | - inarg.lock_owner = fuse_lock_owner_id(fc, id); |
---|
439 | | - req->in.h.opcode = FUSE_FLUSH; |
---|
440 | | - req->in.h.nodeid = get_node_id(inode); |
---|
441 | | - req->in.numargs = 1; |
---|
442 | | - req->in.args[0].size = sizeof(inarg); |
---|
443 | | - req->in.args[0].value = &inarg; |
---|
444 | | - __set_bit(FR_FORCE, &req->flags); |
---|
445 | | - fuse_request_send(fc, req); |
---|
446 | | - err = req->out.h.error; |
---|
447 | | - fuse_put_request(fc, req); |
---|
| 500 | + inarg.lock_owner = fuse_lock_owner_id(fm->fc, id); |
---|
| 501 | + args.opcode = FUSE_FLUSH; |
---|
| 502 | + args.nodeid = get_node_id(inode); |
---|
| 503 | + args.in_numargs = 1; |
---|
| 504 | + args.in_args[0].size = sizeof(inarg); |
---|
| 505 | + args.in_args[0].value = &inarg; |
---|
| 506 | + args.force = true; |
---|
| 507 | + |
---|
| 508 | + err = fuse_simple_request(fm, &args); |
---|
448 | 509 | if (err == -ENOSYS) { |
---|
449 | | - fc->no_flush = 1; |
---|
| 510 | + fm->fc->no_flush = 1; |
---|
450 | 511 | err = 0; |
---|
451 | 512 | } |
---|
| 513 | + |
---|
| 514 | +inval_attr_out: |
---|
| 515 | + /* |
---|
| 516 | + * In memory i_blocks is not maintained by fuse, if writeback cache is |
---|
| 517 | + * enabled, i_blocks from cached attr may not be accurate. |
---|
| 518 | + */ |
---|
| 519 | + if (!err && fm->fc->writeback_cache) |
---|
| 520 | + fuse_invalidate_attr(inode); |
---|
452 | 521 | return err; |
---|
453 | 522 | } |
---|
454 | 523 | |
---|
455 | 524 | int fuse_fsync_common(struct file *file, loff_t start, loff_t end, |
---|
456 | | - int datasync, int isdir) |
---|
| 525 | + int datasync, int opcode) |
---|
457 | 526 | { |
---|
458 | 527 | struct inode *inode = file->f_mapping->host; |
---|
459 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
| 528 | + struct fuse_mount *fm = get_fuse_mount(inode); |
---|
460 | 529 | struct fuse_file *ff = file->private_data; |
---|
461 | 530 | FUSE_ARGS(args); |
---|
462 | 531 | struct fuse_fsync_in inarg; |
---|
| 532 | + |
---|
| 533 | + memset(&inarg, 0, sizeof(inarg)); |
---|
| 534 | + inarg.fh = ff->fh; |
---|
| 535 | + inarg.fsync_flags = datasync ? FUSE_FSYNC_FDATASYNC : 0; |
---|
| 536 | + args.opcode = opcode; |
---|
| 537 | + args.nodeid = get_node_id(inode); |
---|
| 538 | + args.in_numargs = 1; |
---|
| 539 | + args.in_args[0].size = sizeof(inarg); |
---|
| 540 | + args.in_args[0].value = &inarg; |
---|
| 541 | + return fuse_simple_request(fm, &args); |
---|
| 542 | +} |
---|
| 543 | + |
---|
| 544 | +static int fuse_fsync(struct file *file, loff_t start, loff_t end, |
---|
| 545 | + int datasync) |
---|
| 546 | +{ |
---|
| 547 | + struct inode *inode = file->f_mapping->host; |
---|
| 548 | + struct fuse_conn *fc = get_fuse_conn(inode); |
---|
463 | 549 | int err; |
---|
464 | 550 | |
---|
465 | 551 | if (fuse_is_bad(inode)) |
---|
.. | .. |
---|
491 | 577 | if (err) |
---|
492 | 578 | goto out; |
---|
493 | 579 | |
---|
494 | | - if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir)) |
---|
| 580 | + if (fc->no_fsync) |
---|
495 | 581 | goto out; |
---|
496 | 582 | |
---|
497 | | - memset(&inarg, 0, sizeof(inarg)); |
---|
498 | | - inarg.fh = ff->fh; |
---|
499 | | - inarg.fsync_flags = datasync ? 1 : 0; |
---|
500 | | - args.in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC; |
---|
501 | | - args.in.h.nodeid = get_node_id(inode); |
---|
502 | | - args.in.numargs = 1; |
---|
503 | | - args.in.args[0].size = sizeof(inarg); |
---|
504 | | - args.in.args[0].value = &inarg; |
---|
505 | | - err = fuse_simple_request(fc, &args); |
---|
| 583 | + err = fuse_fsync_common(file, start, end, datasync, FUSE_FSYNC); |
---|
506 | 584 | if (err == -ENOSYS) { |
---|
507 | | - if (isdir) |
---|
508 | | - fc->no_fsyncdir = 1; |
---|
509 | | - else |
---|
510 | | - fc->no_fsync = 1; |
---|
| 585 | + fc->no_fsync = 1; |
---|
511 | 586 | err = 0; |
---|
512 | 587 | } |
---|
513 | 588 | out: |
---|
514 | 589 | inode_unlock(inode); |
---|
| 590 | + |
---|
515 | 591 | return err; |
---|
516 | 592 | } |
---|
517 | 593 | |
---|
518 | | -static int fuse_fsync(struct file *file, loff_t start, loff_t end, |
---|
519 | | - int datasync) |
---|
| 594 | +void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, |
---|
| 595 | + size_t count, int opcode) |
---|
520 | 596 | { |
---|
521 | | - return fuse_fsync_common(file, start, end, datasync, 0); |
---|
522 | | -} |
---|
523 | | - |
---|
524 | | -void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, |
---|
525 | | - size_t count, int opcode) |
---|
526 | | -{ |
---|
527 | | - struct fuse_read_in *inarg = &req->misc.read.in; |
---|
528 | 597 | struct fuse_file *ff = file->private_data; |
---|
| 598 | + struct fuse_args *args = &ia->ap.args; |
---|
529 | 599 | |
---|
530 | | - inarg->fh = ff->fh; |
---|
531 | | - inarg->offset = pos; |
---|
532 | | - inarg->size = count; |
---|
533 | | - inarg->flags = file->f_flags; |
---|
534 | | - req->in.h.opcode = opcode; |
---|
535 | | - req->in.h.nodeid = ff->nodeid; |
---|
536 | | - req->in.numargs = 1; |
---|
537 | | - req->in.args[0].size = sizeof(struct fuse_read_in); |
---|
538 | | - req->in.args[0].value = inarg; |
---|
539 | | - req->out.argvar = 1; |
---|
540 | | - req->out.numargs = 1; |
---|
541 | | - req->out.args[0].size = count; |
---|
| 600 | + ia->read.in.fh = ff->fh; |
---|
| 601 | + ia->read.in.offset = pos; |
---|
| 602 | + ia->read.in.size = count; |
---|
| 603 | + ia->read.in.flags = file->f_flags; |
---|
| 604 | + args->opcode = opcode; |
---|
| 605 | + args->nodeid = ff->nodeid; |
---|
| 606 | + args->in_numargs = 1; |
---|
| 607 | + args->in_args[0].size = sizeof(ia->read.in); |
---|
| 608 | + args->in_args[0].value = &ia->read.in; |
---|
| 609 | + args->out_argvar = true; |
---|
| 610 | + args->out_numargs = 1; |
---|
| 611 | + args->out_args[0].size = count; |
---|
542 | 612 | } |
---|
543 | 613 | |
---|
544 | | -static void fuse_release_user_pages(struct fuse_req *req, bool should_dirty) |
---|
| 614 | +static void fuse_release_user_pages(struct fuse_args_pages *ap, |
---|
| 615 | + bool should_dirty) |
---|
545 | 616 | { |
---|
546 | | - unsigned i; |
---|
| 617 | + unsigned int i; |
---|
547 | 618 | |
---|
548 | | - for (i = 0; i < req->num_pages; i++) { |
---|
549 | | - struct page *page = req->pages[i]; |
---|
| 619 | + for (i = 0; i < ap->num_pages; i++) { |
---|
550 | 620 | if (should_dirty) |
---|
551 | | - set_page_dirty_lock(page); |
---|
552 | | - put_page(page); |
---|
| 621 | + set_page_dirty_lock(ap->pages[i]); |
---|
| 622 | + put_page(ap->pages[i]); |
---|
553 | 623 | } |
---|
554 | 624 | } |
---|
555 | 625 | |
---|
.. | .. |
---|
608 | 678 | struct fuse_conn *fc = get_fuse_conn(inode); |
---|
609 | 679 | struct fuse_inode *fi = get_fuse_inode(inode); |
---|
610 | 680 | |
---|
611 | | - spin_lock(&fc->lock); |
---|
612 | | - fi->attr_version = ++fc->attr_version; |
---|
613 | | - spin_unlock(&fc->lock); |
---|
| 681 | + spin_lock(&fi->lock); |
---|
| 682 | + fi->attr_version = atomic64_inc_return(&fc->attr_version); |
---|
| 683 | + spin_unlock(&fi->lock); |
---|
614 | 684 | } |
---|
615 | 685 | |
---|
616 | 686 | io->iocb->ki_complete(io->iocb, res, 0); |
---|
.. | .. |
---|
619 | 689 | kref_put(&io->refcnt, fuse_io_release); |
---|
620 | 690 | } |
---|
621 | 691 | |
---|
622 | | -static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req) |
---|
| 692 | +static struct fuse_io_args *fuse_io_alloc(struct fuse_io_priv *io, |
---|
| 693 | + unsigned int npages) |
---|
623 | 694 | { |
---|
624 | | - struct fuse_io_priv *io = req->io; |
---|
625 | | - ssize_t pos = -1; |
---|
| 695 | + struct fuse_io_args *ia; |
---|
626 | 696 | |
---|
627 | | - fuse_release_user_pages(req, io->should_dirty); |
---|
628 | | - |
---|
629 | | - if (io->write) { |
---|
630 | | - if (req->misc.write.in.size != req->misc.write.out.size) |
---|
631 | | - pos = req->misc.write.in.offset - io->offset + |
---|
632 | | - req->misc.write.out.size; |
---|
633 | | - } else { |
---|
634 | | - if (req->misc.read.in.size != req->out.args[0].size) |
---|
635 | | - pos = req->misc.read.in.offset - io->offset + |
---|
636 | | - req->out.args[0].size; |
---|
| 697 | + ia = kzalloc(sizeof(*ia), GFP_KERNEL); |
---|
| 698 | + if (ia) { |
---|
| 699 | + ia->io = io; |
---|
| 700 | + ia->ap.pages = fuse_pages_alloc(npages, GFP_KERNEL, |
---|
| 701 | + &ia->ap.descs); |
---|
| 702 | + if (!ia->ap.pages) { |
---|
| 703 | + kfree(ia); |
---|
| 704 | + ia = NULL; |
---|
| 705 | + } |
---|
637 | 706 | } |
---|
638 | | - |
---|
639 | | - fuse_aio_complete(io, req->out.h.error, pos); |
---|
| 707 | + return ia; |
---|
640 | 708 | } |
---|
641 | 709 | |
---|
642 | | -static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req, |
---|
643 | | - size_t num_bytes, struct fuse_io_priv *io) |
---|
| 710 | +static void fuse_io_free(struct fuse_io_args *ia) |
---|
644 | 711 | { |
---|
| 712 | + kfree(ia->ap.pages); |
---|
| 713 | + kfree(ia); |
---|
| 714 | +} |
---|
| 715 | + |
---|
| 716 | +static void fuse_aio_complete_req(struct fuse_mount *fm, struct fuse_args *args, |
---|
| 717 | + int err) |
---|
| 718 | +{ |
---|
| 719 | + struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args); |
---|
| 720 | + struct fuse_io_priv *io = ia->io; |
---|
| 721 | + ssize_t pos = -1; |
---|
| 722 | + |
---|
| 723 | + fuse_release_user_pages(&ia->ap, io->should_dirty); |
---|
| 724 | + |
---|
| 725 | + if (err) { |
---|
| 726 | + /* Nothing */ |
---|
| 727 | + } else if (io->write) { |
---|
| 728 | + if (ia->write.out.size > ia->write.in.size) { |
---|
| 729 | + err = -EIO; |
---|
| 730 | + } else if (ia->write.in.size != ia->write.out.size) { |
---|
| 731 | + pos = ia->write.in.offset - io->offset + |
---|
| 732 | + ia->write.out.size; |
---|
| 733 | + } |
---|
| 734 | + } else { |
---|
| 735 | + u32 outsize = args->out_args[0].size; |
---|
| 736 | + |
---|
| 737 | + if (ia->read.in.size != outsize) |
---|
| 738 | + pos = ia->read.in.offset - io->offset + outsize; |
---|
| 739 | + } |
---|
| 740 | + |
---|
| 741 | + fuse_aio_complete(io, err, pos); |
---|
| 742 | + fuse_io_free(ia); |
---|
| 743 | +} |
---|
| 744 | + |
---|
| 745 | +static ssize_t fuse_async_req_send(struct fuse_mount *fm, |
---|
| 746 | + struct fuse_io_args *ia, size_t num_bytes) |
---|
| 747 | +{ |
---|
| 748 | + ssize_t err; |
---|
| 749 | + struct fuse_io_priv *io = ia->io; |
---|
| 750 | + |
---|
645 | 751 | spin_lock(&io->lock); |
---|
646 | 752 | kref_get(&io->refcnt); |
---|
647 | 753 | io->size += num_bytes; |
---|
648 | 754 | io->reqs++; |
---|
649 | 755 | spin_unlock(&io->lock); |
---|
650 | 756 | |
---|
651 | | - req->io = io; |
---|
652 | | - req->end = fuse_aio_complete_req; |
---|
653 | | - |
---|
654 | | - __fuse_get_request(req); |
---|
655 | | - fuse_request_send_background(fc, req); |
---|
| 757 | + ia->ap.args.end = fuse_aio_complete_req; |
---|
| 758 | + ia->ap.args.may_block = io->should_dirty; |
---|
| 759 | + err = fuse_simple_background(fm, &ia->ap.args, GFP_KERNEL); |
---|
| 760 | + if (err) |
---|
| 761 | + fuse_aio_complete_req(fm, &ia->ap.args, err); |
---|
656 | 762 | |
---|
657 | 763 | return num_bytes; |
---|
658 | 764 | } |
---|
659 | 765 | |
---|
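fuse_async_req_send() above accounts the bytes and takes an io reference before queuing, and if fuse_simple_background() refuses the request it runs the completion callback itself with the error, so io->reqs and the reference count stay balanced. A minimal sketch of that submit-or-complete pattern (illustrative; it assumes the fuse_aio_complete_req() defined above):

static int demo_submit(struct fuse_mount *fm, struct fuse_args *args)
{
	int err;

	args->end = fuse_aio_complete_req;
	err = fuse_simple_background(fm, args, GFP_KERNEL);
	if (err)
		/* Never queued: finish it here so nothing leaks. */
		fuse_aio_complete_req(fm, args, err);

	return err;
}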
660 | | -static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io, |
---|
661 | | - loff_t pos, size_t count, fl_owner_t owner) |
---|
| 766 | +static ssize_t fuse_send_read(struct fuse_io_args *ia, loff_t pos, size_t count, |
---|
| 767 | + fl_owner_t owner) |
---|
662 | 768 | { |
---|
663 | | - struct file *file = io->iocb->ki_filp; |
---|
| 769 | + struct file *file = ia->io->iocb->ki_filp; |
---|
664 | 770 | struct fuse_file *ff = file->private_data; |
---|
665 | | - struct fuse_conn *fc = ff->fc; |
---|
| 771 | + struct fuse_mount *fm = ff->fm; |
---|
666 | 772 | |
---|
667 | | - fuse_read_fill(req, file, pos, count, FUSE_READ); |
---|
| 773 | + fuse_read_args_fill(ia, file, pos, count, FUSE_READ); |
---|
668 | 774 | if (owner != NULL) { |
---|
669 | | - struct fuse_read_in *inarg = &req->misc.read.in; |
---|
670 | | - |
---|
671 | | - inarg->read_flags |= FUSE_READ_LOCKOWNER; |
---|
672 | | - inarg->lock_owner = fuse_lock_owner_id(fc, owner); |
---|
| 775 | + ia->read.in.read_flags |= FUSE_READ_LOCKOWNER; |
---|
| 776 | + ia->read.in.lock_owner = fuse_lock_owner_id(fm->fc, owner); |
---|
673 | 777 | } |
---|
674 | 778 | |
---|
675 | | - if (io->async) |
---|
676 | | - return fuse_async_req_send(fc, req, count, io); |
---|
| 779 | + if (ia->io->async) |
---|
| 780 | + return fuse_async_req_send(fm, ia, count); |
---|
677 | 781 | |
---|
678 | | - fuse_request_send(fc, req); |
---|
679 | | - return req->out.args[0].size; |
---|
| 782 | + return fuse_simple_request(fm, &ia->ap.args); |
---|
680 | 783 | } |
---|
681 | 784 | |
---|
682 | 785 | static void fuse_read_update_size(struct inode *inode, loff_t size, |
---|
.. | .. |
---|
685 | 788 | struct fuse_conn *fc = get_fuse_conn(inode); |
---|
686 | 789 | struct fuse_inode *fi = get_fuse_inode(inode); |
---|
687 | 790 | |
---|
688 | | - spin_lock(&fc->lock); |
---|
689 | | - if (attr_ver == fi->attr_version && size < inode->i_size && |
---|
| 791 | + spin_lock(&fi->lock); |
---|
| 792 | + if (attr_ver >= fi->attr_version && size < inode->i_size && |
---|
690 | 793 | !test_bit(FUSE_I_SIZE_UNSTABLE, &fi->state)) { |
---|
691 | | - fi->attr_version = ++fc->attr_version; |
---|
| 794 | + fi->attr_version = atomic64_inc_return(&fc->attr_version); |
---|
692 | 795 | i_size_write(inode, size); |
---|
693 | 796 | } |
---|
694 | | - spin_unlock(&fc->lock); |
---|
| 797 | + spin_unlock(&fi->lock); |
---|
695 | 798 | } |
---|
696 | 799 | |
---|
697 | | -static void fuse_short_read(struct fuse_req *req, struct inode *inode, |
---|
698 | | - u64 attr_ver) |
---|
| 800 | +static void fuse_short_read(struct inode *inode, u64 attr_ver, size_t num_read, |
---|
| 801 | + struct fuse_args_pages *ap) |
---|
699 | 802 | { |
---|
700 | | - size_t num_read = req->out.args[0].size; |
---|
701 | 803 | struct fuse_conn *fc = get_fuse_conn(inode); |
---|
702 | 804 | |
---|
703 | 805 | if (fc->writeback_cache) { |
---|
.. | .. |
---|
710 | 812 | int start_idx = num_read >> PAGE_SHIFT; |
---|
711 | 813 | size_t off = num_read & (PAGE_SIZE - 1); |
---|
712 | 814 | |
---|
713 | | - for (i = start_idx; i < req->num_pages; i++) { |
---|
714 | | - zero_user_segment(req->pages[i], off, PAGE_SIZE); |
---|
| 815 | + for (i = start_idx; i < ap->num_pages; i++) { |
---|
| 816 | + zero_user_segment(ap->pages[i], off, PAGE_SIZE); |
---|
715 | 817 | off = 0; |
---|
716 | 818 | } |
---|
717 | 819 | } else { |
---|
718 | | - loff_t pos = page_offset(req->pages[0]) + num_read; |
---|
| 820 | + loff_t pos = page_offset(ap->pages[0]) + num_read; |
---|
719 | 821 | fuse_read_update_size(inode, pos, attr_ver); |
---|
720 | 822 | } |
---|
721 | 823 | } |
---|
722 | 824 | |
---|
723 | 825 | static int fuse_do_readpage(struct file *file, struct page *page) |
---|
724 | 826 | { |
---|
725 | | - struct kiocb iocb; |
---|
726 | | - struct fuse_io_priv io; |
---|
727 | 827 | struct inode *inode = page->mapping->host; |
---|
728 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
729 | | - struct fuse_req *req; |
---|
730 | | - size_t num_read; |
---|
| 828 | + struct fuse_mount *fm = get_fuse_mount(inode); |
---|
731 | 829 | loff_t pos = page_offset(page); |
---|
732 | | - size_t count = PAGE_SIZE; |
---|
| 830 | + struct fuse_page_desc desc = { .length = PAGE_SIZE }; |
---|
| 831 | + struct fuse_io_args ia = { |
---|
| 832 | + .ap.args.page_zeroing = true, |
---|
| 833 | + .ap.args.out_pages = true, |
---|
| 834 | + .ap.num_pages = 1, |
---|
| 835 | + .ap.pages = &page, |
---|
| 836 | + .ap.descs = &desc, |
---|
| 837 | + }; |
---|
| 838 | + ssize_t res; |
---|
733 | 839 | u64 attr_ver; |
---|
734 | | - int err; |
---|
735 | 840 | |
---|
736 | 841 | /* |
---|
737 | 842 | * Page writeback can extend beyond the lifetime of the |
---|
.. | .. |
---|
740 | 845 | */ |
---|
741 | 846 | fuse_wait_on_page_writeback(inode, page->index); |
---|
742 | 847 | |
---|
743 | | - req = fuse_get_req(fc, 1); |
---|
744 | | - if (IS_ERR(req)) |
---|
745 | | - return PTR_ERR(req); |
---|
| 848 | + attr_ver = fuse_get_attr_version(fm->fc); |
---|
746 | 849 | |
---|
747 | | - attr_ver = fuse_get_attr_version(fc); |
---|
| 850 | + /* Don't overflow end offset */ |
---|
| 851 | + if (pos + (desc.length - 1) == LLONG_MAX) |
---|
| 852 | + desc.length--; |
---|
748 | 853 | |
---|
749 | | - req->out.page_zeroing = 1; |
---|
750 | | - req->out.argpages = 1; |
---|
751 | | - req->num_pages = 1; |
---|
752 | | - req->pages[0] = page; |
---|
753 | | - req->page_descs[0].length = count; |
---|
754 | | - init_sync_kiocb(&iocb, file); |
---|
755 | | - io = (struct fuse_io_priv) FUSE_IO_PRIV_SYNC(&iocb); |
---|
756 | | - num_read = fuse_send_read(req, &io, pos, count, NULL); |
---|
757 | | - err = req->out.h.error; |
---|
| 854 | + fuse_read_args_fill(&ia, file, pos, desc.length, FUSE_READ); |
---|
| 855 | + res = fuse_simple_request(fm, &ia.ap.args); |
---|
| 856 | + if (res < 0) |
---|
| 857 | + return res; |
---|
| 858 | + /* |
---|
| 859 | + * Short read means EOF. If file size is larger, truncate it |
---|
| 860 | + */ |
---|
| 861 | + if (res < desc.length) |
---|
| 862 | + fuse_short_read(inode, attr_ver, res, &ia.ap); |
---|
758 | 863 | |
---|
759 | | - if (!err) { |
---|
760 | | - /* |
---|
761 | | - * Short read means EOF. If file size is larger, truncate it |
---|
762 | | - */ |
---|
763 | | - if (num_read < count) |
---|
764 | | - fuse_short_read(req, inode, attr_ver); |
---|
| 864 | + SetPageUptodate(page); |
---|
765 | 865 | |
---|
766 | | - SetPageUptodate(page); |
---|
767 | | - } |
---|
768 | | - |
---|
769 | | - fuse_put_request(fc, req); |
---|
770 | | - |
---|
771 | | - return err; |
---|
| 866 | + return 0; |
---|
772 | 867 | } |
---|
773 | 868 | |
---|
774 | 869 | static int fuse_readpage(struct file *file, struct page *page) |
---|
.. | .. |
---|
787 | 882 | return err; |
---|
788 | 883 | } |
---|
789 | 884 | |
---|
790 | | -static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) |
---|
| 885 | +static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, |
---|
| 886 | + int err) |
---|
791 | 887 | { |
---|
792 | 888 | int i; |
---|
793 | | - size_t count = req->misc.read.in.size; |
---|
794 | | - size_t num_read = req->out.args[0].size; |
---|
| 889 | + struct fuse_io_args *ia = container_of(args, typeof(*ia), ap.args); |
---|
| 890 | + struct fuse_args_pages *ap = &ia->ap; |
---|
| 891 | + size_t count = ia->read.in.size; |
---|
| 892 | + size_t num_read = args->out_args[0].size; |
---|
795 | 893 | struct address_space *mapping = NULL; |
---|
796 | 894 | |
---|
797 | | - for (i = 0; mapping == NULL && i < req->num_pages; i++) |
---|
798 | | - mapping = req->pages[i]->mapping; |
---|
| 895 | + for (i = 0; mapping == NULL && i < ap->num_pages; i++) |
---|
| 896 | + mapping = ap->pages[i]->mapping; |
---|
799 | 897 | |
---|
800 | 898 | if (mapping) { |
---|
801 | 899 | struct inode *inode = mapping->host; |
---|
.. | .. |
---|
803 | 901 | /* |
---|
804 | 902 | * Short read means EOF. If file size is larger, truncate it |
---|
805 | 903 | */ |
---|
806 | | - if (!req->out.h.error && num_read < count) |
---|
807 | | - fuse_short_read(req, inode, req->misc.read.attr_ver); |
---|
| 904 | + if (!err && num_read < count) |
---|
| 905 | + fuse_short_read(inode, ia->read.attr_ver, num_read, ap); |
---|
808 | 906 | |
---|
809 | 907 | fuse_invalidate_atime(inode); |
---|
810 | 908 | } |
---|
811 | 909 | |
---|
812 | | - for (i = 0; i < req->num_pages; i++) { |
---|
813 | | - struct page *page = req->pages[i]; |
---|
814 | | - if (!req->out.h.error) |
---|
| 910 | + for (i = 0; i < ap->num_pages; i++) { |
---|
| 911 | + struct page *page = ap->pages[i]; |
---|
| 912 | + |
---|
| 913 | + if (!err) |
---|
815 | 914 | SetPageUptodate(page); |
---|
816 | 915 | else |
---|
817 | 916 | SetPageError(page); |
---|
818 | 917 | unlock_page(page); |
---|
819 | 918 | put_page(page); |
---|
820 | 919 | } |
---|
821 | | - if (req->ff) |
---|
822 | | - fuse_file_put(req->ff, false, false); |
---|
| 920 | + if (ia->ff) |
---|
| 921 | + fuse_file_put(ia->ff, false, false); |
---|
| 922 | + |
---|
| 923 | + fuse_io_free(ia); |
---|
823 | 924 | } |
---|
824 | 925 | |
---|
825 | | -static void fuse_send_readpages(struct fuse_req *req, struct file *file) |
---|
| 926 | +static void fuse_send_readpages(struct fuse_io_args *ia, struct file *file) |
---|
826 | 927 | { |
---|
827 | 928 | struct fuse_file *ff = file->private_data; |
---|
828 | | - struct fuse_conn *fc = ff->fc; |
---|
829 | | - loff_t pos = page_offset(req->pages[0]); |
---|
830 | | - size_t count = req->num_pages << PAGE_SHIFT; |
---|
831 | | - |
---|
832 | | - req->out.argpages = 1; |
---|
833 | | - req->out.page_zeroing = 1; |
---|
834 | | - req->out.page_replace = 1; |
---|
835 | | - fuse_read_fill(req, file, pos, count, FUSE_READ); |
---|
836 | | - req->misc.read.attr_ver = fuse_get_attr_version(fc); |
---|
837 | | - if (fc->async_read) { |
---|
838 | | - req->ff = fuse_file_get(ff); |
---|
839 | | - req->end = fuse_readpages_end; |
---|
840 | | - fuse_request_send_background(fc, req); |
---|
841 | | - } else { |
---|
842 | | - fuse_request_send(fc, req); |
---|
843 | | - fuse_readpages_end(fc, req); |
---|
844 | | - fuse_put_request(fc, req); |
---|
845 | | - } |
---|
846 | | -} |
---|
847 | | - |
---|
848 | | -struct fuse_fill_data { |
---|
849 | | - struct fuse_req *req; |
---|
850 | | - struct file *file; |
---|
851 | | - struct inode *inode; |
---|
852 | | - unsigned nr_pages; |
---|
853 | | -}; |
---|
854 | | - |
---|
855 | | -static int fuse_readpages_fill(void *_data, struct page *page) |
---|
856 | | -{ |
---|
857 | | - struct fuse_fill_data *data = _data; |
---|
858 | | - struct fuse_req *req = data->req; |
---|
859 | | - struct inode *inode = data->inode; |
---|
860 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
861 | | - |
---|
862 | | - fuse_wait_on_page_writeback(inode, page->index); |
---|
863 | | - |
---|
864 | | - if (req->num_pages && |
---|
865 | | - (req->num_pages == FUSE_MAX_PAGES_PER_REQ || |
---|
866 | | - (req->num_pages + 1) * PAGE_SIZE > fc->max_read || |
---|
867 | | - req->pages[req->num_pages - 1]->index + 1 != page->index)) { |
---|
868 | | - int nr_alloc = min_t(unsigned, data->nr_pages, |
---|
869 | | - FUSE_MAX_PAGES_PER_REQ); |
---|
870 | | - fuse_send_readpages(req, data->file); |
---|
871 | | - if (fc->async_read) |
---|
872 | | - req = fuse_get_req_for_background(fc, nr_alloc); |
---|
873 | | - else |
---|
874 | | - req = fuse_get_req(fc, nr_alloc); |
---|
875 | | - |
---|
876 | | - data->req = req; |
---|
877 | | - if (IS_ERR(req)) { |
---|
878 | | - unlock_page(page); |
---|
879 | | - return PTR_ERR(req); |
---|
880 | | - } |
---|
881 | | - } |
---|
882 | | - |
---|
883 | | - if (WARN_ON(req->num_pages >= req->max_pages)) { |
---|
884 | | - unlock_page(page); |
---|
885 | | - fuse_put_request(fc, req); |
---|
886 | | - return -EIO; |
---|
887 | | - } |
---|
888 | | - |
---|
889 | | - get_page(page); |
---|
890 | | - req->pages[req->num_pages] = page; |
---|
891 | | - req->page_descs[req->num_pages].length = PAGE_SIZE; |
---|
892 | | - req->num_pages++; |
---|
893 | | - data->nr_pages--; |
---|
894 | | - return 0; |
---|
895 | | -} |
---|
896 | | - |
---|
897 | | -static int fuse_readpages(struct file *file, struct address_space *mapping, |
---|
898 | | - struct list_head *pages, unsigned nr_pages) |
---|
899 | | -{ |
---|
900 | | - struct inode *inode = mapping->host; |
---|
901 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
902 | | - struct fuse_fill_data data; |
---|
| 929 | + struct fuse_mount *fm = ff->fm; |
---|
| 930 | + struct fuse_args_pages *ap = &ia->ap; |
---|
| 931 | + loff_t pos = page_offset(ap->pages[0]); |
---|
| 932 | + size_t count = ap->num_pages << PAGE_SHIFT; |
---|
| 933 | + ssize_t res; |
---|
903 | 934 | int err; |
---|
904 | | - int nr_alloc = min_t(unsigned, nr_pages, FUSE_MAX_PAGES_PER_REQ); |
---|
905 | 935 | |
---|
906 | | - err = -EIO; |
---|
907 | | - if (fuse_is_bad(inode)) |
---|
908 | | - goto out; |
---|
| 936 | + ap->args.out_pages = true; |
---|
| 937 | + ap->args.page_zeroing = true; |
---|
| 938 | + ap->args.page_replace = true; |
---|
909 | 939 | |
---|
910 | | - data.file = file; |
---|
911 | | - data.inode = inode; |
---|
912 | | - if (fc->async_read) |
---|
913 | | - data.req = fuse_get_req_for_background(fc, nr_alloc); |
---|
914 | | - else |
---|
915 | | - data.req = fuse_get_req(fc, nr_alloc); |
---|
916 | | - data.nr_pages = nr_pages; |
---|
917 | | - err = PTR_ERR(data.req); |
---|
918 | | - if (IS_ERR(data.req)) |
---|
919 | | - goto out; |
---|
920 | | - |
---|
921 | | - err = read_cache_pages(mapping, pages, fuse_readpages_fill, &data); |
---|
922 | | - if (!err) { |
---|
923 | | - if (data.req->num_pages) |
---|
924 | | - fuse_send_readpages(data.req, file); |
---|
925 | | - else |
---|
926 | | - fuse_put_request(fc, data.req); |
---|
| 940 | + /* Don't overflow end offset */ |
---|
| 941 | + if (pos + (count - 1) == LLONG_MAX) { |
---|
| 942 | + count--; |
---|
| 943 | + ap->descs[ap->num_pages - 1].length--; |
---|
927 | 944 | } |
---|
928 | | -out: |
---|
929 | | - return err; |
---|
| 945 | + WARN_ON((loff_t) (pos + count) < 0); |
---|
| 946 | + |
---|
| 947 | + fuse_read_args_fill(ia, file, pos, count, FUSE_READ); |
---|
| 948 | + ia->read.attr_ver = fuse_get_attr_version(fm->fc); |
---|
| 949 | + if (fm->fc->async_read) { |
---|
| 950 | + ia->ff = fuse_file_get(ff); |
---|
| 951 | + ap->args.end = fuse_readpages_end; |
---|
| 952 | + err = fuse_simple_background(fm, &ap->args, GFP_KERNEL); |
---|
| 953 | + if (!err) |
---|
| 954 | + return; |
---|
| 955 | + } else { |
---|
| 956 | + res = fuse_simple_request(fm, &ap->args); |
---|
| 957 | + err = res < 0 ? res : 0; |
---|
| 958 | + } |
---|
| 959 | + fuse_readpages_end(fm, &ap->args, err); |
---|
930 | 960 | } |
---|
931 | 961 | |
---|
932 | | -static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to) |
---|
| 962 | +static void fuse_readahead(struct readahead_control *rac) |
---|
| 963 | +{ |
---|
| 964 | + struct inode *inode = rac->mapping->host; |
---|
| 965 | + struct fuse_conn *fc = get_fuse_conn(inode); |
---|
| 966 | + unsigned int i, max_pages, nr_pages = 0; |
---|
| 967 | + |
---|
| 968 | + if (fuse_is_bad(inode)) |
---|
| 969 | + return; |
---|
| 970 | + |
---|
| 971 | + max_pages = min_t(unsigned int, fc->max_pages, |
---|
| 972 | + fc->max_read / PAGE_SIZE); |
---|
| 973 | + |
---|
| 974 | + for (;;) { |
---|
| 975 | + struct fuse_io_args *ia; |
---|
| 976 | + struct fuse_args_pages *ap; |
---|
| 977 | + |
---|
| 978 | + nr_pages = readahead_count(rac) - nr_pages; |
---|
| 979 | + if (nr_pages > max_pages) |
---|
| 980 | + nr_pages = max_pages; |
---|
| 981 | + if (nr_pages == 0) |
---|
| 982 | + break; |
---|
| 983 | + ia = fuse_io_alloc(NULL, nr_pages); |
---|
| 984 | + if (!ia) |
---|
| 985 | + return; |
---|
| 986 | + ap = &ia->ap; |
---|
| 987 | + nr_pages = __readahead_batch(rac, ap->pages, nr_pages); |
---|
| 988 | + for (i = 0; i < nr_pages; i++) { |
---|
| 989 | + fuse_wait_on_page_writeback(inode, |
---|
| 990 | + readahead_index(rac) + i); |
---|
| 991 | + ap->descs[i].length = PAGE_SIZE; |
---|
| 992 | + } |
---|
| 993 | + ap->num_pages = nr_pages; |
---|
| 994 | + fuse_send_readpages(ia, rac->file); |
---|
| 995 | + } |
---|
| 996 | +} |
---|
| 997 | + |
---|
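fuse_readahead() above is built on the readahead_control API: readahead_count() reports the size of the readahead window, and __readahead_batch() hands out up to max_pages of its pages (already locked and inserted into the page cache) for one FUSE_READ request; completion goes through the same fuse_readpages_end() as before. A stripped-down consumer of that API might look like the sketch below (demo_send_read() is a hypothetical submission helper):

#include <linux/kernel.h>
#include <linux/pagemap.h>

/* Hypothetical: submits one read covering pages[0..n-1]. */
static void demo_send_read(struct file *file, struct page **pages,
			   unsigned int n);

static void demo_readahead(struct readahead_control *rac)
{
	struct page *pages[16];
	unsigned int n;

	/* Pull locked pages out of the readahead window in small batches. */
	while ((n = __readahead_batch(rac, pages, ARRAY_SIZE(pages))) > 0) {
		/*
		 * The completion path is expected to SetPageUptodate() and
		 * unlock_page() each page, as fuse_readpages_end() does.
		 */
		demo_send_read(rac->file, pages, n);
	}
}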
| 998 | +static ssize_t fuse_cache_read_iter(struct kiocb *iocb, struct iov_iter *to) |
---|
933 | 999 | { |
---|
934 | 1000 | struct inode *inode = iocb->ki_filp->f_mapping->host; |
---|
935 | 1001 | struct fuse_conn *fc = get_fuse_conn(inode); |
---|
936 | | - |
---|
937 | | - if (fuse_is_bad(inode)) |
---|
938 | | - return -EIO; |
---|
939 | 1002 | |
---|
940 | 1003 | /* |
---|
941 | 1004 | * In auto invalidate mode, always update attributes on read. |
---|
.. | .. |
---|
953 | 1016 | return generic_file_read_iter(iocb, to); |
---|
954 | 1017 | } |
---|
955 | 1018 | |
---|
956 | | -static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff, |
---|
957 | | - loff_t pos, size_t count) |
---|
| 1019 | +static void fuse_write_args_fill(struct fuse_io_args *ia, struct fuse_file *ff, |
---|
| 1020 | + loff_t pos, size_t count) |
---|
958 | 1021 | { |
---|
959 | | - struct fuse_write_in *inarg = &req->misc.write.in; |
---|
960 | | - struct fuse_write_out *outarg = &req->misc.write.out; |
---|
| 1022 | + struct fuse_args *args = &ia->ap.args; |
---|
961 | 1023 | |
---|
962 | | - inarg->fh = ff->fh; |
---|
963 | | - inarg->offset = pos; |
---|
964 | | - inarg->size = count; |
---|
965 | | - req->in.h.opcode = FUSE_WRITE; |
---|
966 | | - req->in.h.nodeid = ff->nodeid; |
---|
967 | | - req->in.numargs = 2; |
---|
968 | | - if (ff->fc->minor < 9) |
---|
969 | | - req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE; |
---|
| 1024 | + ia->write.in.fh = ff->fh; |
---|
| 1025 | + ia->write.in.offset = pos; |
---|
| 1026 | + ia->write.in.size = count; |
---|
| 1027 | + args->opcode = FUSE_WRITE; |
---|
| 1028 | + args->nodeid = ff->nodeid; |
---|
| 1029 | + args->in_numargs = 2; |
---|
| 1030 | + if (ff->fm->fc->minor < 9) |
---|
| 1031 | + args->in_args[0].size = FUSE_COMPAT_WRITE_IN_SIZE; |
---|
970 | 1032 | else |
---|
971 | | - req->in.args[0].size = sizeof(struct fuse_write_in); |
---|
972 | | - req->in.args[0].value = inarg; |
---|
973 | | - req->in.args[1].size = count; |
---|
974 | | - req->out.numargs = 1; |
---|
975 | | - req->out.args[0].size = sizeof(struct fuse_write_out); |
---|
976 | | - req->out.args[0].value = outarg; |
---|
| 1033 | + args->in_args[0].size = sizeof(ia->write.in); |
---|
| 1034 | + args->in_args[0].value = &ia->write.in; |
---|
| 1035 | + args->in_args[1].size = count; |
---|
| 1036 | + args->out_numargs = 1; |
---|
| 1037 | + args->out_args[0].size = sizeof(ia->write.out); |
---|
| 1038 | + args->out_args[0].value = &ia->write.out; |
---|
977 | 1039 | } |
---|
978 | 1040 | |
---|
979 | | -static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io, |
---|
980 | | - loff_t pos, size_t count, fl_owner_t owner) |
---|
| 1041 | +static unsigned int fuse_write_flags(struct kiocb *iocb) |
---|
981 | 1042 | { |
---|
982 | | - struct kiocb *iocb = io->iocb; |
---|
| 1043 | + unsigned int flags = iocb->ki_filp->f_flags; |
---|
| 1044 | + |
---|
| 1045 | + if (iocb->ki_flags & IOCB_DSYNC) |
---|
| 1046 | + flags |= O_DSYNC; |
---|
| 1047 | + if (iocb->ki_flags & IOCB_SYNC) |
---|
| 1048 | + flags |= O_SYNC; |
---|
| 1049 | + |
---|
| 1050 | + return flags; |
---|
| 1051 | +} |
---|
| 1052 | + |
---|
| 1053 | +static ssize_t fuse_send_write(struct fuse_io_args *ia, loff_t pos, |
---|
| 1054 | + size_t count, fl_owner_t owner) |
---|
| 1055 | +{ |
---|
| 1056 | + struct kiocb *iocb = ia->io->iocb; |
---|
983 | 1057 | struct file *file = iocb->ki_filp; |
---|
984 | 1058 | struct fuse_file *ff = file->private_data; |
---|
985 | | - struct fuse_conn *fc = ff->fc; |
---|
986 | | - struct fuse_write_in *inarg = &req->misc.write.in; |
---|
| 1059 | + struct fuse_mount *fm = ff->fm; |
---|
| 1060 | + struct fuse_write_in *inarg = &ia->write.in; |
---|
| 1061 | + ssize_t err; |
---|
987 | 1062 | |
---|
988 | | - fuse_write_fill(req, ff, pos, count); |
---|
989 | | - inarg->flags = file->f_flags; |
---|
990 | | - if (iocb->ki_flags & IOCB_DSYNC) |
---|
991 | | - inarg->flags |= O_DSYNC; |
---|
992 | | - if (iocb->ki_flags & IOCB_SYNC) |
---|
993 | | - inarg->flags |= O_SYNC; |
---|
| 1063 | + fuse_write_args_fill(ia, ff, pos, count); |
---|
| 1064 | + inarg->flags = fuse_write_flags(iocb); |
---|
994 | 1065 | if (owner != NULL) { |
---|
995 | 1066 | inarg->write_flags |= FUSE_WRITE_LOCKOWNER; |
---|
996 | | - inarg->lock_owner = fuse_lock_owner_id(fc, owner); |
---|
| 1067 | + inarg->lock_owner = fuse_lock_owner_id(fm->fc, owner); |
---|
997 | 1068 | } |
---|
998 | 1069 | |
---|
999 | | - if (io->async) |
---|
1000 | | - return fuse_async_req_send(fc, req, count, io); |
---|
| 1070 | + if (ia->io->async) |
---|
| 1071 | + return fuse_async_req_send(fm, ia, count); |
---|
1001 | 1072 | |
---|
1002 | | - fuse_request_send(fc, req); |
---|
1003 | | - return req->misc.write.out.size; |
---|
| 1073 | + err = fuse_simple_request(fm, &ia->ap.args); |
---|
| 1074 | + if (!err && ia->write.out.size > count) |
---|
| 1075 | + err = -EIO; |
---|
| 1076 | + |
---|
| 1077 | + return err ?: ia->write.out.size; |
---|
1004 | 1078 | } |
---|
1005 | 1079 | |
---|
1006 | 1080 | bool fuse_write_update_size(struct inode *inode, loff_t pos) |
---|
.. | .. |
---|
1009 | 1083 | struct fuse_inode *fi = get_fuse_inode(inode); |
---|
1010 | 1084 | bool ret = false; |
---|
1011 | 1085 | |
---|
1012 | | - spin_lock(&fc->lock); |
---|
1013 | | - fi->attr_version = ++fc->attr_version; |
---|
| 1086 | + spin_lock(&fi->lock); |
---|
| 1087 | + fi->attr_version = atomic64_inc_return(&fc->attr_version); |
---|
1014 | 1088 | if (pos > inode->i_size) { |
---|
1015 | 1089 | i_size_write(inode, pos); |
---|
1016 | 1090 | ret = true; |
---|
1017 | 1091 | } |
---|
1018 | | - spin_unlock(&fc->lock); |
---|
| 1092 | + spin_unlock(&fi->lock); |
---|
1019 | 1093 | |
---|
1020 | 1094 | return ret; |
---|
1021 | 1095 | } |
---|
1022 | 1096 | |
---|
1023 | | -static size_t fuse_send_write_pages(struct fuse_req *req, struct kiocb *iocb, |
---|
1024 | | - struct inode *inode, loff_t pos, |
---|
1025 | | - size_t count) |
---|
| 1097 | +static ssize_t fuse_send_write_pages(struct fuse_io_args *ia, |
---|
| 1098 | + struct kiocb *iocb, struct inode *inode, |
---|
| 1099 | + loff_t pos, size_t count) |
---|
1026 | 1100 | { |
---|
1027 | | - size_t res; |
---|
1028 | | - unsigned offset; |
---|
1029 | | - unsigned i; |
---|
1030 | | - struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); |
---|
| 1101 | + struct fuse_args_pages *ap = &ia->ap; |
---|
| 1102 | + struct file *file = iocb->ki_filp; |
---|
| 1103 | + struct fuse_file *ff = file->private_data; |
---|
| 1104 | + struct fuse_mount *fm = ff->fm; |
---|
| 1105 | + unsigned int offset, i; |
---|
| 1106 | + bool short_write; |
---|
| 1107 | + int err; |
---|
1031 | 1108 | |
---|
1032 | | - for (i = 0; i < req->num_pages; i++) |
---|
1033 | | - fuse_wait_on_page_writeback(inode, req->pages[i]->index); |
---|
| 1109 | + for (i = 0; i < ap->num_pages; i++) |
---|
| 1110 | + fuse_wait_on_page_writeback(inode, ap->pages[i]->index); |
---|
1034 | 1111 | |
---|
1035 | | - res = fuse_send_write(req, &io, pos, count, NULL); |
---|
| 1112 | + fuse_write_args_fill(ia, ff, pos, count); |
---|
| 1113 | + ia->write.in.flags = fuse_write_flags(iocb); |
---|
1036 | 1114 | |
---|
1037 | | - offset = req->page_descs[0].offset; |
---|
1038 | | - count = res; |
---|
1039 | | - for (i = 0; i < req->num_pages; i++) { |
---|
1040 | | - struct page *page = req->pages[i]; |
---|
| 1115 | + err = fuse_simple_request(fm, &ap->args); |
---|
| 1116 | + if (!err && ia->write.out.size > count) |
---|
| 1117 | + err = -EIO; |
---|
1041 | 1118 | |
---|
1042 | | - if (!req->out.h.error && !offset && count >= PAGE_SIZE) |
---|
1043 | | - SetPageUptodate(page); |
---|
| 1119 | + short_write = ia->write.out.size < count; |
---|
| 1120 | + offset = ap->descs[0].offset; |
---|
| 1121 | + count = ia->write.out.size; |
---|
| 1122 | + for (i = 0; i < ap->num_pages; i++) { |
---|
| 1123 | + struct page *page = ap->pages[i]; |
---|
1044 | 1124 | |
---|
1045 | | - if (count > PAGE_SIZE - offset) |
---|
1046 | | - count -= PAGE_SIZE - offset; |
---|
1047 | | - else |
---|
1048 | | - count = 0; |
---|
1049 | | - offset = 0; |
---|
1050 | | - |
---|
1051 | | - unlock_page(page); |
---|
| 1125 | + if (err) { |
---|
| 1126 | + ClearPageUptodate(page); |
---|
| 1127 | + } else { |
---|
| 1128 | + if (count >= PAGE_SIZE - offset) |
---|
| 1129 | + count -= PAGE_SIZE - offset; |
---|
| 1130 | + else { |
---|
| 1131 | + if (short_write) |
---|
| 1132 | + ClearPageUptodate(page); |
---|
| 1133 | + count = 0; |
---|
| 1134 | + } |
---|
| 1135 | + offset = 0; |
---|
| 1136 | + } |
---|
| 1137 | + if (ia->write.page_locked && (i == ap->num_pages - 1)) |
---|
| 1138 | + unlock_page(page); |
---|
1052 | 1139 | put_page(page); |
---|
1053 | 1140 | } |
---|
1054 | 1141 | |
---|
1055 | | - return res; |
---|
| 1142 | + return err; |
---|
1056 | 1143 | } |
---|
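
Editorial note, not part of the patch: the rewritten fuse_send_write_pages() above walks the page list after the reply and decides, page by page, which pages a possibly short write fully covered (the rest lose their uptodate state). A rough standalone model of that bookkeeping, assuming 4 KiB pages and a hypothetical account_short_write() helper; it illustrates the arithmetic only, not the kernel code:

```c
/*
 * Standalone model of the short-write accounting: given the offset of the
 * request's first page and the number of bytes the server reports written,
 * decide which pages were fully covered.  4 KiB pages assumed.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

static void account_short_write(unsigned int first_offset,
				unsigned int num_pages, unsigned int written)
{
	unsigned int offset = first_offset;
	unsigned int i;

	for (i = 0; i < num_pages; i++) {
		/* how many bytes of this page were part of the request */
		unsigned int span = PAGE_SIZE - offset;
		int full = written >= span;

		printf("page %u: %s\n", i,
		       full ? "fully written" : "partial or untouched");
		written = full ? written - span : 0;
		offset = 0;
	}
}

int main(void)
{
	/* 3-page request starting 512 bytes into page 0; server wrote 6000 bytes */
	account_short_write(512, 3, 6000);
	return 0;
}
```
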
1057 | 1144 | |
---|
1058 | | -static ssize_t fuse_fill_write_pages(struct fuse_req *req, |
---|
1059 | | - struct address_space *mapping, |
---|
1060 | | - struct iov_iter *ii, loff_t pos) |
---|
| 1145 | +static ssize_t fuse_fill_write_pages(struct fuse_io_args *ia, |
---|
| 1146 | + struct address_space *mapping, |
---|
| 1147 | + struct iov_iter *ii, loff_t pos, |
---|
| 1148 | + unsigned int max_pages) |
---|
1061 | 1149 | { |
---|
| 1150 | + struct fuse_args_pages *ap = &ia->ap; |
---|
1062 | 1151 | struct fuse_conn *fc = get_fuse_conn(mapping->host); |
---|
1063 | 1152 | unsigned offset = pos & (PAGE_SIZE - 1); |
---|
1064 | 1153 | size_t count = 0; |
---|
1065 | 1154 | int err; |
---|
1066 | 1155 | |
---|
1067 | | - req->in.argpages = 1; |
---|
1068 | | - req->page_descs[0].offset = offset; |
---|
| 1156 | + ap->args.in_pages = true; |
---|
| 1157 | + ap->descs[0].offset = offset; |
---|
1069 | 1158 | |
---|
1070 | 1159 | do { |
---|
1071 | 1160 | size_t tmp; |
---|
.. | .. |
---|
1101 | 1190 | } |
---|
1102 | 1191 | |
---|
1103 | 1192 | err = 0; |
---|
1104 | | - req->pages[req->num_pages] = page; |
---|
1105 | | - req->page_descs[req->num_pages].length = tmp; |
---|
1106 | | - req->num_pages++; |
---|
| 1193 | + ap->pages[ap->num_pages] = page; |
---|
| 1194 | + ap->descs[ap->num_pages].length = tmp; |
---|
| 1195 | + ap->num_pages++; |
---|
1107 | 1196 | |
---|
1108 | 1197 | count += tmp; |
---|
1109 | 1198 | pos += tmp; |
---|
.. | .. |
---|
1111 | 1200 | if (offset == PAGE_SIZE) |
---|
1112 | 1201 | offset = 0; |
---|
1113 | 1202 | |
---|
| 1203 | + /* If we copied full page, mark it uptodate */ |
---|
| 1204 | + if (tmp == PAGE_SIZE) |
---|
| 1205 | + SetPageUptodate(page); |
---|
| 1206 | + |
---|
| 1207 | + if (PageUptodate(page)) { |
---|
| 1208 | + unlock_page(page); |
---|
| 1209 | + } else { |
---|
| 1210 | + ia->write.page_locked = true; |
---|
| 1211 | + break; |
---|
| 1212 | + } |
---|
1114 | 1213 | if (!fc->big_writes) |
---|
1115 | 1214 | break; |
---|
1116 | 1215 | } while (iov_iter_count(ii) && count < fc->max_write && |
---|
1117 | | - req->num_pages < req->max_pages && offset == 0); |
---|
| 1216 | + ap->num_pages < max_pages && offset == 0); |
---|
1118 | 1217 | |
---|
1119 | 1218 | return count > 0 ? count : err; |
---|
1120 | 1219 | } |
---|
1121 | 1220 | |
---|
1122 | | -static inline unsigned fuse_wr_pages(loff_t pos, size_t len) |
---|
| 1221 | +static inline unsigned int fuse_wr_pages(loff_t pos, size_t len, |
---|
| 1222 | + unsigned int max_pages) |
---|
1123 | 1223 | { |
---|
1124 | | - return min_t(unsigned, |
---|
| 1224 | + return min_t(unsigned int, |
---|
1125 | 1225 | ((pos + len - 1) >> PAGE_SHIFT) - |
---|
1126 | 1226 | (pos >> PAGE_SHIFT) + 1, |
---|
1127 | | - FUSE_MAX_PAGES_PER_REQ); |
---|
| 1227 | + max_pages); |
---|
1128 | 1228 | } |
---|
1129 | 1229 | |
---|
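
Editorial note, not part of the patch: fuse_wr_pages() above computes how many pages a write of len bytes starting at pos can touch, now capped by the per-connection max_pages limit rather than the old FUSE_MAX_PAGES_PER_REQ constant. A small userspace check of that formula (PAGE_SHIFT of 12, i.e. 4 KiB pages, assumed for illustration):

```c
/*
 * Minimal userspace check of the fuse_wr_pages() formula: how many pages does
 * a write of "len" bytes starting at offset "pos" touch, capped at max_pages?
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

static unsigned int wr_pages(unsigned long long pos, unsigned long long len,
			     unsigned int max_pages)
{
	unsigned long long first = pos >> PAGE_SHIFT;
	unsigned long long last = (pos + len - 1) >> PAGE_SHIFT;
	unsigned long long n = last - first + 1;

	return n < max_pages ? (unsigned int)n : max_pages;
}

int main(void)
{
	/*
	 * A 5000-byte write starting 100 bytes before a page boundary covers
	 * the tail of one page, one full page and the head of a third.
	 */
	printf("%u\n", wr_pages(PAGE_SIZE - 100, 5000, 256));	/* prints 3 */
	return 0;
}
```
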
1130 | 1230 | static ssize_t fuse_perform_write(struct kiocb *iocb, |
---|
.. | .. |
---|
1137 | 1237 | int err = 0; |
---|
1138 | 1238 | ssize_t res = 0; |
---|
1139 | 1239 | |
---|
1140 | | - if (fuse_is_bad(inode)) |
---|
1141 | | - return -EIO; |
---|
1142 | | - |
---|
1143 | 1240 | if (inode->i_size < pos + iov_iter_count(ii)) |
---|
1144 | 1241 | set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); |
---|
1145 | 1242 | |
---|
1146 | 1243 | do { |
---|
1147 | | - struct fuse_req *req; |
---|
1148 | 1244 | ssize_t count; |
---|
1149 | | - unsigned nr_pages = fuse_wr_pages(pos, iov_iter_count(ii)); |
---|
| 1245 | + struct fuse_io_args ia = {}; |
---|
| 1246 | + struct fuse_args_pages *ap = &ia.ap; |
---|
| 1247 | + unsigned int nr_pages = fuse_wr_pages(pos, iov_iter_count(ii), |
---|
| 1248 | + fc->max_pages); |
---|
1150 | 1249 | |
---|
1151 | | - req = fuse_get_req(fc, nr_pages); |
---|
1152 | | - if (IS_ERR(req)) { |
---|
1153 | | - err = PTR_ERR(req); |
---|
| 1250 | + ap->pages = fuse_pages_alloc(nr_pages, GFP_KERNEL, &ap->descs); |
---|
| 1251 | + if (!ap->pages) { |
---|
| 1252 | + err = -ENOMEM; |
---|
1154 | 1253 | break; |
---|
1155 | 1254 | } |
---|
1156 | 1255 | |
---|
1157 | | - count = fuse_fill_write_pages(req, mapping, ii, pos); |
---|
| 1256 | + count = fuse_fill_write_pages(&ia, mapping, ii, pos, nr_pages); |
---|
1158 | 1257 | if (count <= 0) { |
---|
1159 | 1258 | err = count; |
---|
1160 | 1259 | } else { |
---|
1161 | | - size_t num_written; |
---|
1162 | | - |
---|
1163 | | - num_written = fuse_send_write_pages(req, iocb, inode, |
---|
1164 | | - pos, count); |
---|
1165 | | - err = req->out.h.error; |
---|
| 1260 | + err = fuse_send_write_pages(&ia, iocb, inode, |
---|
| 1261 | + pos, count); |
---|
1166 | 1262 | if (!err) { |
---|
| 1263 | + size_t num_written = ia.write.out.size; |
---|
| 1264 | + |
---|
1167 | 1265 | res += num_written; |
---|
1168 | 1266 | pos += num_written; |
---|
1169 | 1267 | |
---|
.. | .. |
---|
1172 | 1270 | err = -EIO; |
---|
1173 | 1271 | } |
---|
1174 | 1272 | } |
---|
1175 | | - fuse_put_request(fc, req); |
---|
| 1273 | + kfree(ap->pages); |
---|
1176 | 1274 | } while (!err && iov_iter_count(ii)); |
---|
1177 | 1275 | |
---|
1178 | 1276 | if (res > 0) |
---|
.. | .. |
---|
1184 | 1282 | return res > 0 ? res : err; |
---|
1185 | 1283 | } |
---|
1186 | 1284 | |
---|
1187 | | -static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) |
---|
| 1285 | +static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) |
---|
1188 | 1286 | { |
---|
1189 | 1287 | struct file *file = iocb->ki_filp; |
---|
1190 | 1288 | struct address_space *mapping = file->f_mapping; |
---|
.. | .. |
---|
1193 | 1291 | struct inode *inode = mapping->host; |
---|
1194 | 1292 | ssize_t err; |
---|
1195 | 1293 | loff_t endbyte = 0; |
---|
1196 | | - |
---|
1197 | | - if (fuse_is_bad(inode)) |
---|
1198 | | - return -EIO; |
---|
1199 | 1294 | |
---|
1200 | 1295 | if (get_fuse_conn(inode)->writeback_cache) { |
---|
1201 | 1296 | /* Update size (EOF optimization) and mode (SUID clearing) */ |
---|
.. | .. |
---|
1263 | 1358 | return written ? written : err; |
---|
1264 | 1359 | } |
---|
1265 | 1360 | |
---|
1266 | | -static inline void fuse_page_descs_length_init(struct fuse_req *req, |
---|
1267 | | - unsigned index, unsigned nr_pages) |
---|
| 1361 | +static inline void fuse_page_descs_length_init(struct fuse_page_desc *descs, |
---|
| 1362 | + unsigned int index, |
---|
| 1363 | + unsigned int nr_pages) |
---|
1268 | 1364 | { |
---|
1269 | 1365 | int i; |
---|
1270 | 1366 | |
---|
1271 | 1367 | for (i = index; i < index + nr_pages; i++) |
---|
1272 | | - req->page_descs[i].length = PAGE_SIZE - |
---|
1273 | | - req->page_descs[i].offset; |
---|
| 1368 | + descs[i].length = PAGE_SIZE - descs[i].offset; |
---|
1274 | 1369 | } |
---|
1275 | 1370 | |
---|
1276 | 1371 | static inline unsigned long fuse_get_user_addr(const struct iov_iter *ii) |
---|
.. | .. |
---|
1284 | 1379 | return min(iov_iter_single_seg_count(ii), max_size); |
---|
1285 | 1380 | } |
---|
1286 | 1381 | |
---|
1287 | | -static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii, |
---|
1288 | | - size_t *nbytesp, int write) |
---|
| 1382 | +static int fuse_get_user_pages(struct fuse_args_pages *ap, struct iov_iter *ii, |
---|
| 1383 | + size_t *nbytesp, int write, |
---|
| 1384 | + unsigned int max_pages) |
---|
1289 | 1385 | { |
---|
1290 | 1386 | size_t nbytes = 0; /* # bytes already packed in req */ |
---|
1291 | 1387 | ssize_t ret = 0; |
---|
1292 | 1388 | |
---|
1293 | 1389 | /* Special case for kernel I/O: can copy directly into the buffer */ |
---|
1294 | | - if (ii->type & ITER_KVEC) { |
---|
| 1390 | + if (iov_iter_is_kvec(ii)) { |
---|
1295 | 1391 | unsigned long user_addr = fuse_get_user_addr(ii); |
---|
1296 | 1392 | size_t frag_size = fuse_get_frag_size(ii, *nbytesp); |
---|
1297 | 1393 | |
---|
1298 | 1394 | if (write) |
---|
1299 | | - req->in.args[1].value = (void *) user_addr; |
---|
| 1395 | + ap->args.in_args[1].value = (void *) user_addr; |
---|
1300 | 1396 | else |
---|
1301 | | - req->out.args[0].value = (void *) user_addr; |
---|
| 1397 | + ap->args.out_args[0].value = (void *) user_addr; |
---|
1302 | 1398 | |
---|
1303 | 1399 | iov_iter_advance(ii, frag_size); |
---|
1304 | 1400 | *nbytesp = frag_size; |
---|
1305 | 1401 | return 0; |
---|
1306 | 1402 | } |
---|
1307 | 1403 | |
---|
1308 | | - while (nbytes < *nbytesp && req->num_pages < req->max_pages) { |
---|
| 1404 | + while (nbytes < *nbytesp && ap->num_pages < max_pages) { |
---|
1309 | 1405 | unsigned npages; |
---|
1310 | 1406 | size_t start; |
---|
1311 | | - ret = iov_iter_get_pages(ii, &req->pages[req->num_pages], |
---|
| 1407 | + ret = iov_iter_get_pages(ii, &ap->pages[ap->num_pages], |
---|
1312 | 1408 | *nbytesp - nbytes, |
---|
1313 | | - req->max_pages - req->num_pages, |
---|
| 1409 | + max_pages - ap->num_pages, |
---|
1314 | 1410 | &start); |
---|
1315 | 1411 | if (ret < 0) |
---|
1316 | 1412 | break; |
---|
.. | .. |
---|
1321 | 1417 | ret += start; |
---|
1322 | 1418 | npages = (ret + PAGE_SIZE - 1) / PAGE_SIZE; |
---|
1323 | 1419 | |
---|
1324 | | - req->page_descs[req->num_pages].offset = start; |
---|
1325 | | - fuse_page_descs_length_init(req, req->num_pages, npages); |
---|
| 1420 | + ap->descs[ap->num_pages].offset = start; |
---|
| 1421 | + fuse_page_descs_length_init(ap->descs, ap->num_pages, npages); |
---|
1326 | 1422 | |
---|
1327 | | - req->num_pages += npages; |
---|
1328 | | - req->page_descs[req->num_pages - 1].length -= |
---|
| 1423 | + ap->num_pages += npages; |
---|
| 1424 | + ap->descs[ap->num_pages - 1].length -= |
---|
1329 | 1425 | (PAGE_SIZE - ret) & (PAGE_SIZE - 1); |
---|
1330 | 1426 | } |
---|
1331 | 1427 | |
---|
| 1428 | + ap->args.user_pages = true; |
---|
1332 | 1429 | if (write) |
---|
1333 | | - req->in.argpages = 1; |
---|
| 1430 | + ap->args.in_pages = true; |
---|
1334 | 1431 | else |
---|
1335 | | - req->out.argpages = 1; |
---|
| 1432 | + ap->args.out_pages = true; |
---|
1336 | 1433 | |
---|
1337 | 1434 | *nbytesp = nbytes; |
---|
1338 | 1435 | |
---|
1339 | 1436 | return ret < 0 ? ret : 0; |
---|
1340 | | -} |
---|
1341 | | - |
---|
1342 | | -static inline int fuse_iter_npages(const struct iov_iter *ii_p) |
---|
1343 | | -{ |
---|
1344 | | - return iov_iter_npages(ii_p, FUSE_MAX_PAGES_PER_REQ); |
---|
1345 | 1437 | } |
---|
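
Editorial note, not part of the patch: fuse_get_user_pages() now fills ap->descs[] instead of req->page_descs[], but the length arithmetic is the same: each pinned page initially gets PAGE_SIZE minus its offset, and the last descriptor is trimmed by the unused tail. The sketch below reproduces that calculation in plain C for a single pinned segment (an assumption; a real call may pin fewer pages per iteration), with 4 KiB pages:

```c
/*
 * Userspace reproduction of the descriptor length calculation for one pinned
 * segment: the first page starts "start" bytes in, every page initially gets
 * (PAGE_SIZE - offset), and the last page is trimmed by the unused tail.
 */
#include <stdio.h>

#define PAGE_SIZE 4096u

static void fill_desc_lengths(unsigned int start, unsigned int bytes)
{
	unsigned int total = start + bytes;	/* first-byte offset + payload */
	unsigned int npages = (total + PAGE_SIZE - 1) / PAGE_SIZE;
	unsigned int tail = (PAGE_SIZE - total) & (PAGE_SIZE - 1);
	unsigned int i;

	for (i = 0; i < npages; i++) {
		unsigned int off = (i == 0) ? start : 0;
		unsigned int len = PAGE_SIZE - off;

		if (i == npages - 1)
			len -= tail;	/* drop the unused part of the last page */
		printf("page %u: offset %u, length %u\n", i, off, len);
	}
}

int main(void)
{
	/* 5000 payload bytes starting 1000 bytes into the first page: 3096 + 1904 */
	fill_desc_lengths(1000, 5000);
	return 0;
}
```
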
1346 | 1438 | |
---|
1347 | 1439 | ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter, |
---|
.. | .. |
---|
1352 | 1444 | struct file *file = io->iocb->ki_filp; |
---|
1353 | 1445 | struct inode *inode = file->f_mapping->host; |
---|
1354 | 1446 | struct fuse_file *ff = file->private_data; |
---|
1355 | | - struct fuse_conn *fc = ff->fc; |
---|
| 1447 | + struct fuse_conn *fc = ff->fm->fc; |
---|
1356 | 1448 | size_t nmax = write ? fc->max_write : fc->max_read; |
---|
1357 | 1449 | loff_t pos = *ppos; |
---|
1358 | 1450 | size_t count = iov_iter_count(iter); |
---|
1359 | 1451 | pgoff_t idx_from = pos >> PAGE_SHIFT; |
---|
1360 | 1452 | pgoff_t idx_to = (pos + count - 1) >> PAGE_SHIFT; |
---|
1361 | 1453 | ssize_t res = 0; |
---|
1362 | | - struct fuse_req *req; |
---|
1363 | 1454 | int err = 0; |
---|
| 1455 | + struct fuse_io_args *ia; |
---|
| 1456 | + unsigned int max_pages; |
---|
1364 | 1457 | |
---|
1365 | | - if (io->async) |
---|
1366 | | - req = fuse_get_req_for_background(fc, fuse_iter_npages(iter)); |
---|
1367 | | - else |
---|
1368 | | - req = fuse_get_req(fc, fuse_iter_npages(iter)); |
---|
1369 | | - if (IS_ERR(req)) |
---|
1370 | | - return PTR_ERR(req); |
---|
| 1458 | + max_pages = iov_iter_npages(iter, fc->max_pages); |
---|
| 1459 | + ia = fuse_io_alloc(io, max_pages); |
---|
| 1460 | + if (!ia) |
---|
| 1461 | + return -ENOMEM; |
---|
1371 | 1462 | |
---|
| 1463 | + ia->io = io; |
---|
1372 | 1464 | if (!cuse && fuse_range_is_writeback(inode, idx_from, idx_to)) { |
---|
1373 | 1465 | if (!write) |
---|
1374 | 1466 | inode_lock(inode); |
---|
.. | .. |
---|
1379 | 1471 | |
---|
1380 | 1472 | io->should_dirty = !write && iter_is_iovec(iter); |
---|
1381 | 1473 | while (count) { |
---|
1382 | | - size_t nres; |
---|
| 1474 | + ssize_t nres; |
---|
1383 | 1475 | fl_owner_t owner = current->files; |
---|
1384 | 1476 | size_t nbytes = min(count, nmax); |
---|
1385 | | - err = fuse_get_user_pages(req, iter, &nbytes, write); |
---|
| 1477 | + |
---|
| 1478 | + err = fuse_get_user_pages(&ia->ap, iter, &nbytes, write, |
---|
| 1479 | + max_pages); |
---|
1386 | 1480 | if (err && !nbytes) |
---|
1387 | 1481 | break; |
---|
1388 | 1482 | |
---|
1389 | | - if (write) |
---|
1390 | | - nres = fuse_send_write(req, io, pos, nbytes, owner); |
---|
1391 | | - else |
---|
1392 | | - nres = fuse_send_read(req, io, pos, nbytes, owner); |
---|
| 1483 | + if (write) { |
---|
| 1484 | + if (!capable(CAP_FSETID)) |
---|
| 1485 | + ia->write.in.write_flags |= FUSE_WRITE_KILL_PRIV; |
---|
1393 | 1486 | |
---|
1394 | | - if (!io->async) |
---|
1395 | | - fuse_release_user_pages(req, io->should_dirty); |
---|
1396 | | - if (req->out.h.error) { |
---|
1397 | | - err = req->out.h.error; |
---|
1398 | | - break; |
---|
1399 | | - } else if (nres > nbytes) { |
---|
1400 | | - res = 0; |
---|
1401 | | - err = -EIO; |
---|
| 1487 | + nres = fuse_send_write(ia, pos, nbytes, owner); |
---|
| 1488 | + } else { |
---|
| 1489 | + nres = fuse_send_read(ia, pos, nbytes, owner); |
---|
| 1490 | + } |
---|
| 1491 | + |
---|
| 1492 | + if (!io->async || nres < 0) { |
---|
| 1493 | + fuse_release_user_pages(&ia->ap, io->should_dirty); |
---|
| 1494 | + fuse_io_free(ia); |
---|
| 1495 | + } |
---|
| 1496 | + ia = NULL; |
---|
| 1497 | + if (nres < 0) { |
---|
| 1498 | + iov_iter_revert(iter, nbytes); |
---|
| 1499 | + err = nres; |
---|
1402 | 1500 | break; |
---|
1403 | 1501 | } |
---|
| 1502 | + WARN_ON(nres > nbytes); |
---|
| 1503 | + |
---|
1404 | 1504 | count -= nres; |
---|
1405 | 1505 | res += nres; |
---|
1406 | 1506 | pos += nres; |
---|
1407 | | - if (nres != nbytes) |
---|
| 1507 | + if (nres != nbytes) { |
---|
| 1508 | + iov_iter_revert(iter, nbytes - nres); |
---|
1408 | 1509 | break; |
---|
| 1510 | + } |
---|
1409 | 1511 | if (count) { |
---|
1410 | | - fuse_put_request(fc, req); |
---|
1411 | | - if (io->async) |
---|
1412 | | - req = fuse_get_req_for_background(fc, |
---|
1413 | | - fuse_iter_npages(iter)); |
---|
1414 | | - else |
---|
1415 | | - req = fuse_get_req(fc, fuse_iter_npages(iter)); |
---|
1416 | | - if (IS_ERR(req)) |
---|
| 1512 | + max_pages = iov_iter_npages(iter, fc->max_pages); |
---|
| 1513 | + ia = fuse_io_alloc(io, max_pages); |
---|
| 1514 | + if (!ia) |
---|
1417 | 1515 | break; |
---|
1418 | 1516 | } |
---|
1419 | 1517 | } |
---|
1420 | | - if (!IS_ERR(req)) |
---|
1421 | | - fuse_put_request(fc, req); |
---|
| 1518 | + if (ia) |
---|
| 1519 | + fuse_io_free(ia); |
---|
1422 | 1520 | if (res > 0) |
---|
1423 | 1521 | *ppos = pos; |
---|
1424 | 1522 | |
---|
.. | .. |
---|
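
Editorial note, not part of the patch: one behavioural change in the fuse_direct_io() loop above is the explicit iov_iter_revert() after a failed or short transfer; the iterator was already advanced when the pages were pinned, so the unconsumed part has to be walked back before returning. A toy cursor model of that advance-then-revert pattern (the send_chunk() "server" is invented for the example):

```c
/*
 * Toy cursor model: the position moves forward when a chunk is prepared,
 * then is walked back by whatever was not actually transferred, similar to
 * the role iov_iter_revert() plays in the loop above.
 */
#include <stdio.h>

struct cursor { size_t pos, len; };

static size_t send_chunk(size_t nbytes)
{
	static size_t budget = 6000;	/* pretend the server accepts 6000 bytes total */
	size_t n = nbytes < budget ? nbytes : budget;

	budget -= n;
	return n;
}

int main(void)
{
	struct cursor it = { .pos = 0, .len = 10000 };
	const size_t chunk = 4096;

	while (it.pos < it.len) {
		size_t rest = it.len - it.pos;
		size_t nbytes = rest < chunk ? rest : chunk;
		size_t nres;

		it.pos += nbytes;			/* advance optimistically */
		nres = send_chunk(nbytes);
		if (nres != nbytes) {
			it.pos -= nbytes - nres;	/* revert the unconsumed part */
			break;
		}
	}
	printf("transferred %zu of %zu bytes\n", it.pos, it.len);  /* 6000 of 10000 */
	return 0;
}
```
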
1433 | 1531 | ssize_t res; |
---|
1434 | 1532 | struct inode *inode = file_inode(io->iocb->ki_filp); |
---|
1435 | 1533 | |
---|
1436 | | - if (fuse_is_bad(inode)) |
---|
1437 | | - return -EIO; |
---|
1438 | | - |
---|
1439 | 1534 | res = fuse_direct_io(io, iter, ppos, 0); |
---|
1440 | 1535 | |
---|
1441 | | - fuse_invalidate_attr(inode); |
---|
| 1536 | + fuse_invalidate_atime(inode); |
---|
1442 | 1537 | |
---|
1443 | 1538 | return res; |
---|
1444 | 1539 | } |
---|
1445 | 1540 | |
---|
| 1541 | +static ssize_t fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter); |
---|
| 1542 | + |
---|
1446 | 1543 | static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to) |
---|
1447 | 1544 | { |
---|
1448 | | - struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); |
---|
1449 | | - return __fuse_direct_read(&io, to, &iocb->ki_pos); |
---|
| 1545 | + ssize_t res; |
---|
| 1546 | + |
---|
| 1547 | + if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) { |
---|
| 1548 | + res = fuse_direct_IO(iocb, to); |
---|
| 1549 | + } else { |
---|
| 1550 | + struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); |
---|
| 1551 | + |
---|
| 1552 | + res = __fuse_direct_read(&io, to, &iocb->ki_pos); |
---|
| 1553 | + } |
---|
| 1554 | + |
---|
| 1555 | + return res; |
---|
1450 | 1556 | } |
---|
1451 | 1557 | |
---|
1452 | 1558 | static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from) |
---|
.. | .. |
---|
1455 | 1561 | struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb); |
---|
1456 | 1562 | ssize_t res; |
---|
1457 | 1563 | |
---|
1458 | | - if (fuse_is_bad(inode)) |
---|
1459 | | - return -EIO; |
---|
1460 | | - |
---|
1461 | 1564 | /* Don't allow parallel writes to the same file */ |
---|
1462 | 1565 | inode_lock(inode); |
---|
1463 | 1566 | res = generic_write_checks(iocb, from); |
---|
1464 | | - if (res > 0) |
---|
1465 | | - res = fuse_direct_io(&io, from, &iocb->ki_pos, FUSE_DIO_WRITE); |
---|
| 1567 | + if (res > 0) { |
---|
| 1568 | + if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) { |
---|
| 1569 | + res = fuse_direct_IO(iocb, from); |
---|
| 1570 | + } else { |
---|
| 1571 | + res = fuse_direct_io(&io, from, &iocb->ki_pos, |
---|
| 1572 | + FUSE_DIO_WRITE); |
---|
| 1573 | + } |
---|
| 1574 | + } |
---|
1466 | 1575 | fuse_invalidate_attr(inode); |
---|
1467 | 1576 | if (res > 0) |
---|
1468 | 1577 | fuse_write_update_size(inode, iocb->ki_pos); |
---|
.. | .. |
---|
1471 | 1580 | return res; |
---|
1472 | 1581 | } |
---|
1473 | 1582 | |
---|
1474 | | -static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) |
---|
| 1583 | +static ssize_t fuse_file_read_iter(struct kiocb *iocb, struct iov_iter *to) |
---|
1475 | 1584 | { |
---|
1476 | | - int i; |
---|
| 1585 | + struct file *file = iocb->ki_filp; |
---|
| 1586 | + struct fuse_file *ff = file->private_data; |
---|
| 1587 | + struct inode *inode = file_inode(file); |
---|
1477 | 1588 | |
---|
1478 | | - for (i = 0; i < req->num_pages; i++) |
---|
1479 | | - __free_page(req->pages[i]); |
---|
| 1589 | + if (fuse_is_bad(inode)) |
---|
| 1590 | + return -EIO; |
---|
1480 | 1591 | |
---|
1481 | | - if (req->ff) |
---|
1482 | | - fuse_file_put(req->ff, false, false); |
---|
| 1592 | + if (FUSE_IS_DAX(inode)) |
---|
| 1593 | + return fuse_dax_read_iter(iocb, to); |
---|
| 1594 | + |
---|
| 1595 | + if (ff->passthrough.filp) |
---|
| 1596 | + return fuse_passthrough_read_iter(iocb, to); |
---|
| 1597 | + else if (!(ff->open_flags & FOPEN_DIRECT_IO)) |
---|
| 1598 | + return fuse_cache_read_iter(iocb, to); |
---|
| 1599 | + else |
---|
| 1600 | + return fuse_direct_read_iter(iocb, to); |
---|
1483 | 1601 | } |
---|
1484 | 1602 | |
---|
1485 | | -static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) |
---|
| 1603 | +static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) |
---|
1486 | 1604 | { |
---|
1487 | | - struct inode *inode = req->inode; |
---|
| 1605 | + struct file *file = iocb->ki_filp; |
---|
| 1606 | + struct fuse_file *ff = file->private_data; |
---|
| 1607 | + struct inode *inode = file_inode(file); |
---|
| 1608 | + |
---|
| 1609 | + if (fuse_is_bad(inode)) |
---|
| 1610 | + return -EIO; |
---|
| 1611 | + |
---|
| 1612 | + if (FUSE_IS_DAX(inode)) |
---|
| 1613 | + return fuse_dax_write_iter(iocb, from); |
---|
| 1614 | + |
---|
| 1615 | + if (ff->passthrough.filp) |
---|
| 1616 | + return fuse_passthrough_write_iter(iocb, from); |
---|
| 1617 | + else if (!(ff->open_flags & FOPEN_DIRECT_IO)) |
---|
| 1618 | + return fuse_cache_write_iter(iocb, from); |
---|
| 1619 | + else |
---|
| 1620 | + return fuse_direct_write_iter(iocb, from); |
---|
| 1621 | +} |
---|
| 1622 | + |
---|
| 1623 | +static void fuse_writepage_free(struct fuse_writepage_args *wpa) |
---|
| 1624 | +{ |
---|
| 1625 | + struct fuse_args_pages *ap = &wpa->ia.ap; |
---|
| 1626 | + int i; |
---|
| 1627 | + |
---|
| 1628 | + for (i = 0; i < ap->num_pages; i++) |
---|
| 1629 | + __free_page(ap->pages[i]); |
---|
| 1630 | + |
---|
| 1631 | + if (wpa->ia.ff) |
---|
| 1632 | + fuse_file_put(wpa->ia.ff, false, false); |
---|
| 1633 | + |
---|
| 1634 | + kfree(ap->pages); |
---|
| 1635 | + kfree(wpa); |
---|
| 1636 | +} |
---|
| 1637 | + |
---|
| 1638 | +static void fuse_writepage_finish(struct fuse_mount *fm, |
---|
| 1639 | + struct fuse_writepage_args *wpa) |
---|
| 1640 | +{ |
---|
| 1641 | + struct fuse_args_pages *ap = &wpa->ia.ap; |
---|
| 1642 | + struct inode *inode = wpa->inode; |
---|
1488 | 1643 | struct fuse_inode *fi = get_fuse_inode(inode); |
---|
1489 | 1644 | struct backing_dev_info *bdi = inode_to_bdi(inode); |
---|
1490 | 1645 | int i; |
---|
1491 | 1646 | |
---|
1492 | | - list_del(&req->writepages_entry); |
---|
1493 | | - for (i = 0; i < req->num_pages; i++) { |
---|
| 1647 | + for (i = 0; i < ap->num_pages; i++) { |
---|
1494 | 1648 | dec_wb_stat(&bdi->wb, WB_WRITEBACK); |
---|
1495 | | - dec_node_page_state(req->pages[i], NR_WRITEBACK_TEMP); |
---|
| 1649 | + dec_node_page_state(ap->pages[i], NR_WRITEBACK_TEMP); |
---|
1496 | 1650 | wb_writeout_inc(&bdi->wb); |
---|
1497 | 1651 | } |
---|
1498 | 1652 | wake_up(&fi->page_waitq); |
---|
1499 | 1653 | } |
---|
1500 | 1654 | |
---|
1501 | | -/* Called under fc->lock, may release and reacquire it */ |
---|
1502 | | -static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req, |
---|
1503 | | - loff_t size) |
---|
1504 | | -__releases(fc->lock) |
---|
1505 | | -__acquires(fc->lock) |
---|
| 1655 | +/* Called under fi->lock, may release and reacquire it */ |
---|
| 1656 | +static void fuse_send_writepage(struct fuse_mount *fm, |
---|
| 1657 | + struct fuse_writepage_args *wpa, loff_t size) |
---|
| 1658 | +__releases(fi->lock) |
---|
| 1659 | +__acquires(fi->lock) |
---|
1506 | 1660 | { |
---|
1507 | | - struct fuse_inode *fi = get_fuse_inode(req->inode); |
---|
1508 | | - struct fuse_write_in *inarg = &req->misc.write.in; |
---|
1509 | | - __u64 data_size = req->num_pages * PAGE_SIZE; |
---|
| 1661 | + struct fuse_writepage_args *aux, *next; |
---|
| 1662 | + struct fuse_inode *fi = get_fuse_inode(wpa->inode); |
---|
| 1663 | + struct fuse_write_in *inarg = &wpa->ia.write.in; |
---|
| 1664 | + struct fuse_args *args = &wpa->ia.ap.args; |
---|
| 1665 | + __u64 data_size = wpa->ia.ap.num_pages * PAGE_SIZE; |
---|
| 1666 | + int err; |
---|
1510 | 1667 | |
---|
1511 | | - if (!fc->connected) |
---|
1512 | | - goto out_free; |
---|
1513 | | - |
---|
| 1668 | + fi->writectr++; |
---|
1514 | 1669 | if (inarg->offset + data_size <= size) { |
---|
1515 | 1670 | inarg->size = data_size; |
---|
1516 | 1671 | } else if (inarg->offset < size) { |
---|
.. | .. |
---|
1520 | 1675 | goto out_free; |
---|
1521 | 1676 | } |
---|
1522 | 1677 | |
---|
1523 | | - req->in.args[1].size = inarg->size; |
---|
1524 | | - fi->writectr++; |
---|
1525 | | - fuse_request_send_background_locked(fc, req); |
---|
| 1678 | + args->in_args[1].size = inarg->size; |
---|
| 1679 | + args->force = true; |
---|
| 1680 | + args->nocreds = true; |
---|
| 1681 | + |
---|
| 1682 | + err = fuse_simple_background(fm, args, GFP_ATOMIC); |
---|
| 1683 | + if (err == -ENOMEM) { |
---|
| 1684 | + spin_unlock(&fi->lock); |
---|
| 1685 | + err = fuse_simple_background(fm, args, GFP_NOFS | __GFP_NOFAIL); |
---|
| 1686 | + spin_lock(&fi->lock); |
---|
| 1687 | + } |
---|
| 1688 | + |
---|
| 1689 | + /* Fails on broken connection only */ |
---|
| 1690 | + if (unlikely(err)) |
---|
| 1691 | + goto out_free; |
---|
| 1692 | + |
---|
1526 | 1693 | return; |
---|
1527 | 1694 | |
---|
1528 | 1695 | out_free: |
---|
1529 | | - fuse_writepage_finish(fc, req); |
---|
1530 | | - spin_unlock(&fc->lock); |
---|
1531 | | - fuse_writepage_free(fc, req); |
---|
1532 | | - fuse_put_request(fc, req); |
---|
1533 | | - spin_lock(&fc->lock); |
---|
| 1696 | + fi->writectr--; |
---|
| 1697 | + rb_erase(&wpa->writepages_entry, &fi->writepages); |
---|
| 1698 | + fuse_writepage_finish(fm, wpa); |
---|
| 1699 | + spin_unlock(&fi->lock); |
---|
| 1700 | + |
---|
| 1701 | + /* After fuse_writepage_finish() aux request list is private */ |
---|
| 1702 | + for (aux = wpa->next; aux; aux = next) { |
---|
| 1703 | + next = aux->next; |
---|
| 1704 | + aux->next = NULL; |
---|
| 1705 | + fuse_writepage_free(aux); |
---|
| 1706 | + } |
---|
| 1707 | + |
---|
| 1708 | + fuse_writepage_free(wpa); |
---|
| 1709 | + spin_lock(&fi->lock); |
---|
1534 | 1710 | } |
---|
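
Editorial note, not part of the patch: fuse_send_writepage() still crops each queued writeback request against the current file size before sending it, now under fi->lock and via fuse_simple_background(). The three-way decision (send in full, crop at EOF, or drop entirely) is easy to check with a few numbers; crop_write() below is a stand-in for the size check only, not the kernel function:

```c
/* The three-way crop decision in isolation: full, cropped at EOF, or dropped. */
#include <stdio.h>

static long long crop_write(long long offset, long long data_size, long long i_size)
{
	if (offset + data_size <= i_size)
		return data_size;		/* request lies fully inside the file */
	if (offset < i_size)
		return i_size - offset;		/* straddles EOF: crop the size */
	return -1;				/* wholly beyond EOF: nothing to send */
}

int main(void)
{
	printf("%lld\n", crop_write(0, 8192, 10000));		/* 8192 */
	printf("%lld\n", crop_write(8192, 8192, 10000));	/* 1808 */
	printf("%lld\n", crop_write(16384, 4096, 10000));	/* -1   */
	return 0;
}
```
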
1535 | 1711 | |
---|
1536 | 1712 | /* |
---|
1537 | 1713 | * If fi->writectr is positive (no truncate or fsync going on) send |
---|
1538 | 1714 | * all queued writepage requests. |
---|
1539 | 1715 | * |
---|
1540 | | - * Called with fc->lock |
---|
| 1716 | + * Called with fi->lock |
---|
1541 | 1717 | */ |
---|
1542 | 1718 | void fuse_flush_writepages(struct inode *inode) |
---|
1543 | | -__releases(fc->lock) |
---|
1544 | | -__acquires(fc->lock) |
---|
| 1719 | +__releases(fi->lock) |
---|
| 1720 | +__acquires(fi->lock) |
---|
1545 | 1721 | { |
---|
1546 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
| 1722 | + struct fuse_mount *fm = get_fuse_mount(inode); |
---|
1547 | 1723 | struct fuse_inode *fi = get_fuse_inode(inode); |
---|
1548 | 1724 | loff_t crop = i_size_read(inode); |
---|
1549 | | - struct fuse_req *req; |
---|
| 1725 | + struct fuse_writepage_args *wpa; |
---|
1550 | 1726 | |
---|
1551 | 1727 | while (fi->writectr >= 0 && !list_empty(&fi->queued_writes)) { |
---|
1552 | | - req = list_entry(fi->queued_writes.next, struct fuse_req, list); |
---|
1553 | | - list_del_init(&req->list); |
---|
1554 | | - fuse_send_writepage(fc, req, crop); |
---|
| 1728 | + wpa = list_entry(fi->queued_writes.next, |
---|
| 1729 | + struct fuse_writepage_args, queue_entry); |
---|
| 1730 | + list_del_init(&wpa->queue_entry); |
---|
| 1731 | + fuse_send_writepage(fm, wpa, crop); |
---|
1555 | 1732 | } |
---|
1556 | 1733 | } |
---|
1557 | 1734 | |
---|
1558 | | -static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req) |
---|
| 1735 | +static struct fuse_writepage_args *fuse_insert_writeback(struct rb_root *root, |
---|
| 1736 | + struct fuse_writepage_args *wpa) |
---|
1559 | 1737 | { |
---|
1560 | | - struct inode *inode = req->inode; |
---|
1561 | | - struct fuse_inode *fi = get_fuse_inode(inode); |
---|
| 1738 | + pgoff_t idx_from = wpa->ia.write.in.offset >> PAGE_SHIFT; |
---|
| 1739 | + pgoff_t idx_to = idx_from + wpa->ia.ap.num_pages - 1; |
---|
| 1740 | + struct rb_node **p = &root->rb_node; |
---|
| 1741 | + struct rb_node *parent = NULL; |
---|
1562 | 1742 | |
---|
1563 | | - mapping_set_error(inode->i_mapping, req->out.h.error); |
---|
1564 | | - spin_lock(&fc->lock); |
---|
1565 | | - while (req->misc.write.next) { |
---|
1566 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
1567 | | - struct fuse_write_in *inarg = &req->misc.write.in; |
---|
1568 | | - struct fuse_req *next = req->misc.write.next; |
---|
1569 | | - req->misc.write.next = next->misc.write.next; |
---|
1570 | | - next->misc.write.next = NULL; |
---|
1571 | | - next->ff = fuse_file_get(req->ff); |
---|
1572 | | - list_add(&next->writepages_entry, &fi->writepages); |
---|
| 1743 | + WARN_ON(!wpa->ia.ap.num_pages); |
---|
| 1744 | + while (*p) { |
---|
| 1745 | + struct fuse_writepage_args *curr; |
---|
| 1746 | + pgoff_t curr_index; |
---|
| 1747 | + |
---|
| 1748 | + parent = *p; |
---|
| 1749 | + curr = rb_entry(parent, struct fuse_writepage_args, |
---|
| 1750 | + writepages_entry); |
---|
| 1751 | + WARN_ON(curr->inode != wpa->inode); |
---|
| 1752 | + curr_index = curr->ia.write.in.offset >> PAGE_SHIFT; |
---|
| 1753 | + |
---|
| 1754 | + if (idx_from >= curr_index + curr->ia.ap.num_pages) |
---|
| 1755 | + p = &(*p)->rb_right; |
---|
| 1756 | + else if (idx_to < curr_index) |
---|
| 1757 | + p = &(*p)->rb_left; |
---|
| 1758 | + else |
---|
| 1759 | + return curr; |
---|
| 1760 | + } |
---|
| 1761 | + |
---|
| 1762 | + rb_link_node(&wpa->writepages_entry, parent, p); |
---|
| 1763 | + rb_insert_color(&wpa->writepages_entry, root); |
---|
| 1764 | + return NULL; |
---|
| 1765 | +} |
---|
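
Editorial note, not part of the patch: the new fuse_insert_writeback() keys the per-inode rb-tree by page-index ranges; the walk descends right when the new range lies entirely after the current node, left when it lies entirely before, and otherwise returns the conflicting in-flight request. The underlying test is plain interval logic, sketched in isolation below:

```c
/*
 * Interval test analogous to the rb-tree walk's decision: two page-index
 * ranges conflict unless one lies entirely before the other.
 */
#include <stdbool.h>
#include <stdio.h>

static bool ranges_overlap(unsigned long a_from, unsigned long a_to,
			   unsigned long b_from, unsigned long b_to)
{
	if (a_from > b_to)	/* "a" entirely after "b": descend right */
		return false;
	if (a_to < b_from)	/* "a" entirely before "b": descend left */
		return false;
	return true;		/* overlap: an in-flight write covers these pages */
}

int main(void)
{
	printf("%d\n", ranges_overlap(4, 7, 0, 3));	/* 0: disjoint */
	printf("%d\n", ranges_overlap(4, 7, 6, 9));	/* 1: pages 6-7 collide */
	return 0;
}
```
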
| 1766 | + |
---|
| 1767 | +static void tree_insert(struct rb_root *root, struct fuse_writepage_args *wpa) |
---|
| 1768 | +{ |
---|
| 1769 | + WARN_ON(fuse_insert_writeback(root, wpa)); |
---|
| 1770 | +} |
---|
| 1771 | + |
---|
| 1772 | +static void fuse_writepage_end(struct fuse_mount *fm, struct fuse_args *args, |
---|
| 1773 | + int error) |
---|
| 1774 | +{ |
---|
| 1775 | + struct fuse_writepage_args *wpa = |
---|
| 1776 | + container_of(args, typeof(*wpa), ia.ap.args); |
---|
| 1777 | + struct inode *inode = wpa->inode; |
---|
| 1778 | + struct fuse_inode *fi = get_fuse_inode(inode); |
---|
| 1779 | + struct fuse_conn *fc = get_fuse_conn(inode); |
---|
| 1780 | + |
---|
| 1781 | + mapping_set_error(inode->i_mapping, error); |
---|
| 1782 | + /* |
---|
| 1783 | + * A writeback finished and this might have updated mtime/ctime on |
---|
| 1784 | + * server making local mtime/ctime stale. Hence invalidate attrs. |
---|
| 1785 | + * Do this only if writeback_cache is not enabled. If writeback_cache |
---|
| 1786 | + * is enabled, we trust local ctime/mtime. |
---|
| 1787 | + */ |
---|
| 1788 | + if (!fc->writeback_cache) |
---|
| 1789 | + fuse_invalidate_attr(inode); |
---|
| 1790 | + spin_lock(&fi->lock); |
---|
| 1791 | + rb_erase(&wpa->writepages_entry, &fi->writepages); |
---|
| 1792 | + while (wpa->next) { |
---|
| 1793 | + struct fuse_mount *fm = get_fuse_mount(inode); |
---|
| 1794 | + struct fuse_write_in *inarg = &wpa->ia.write.in; |
---|
| 1795 | + struct fuse_writepage_args *next = wpa->next; |
---|
| 1796 | + |
---|
| 1797 | + wpa->next = next->next; |
---|
| 1798 | + next->next = NULL; |
---|
| 1799 | + next->ia.ff = fuse_file_get(wpa->ia.ff); |
---|
| 1800 | + tree_insert(&fi->writepages, next); |
---|
1573 | 1801 | |
---|
1574 | 1802 | /* |
---|
1575 | 1803 | * Skip fuse_flush_writepages() to make it easy to crop requests |
---|
.. | .. |
---|
1594 | 1822 | * no invocations of fuse_writepage_end() while we're in |
---|
1595 | 1823 | * fuse_set_nowrite..fuse_release_nowrite section. |
---|
1596 | 1824 | */ |
---|
1597 | | - fuse_send_writepage(fc, next, inarg->offset + inarg->size); |
---|
| 1825 | + fuse_send_writepage(fm, next, inarg->offset + inarg->size); |
---|
1598 | 1826 | } |
---|
1599 | 1827 | fi->writectr--; |
---|
1600 | | - fuse_writepage_finish(fc, req); |
---|
1601 | | - spin_unlock(&fc->lock); |
---|
1602 | | - fuse_writepage_free(fc, req); |
---|
| 1828 | + fuse_writepage_finish(fm, wpa); |
---|
| 1829 | + spin_unlock(&fi->lock); |
---|
| 1830 | + fuse_writepage_free(wpa); |
---|
1603 | 1831 | } |
---|
1604 | 1832 | |
---|
1605 | 1833 | static struct fuse_file *__fuse_write_file_get(struct fuse_conn *fc, |
---|
.. | .. |
---|
1607 | 1835 | { |
---|
1608 | 1836 | struct fuse_file *ff = NULL; |
---|
1609 | 1837 | |
---|
1610 | | - spin_lock(&fc->lock); |
---|
| 1838 | + spin_lock(&fi->lock); |
---|
1611 | 1839 | if (!list_empty(&fi->write_files)) { |
---|
1612 | 1840 | ff = list_entry(fi->write_files.next, struct fuse_file, |
---|
1613 | 1841 | write_entry); |
---|
1614 | 1842 | fuse_file_get(ff); |
---|
1615 | 1843 | } |
---|
1616 | | - spin_unlock(&fc->lock); |
---|
| 1844 | + spin_unlock(&fi->lock); |
---|
1617 | 1845 | |
---|
1618 | 1846 | return ff; |
---|
1619 | 1847 | } |
---|
.. | .. |
---|
1633 | 1861 | struct fuse_file *ff; |
---|
1634 | 1862 | int err; |
---|
1635 | 1863 | |
---|
| 1864 | + /* |
---|
| 1865 | + * Inode is always written before the last reference is dropped and |
---|
| 1866 | + * hence this should not be reached from reclaim. |
---|
| 1867 | + * |
---|
| 1868 | + * Writing back the inode from reclaim can deadlock if the request |
---|
| 1869 | + * processing itself needs an allocation. Allocations triggering |
---|
| 1870 | + * reclaim while serving a request can't be prevented, because it can |
---|
| 1871 | + * involve any number of unrelated userspace processes. |
---|
| 1872 | + */ |
---|
| 1873 | + WARN_ON(wbc->for_reclaim); |
---|
| 1874 | + |
---|
1636 | 1875 | ff = __fuse_write_file_get(fc, fi); |
---|
1637 | 1876 | err = fuse_flush_times(inode, ff); |
---|
1638 | 1877 | if (ff) |
---|
.. | .. |
---|
1641 | 1880 | return err; |
---|
1642 | 1881 | } |
---|
1643 | 1882 | |
---|
| 1883 | +static struct fuse_writepage_args *fuse_writepage_args_alloc(void) |
---|
| 1884 | +{ |
---|
| 1885 | + struct fuse_writepage_args *wpa; |
---|
| 1886 | + struct fuse_args_pages *ap; |
---|
| 1887 | + |
---|
| 1888 | + wpa = kzalloc(sizeof(*wpa), GFP_NOFS); |
---|
| 1889 | + if (wpa) { |
---|
| 1890 | + ap = &wpa->ia.ap; |
---|
| 1891 | + ap->num_pages = 0; |
---|
| 1892 | + ap->pages = fuse_pages_alloc(1, GFP_NOFS, &ap->descs); |
---|
| 1893 | + if (!ap->pages) { |
---|
| 1894 | + kfree(wpa); |
---|
| 1895 | + wpa = NULL; |
---|
| 1896 | + } |
---|
| 1897 | + } |
---|
| 1898 | + return wpa; |
---|
| 1899 | + |
---|
| 1900 | +} |
---|
| 1901 | + |
---|
1644 | 1902 | static int fuse_writepage_locked(struct page *page) |
---|
1645 | 1903 | { |
---|
1646 | 1904 | struct address_space *mapping = page->mapping; |
---|
1647 | 1905 | struct inode *inode = mapping->host; |
---|
1648 | 1906 | struct fuse_conn *fc = get_fuse_conn(inode); |
---|
1649 | 1907 | struct fuse_inode *fi = get_fuse_inode(inode); |
---|
1650 | | - struct fuse_req *req; |
---|
| 1908 | + struct fuse_writepage_args *wpa; |
---|
| 1909 | + struct fuse_args_pages *ap; |
---|
1651 | 1910 | struct page *tmp_page; |
---|
1652 | 1911 | int error = -ENOMEM; |
---|
1653 | 1912 | |
---|
1654 | 1913 | set_page_writeback(page); |
---|
1655 | 1914 | |
---|
1656 | | - req = fuse_request_alloc_nofs(1); |
---|
1657 | | - if (!req) |
---|
| 1915 | + wpa = fuse_writepage_args_alloc(); |
---|
| 1916 | + if (!wpa) |
---|
1658 | 1917 | goto err; |
---|
| 1918 | + ap = &wpa->ia.ap; |
---|
1659 | 1919 | |
---|
1660 | | - /* writeback always goes to bg_queue */ |
---|
1661 | | - __set_bit(FR_BACKGROUND, &req->flags); |
---|
1662 | 1920 | tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); |
---|
1663 | 1921 | if (!tmp_page) |
---|
1664 | 1922 | goto err_free; |
---|
1665 | 1923 | |
---|
1666 | 1924 | error = -EIO; |
---|
1667 | | - req->ff = fuse_write_file_get(fc, fi); |
---|
1668 | | - if (!req->ff) |
---|
| 1925 | + wpa->ia.ff = fuse_write_file_get(fc, fi); |
---|
| 1926 | + if (!wpa->ia.ff) |
---|
1669 | 1927 | goto err_nofile; |
---|
1670 | 1928 | |
---|
1671 | | - fuse_write_fill(req, req->ff, page_offset(page), 0); |
---|
| 1929 | + fuse_write_args_fill(&wpa->ia, wpa->ia.ff, page_offset(page), 0); |
---|
1672 | 1930 | |
---|
1673 | 1931 | copy_highpage(tmp_page, page); |
---|
1674 | | - req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; |
---|
1675 | | - req->misc.write.next = NULL; |
---|
1676 | | - req->in.argpages = 1; |
---|
1677 | | - req->num_pages = 1; |
---|
1678 | | - req->pages[0] = tmp_page; |
---|
1679 | | - req->page_descs[0].offset = 0; |
---|
1680 | | - req->page_descs[0].length = PAGE_SIZE; |
---|
1681 | | - req->end = fuse_writepage_end; |
---|
1682 | | - req->inode = inode; |
---|
| 1932 | + wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE; |
---|
| 1933 | + wpa->next = NULL; |
---|
| 1934 | + ap->args.in_pages = true; |
---|
| 1935 | + ap->num_pages = 1; |
---|
| 1936 | + ap->pages[0] = tmp_page; |
---|
| 1937 | + ap->descs[0].offset = 0; |
---|
| 1938 | + ap->descs[0].length = PAGE_SIZE; |
---|
| 1939 | + ap->args.end = fuse_writepage_end; |
---|
| 1940 | + wpa->inode = inode; |
---|
1683 | 1941 | |
---|
1684 | 1942 | inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); |
---|
1685 | 1943 | inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP); |
---|
1686 | 1944 | |
---|
1687 | | - spin_lock(&fc->lock); |
---|
1688 | | - list_add(&req->writepages_entry, &fi->writepages); |
---|
1689 | | - list_add_tail(&req->list, &fi->queued_writes); |
---|
| 1945 | + spin_lock(&fi->lock); |
---|
| 1946 | + tree_insert(&fi->writepages, wpa); |
---|
| 1947 | + list_add_tail(&wpa->queue_entry, &fi->queued_writes); |
---|
1690 | 1948 | fuse_flush_writepages(inode); |
---|
1691 | | - spin_unlock(&fc->lock); |
---|
| 1949 | + spin_unlock(&fi->lock); |
---|
1692 | 1950 | |
---|
1693 | 1951 | end_page_writeback(page); |
---|
1694 | 1952 | |
---|
.. | .. |
---|
1697 | 1955 | err_nofile: |
---|
1698 | 1956 | __free_page(tmp_page); |
---|
1699 | 1957 | err_free: |
---|
1700 | | - fuse_request_free(req); |
---|
| 1958 | + kfree(wpa); |
---|
1701 | 1959 | err: |
---|
1702 | 1960 | mapping_set_error(page->mapping, error); |
---|
1703 | 1961 | end_page_writeback(page); |
---|
.. | .. |
---|
1728 | 1986 | } |
---|
1729 | 1987 | |
---|
1730 | 1988 | struct fuse_fill_wb_data { |
---|
1731 | | - struct fuse_req *req; |
---|
| 1989 | + struct fuse_writepage_args *wpa; |
---|
1732 | 1990 | struct fuse_file *ff; |
---|
1733 | 1991 | struct inode *inode; |
---|
1734 | 1992 | struct page **orig_pages; |
---|
| 1993 | + unsigned int max_pages; |
---|
1735 | 1994 | }; |
---|
| 1995 | + |
---|
| 1996 | +static bool fuse_pages_realloc(struct fuse_fill_wb_data *data) |
---|
| 1997 | +{ |
---|
| 1998 | + struct fuse_args_pages *ap = &data->wpa->ia.ap; |
---|
| 1999 | + struct fuse_conn *fc = get_fuse_conn(data->inode); |
---|
| 2000 | + struct page **pages; |
---|
| 2001 | + struct fuse_page_desc *descs; |
---|
| 2002 | + unsigned int npages = min_t(unsigned int, |
---|
| 2003 | + max_t(unsigned int, data->max_pages * 2, |
---|
| 2004 | + FUSE_DEFAULT_MAX_PAGES_PER_REQ), |
---|
| 2005 | + fc->max_pages); |
---|
| 2006 | + WARN_ON(npages <= data->max_pages); |
---|
| 2007 | + |
---|
| 2008 | + pages = fuse_pages_alloc(npages, GFP_NOFS, &descs); |
---|
| 2009 | + if (!pages) |
---|
| 2010 | + return false; |
---|
| 2011 | + |
---|
| 2012 | + memcpy(pages, ap->pages, sizeof(struct page *) * ap->num_pages); |
---|
| 2013 | + memcpy(descs, ap->descs, sizeof(struct fuse_page_desc) * ap->num_pages); |
---|
| 2014 | + kfree(ap->pages); |
---|
| 2015 | + ap->pages = pages; |
---|
| 2016 | + ap->descs = descs; |
---|
| 2017 | + data->max_pages = npages; |
---|
| 2018 | + |
---|
| 2019 | + return true; |
---|
| 2020 | +} |
---|
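
Editorial note, not part of the patch: fuse_pages_realloc() lets the writepages page array grow on demand instead of always allocating the maximum up front: at least double the current size, no smaller than the default request size, and never beyond fc->max_pages. A sketch of just that growth policy (the value 32 for the default is used here as an illustrative stand-in for FUSE_DEFAULT_MAX_PAGES_PER_REQ):

```c
/*
 * Growth policy in isolation: double, but never below the default request
 * size and never above the connection limit.
 */
#include <stdio.h>

#define DEFAULT_MAX_PAGES_PER_REQ 32u	/* illustrative stand-in for the kernel constant */

static unsigned int grow_pages(unsigned int cur, unsigned int conn_max)
{
	unsigned int want = cur * 2;

	if (want < DEFAULT_MAX_PAGES_PER_REQ)
		want = DEFAULT_MAX_PAGES_PER_REQ;
	return want > conn_max ? conn_max : want;
}

int main(void)
{
	printf("%u\n", grow_pages(1, 256));	/* 32: first growth jumps to the default */
	printf("%u\n", grow_pages(32, 256));	/* 64: then it doubles */
	printf("%u\n", grow_pages(200, 256));	/* 256: clamped at the connection limit */
	return 0;
}
```
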
1736 | 2021 | |
---|
1737 | 2022 | static void fuse_writepages_send(struct fuse_fill_wb_data *data) |
---|
1738 | 2023 | { |
---|
1739 | | - struct fuse_req *req = data->req; |
---|
| 2024 | + struct fuse_writepage_args *wpa = data->wpa; |
---|
1740 | 2025 | struct inode *inode = data->inode; |
---|
1741 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
1742 | 2026 | struct fuse_inode *fi = get_fuse_inode(inode); |
---|
1743 | | - int num_pages = req->num_pages; |
---|
| 2027 | + int num_pages = wpa->ia.ap.num_pages; |
---|
1744 | 2028 | int i; |
---|
1745 | 2029 | |
---|
1746 | | - req->ff = fuse_file_get(data->ff); |
---|
1747 | | - spin_lock(&fc->lock); |
---|
1748 | | - list_add_tail(&req->list, &fi->queued_writes); |
---|
| 2030 | + wpa->ia.ff = fuse_file_get(data->ff); |
---|
| 2031 | + spin_lock(&fi->lock); |
---|
| 2032 | + list_add_tail(&wpa->queue_entry, &fi->queued_writes); |
---|
1749 | 2033 | fuse_flush_writepages(inode); |
---|
1750 | | - spin_unlock(&fc->lock); |
---|
| 2034 | + spin_unlock(&fi->lock); |
---|
1751 | 2035 | |
---|
1752 | 2036 | for (i = 0; i < num_pages; i++) |
---|
1753 | 2037 | end_page_writeback(data->orig_pages[i]); |
---|
1754 | 2038 | } |
---|
1755 | 2039 | |
---|
1756 | | -static bool fuse_writepage_in_flight(struct fuse_req *new_req, |
---|
1757 | | - struct page *page) |
---|
| 2040 | +/* |
---|
| 2041 | + * Check under fi->lock if the page is under writeback, and insert it onto the |
---|
| 2042 | + * rb_tree if not. Otherwise iterate auxiliary write requests, to see if there's |
---|
| 2043 | + * one already added for a page at this offset. If there's none, then insert |
---|
| 2044 | + * this new request onto the auxiliary list, otherwise reuse the existing one by |
---|
| 2045 | + * swapping the new temp page with the old one. |
---|
| 2046 | + */ |
---|
| 2047 | +static bool fuse_writepage_add(struct fuse_writepage_args *new_wpa, |
---|
| 2048 | + struct page *page) |
---|
1758 | 2049 | { |
---|
1759 | | - struct fuse_conn *fc = get_fuse_conn(new_req->inode); |
---|
1760 | | - struct fuse_inode *fi = get_fuse_inode(new_req->inode); |
---|
1761 | | - struct fuse_req *tmp; |
---|
1762 | | - struct fuse_req *old_req; |
---|
1763 | | - bool found = false; |
---|
1764 | | - pgoff_t curr_index; |
---|
| 2050 | + struct fuse_inode *fi = get_fuse_inode(new_wpa->inode); |
---|
| 2051 | + struct fuse_writepage_args *tmp; |
---|
| 2052 | + struct fuse_writepage_args *old_wpa; |
---|
| 2053 | + struct fuse_args_pages *new_ap = &new_wpa->ia.ap; |
---|
1765 | 2054 | |
---|
1766 | | - BUG_ON(new_req->num_pages != 0); |
---|
| 2055 | + WARN_ON(new_ap->num_pages != 0); |
---|
| 2056 | + new_ap->num_pages = 1; |
---|
1767 | 2057 | |
---|
1768 | | - spin_lock(&fc->lock); |
---|
1769 | | - list_del(&new_req->writepages_entry); |
---|
1770 | | - list_for_each_entry(old_req, &fi->writepages, writepages_entry) { |
---|
1771 | | - BUG_ON(old_req->inode != new_req->inode); |
---|
1772 | | - curr_index = old_req->misc.write.in.offset >> PAGE_SHIFT; |
---|
1773 | | - if (curr_index <= page->index && |
---|
1774 | | - page->index < curr_index + old_req->num_pages) { |
---|
1775 | | - found = true; |
---|
| 2058 | + spin_lock(&fi->lock); |
---|
| 2059 | + old_wpa = fuse_insert_writeback(&fi->writepages, new_wpa); |
---|
| 2060 | + if (!old_wpa) { |
---|
| 2061 | + spin_unlock(&fi->lock); |
---|
| 2062 | + return true; |
---|
| 2063 | + } |
---|
| 2064 | + |
---|
| 2065 | + for (tmp = old_wpa->next; tmp; tmp = tmp->next) { |
---|
| 2066 | + pgoff_t curr_index; |
---|
| 2067 | + |
---|
| 2068 | + WARN_ON(tmp->inode != new_wpa->inode); |
---|
| 2069 | + curr_index = tmp->ia.write.in.offset >> PAGE_SHIFT; |
---|
| 2070 | + if (curr_index == page->index) { |
---|
| 2071 | + WARN_ON(tmp->ia.ap.num_pages != 1); |
---|
| 2072 | + swap(tmp->ia.ap.pages[0], new_ap->pages[0]); |
---|
1776 | 2073 | break; |
---|
1777 | 2074 | } |
---|
1778 | 2075 | } |
---|
1779 | | - if (!found) { |
---|
1780 | | - list_add(&new_req->writepages_entry, &fi->writepages); |
---|
1781 | | - goto out_unlock; |
---|
| 2076 | + |
---|
| 2077 | + if (!tmp) { |
---|
| 2078 | + new_wpa->next = old_wpa->next; |
---|
| 2079 | + old_wpa->next = new_wpa; |
---|
1782 | 2080 | } |
---|
1783 | 2081 | |
---|
1784 | | - new_req->num_pages = 1; |
---|
1785 | | - for (tmp = old_req; tmp != NULL; tmp = tmp->misc.write.next) { |
---|
1786 | | - BUG_ON(tmp->inode != new_req->inode); |
---|
1787 | | - curr_index = tmp->misc.write.in.offset >> PAGE_SHIFT; |
---|
1788 | | - if (tmp->num_pages == 1 && |
---|
1789 | | - curr_index == page->index) { |
---|
1790 | | - old_req = tmp; |
---|
1791 | | - } |
---|
1792 | | - } |
---|
| 2082 | + spin_unlock(&fi->lock); |
---|
1793 | 2083 | |
---|
1794 | | - if (old_req->num_pages == 1 && test_bit(FR_PENDING, &old_req->flags)) { |
---|
1795 | | - struct backing_dev_info *bdi = inode_to_bdi(page->mapping->host); |
---|
1796 | | - |
---|
1797 | | - copy_highpage(old_req->pages[0], page); |
---|
1798 | | - spin_unlock(&fc->lock); |
---|
| 2084 | + if (tmp) { |
---|
| 2085 | + struct backing_dev_info *bdi = inode_to_bdi(new_wpa->inode); |
---|
1799 | 2086 | |
---|
1800 | 2087 | dec_wb_stat(&bdi->wb, WB_WRITEBACK); |
---|
1801 | | - dec_node_page_state(new_req->pages[0], NR_WRITEBACK_TEMP); |
---|
| 2088 | + dec_node_page_state(new_ap->pages[0], NR_WRITEBACK_TEMP); |
---|
1802 | 2089 | wb_writeout_inc(&bdi->wb); |
---|
1803 | | - fuse_writepage_free(fc, new_req); |
---|
1804 | | - fuse_request_free(new_req); |
---|
1805 | | - goto out; |
---|
1806 | | - } else { |
---|
1807 | | - new_req->misc.write.next = old_req->misc.write.next; |
---|
1808 | | - old_req->misc.write.next = new_req; |
---|
| 2090 | + fuse_writepage_free(new_wpa); |
---|
1809 | 2091 | } |
---|
1810 | | -out_unlock: |
---|
1811 | | - spin_unlock(&fc->lock); |
---|
1812 | | -out: |
---|
1813 | | - return found; |
---|
| 2092 | + |
---|
| 2093 | + return false; |
---|
1814 | 2094 | } |
---|
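
Editorial note, not part of the patch: fuse_writepage_add() above either swaps the new temporary page into an already queued single-page auxiliary request for the same index, or chains a fresh request behind the in-flight write. A toy linked-list model of that reuse-or-chain decision (the types and helper names below are invented for the illustration):

```c
/*
 * Toy model of the reuse-or-chain decision: if a request for the same page
 * index is already queued, replace its payload; otherwise chain a new one.
 */
#include <stdio.h>
#include <stdlib.h>

struct aux_write {
	unsigned long index;		/* page index this request covers */
	char payload[8];		/* stands in for the temporary page */
	struct aux_write *next;
};

static void add_aux(struct aux_write **head, unsigned long index, const char *data)
{
	struct aux_write *w;

	for (w = *head; w; w = w->next) {
		if (w->index == index) {	/* reuse: swap in the newer data */
			snprintf(w->payload, sizeof(w->payload), "%s", data);
			return;
		}
	}
	w = calloc(1, sizeof(*w));		/* no match: chain a new request */
	if (!w)
		return;
	w->index = index;
	snprintf(w->payload, sizeof(w->payload), "%s", data);
	w->next = *head;
	*head = w;
}

int main(void)
{
	struct aux_write *head = NULL;

	add_aux(&head, 3, "v1");
	add_aux(&head, 3, "v2");	/* same page dirtied again: payload replaced */
	add_aux(&head, 5, "v1");
	while (head) {
		struct aux_write *next = head->next;

		printf("index %lu: %s\n", head->index, head->payload);
		free(head);
		head = next;
	}
	return 0;
}
```
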
1815 | 2095 | |
---|
1816 | | -static int fuse_writepages_fill(struct page *page, |
---|
1817 | | - struct writeback_control *wbc, void *_data) |
---|
| 2096 | +static bool fuse_writepage_need_send(struct fuse_conn *fc, struct page *page, |
---|
| 2097 | + struct fuse_args_pages *ap, |
---|
| 2098 | + struct fuse_fill_wb_data *data) |
---|
1818 | 2099 | { |
---|
1819 | | - struct fuse_fill_wb_data *data = _data; |
---|
1820 | | - struct fuse_req *req = data->req; |
---|
1821 | | - struct inode *inode = data->inode; |
---|
1822 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
1823 | | - struct page *tmp_page; |
---|
1824 | | - bool is_writeback; |
---|
1825 | | - int err; |
---|
1826 | | - |
---|
1827 | | - if (!data->ff) { |
---|
1828 | | - err = -EIO; |
---|
1829 | | - data->ff = fuse_write_file_get(fc, get_fuse_inode(inode)); |
---|
1830 | | - if (!data->ff) |
---|
1831 | | - goto out_unlock; |
---|
1832 | | - } |
---|
| 2100 | + WARN_ON(!ap->num_pages); |
---|
1833 | 2101 | |
---|
1834 | 2102 | /* |
---|
1835 | 2103 | * Being under writeback is unlikely but possible. For example direct |
---|
.. | .. |
---|
1837 | 2105 | * the pages are faulted with get_user_pages(), and then after the read |
---|
1838 | 2106 | * completed. |
---|
1839 | 2107 | */ |
---|
1840 | | - is_writeback = fuse_page_is_writeback(inode, page->index); |
---|
| 2108 | + if (fuse_page_is_writeback(data->inode, page->index)) |
---|
| 2109 | + return true; |
---|
1841 | 2110 | |
---|
1842 | | - if (req && req->num_pages && |
---|
1843 | | - (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ || |
---|
1844 | | - (req->num_pages + 1) * PAGE_SIZE > fc->max_write || |
---|
1845 | | - data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) { |
---|
1846 | | - fuse_writepages_send(data); |
---|
1847 | | - data->req = NULL; |
---|
| 2111 | + /* Reached max pages */ |
---|
| 2112 | + if (ap->num_pages == fc->max_pages) |
---|
| 2113 | + return true; |
---|
| 2114 | + |
---|
| 2115 | + /* Reached max write bytes */ |
---|
| 2116 | + if ((ap->num_pages + 1) * PAGE_SIZE > fc->max_write) |
---|
| 2117 | + return true; |
---|
| 2118 | + |
---|
| 2119 | + /* Discontinuity */ |
---|
| 2120 | + if (data->orig_pages[ap->num_pages - 1]->index + 1 != page->index) |
---|
| 2121 | + return true; |
---|
| 2122 | + |
---|
| 2123 | + /* Need to grow the pages array? If so, did the expansion fail? */ |
---|
| 2124 | + if (ap->num_pages == data->max_pages && !fuse_pages_realloc(data)) |
---|
| 2125 | + return true; |
---|
| 2126 | + |
---|
| 2127 | + return false; |
---|
| 2128 | +} |
---|
| 2129 | + |
---|
| 2130 | +static int fuse_writepages_fill(struct page *page, |
---|
| 2131 | + struct writeback_control *wbc, void *_data) |
---|
| 2132 | +{ |
---|
| 2133 | + struct fuse_fill_wb_data *data = _data; |
---|
| 2134 | + struct fuse_writepage_args *wpa = data->wpa; |
---|
| 2135 | + struct fuse_args_pages *ap = &wpa->ia.ap; |
---|
| 2136 | + struct inode *inode = data->inode; |
---|
| 2137 | + struct fuse_inode *fi = get_fuse_inode(inode); |
---|
| 2138 | + struct fuse_conn *fc = get_fuse_conn(inode); |
---|
| 2139 | + struct page *tmp_page; |
---|
| 2140 | + int err; |
---|
| 2141 | + |
---|
| 2142 | + if (!data->ff) { |
---|
| 2143 | + err = -EIO; |
---|
| 2144 | + data->ff = fuse_write_file_get(fc, fi); |
---|
| 2145 | + if (!data->ff) |
---|
| 2146 | + goto out_unlock; |
---|
1848 | 2147 | } |
---|
| 2148 | + |
---|
| 2149 | + if (wpa && fuse_writepage_need_send(fc, page, ap, data)) { |
---|
| 2150 | + fuse_writepages_send(data); |
---|
| 2151 | + data->wpa = NULL; |
---|
| 2152 | + } |
---|
| 2153 | + |
---|
1849 | 2154 | err = -ENOMEM; |
---|
1850 | 2155 | tmp_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM); |
---|
1851 | 2156 | if (!tmp_page) |
---|
.. | .. |
---|
1860 | 2165 | * This is ensured by holding the page lock in page_mkwrite() while |
---|
1861 | 2166 | * checking fuse_page_is_writeback(). We already hold the page lock |
---|
1862 | 2167 | * since clear_page_dirty_for_io() and keep it held until we add the |
---|
1863 | | - * request to the fi->writepages list and increment req->num_pages. |
---|
| 2168 | + * request to the fi->writepages list and increment ap->num_pages. |
---|
1864 | 2169 | * After this fuse_page_is_writeback() will indicate that the page is |
---|
1865 | 2170 | * under writeback, so we can release the page lock. |
---|
1866 | 2171 | */ |
---|
1867 | | - if (data->req == NULL) { |
---|
1868 | | - struct fuse_inode *fi = get_fuse_inode(inode); |
---|
1869 | | - |
---|
| 2172 | + if (data->wpa == NULL) { |
---|
1870 | 2173 | err = -ENOMEM; |
---|
1871 | | - req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ); |
---|
1872 | | - if (!req) { |
---|
| 2174 | + wpa = fuse_writepage_args_alloc(); |
---|
| 2175 | + if (!wpa) { |
---|
1873 | 2176 | __free_page(tmp_page); |
---|
1874 | 2177 | goto out_unlock; |
---|
1875 | 2178 | } |
---|
| 2179 | + data->max_pages = 1; |
---|
1876 | 2180 | |
---|
1877 | | - fuse_write_fill(req, data->ff, page_offset(page), 0); |
---|
1878 | | - req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; |
---|
1879 | | - req->misc.write.next = NULL; |
---|
1880 | | - req->in.argpages = 1; |
---|
1881 | | - __set_bit(FR_BACKGROUND, &req->flags); |
---|
1882 | | - req->num_pages = 0; |
---|
1883 | | - req->end = fuse_writepage_end; |
---|
1884 | | - req->inode = inode; |
---|
1885 | | - |
---|
1886 | | - spin_lock(&fc->lock); |
---|
1887 | | - list_add(&req->writepages_entry, &fi->writepages); |
---|
1888 | | - spin_unlock(&fc->lock); |
---|
1889 | | - |
---|
1890 | | - data->req = req; |
---|
| 2181 | + ap = &wpa->ia.ap; |
---|
| 2182 | + fuse_write_args_fill(&wpa->ia, data->ff, page_offset(page), 0); |
---|
| 2183 | + wpa->ia.write.in.write_flags |= FUSE_WRITE_CACHE; |
---|
| 2184 | + wpa->next = NULL; |
---|
| 2185 | + ap->args.in_pages = true; |
---|
| 2186 | + ap->args.end = fuse_writepage_end; |
---|
| 2187 | + ap->num_pages = 0; |
---|
| 2188 | + wpa->inode = inode; |
---|
1891 | 2189 | } |
---|
1892 | 2190 | set_page_writeback(page); |
---|
1893 | 2191 | |
---|
1894 | 2192 | copy_highpage(tmp_page, page); |
---|
1895 | | - req->pages[req->num_pages] = tmp_page; |
---|
1896 | | - req->page_descs[req->num_pages].offset = 0; |
---|
1897 | | - req->page_descs[req->num_pages].length = PAGE_SIZE; |
---|
| 2193 | + ap->pages[ap->num_pages] = tmp_page; |
---|
| 2194 | + ap->descs[ap->num_pages].offset = 0; |
---|
| 2195 | + ap->descs[ap->num_pages].length = PAGE_SIZE; |
---|
| 2196 | + data->orig_pages[ap->num_pages] = page; |
---|
1898 | 2197 | |
---|
1899 | 2198 | inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK); |
---|
1900 | 2199 | inc_node_page_state(tmp_page, NR_WRITEBACK_TEMP); |
---|
1901 | 2200 | |
---|
1902 | 2201 | err = 0; |
---|
1903 | | - if (is_writeback && fuse_writepage_in_flight(req, page)) { |
---|
| 2202 | + if (data->wpa) { |
---|
| 2203 | + /* |
---|
| 2204 | + * Protected by fi->lock against concurrent access by |
---|
| 2205 | + * fuse_page_is_writeback(). |
---|
| 2206 | + */ |
---|
| 2207 | + spin_lock(&fi->lock); |
---|
| 2208 | + ap->num_pages++; |
---|
| 2209 | + spin_unlock(&fi->lock); |
---|
| 2210 | + } else if (fuse_writepage_add(wpa, page)) { |
---|
| 2211 | + data->wpa = wpa; |
---|
| 2212 | + } else { |
---|
1904 | 2213 | end_page_writeback(page); |
---|
1905 | | - data->req = NULL; |
---|
1906 | | - goto out_unlock; |
---|
1907 | 2214 | } |
---|
1908 | | - data->orig_pages[req->num_pages] = page; |
---|
1909 | | - |
---|
1910 | | - /* |
---|
1911 | | - * Protected by fc->lock against concurrent access by |
---|
1912 | | - * fuse_page_is_writeback(). |
---|
1913 | | - */ |
---|
1914 | | - spin_lock(&fc->lock); |
---|
1915 | | - req->num_pages++; |
---|
1916 | | - spin_unlock(&fc->lock); |
---|
1917 | | - |
---|
1918 | 2215 | out_unlock: |
---|
1919 | 2216 | unlock_page(page); |
---|
1920 | 2217 | |
---|
.. | .. |
---|
1925 | 2222 | struct writeback_control *wbc) |
---|
1926 | 2223 | { |
---|
1927 | 2224 | struct inode *inode = mapping->host; |
---|
| 2225 | + struct fuse_conn *fc = get_fuse_conn(inode); |
---|
1928 | 2226 | struct fuse_fill_wb_data data; |
---|
1929 | 2227 | int err; |
---|
1930 | 2228 | |
---|
.. | .. |
---|
1933 | 2231 | goto out; |
---|
1934 | 2232 | |
---|
1935 | 2233 | data.inode = inode; |
---|
1936 | | - data.req = NULL; |
---|
| 2234 | + data.wpa = NULL; |
---|
1937 | 2235 | data.ff = NULL; |
---|
1938 | 2236 | |
---|
1939 | 2237 | err = -ENOMEM; |
---|
1940 | | - data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, |
---|
| 2238 | + data.orig_pages = kcalloc(fc->max_pages, |
---|
1941 | 2239 | sizeof(struct page *), |
---|
1942 | 2240 | GFP_NOFS); |
---|
1943 | 2241 | if (!data.orig_pages) |
---|
1944 | 2242 | goto out; |
---|
1945 | 2243 | |
---|
1946 | 2244 | err = write_cache_pages(mapping, wbc, fuse_writepages_fill, &data); |
---|
1947 | | - if (data.req) { |
---|
1948 | | - /* Ignore errors if we can write at least one page */ |
---|
1949 | | - BUG_ON(!data.req->num_pages); |
---|
| 2245 | + if (data.wpa) { |
---|
| 2246 | + WARN_ON(!data.wpa->ia.ap.num_pages); |
---|
1950 | 2247 | fuse_writepages_send(&data); |
---|
1951 | | - err = 0; |
---|
1952 | 2248 | } |
---|
1953 | 2249 | if (data.ff) |
---|
1954 | 2250 | fuse_file_put(data.ff, false, false); |
---|
.. | .. |
---|
2096 | 2392 | |
---|
2097 | 2393 | static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma) |
---|
2098 | 2394 | { |
---|
| 2395 | + struct fuse_file *ff = file->private_data; |
---|
| 2396 | + |
---|
| 2397 | + /* DAX mmap is superior to direct_io mmap */ |
---|
| 2398 | + if (FUSE_IS_DAX(file_inode(file))) |
---|
| 2399 | + return fuse_dax_mmap(file, vma); |
---|
| 2400 | + |
---|
| 2401 | + if (ff->passthrough.filp) |
---|
| 2402 | + return fuse_passthrough_mmap(file, vma); |
---|
| 2403 | + |
---|
| 2404 | + if (ff->open_flags & FOPEN_DIRECT_IO) { |
---|
| 2405 | + /* Can't provide the coherency needed for MAP_SHARED */ |
---|
| 2406 | + if (vma->vm_flags & VM_MAYSHARE) |
---|
| 2407 | + return -ENODEV; |
---|
| 2408 | + |
---|
| 2409 | + invalidate_inode_pages2(file->f_mapping); |
---|
| 2410 | + |
---|
| 2411 | + return generic_file_mmap(file, vma); |
---|
| 2412 | + } |
---|
| 2413 | + |
---|
2099 | 2414 | if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) |
---|
2100 | 2415 | fuse_link_write_file(file); |
---|
2101 | 2416 | |
---|
2102 | 2417 | file_accessed(file); |
---|
2103 | 2418 | vma->vm_ops = &fuse_file_vm_ops; |
---|
2104 | 2419 | return 0; |
---|
2105 | | -} |
---|
2106 | | - |
---|
2107 | | -static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma) |
---|
2108 | | -{ |
---|
2109 | | - /* Can't provide the coherency needed for MAP_SHARED */ |
---|
2110 | | - if (vma->vm_flags & VM_MAYSHARE) |
---|
2111 | | - return -ENODEV; |
---|
2112 | | - |
---|
2113 | | - invalidate_inode_pages2(file->f_mapping); |
---|
2114 | | - |
---|
2115 | | - return generic_file_mmap(file, vma); |
---|
2116 | 2420 | } |
---|
2117 | 2421 | |
---|
2118 | 2422 | static int convert_fuse_file_lock(struct fuse_conn *fc, |
---|
.. | .. |
---|
2165 | 2469 | inarg->lk.pid = pid; |
---|
2166 | 2470 | if (flock) |
---|
2167 | 2471 | inarg->lk_flags |= FUSE_LK_FLOCK; |
---|
2168 | | - args->in.h.opcode = opcode; |
---|
2169 | | - args->in.h.nodeid = get_node_id(inode); |
---|
2170 | | - args->in.numargs = 1; |
---|
2171 | | - args->in.args[0].size = sizeof(*inarg); |
---|
2172 | | - args->in.args[0].value = inarg; |
---|
| 2472 | + args->opcode = opcode; |
---|
| 2473 | + args->nodeid = get_node_id(inode); |
---|
| 2474 | + args->in_numargs = 1; |
---|
| 2475 | + args->in_args[0].size = sizeof(*inarg); |
---|
| 2476 | + args->in_args[0].value = inarg; |
---|
2173 | 2477 | } |
---|
2174 | 2478 | |
---|
2175 | 2479 | static int fuse_getlk(struct file *file, struct file_lock *fl) |
---|
2176 | 2480 | { |
---|
2177 | 2481 | struct inode *inode = file_inode(file); |
---|
2178 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
| 2482 | + struct fuse_mount *fm = get_fuse_mount(inode); |
---|
2179 | 2483 | FUSE_ARGS(args); |
---|
2180 | 2484 | struct fuse_lk_in inarg; |
---|
2181 | 2485 | struct fuse_lk_out outarg; |
---|
2182 | 2486 | int err; |
---|
2183 | 2487 | |
---|
2184 | 2488 | fuse_lk_fill(&args, file, fl, FUSE_GETLK, 0, 0, &inarg); |
---|
2185 | | - args.out.numargs = 1; |
---|
2186 | | - args.out.args[0].size = sizeof(outarg); |
---|
2187 | | - args.out.args[0].value = &outarg; |
---|
2188 | | - err = fuse_simple_request(fc, &args); |
---|
| 2489 | + args.out_numargs = 1; |
---|
| 2490 | + args.out_args[0].size = sizeof(outarg); |
---|
| 2491 | + args.out_args[0].value = &outarg; |
---|
| 2492 | + err = fuse_simple_request(fm, &args); |
---|
2189 | 2493 | if (!err) |
---|
2190 | | - err = convert_fuse_file_lock(fc, &outarg.lk, fl); |
---|
| 2494 | + err = convert_fuse_file_lock(fm->fc, &outarg.lk, fl); |
---|
2191 | 2495 | |
---|
2192 | 2496 | return err; |
---|
2193 | 2497 | } |
---|
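
Two conversions visible in fuse_getlk() repeat through the rest of the file: requests are now submitted against a struct fuse_mount (connection-wide state is reached through fm->fc), and the nested args.in.h.* / args.out.* request fields are flattened into args.opcode, args.nodeid, args.in_args[] and args.out_args[]. A rough sketch of the relationship implied by the fm->fc dereferences; this shows only what the diff itself requires, and the real definition in fs/fuse/fuse_i.h carries more state:

```c
/* Hedged, minimal shape inferred from usage in this diff. */
struct fuse_mount {
	struct fuse_conn *fc;	/* shared connection state: no_lseek, no_bmap,
				 * max_pages, pid_ns, ... as dereferenced here */
	/* ... per-mount (per-superblock) fields ... */
};
```
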
.. | .. |
---|
2195 | 2499 | static int fuse_setlk(struct file *file, struct file_lock *fl, int flock) |
---|
2196 | 2500 | { |
---|
2197 | 2501 | struct inode *inode = file_inode(file); |
---|
2198 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
| 2502 | + struct fuse_mount *fm = get_fuse_mount(inode); |
---|
2199 | 2503 | FUSE_ARGS(args); |
---|
2200 | 2504 | struct fuse_lk_in inarg; |
---|
2201 | 2505 | int opcode = (fl->fl_flags & FL_SLEEP) ? FUSE_SETLKW : FUSE_SETLK; |
---|
2202 | 2506 | struct pid *pid = fl->fl_type != F_UNLCK ? task_tgid(current) : NULL; |
---|
2203 | | - pid_t pid_nr = pid_nr_ns(pid, fc->pid_ns); |
---|
| 2507 | + pid_t pid_nr = pid_nr_ns(pid, fm->fc->pid_ns); |
---|
2204 | 2508 | int err; |
---|
2205 | 2509 | |
---|
2206 | 2510 | if (fl->fl_lmops && fl->fl_lmops->lm_grant) { |
---|
.. | .. |
---|
2213 | 2517 | return 0; |
---|
2214 | 2518 | |
---|
2215 | 2519 | fuse_lk_fill(&args, file, fl, opcode, pid_nr, flock, &inarg); |
---|
2216 | | - err = fuse_simple_request(fc, &args); |
---|
| 2520 | + err = fuse_simple_request(fm, &args); |
---|
2217 | 2521 | |
---|
2218 | 2522 | /* locking is restartable */ |
---|
2219 | 2523 | if (err == -EINTR) |
---|
.. | .. |
---|
2267 | 2571 | static sector_t fuse_bmap(struct address_space *mapping, sector_t block) |
---|
2268 | 2572 | { |
---|
2269 | 2573 | struct inode *inode = mapping->host; |
---|
2270 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
| 2574 | + struct fuse_mount *fm = get_fuse_mount(inode); |
---|
2271 | 2575 | FUSE_ARGS(args); |
---|
2272 | 2576 | struct fuse_bmap_in inarg; |
---|
2273 | 2577 | struct fuse_bmap_out outarg; |
---|
2274 | 2578 | int err; |
---|
2275 | 2579 | |
---|
2276 | | - if (!inode->i_sb->s_bdev || fc->no_bmap) |
---|
| 2580 | + if (!inode->i_sb->s_bdev || fm->fc->no_bmap) |
---|
2277 | 2581 | return 0; |
---|
2278 | 2582 | |
---|
2279 | 2583 | memset(&inarg, 0, sizeof(inarg)); |
---|
2280 | 2584 | inarg.block = block; |
---|
2281 | 2585 | inarg.blocksize = inode->i_sb->s_blocksize; |
---|
2282 | | - args.in.h.opcode = FUSE_BMAP; |
---|
2283 | | - args.in.h.nodeid = get_node_id(inode); |
---|
2284 | | - args.in.numargs = 1; |
---|
2285 | | - args.in.args[0].size = sizeof(inarg); |
---|
2286 | | - args.in.args[0].value = &inarg; |
---|
2287 | | - args.out.numargs = 1; |
---|
2288 | | - args.out.args[0].size = sizeof(outarg); |
---|
2289 | | - args.out.args[0].value = &outarg; |
---|
2290 | | - err = fuse_simple_request(fc, &args); |
---|
| 2586 | + args.opcode = FUSE_BMAP; |
---|
| 2587 | + args.nodeid = get_node_id(inode); |
---|
| 2588 | + args.in_numargs = 1; |
---|
| 2589 | + args.in_args[0].size = sizeof(inarg); |
---|
| 2590 | + args.in_args[0].value = &inarg; |
---|
| 2591 | + args.out_numargs = 1; |
---|
| 2592 | + args.out_args[0].size = sizeof(outarg); |
---|
| 2593 | + args.out_args[0].value = &outarg; |
---|
| 2594 | + err = fuse_simple_request(fm, &args); |
---|
2291 | 2595 | if (err == -ENOSYS) |
---|
2292 | | - fc->no_bmap = 1; |
---|
| 2596 | + fm->fc->no_bmap = 1; |
---|
2293 | 2597 | |
---|
2294 | 2598 | return err ? 0 : outarg.block; |
---|
2295 | 2599 | } |
---|
.. | .. |
---|
2297 | 2601 | static loff_t fuse_lseek(struct file *file, loff_t offset, int whence) |
---|
2298 | 2602 | { |
---|
2299 | 2603 | struct inode *inode = file->f_mapping->host; |
---|
2300 | | - struct fuse_conn *fc = get_fuse_conn(inode); |
---|
| 2604 | + struct fuse_mount *fm = get_fuse_mount(inode); |
---|
2301 | 2605 | struct fuse_file *ff = file->private_data; |
---|
2302 | 2606 | FUSE_ARGS(args); |
---|
2303 | 2607 | struct fuse_lseek_in inarg = { |
---|
.. | .. |
---|
2308 | 2612 | struct fuse_lseek_out outarg; |
---|
2309 | 2613 | int err; |
---|
2310 | 2614 | |
---|
2311 | | - if (fc->no_lseek) |
---|
| 2615 | + if (fm->fc->no_lseek) |
---|
2312 | 2616 | goto fallback; |
---|
2313 | 2617 | |
---|
2314 | | - args.in.h.opcode = FUSE_LSEEK; |
---|
2315 | | - args.in.h.nodeid = ff->nodeid; |
---|
2316 | | - args.in.numargs = 1; |
---|
2317 | | - args.in.args[0].size = sizeof(inarg); |
---|
2318 | | - args.in.args[0].value = &inarg; |
---|
2319 | | - args.out.numargs = 1; |
---|
2320 | | - args.out.args[0].size = sizeof(outarg); |
---|
2321 | | - args.out.args[0].value = &outarg; |
---|
2322 | | - err = fuse_simple_request(fc, &args); |
---|
| 2618 | + args.opcode = FUSE_LSEEK; |
---|
| 2619 | + args.nodeid = ff->nodeid; |
---|
| 2620 | + args.in_numargs = 1; |
---|
| 2621 | + args.in_args[0].size = sizeof(inarg); |
---|
| 2622 | + args.in_args[0].value = &inarg; |
---|
| 2623 | + args.out_numargs = 1; |
---|
| 2624 | + args.out_args[0].size = sizeof(outarg); |
---|
| 2625 | + args.out_args[0].value = &outarg; |
---|
| 2626 | + err = fuse_simple_request(fm, &args); |
---|
2323 | 2627 | if (err) { |
---|
2324 | 2628 | if (err == -ENOSYS) { |
---|
2325 | | - fc->no_lseek = 1; |
---|
| 2629 | + fm->fc->no_lseek = 1; |
---|
2326 | 2630 | goto fallback; |
---|
2327 | 2631 | } |
---|
2328 | 2632 | return err; |
---|
.. | .. |
---|
2408 | 2712 | } |
---|
2409 | 2713 | |
---|
2410 | 2714 | /* Make sure iov_length() won't overflow */ |
---|
2411 | | -static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) |
---|
| 2715 | +static int fuse_verify_ioctl_iov(struct fuse_conn *fc, struct iovec *iov, |
---|
| 2716 | + size_t count) |
---|
2412 | 2717 | { |
---|
2413 | 2718 | size_t n; |
---|
2414 | | - u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; |
---|
| 2719 | + u32 max = fc->max_pages << PAGE_SHIFT; |
---|
2415 | 2720 | |
---|
2416 | 2721 | for (n = 0; n < count; n++, iov++) { |
---|
2417 | 2722 | if (iov->iov_len > (size_t) max) |
---|
.. | .. |
---|
2507 | 2812 | unsigned int flags) |
---|
2508 | 2813 | { |
---|
2509 | 2814 | struct fuse_file *ff = file->private_data; |
---|
2510 | | - struct fuse_conn *fc = ff->fc; |
---|
| 2815 | + struct fuse_mount *fm = ff->fm; |
---|
2511 | 2816 | struct fuse_ioctl_in inarg = { |
---|
2512 | 2817 | .fh = ff->fh, |
---|
2513 | 2818 | .cmd = cmd, |
---|
.. | .. |
---|
2515 | 2820 | .flags = flags |
---|
2516 | 2821 | }; |
---|
2517 | 2822 | struct fuse_ioctl_out outarg; |
---|
2518 | | - struct fuse_req *req = NULL; |
---|
2519 | | - struct page **pages = NULL; |
---|
2520 | 2823 | struct iovec *iov_page = NULL; |
---|
2521 | 2824 | struct iovec *in_iov = NULL, *out_iov = NULL; |
---|
2522 | | - unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages; |
---|
2523 | | - size_t in_size, out_size, transferred, c; |
---|
| 2825 | + unsigned int in_iovs = 0, out_iovs = 0, max_pages; |
---|
| 2826 | + size_t in_size, out_size, c; |
---|
| 2827 | + ssize_t transferred; |
---|
2524 | 2828 | int err, i; |
---|
2525 | 2829 | struct iov_iter ii; |
---|
| 2830 | + struct fuse_args_pages ap = {}; |
---|
2526 | 2831 | |
---|
2527 | 2832 | #if BITS_PER_LONG == 32 |
---|
2528 | 2833 | inarg.flags |= FUSE_IOCTL_32BIT; |
---|
2529 | 2834 | #else |
---|
2530 | | - if (flags & FUSE_IOCTL_COMPAT) |
---|
| 2835 | + if (flags & FUSE_IOCTL_COMPAT) { |
---|
2531 | 2836 | inarg.flags |= FUSE_IOCTL_32BIT; |
---|
| 2837 | +#ifdef CONFIG_X86_X32 |
---|
| 2838 | + if (in_x32_syscall()) |
---|
| 2839 | + inarg.flags |= FUSE_IOCTL_COMPAT_X32; |
---|
| 2840 | +#endif |
---|
| 2841 | + } |
---|
2532 | 2842 | #endif |
---|
2533 | 2843 | |
---|
2534 | 2844 | /* assume all the iovs returned by client always fits in a page */ |
---|
2535 | 2845 | BUILD_BUG_ON(sizeof(struct fuse_ioctl_iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); |
---|
2536 | 2846 | |
---|
2537 | 2847 | err = -ENOMEM; |
---|
2538 | | - pages = kcalloc(FUSE_MAX_PAGES_PER_REQ, sizeof(pages[0]), GFP_KERNEL); |
---|
| 2848 | + ap.pages = fuse_pages_alloc(fm->fc->max_pages, GFP_KERNEL, &ap.descs); |
---|
2539 | 2849 | iov_page = (struct iovec *) __get_free_page(GFP_KERNEL); |
---|
2540 | | - if (!pages || !iov_page) |
---|
| 2850 | + if (!ap.pages || !iov_page) |
---|
2541 | 2851 | goto out; |
---|
| 2852 | + |
---|
| 2853 | + fuse_page_descs_length_init(ap.descs, 0, fm->fc->max_pages); |
---|
2542 | 2854 | |
---|
2543 | 2855 | /* |
---|
2544 | 2856 | * If restricted, initialize IO parameters as encoded in @cmd. |
---|
.. | .. |
---|
2583 | 2895 | |
---|
2584 | 2896 | /* make sure there are enough buffer pages and init request with them */ |
---|
2585 | 2897 | err = -ENOMEM; |
---|
2586 | | - if (max_pages > FUSE_MAX_PAGES_PER_REQ) |
---|
| 2898 | + if (max_pages > fm->fc->max_pages) |
---|
2587 | 2899 | goto out; |
---|
2588 | | - while (num_pages < max_pages) { |
---|
2589 | | - pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); |
---|
2590 | | - if (!pages[num_pages]) |
---|
| 2900 | + while (ap.num_pages < max_pages) { |
---|
| 2901 | + ap.pages[ap.num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); |
---|
| 2902 | + if (!ap.pages[ap.num_pages]) |
---|
2591 | 2903 | goto out; |
---|
2592 | | - num_pages++; |
---|
| 2904 | + ap.num_pages++; |
---|
2593 | 2905 | } |
---|
2594 | 2906 | |
---|
2595 | | - req = fuse_get_req(fc, num_pages); |
---|
2596 | | - if (IS_ERR(req)) { |
---|
2597 | | - err = PTR_ERR(req); |
---|
2598 | | - req = NULL; |
---|
2599 | | - goto out; |
---|
2600 | | - } |
---|
2601 | | - memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages); |
---|
2602 | | - req->num_pages = num_pages; |
---|
2603 | | - fuse_page_descs_length_init(req, 0, req->num_pages); |
---|
2604 | 2907 | |
---|
2605 | 2908 | /* okay, let's send it to the client */ |
---|
2606 | | - req->in.h.opcode = FUSE_IOCTL; |
---|
2607 | | - req->in.h.nodeid = ff->nodeid; |
---|
2608 | | - req->in.numargs = 1; |
---|
2609 | | - req->in.args[0].size = sizeof(inarg); |
---|
2610 | | - req->in.args[0].value = &inarg; |
---|
| 2909 | + ap.args.opcode = FUSE_IOCTL; |
---|
| 2910 | + ap.args.nodeid = ff->nodeid; |
---|
| 2911 | + ap.args.in_numargs = 1; |
---|
| 2912 | + ap.args.in_args[0].size = sizeof(inarg); |
---|
| 2913 | + ap.args.in_args[0].value = &inarg; |
---|
2611 | 2914 | if (in_size) { |
---|
2612 | | - req->in.numargs++; |
---|
2613 | | - req->in.args[1].size = in_size; |
---|
2614 | | - req->in.argpages = 1; |
---|
| 2915 | + ap.args.in_numargs++; |
---|
| 2916 | + ap.args.in_args[1].size = in_size; |
---|
| 2917 | + ap.args.in_pages = true; |
---|
2615 | 2918 | |
---|
2616 | 2919 | err = -EFAULT; |
---|
2617 | 2920 | iov_iter_init(&ii, WRITE, in_iov, in_iovs, in_size); |
---|
2618 | | - for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= num_pages); i++) { |
---|
2619 | | - c = copy_page_from_iter(pages[i], 0, PAGE_SIZE, &ii); |
---|
| 2921 | + for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) { |
---|
| 2922 | + c = copy_page_from_iter(ap.pages[i], 0, PAGE_SIZE, &ii); |
---|
2620 | 2923 | if (c != PAGE_SIZE && iov_iter_count(&ii)) |
---|
2621 | 2924 | goto out; |
---|
2622 | 2925 | } |
---|
2623 | 2926 | } |
---|
2624 | 2927 | |
---|
2625 | | - req->out.numargs = 2; |
---|
2626 | | - req->out.args[0].size = sizeof(outarg); |
---|
2627 | | - req->out.args[0].value = &outarg; |
---|
2628 | | - req->out.args[1].size = out_size; |
---|
2629 | | - req->out.argpages = 1; |
---|
2630 | | - req->out.argvar = 1; |
---|
| 2928 | + ap.args.out_numargs = 2; |
---|
| 2929 | + ap.args.out_args[0].size = sizeof(outarg); |
---|
| 2930 | + ap.args.out_args[0].value = &outarg; |
---|
| 2931 | + ap.args.out_args[1].size = out_size; |
---|
| 2932 | + ap.args.out_pages = true; |
---|
| 2933 | + ap.args.out_argvar = true; |
---|
2631 | 2934 | |
---|
2632 | | - fuse_request_send(fc, req); |
---|
2633 | | - err = req->out.h.error; |
---|
2634 | | - transferred = req->out.args[1].size; |
---|
2635 | | - fuse_put_request(fc, req); |
---|
2636 | | - req = NULL; |
---|
2637 | | - if (err) |
---|
| 2935 | + transferred = fuse_simple_request(fm, &ap.args); |
---|
| 2936 | + err = transferred; |
---|
| 2937 | + if (transferred < 0) |
---|
2638 | 2938 | goto out; |
---|
2639 | 2939 | |
---|
2640 | 2940 | /* did it ask for retry? */ |
---|
.. | .. |
---|
2659 | 2959 | in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) |
---|
2660 | 2960 | goto out; |
---|
2661 | 2961 | |
---|
2662 | | - vaddr = kmap_atomic(pages[0]); |
---|
2663 | | - err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr, |
---|
| 2962 | + vaddr = kmap_atomic(ap.pages[0]); |
---|
| 2963 | + err = fuse_copy_ioctl_iovec(fm->fc, iov_page, vaddr, |
---|
2664 | 2964 | transferred, in_iovs + out_iovs, |
---|
2665 | 2965 | (flags & FUSE_IOCTL_COMPAT) != 0); |
---|
2666 | 2966 | kunmap_atomic(vaddr); |
---|
.. | .. |
---|
2670 | 2970 | in_iov = iov_page; |
---|
2671 | 2971 | out_iov = in_iov + in_iovs; |
---|
2672 | 2972 | |
---|
2673 | | - err = fuse_verify_ioctl_iov(in_iov, in_iovs); |
---|
| 2973 | + err = fuse_verify_ioctl_iov(fm->fc, in_iov, in_iovs); |
---|
2674 | 2974 | if (err) |
---|
2675 | 2975 | goto out; |
---|
2676 | 2976 | |
---|
2677 | | - err = fuse_verify_ioctl_iov(out_iov, out_iovs); |
---|
| 2977 | + err = fuse_verify_ioctl_iov(fm->fc, out_iov, out_iovs); |
---|
2678 | 2978 | if (err) |
---|
2679 | 2979 | goto out; |
---|
2680 | 2980 | |
---|
.. | .. |
---|
2687 | 2987 | |
---|
2688 | 2988 | err = -EFAULT; |
---|
2689 | 2989 | iov_iter_init(&ii, READ, out_iov, out_iovs, transferred); |
---|
2690 | | - for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= num_pages); i++) { |
---|
2691 | | - c = copy_page_to_iter(pages[i], 0, PAGE_SIZE, &ii); |
---|
| 2990 | + for (i = 0; iov_iter_count(&ii) && !WARN_ON(i >= ap.num_pages); i++) { |
---|
| 2991 | + c = copy_page_to_iter(ap.pages[i], 0, PAGE_SIZE, &ii); |
---|
2692 | 2992 | if (c != PAGE_SIZE && iov_iter_count(&ii)) |
---|
2693 | 2993 | goto out; |
---|
2694 | 2994 | } |
---|
2695 | 2995 | err = 0; |
---|
2696 | 2996 | out: |
---|
2697 | | - if (req) |
---|
2698 | | - fuse_put_request(fc, req); |
---|
2699 | 2997 | free_page((unsigned long) iov_page); |
---|
2700 | | - while (num_pages) |
---|
2701 | | - __free_page(pages[--num_pages]); |
---|
2702 | | - kfree(pages); |
---|
| 2998 | + while (ap.num_pages) |
---|
| 2999 | + __free_page(ap.pages[--ap.num_pages]); |
---|
| 3000 | + kfree(ap.pages); |
---|
2703 | 3001 | |
---|
2704 | 3002 | return err ? err : outarg.result; |
---|
2705 | 3003 | } |
---|
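
fuse_do_ioctl() no longer allocates a struct fuse_req: it fills a stack-allocated struct fuse_args_pages and lets fuse_simple_request() do the submission, with out_argvar making the return value the number of bytes placed in the last (variable-sized) out argument, or a negative error, as the `transferred < 0` check above shows. On x86-64, x32 callers are additionally flagged with FUSE_IOCTL_COMPAT_X32 so the server can tell them apart from plain 32-bit compat callers. The shape below is inferred from the ap.* accesses in this hunk and is only a sketch; the authoritative definition lives in fs/fuse/fuse_i.h:

```c
/* Hedged sketch of the page-carrying request arguments used above. */
struct fuse_args_pages {
	struct fuse_args args;		/* opcode, nodeid, in_args[]/out_args[],
					 * in_pages/out_pages/out_argvar flags */
	struct page **pages;		/* data pages from fuse_pages_alloc() */
	struct fuse_page_desc *descs;	/* per-page offset/length descriptors */
	unsigned int num_pages;
};
```
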
.. | .. |
---|
2773 | 3071 | { |
---|
2774 | 3072 | spin_lock(&fc->lock); |
---|
2775 | 3073 | if (RB_EMPTY_NODE(&ff->polled_node)) { |
---|
2776 | | - struct rb_node **link, *uninitialized_var(parent); |
---|
| 3074 | + struct rb_node **link, *parent; |
---|
2777 | 3075 | |
---|
2778 | 3076 | link = fuse_find_polled_node(fc, ff->kh, &parent); |
---|
2779 | 3077 | BUG_ON(*link); |
---|
.. | .. |
---|
2786 | 3084 | __poll_t fuse_file_poll(struct file *file, poll_table *wait) |
---|
2787 | 3085 | { |
---|
2788 | 3086 | struct fuse_file *ff = file->private_data; |
---|
2789 | | - struct fuse_conn *fc = ff->fc; |
---|
| 3087 | + struct fuse_mount *fm = ff->fm; |
---|
2790 | 3088 | struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; |
---|
2791 | 3089 | struct fuse_poll_out outarg; |
---|
2792 | 3090 | FUSE_ARGS(args); |
---|
2793 | 3091 | int err; |
---|
2794 | 3092 | |
---|
2795 | | - if (fc->no_poll) |
---|
| 3093 | + if (fm->fc->no_poll) |
---|
2796 | 3094 | return DEFAULT_POLLMASK; |
---|
2797 | 3095 | |
---|
2798 | 3096 | poll_wait(file, &ff->poll_wait, wait); |
---|
.. | .. |
---|
2804 | 3102 | */ |
---|
2805 | 3103 | if (waitqueue_active(&ff->poll_wait)) { |
---|
2806 | 3104 | inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY; |
---|
2807 | | - fuse_register_polled_file(fc, ff); |
---|
| 3105 | + fuse_register_polled_file(fm->fc, ff); |
---|
2808 | 3106 | } |
---|
2809 | 3107 | |
---|
2810 | | - args.in.h.opcode = FUSE_POLL; |
---|
2811 | | - args.in.h.nodeid = ff->nodeid; |
---|
2812 | | - args.in.numargs = 1; |
---|
2813 | | - args.in.args[0].size = sizeof(inarg); |
---|
2814 | | - args.in.args[0].value = &inarg; |
---|
2815 | | - args.out.numargs = 1; |
---|
2816 | | - args.out.args[0].size = sizeof(outarg); |
---|
2817 | | - args.out.args[0].value = &outarg; |
---|
2818 | | - err = fuse_simple_request(fc, &args); |
---|
| 3108 | + args.opcode = FUSE_POLL; |
---|
| 3109 | + args.nodeid = ff->nodeid; |
---|
| 3110 | + args.in_numargs = 1; |
---|
| 3111 | + args.in_args[0].size = sizeof(inarg); |
---|
| 3112 | + args.in_args[0].value = &inarg; |
---|
| 3113 | + args.out_numargs = 1; |
---|
| 3114 | + args.out_args[0].size = sizeof(outarg); |
---|
| 3115 | + args.out_args[0].value = &outarg; |
---|
| 3116 | + err = fuse_simple_request(fm, &args); |
---|
2819 | 3117 | |
---|
2820 | 3118 | if (!err) |
---|
2821 | 3119 | return demangle_poll(outarg.revents); |
---|
2822 | 3120 | if (err == -ENOSYS) { |
---|
2823 | | - fc->no_poll = 1; |
---|
| 3121 | + fm->fc->no_poll = 1; |
---|
2824 | 3122 | return DEFAULT_POLLMASK; |
---|
2825 | 3123 | } |
---|
2826 | 3124 | return EPOLLERR; |
---|
.. | .. |
---|
2865 | 3163 | fuse_do_setattr(file_dentry(file), &attr, file); |
---|
2866 | 3164 | } |
---|
2867 | 3165 | |
---|
2868 | | -static inline loff_t fuse_round_up(loff_t off) |
---|
| 3166 | +static inline loff_t fuse_round_up(struct fuse_conn *fc, loff_t off) |
---|
2869 | 3167 | { |
---|
2870 | | - return round_up(off, FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT); |
---|
| 3168 | + return round_up(off, fc->max_pages << PAGE_SHIFT); |
---|
2871 | 3169 | } |
---|
2872 | 3170 | |
---|
2873 | 3171 | static ssize_t |
---|
.. | .. |
---|
2877 | 3175 | ssize_t ret = 0; |
---|
2878 | 3176 | struct file *file = iocb->ki_filp; |
---|
2879 | 3177 | struct fuse_file *ff = file->private_data; |
---|
2880 | | - bool async_dio = ff->fc->async_dio; |
---|
2881 | 3178 | loff_t pos = 0; |
---|
2882 | 3179 | struct inode *inode; |
---|
2883 | 3180 | loff_t i_size; |
---|
2884 | | - size_t count = iov_iter_count(iter); |
---|
| 3181 | + size_t count = iov_iter_count(iter), shortened = 0; |
---|
2885 | 3182 | loff_t offset = iocb->ki_pos; |
---|
2886 | 3183 | struct fuse_io_priv *io; |
---|
2887 | 3184 | |
---|
.. | .. |
---|
2889 | 3186 | inode = file->f_mapping->host; |
---|
2890 | 3187 | i_size = i_size_read(inode); |
---|
2891 | 3188 | |
---|
2892 | | - if ((iov_iter_rw(iter) == READ) && (offset > i_size)) |
---|
| 3189 | + if ((iov_iter_rw(iter) == READ) && (offset >= i_size)) |
---|
2893 | 3190 | return 0; |
---|
2894 | | - |
---|
2895 | | - /* optimization for short read */ |
---|
2896 | | - if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) { |
---|
2897 | | - if (offset >= i_size) |
---|
2898 | | - return 0; |
---|
2899 | | - iov_iter_truncate(iter, fuse_round_up(i_size - offset)); |
---|
2900 | | - count = iov_iter_count(iter); |
---|
2901 | | - } |
---|
2902 | 3191 | |
---|
2903 | 3192 | io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); |
---|
2904 | 3193 | if (!io) |
---|
.. | .. |
---|
2915 | 3204 | * By default, we want to optimize all I/Os with async request |
---|
2916 | 3205 | * submission to the client filesystem if supported. |
---|
2917 | 3206 | */ |
---|
2918 | | - io->async = async_dio; |
---|
| 3207 | + io->async = ff->fm->fc->async_dio; |
---|
2919 | 3208 | io->iocb = iocb; |
---|
2920 | 3209 | io->blocking = is_sync_kiocb(iocb); |
---|
| 3210 | + |
---|
| 3211 | + /* optimization for short read */ |
---|
| 3212 | + if (io->async && !io->write && offset + count > i_size) { |
---|
| 3213 | + iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset)); |
---|
| 3214 | + shortened = count - iov_iter_count(iter); |
---|
| 3215 | + count -= shortened; |
---|
| 3216 | + } |
---|
2921 | 3217 | |
---|
2922 | 3218 | /* |
---|
2923 | 3219 | * We cannot asynchronously extend the size of a file. |
---|
2924 | 3220 | * In such case the aio will behave exactly like sync io. |
---|
2925 | 3221 | */ |
---|
2926 | | - if ((offset + count > i_size) && iov_iter_rw(iter) == WRITE) |
---|
| 3222 | + if ((offset + count > i_size) && io->write) |
---|
2927 | 3223 | io->blocking = true; |
---|
2928 | 3224 | |
---|
2929 | 3225 | if (io->async && io->blocking) { |
---|
.. | .. |
---|
2941 | 3237 | } else { |
---|
2942 | 3238 | ret = __fuse_direct_read(io, iter, &pos); |
---|
2943 | 3239 | } |
---|
| 3240 | + iov_iter_reexpand(iter, iov_iter_count(iter) + shortened); |
---|
2944 | 3241 | |
---|
2945 | 3242 | if (io->async) { |
---|
2946 | 3243 | bool blocking = io->blocking; |
---|
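
The short-read optimization in fuse_direct_IO() now shrinks the iterator in place, remembers by how much, and restores it with iov_iter_reexpand() once the read has been issued; the early return also changes from `offset > i_size` to `offset >= i_size`, so a read starting exactly at EOF returns 0 without taking this detour. A condensed sketch of the truncate/re-expand pairing, using only the helpers appearing in this hunk (a fragment, error handling omitted):

```c
/* Hedged sketch: shorten an async read that reaches past EOF, then give
 * the caller back an iterator of the original size. */
size_t count = iov_iter_count(iter), shortened = 0;

if (io->async && !io->write && offset + count > i_size) {
	iov_iter_truncate(iter, fuse_round_up(ff->fm->fc, i_size - offset));
	shortened = count - iov_iter_count(iter);
	count -= shortened;
}

/* ... __fuse_direct_read() / fuse_direct_io() runs on the shortened iter ... */

iov_iter_reexpand(iter, iov_iter_count(iter) + shortened);
```
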
.. | .. |
---|
2967 | 3264 | return ret; |
---|
2968 | 3265 | } |
---|
2969 | 3266 | |
---|
| 3267 | +static int fuse_writeback_range(struct inode *inode, loff_t start, loff_t end) |
---|
| 3268 | +{ |
---|
| 3269 | + int err = filemap_write_and_wait_range(inode->i_mapping, start, LLONG_MAX); |
---|
| 3270 | + |
---|
| 3271 | + if (!err) |
---|
| 3272 | + fuse_sync_writes(inode); |
---|
| 3273 | + |
---|
| 3274 | + return err; |
---|
| 3275 | +} |
---|
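
fuse_writeback_range() is the helper shared by fallocate and copy_file_range below: it flushes and waits for dirty page cache, then waits for any FUSE write-back still in flight via fuse_sync_writes(). Note that although it takes an end argument, it flushes from start all the way to LLONG_MAX, so everything beyond the requested range is written back too. A hedged sketch of the calling pattern the code below follows:

```c
/* Hedged usage sketch: give the server a coherent view of the file before
 * an operation that bypasses the page cache (FUSE_FALLOCATE punch-hole,
 * FUSE_COPY_FILE_RANGE). */
inode_lock(inode);
err = fuse_writeback_range(inode, offset, offset + length - 1);
if (!err) {
	/* ... fill fuse_args and call fuse_simple_request(fm, &args) ... */
}
inode_unlock(inode);
```
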
| 3276 | + |
---|
2970 | 3277 | static long fuse_file_fallocate(struct file *file, int mode, loff_t offset, |
---|
2971 | 3278 | loff_t length) |
---|
2972 | 3279 | { |
---|
2973 | 3280 | struct fuse_file *ff = file->private_data; |
---|
2974 | 3281 | struct inode *inode = file_inode(file); |
---|
2975 | 3282 | struct fuse_inode *fi = get_fuse_inode(inode); |
---|
2976 | | - struct fuse_conn *fc = ff->fc; |
---|
| 3283 | + struct fuse_mount *fm = ff->fm; |
---|
2977 | 3284 | FUSE_ARGS(args); |
---|
2978 | 3285 | struct fuse_fallocate_in inarg = { |
---|
2979 | 3286 | .fh = ff->fh, |
---|
.. | .. |
---|
2982 | 3289 | .mode = mode |
---|
2983 | 3290 | }; |
---|
2984 | 3291 | int err; |
---|
2985 | | - bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) || |
---|
2986 | | - (mode & FALLOC_FL_PUNCH_HOLE); |
---|
| 3292 | + bool block_faults = FUSE_IS_DAX(inode) && |
---|
| 3293 | + (!(mode & FALLOC_FL_KEEP_SIZE) || |
---|
| 3294 | + (mode & FALLOC_FL_PUNCH_HOLE)); |
---|
2987 | 3295 | |
---|
2988 | 3296 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) |
---|
2989 | 3297 | return -EOPNOTSUPP; |
---|
2990 | 3298 | |
---|
2991 | | - if (fc->no_fallocate) |
---|
| 3299 | + if (fm->fc->no_fallocate) |
---|
2992 | 3300 | return -EOPNOTSUPP; |
---|
2993 | 3301 | |
---|
2994 | | - if (lock_inode) { |
---|
2995 | | - inode_lock(inode); |
---|
2996 | | - if (mode & FALLOC_FL_PUNCH_HOLE) { |
---|
2997 | | - loff_t endbyte = offset + length - 1; |
---|
2998 | | - err = filemap_write_and_wait_range(inode->i_mapping, |
---|
2999 | | - offset, endbyte); |
---|
3000 | | - if (err) |
---|
3001 | | - goto out; |
---|
| 3302 | + inode_lock(inode); |
---|
| 3303 | + if (block_faults) { |
---|
| 3304 | + down_write(&fi->i_mmap_sem); |
---|
| 3305 | + err = fuse_dax_break_layouts(inode, 0, 0); |
---|
| 3306 | + if (err) |
---|
| 3307 | + goto out; |
---|
| 3308 | + } |
---|
3002 | 3309 | |
---|
3003 | | - fuse_sync_writes(inode); |
---|
3004 | | - } |
---|
| 3310 | + if (mode & FALLOC_FL_PUNCH_HOLE) { |
---|
| 3311 | + loff_t endbyte = offset + length - 1; |
---|
| 3312 | + |
---|
| 3313 | + err = fuse_writeback_range(inode, offset, endbyte); |
---|
| 3314 | + if (err) |
---|
| 3315 | + goto out; |
---|
3005 | 3316 | } |
---|
3006 | 3317 | |
---|
3007 | 3318 | if (!(mode & FALLOC_FL_KEEP_SIZE) && |
---|
.. | .. |
---|
3011 | 3322 | goto out; |
---|
3012 | 3323 | } |
---|
3013 | 3324 | |
---|
| 3325 | + err = file_modified(file); |
---|
| 3326 | + if (err) |
---|
| 3327 | + goto out; |
---|
| 3328 | + |
---|
3014 | 3329 | if (!(mode & FALLOC_FL_KEEP_SIZE)) |
---|
3015 | 3330 | set_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); |
---|
3016 | 3331 | |
---|
3017 | | - args.in.h.opcode = FUSE_FALLOCATE; |
---|
3018 | | - args.in.h.nodeid = ff->nodeid; |
---|
3019 | | - args.in.numargs = 1; |
---|
3020 | | - args.in.args[0].size = sizeof(inarg); |
---|
3021 | | - args.in.args[0].value = &inarg; |
---|
3022 | | - err = fuse_simple_request(fc, &args); |
---|
| 3332 | + args.opcode = FUSE_FALLOCATE; |
---|
| 3333 | + args.nodeid = ff->nodeid; |
---|
| 3334 | + args.in_numargs = 1; |
---|
| 3335 | + args.in_args[0].size = sizeof(inarg); |
---|
| 3336 | + args.in_args[0].value = &inarg; |
---|
| 3337 | + err = fuse_simple_request(fm, &args); |
---|
3023 | 3338 | if (err == -ENOSYS) { |
---|
3024 | | - fc->no_fallocate = 1; |
---|
| 3339 | + fm->fc->no_fallocate = 1; |
---|
3025 | 3340 | err = -EOPNOTSUPP; |
---|
3026 | 3341 | } |
---|
3027 | 3342 | if (err) |
---|
.. | .. |
---|
3031 | 3346 | if (!(mode & FALLOC_FL_KEEP_SIZE)) { |
---|
3032 | 3347 | bool changed = fuse_write_update_size(inode, offset + length); |
---|
3033 | 3348 | |
---|
3034 | | - if (changed && fc->writeback_cache) |
---|
| 3349 | + if (changed && fm->fc->writeback_cache) |
---|
3035 | 3350 | file_update_time(file); |
---|
3036 | 3351 | } |
---|
3037 | 3352 | |
---|
.. | .. |
---|
3044 | 3359 | if (!(mode & FALLOC_FL_KEEP_SIZE)) |
---|
3045 | 3360 | clear_bit(FUSE_I_SIZE_UNSTABLE, &fi->state); |
---|
3046 | 3361 | |
---|
3047 | | - if (lock_inode) |
---|
3048 | | - inode_unlock(inode); |
---|
| 3362 | + if (block_faults) |
---|
| 3363 | + up_write(&fi->i_mmap_sem); |
---|
| 3364 | + |
---|
| 3365 | + inode_unlock(inode); |
---|
| 3366 | + |
---|
| 3367 | + fuse_flush_time_update(inode); |
---|
3049 | 3368 | |
---|
3050 | 3369 | return err; |
---|
| 3370 | +} |
---|
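
fuse_file_fallocate() now takes the inode lock unconditionally (previously only for size-changing or hole-punching requests), blocks DAX page faults while layouts are broken, and calls file_modified() so that, like a regular write, a fallocate strips SUID/SGID and updates timestamps. A condensed, hedged summary of the resulting ordering (error paths and the size-unstable bit omitted):

```c
/* Hedged condensation of the locking and flush ordering in this hunk. */
inode_lock(inode);
if (block_faults) {			/* DAX inode and a size-changing mode */
	down_write(&fi->i_mmap_sem);
	err = fuse_dax_break_layouts(inode, 0, 0);
}
if (mode & FALLOC_FL_PUNCH_HOLE)
	err = fuse_writeback_range(inode, offset, offset + length - 1);
err = file_modified(file);
/* ... FUSE_FALLOCATE request, i_size update ... */
if (block_faults)
	up_write(&fi->i_mmap_sem);
inode_unlock(inode);
fuse_flush_time_update(inode);
```
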
| 3371 | + |
---|
| 3372 | +static ssize_t __fuse_copy_file_range(struct file *file_in, loff_t pos_in, |
---|
| 3373 | + struct file *file_out, loff_t pos_out, |
---|
| 3374 | + size_t len, unsigned int flags) |
---|
| 3375 | +{ |
---|
| 3376 | + struct fuse_file *ff_in = file_in->private_data; |
---|
| 3377 | + struct fuse_file *ff_out = file_out->private_data; |
---|
| 3378 | + struct inode *inode_in = file_inode(file_in); |
---|
| 3379 | + struct inode *inode_out = file_inode(file_out); |
---|
| 3380 | + struct fuse_inode *fi_out = get_fuse_inode(inode_out); |
---|
| 3381 | + struct fuse_mount *fm = ff_in->fm; |
---|
| 3382 | + struct fuse_conn *fc = fm->fc; |
---|
| 3383 | + FUSE_ARGS(args); |
---|
| 3384 | + struct fuse_copy_file_range_in inarg = { |
---|
| 3385 | + .fh_in = ff_in->fh, |
---|
| 3386 | + .off_in = pos_in, |
---|
| 3387 | + .nodeid_out = ff_out->nodeid, |
---|
| 3388 | + .fh_out = ff_out->fh, |
---|
| 3389 | + .off_out = pos_out, |
---|
| 3390 | + .len = len, |
---|
| 3391 | + .flags = flags |
---|
| 3392 | + }; |
---|
| 3393 | + struct fuse_write_out outarg; |
---|
| 3394 | + ssize_t err; |
---|
| 3395 | + /* mark unstable when write-back is not used, and file_out gets |
---|
| 3396 | + * extended */ |
---|
| 3397 | + bool is_unstable = (!fc->writeback_cache) && |
---|
| 3398 | + ((pos_out + len) > inode_out->i_size); |
---|
| 3399 | + |
---|
| 3400 | + if (fc->no_copy_file_range) |
---|
| 3401 | + return -EOPNOTSUPP; |
---|
| 3402 | + |
---|
| 3403 | + if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb) |
---|
| 3404 | + return -EXDEV; |
---|
| 3405 | + |
---|
| 3406 | + inode_lock(inode_in); |
---|
| 3407 | + err = fuse_writeback_range(inode_in, pos_in, pos_in + len - 1); |
---|
| 3408 | + inode_unlock(inode_in); |
---|
| 3409 | + if (err) |
---|
| 3410 | + return err; |
---|
| 3411 | + |
---|
| 3412 | + inode_lock(inode_out); |
---|
| 3413 | + |
---|
| 3414 | + err = file_modified(file_out); |
---|
| 3415 | + if (err) |
---|
| 3416 | + goto out; |
---|
| 3417 | + |
---|
| 3418 | + /* |
---|
| 3419 | + * Write out dirty pages in the destination file before sending the COPY |
---|
| 3420 | + * request to userspace. After the request is completed, truncate off |
---|
| 3421 | + * pages (including partial ones) from the cache that have been copied, |
---|
| 3422 | + * since these contain stale data at that point. |
---|
| 3423 | + * |
---|
| 3424 | + * This should be mostly correct, but if the COPY writes to partial |
---|
| 3425 | + * pages (at the start or end) and the parts not covered by the COPY are |
---|
| 3426 | + * written through a memory map after calling fuse_writeback_range(), |
---|
| 3427 | + * then these partial page modifications will be lost on truncation. |
---|
| 3428 | + * |
---|
| 3429 | + * It is unlikely that someone would rely on such mixed style |
---|
| 3430 | + * modifications. Yet this does give less guarantees than if the |
---|
| 3431 | + * copying was performed with write(2). |
---|
| 3432 | + * |
---|
| 3433 | + * To fix this a i_mmap_sem style lock could be used to prevent new |
---|
| 3434 | + * faults while the copy is ongoing. |
---|
| 3435 | + */ |
---|
| 3436 | + err = fuse_writeback_range(inode_out, pos_out, pos_out + len - 1); |
---|
| 3437 | + if (err) |
---|
| 3438 | + goto out; |
---|
| 3439 | + |
---|
| 3440 | + if (is_unstable) |
---|
| 3441 | + set_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state); |
---|
| 3442 | + |
---|
| 3443 | + args.opcode = FUSE_COPY_FILE_RANGE; |
---|
| 3444 | + args.nodeid = ff_in->nodeid; |
---|
| 3445 | + args.in_numargs = 1; |
---|
| 3446 | + args.in_args[0].size = sizeof(inarg); |
---|
| 3447 | + args.in_args[0].value = &inarg; |
---|
| 3448 | + args.out_numargs = 1; |
---|
| 3449 | + args.out_args[0].size = sizeof(outarg); |
---|
| 3450 | + args.out_args[0].value = &outarg; |
---|
| 3451 | + err = fuse_simple_request(fm, &args); |
---|
| 3452 | + if (err == -ENOSYS) { |
---|
| 3453 | + fc->no_copy_file_range = 1; |
---|
| 3454 | + err = -EOPNOTSUPP; |
---|
| 3455 | + } |
---|
| 3456 | + if (err) |
---|
| 3457 | + goto out; |
---|
| 3458 | + |
---|
| 3459 | + truncate_inode_pages_range(inode_out->i_mapping, |
---|
| 3460 | + ALIGN_DOWN(pos_out, PAGE_SIZE), |
---|
| 3461 | + ALIGN(pos_out + outarg.size, PAGE_SIZE) - 1); |
---|
| 3462 | + |
---|
| 3463 | + if (fc->writeback_cache) { |
---|
| 3464 | + fuse_write_update_size(inode_out, pos_out + outarg.size); |
---|
| 3465 | + file_update_time(file_out); |
---|
| 3466 | + } |
---|
| 3467 | + |
---|
| 3468 | + fuse_invalidate_attr(inode_out); |
---|
| 3469 | + |
---|
| 3470 | + err = outarg.size; |
---|
| 3471 | +out: |
---|
| 3472 | + if (is_unstable) |
---|
| 3473 | + clear_bit(FUSE_I_SIZE_UNSTABLE, &fi_out->state); |
---|
| 3474 | + |
---|
| 3475 | + inode_unlock(inode_out); |
---|
| 3476 | + file_accessed(file_in); |
---|
| 3477 | + |
---|
| 3478 | + fuse_flush_time_update(inode_out); |
---|
| 3479 | + |
---|
| 3480 | + return err; |
---|
| 3481 | +} |
---|
| 3482 | + |
---|
| 3483 | +static ssize_t fuse_copy_file_range(struct file *src_file, loff_t src_off, |
---|
| 3484 | + struct file *dst_file, loff_t dst_off, |
---|
| 3485 | + size_t len, unsigned int flags) |
---|
| 3486 | +{ |
---|
| 3487 | + ssize_t ret; |
---|
| 3488 | + |
---|
| 3489 | + ret = __fuse_copy_file_range(src_file, src_off, dst_file, dst_off, |
---|
| 3490 | + len, flags); |
---|
| 3491 | + |
---|
| 3492 | + if (ret == -EOPNOTSUPP || ret == -EXDEV) |
---|
| 3493 | + ret = generic_copy_file_range(src_file, src_off, dst_file, |
---|
| 3494 | + dst_off, len, flags); |
---|
| 3495 | + return ret; |
---|
3051 | 3496 | } |
---|
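
FUSE_COPY_FILE_RANGE support is new: __fuse_copy_file_range() flushes both files, sends a single request describing the whole copy, truncates the copied range out of the destination's page cache, and updates size and timestamps; the fuse_copy_file_range() wrapper then falls back to generic_copy_file_range() when the server lacks the opcode (-EOPNOTSUPP) or the two files live on different superblocks (-EXDEV). An illustrative userspace call, hedged; nothing FUSE-specific is required on the caller's side:

```c
/* Illustration only: copying between two files on the same FUSE mount can
 * now be completed server-side in one request. */
#define _GNU_SOURCE
#include <unistd.h>

static ssize_t copy_whole(int fd_in, int fd_out, size_t len)
{
	off64_t off_in = 0, off_out = 0;

	/* flags must be 0; returns bytes copied, or -1 with errno set */
	return copy_file_range(fd_in, &off_in, fd_out, &off_out, len, 0);
}
```
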
3052 | 3497 | |
---|
3053 | 3498 | static const struct file_operations fuse_file_operations = { |
---|
.. | .. |
---|
3060 | 3505 | .release = fuse_release, |
---|
3061 | 3506 | .fsync = fuse_fsync, |
---|
3062 | 3507 | .lock = fuse_file_lock, |
---|
| 3508 | + .get_unmapped_area = thp_get_unmapped_area, |
---|
3063 | 3509 | .flock = fuse_file_flock, |
---|
3064 | 3510 | .splice_read = generic_file_splice_read, |
---|
| 3511 | + .splice_write = iter_file_splice_write, |
---|
3065 | 3512 | .unlocked_ioctl = fuse_file_ioctl, |
---|
3066 | 3513 | .compat_ioctl = fuse_file_compat_ioctl, |
---|
3067 | 3514 | .poll = fuse_file_poll, |
---|
3068 | 3515 | .fallocate = fuse_file_fallocate, |
---|
3069 | | -}; |
---|
3070 | | - |
---|
3071 | | -static const struct file_operations fuse_direct_io_file_operations = { |
---|
3072 | | - .llseek = fuse_file_llseek, |
---|
3073 | | - .read_iter = fuse_direct_read_iter, |
---|
3074 | | - .write_iter = fuse_direct_write_iter, |
---|
3075 | | - .mmap = fuse_direct_mmap, |
---|
3076 | | - .open = fuse_open, |
---|
3077 | | - .flush = fuse_flush, |
---|
3078 | | - .release = fuse_release, |
---|
3079 | | - .fsync = fuse_fsync, |
---|
3080 | | - .lock = fuse_file_lock, |
---|
3081 | | - .flock = fuse_file_flock, |
---|
3082 | | - .unlocked_ioctl = fuse_file_ioctl, |
---|
3083 | | - .compat_ioctl = fuse_file_compat_ioctl, |
---|
3084 | | - .poll = fuse_file_poll, |
---|
3085 | | - .fallocate = fuse_file_fallocate, |
---|
3086 | | - /* no splice_read */ |
---|
| 3516 | + .copy_file_range = fuse_copy_file_range, |
---|
3087 | 3517 | }; |
---|
3088 | 3518 | |
---|
3089 | 3519 | static const struct address_space_operations fuse_file_aops = { |
---|
3090 | 3520 | .readpage = fuse_readpage, |
---|
| 3521 | + .readahead = fuse_readahead, |
---|
3091 | 3522 | .writepage = fuse_writepage, |
---|
3092 | 3523 | .writepages = fuse_writepages, |
---|
3093 | 3524 | .launder_page = fuse_launder_page, |
---|
3094 | | - .readpages = fuse_readpages, |
---|
3095 | 3525 | .set_page_dirty = __set_page_dirty_nobuffers, |
---|
3096 | 3526 | .bmap = fuse_bmap, |
---|
3097 | 3527 | .direct_IO = fuse_direct_IO, |
---|
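
In the address-space operations, the list-based .readpages hook is dropped in favour of .readahead (while .copy_file_range, .splice_write and .get_unmapped_area join the file operations above). For reference, the generic prototypes of the two hooks; these are not FUSE's implementations, which are outside this hunk:

```c
/* Old hook, removed here: the caller hands over a list of not-yet-added pages. */
int (*readpages)(struct file *filp, struct address_space *mapping,
		 struct list_head *pages, unsigned nr_pages);

/* New hook, wired up as fuse_readahead: pages are already in the page
 * cache and locked, described by a readahead_control. */
void (*readahead)(struct readahead_control *rac);
```
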
.. | .. |
---|
3101 | 3531 | |
---|
3102 | 3532 | void fuse_init_file_inode(struct inode *inode) |
---|
3103 | 3533 | { |
---|
| 3534 | + struct fuse_inode *fi = get_fuse_inode(inode); |
---|
| 3535 | + |
---|
3104 | 3536 | inode->i_fop = &fuse_file_operations; |
---|
3105 | 3537 | inode->i_data.a_ops = &fuse_file_aops; |
---|
| 3538 | + |
---|
| 3539 | + INIT_LIST_HEAD(&fi->write_files); |
---|
| 3540 | + INIT_LIST_HEAD(&fi->queued_writes); |
---|
| 3541 | + fi->writectr = 0; |
---|
| 3542 | + init_waitqueue_head(&fi->page_waitq); |
---|
| 3543 | + fi->writepages = RB_ROOT; |
---|
| 3544 | + |
---|
| 3545 | + if (IS_ENABLED(CONFIG_FUSE_DAX)) |
---|
| 3546 | + fuse_dax_inode_init(inode); |
---|
3106 | 3547 | } |
---|
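
fuse_init_file_inode() now also sets up the per-inode write-back bookkeeping and, when CONFIG_FUSE_DAX is enabled, the per-inode DAX state. A rough, hedged map of what those fields track, inferred from their names and from the write-back paths earlier in this file; treat it as orientation rather than documentation (the authoritative comments are in fs/fuse/fuse_i.h):

```c
/* Hedged annotation of the fields initialized above (illustrative only):
 *   fi->write_files   - fuse_files attached for write-back on this inode
 *   fi->queued_writes - writepage requests queued while write-back is blocked
 *   fi->writectr      - in-flight write-back requests (driven negative while
 *                       writes are temporarily suspended)
 *   fi->page_waitq    - wait queue used when syncing/serializing write-back
 *   fi->writepages    - rbtree of requests for pages currently being written
 */
```
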