.. | ..
26 | 26 | MODULE_ALIAS_MISCDEV(FUSE_MINOR);
27 | 27 | MODULE_ALIAS("devname:fuse");
28 | 28 |
| 29 | +/* Ordinary requests have even IDs, while interrupt IDs are odd */
| 30 | +#define FUSE_INT_REQ_BIT (1ULL << 0)
| 31 | +#define FUSE_REQ_ID_STEP (1ULL << 1)
| 32 | +
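The even/odd split lets an interrupt share its parent request's ID: an INTERRUPT is identified by setting the low bit, and a reply is matched back by masking it off (see `fuse_get_unique()`, `fuse_dev_do_read()` and `fuse_dev_do_write()` below). A minimal standalone sketch of the arithmetic, assuming nothing beyond the two defines above:

```c
#include <assert.h>
#include <stdint.h>

#define FUSE_INT_REQ_BIT (1ULL << 0)
#define FUSE_REQ_ID_STEP (1ULL << 1)

int main(void)
{
	uint64_t reqctr = 0;

	/* fuse_get_unique() advances by 2: ordinary IDs are 2, 4, 6, ... */
	uint64_t unique = (reqctr += FUSE_REQ_ID_STEP);
	assert((unique & FUSE_INT_REQ_BIT) == 0);

	/* An INTERRUPT for that request reuses the ID with the low bit set. */
	uint64_t intr_unique = unique | FUSE_INT_REQ_BIT;

	/* A reply is routed back by masking the bit off again. */
	assert((intr_unique & ~FUSE_INT_REQ_BIT) == unique);
	return 0;
}
```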
29 | 33 | static struct kmem_cache *fuse_req_cachep;
30 | 34 |
31 | 35 | static struct fuse_dev *fuse_get_dev(struct file *file)
.. | ..
37 | 41 | 	return READ_ONCE(file->private_data);
38 | 42 | }
39 | 43 |
40 | | -static void fuse_request_init(struct fuse_req *req, struct page **pages,
41 | | -			       struct fuse_page_desc *page_descs,
42 | | -			       unsigned npages)
| 44 | +static void fuse_request_init(struct fuse_mount *fm, struct fuse_req *req)
43 | 45 | {
44 | | -	memset(req, 0, sizeof(*req));
45 | | -	memset(pages, 0, sizeof(*pages) * npages);
46 | | -	memset(page_descs, 0, sizeof(*page_descs) * npages);
47 | 46 | 	INIT_LIST_HEAD(&req->list);
48 | 47 | 	INIT_LIST_HEAD(&req->intr_entry);
49 | 48 | 	init_waitqueue_head(&req->waitq);
50 | 49 | 	refcount_set(&req->count, 1);
51 | | -	req->pages = pages;
52 | | -	req->page_descs = page_descs;
53 | | -	req->max_pages = npages;
54 | 50 | 	__set_bit(FR_PENDING, &req->flags);
| 51 | +	req->fm = fm;
55 | 52 | }
56 | 53 |
57 | | -static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
| 54 | +static struct fuse_req *fuse_request_alloc(struct fuse_mount *fm, gfp_t flags)
58 | 55 | {
59 | | -	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
60 | | -	if (req) {
61 | | -		struct page **pages;
62 | | -		struct fuse_page_desc *page_descs;
| 56 | +	struct fuse_req *req = kmem_cache_zalloc(fuse_req_cachep, flags);
| 57 | +	if (req)
| 58 | +		fuse_request_init(fm, req);
63 | 59 |
64 | | -		if (npages <= FUSE_REQ_INLINE_PAGES) {
65 | | -			pages = req->inline_pages;
66 | | -			page_descs = req->inline_page_descs;
67 | | -		} else {
68 | | -			pages = kmalloc_array(npages, sizeof(struct page *),
69 | | -					      flags);
70 | | -			page_descs =
71 | | -				kmalloc_array(npages,
72 | | -					      sizeof(struct fuse_page_desc),
73 | | -					      flags);
74 | | -		}
75 | | -
76 | | -		if (!pages || !page_descs) {
77 | | -			kfree(pages);
78 | | -			kfree(page_descs);
79 | | -			kmem_cache_free(fuse_req_cachep, req);
80 | | -			return NULL;
81 | | -		}
82 | | -
83 | | -		fuse_request_init(req, pages, page_descs, npages);
84 | | -	}
85 | 60 | 	return req;
86 | 61 | }
87 | 62 |
88 | | -struct fuse_req *fuse_request_alloc(unsigned npages)
| 63 | +static void fuse_request_free(struct fuse_req *req)
89 | 64 | {
90 | | -	return __fuse_request_alloc(npages, GFP_KERNEL);
91 | | -}
92 | | -EXPORT_SYMBOL_GPL(fuse_request_alloc);
93 | | -
94 | | -struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
95 | | -{
96 | | -	return __fuse_request_alloc(npages, GFP_NOFS);
97 | | -}
98 | | -
99 | | -void fuse_request_free(struct fuse_req *req)
100 | | -{
101 | | -	if (req->pages != req->inline_pages) {
102 | | -		kfree(req->pages);
103 | | -		kfree(req->page_descs);
104 | | -	}
105 | 65 | 	kmem_cache_free(fuse_req_cachep, req);
106 | 66 | }
107 | 67 |
108 | | -void __fuse_get_request(struct fuse_req *req)
| 68 | +static void __fuse_get_request(struct fuse_req *req)
109 | 69 | {
110 | 70 | 	refcount_inc(&req->count);
111 | 71 | }
.. | ..
142 | 102 | 	}
143 | 103 | }
144 | 104 |
145 | | -static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
146 | | -				       bool for_background)
| 105 | +static void fuse_put_request(struct fuse_req *req);
| 106 | +
| 107 | +static struct fuse_req *fuse_get_req(struct fuse_mount *fm, bool for_background)
147 | 108 | {
| 109 | +	struct fuse_conn *fc = fm->fc;
148 | 110 | 	struct fuse_req *req;
149 | 111 | 	int err;
150 | 112 | 	atomic_inc(&fc->num_waiting);
.. | ..
166 | 128 | 	if (fc->conn_error)
167 | 129 | 		goto out;
168 | 130 |
169 | | -	req = fuse_request_alloc(npages);
| 131 | +	req = fuse_request_alloc(fm, GFP_KERNEL);
170 | 132 | 	err = -ENOMEM;
171 | 133 | 	if (!req) {
172 | 134 | 		if (for_background)
.. | ..
184 | 146 |
185 | 147 | 	if (unlikely(req->in.h.uid == ((uid_t)-1) ||
186 | 148 | 		     req->in.h.gid == ((gid_t)-1))) {
187 | | -		fuse_put_request(fc, req);
| 149 | +		fuse_put_request(req);
188 | 150 | 		return ERR_PTR(-EOVERFLOW);
189 | 151 | 	}
190 | 152 | 	return req;
.. | ..
194 | 156 | 	return ERR_PTR(err);
195 | 157 | }
196 | 158 |
197 | | -struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
| 159 | +static void fuse_put_request(struct fuse_req *req)
198 | 160 | {
199 | | -	return __fuse_get_req(fc, npages, false);
200 | | -}
201 | | -EXPORT_SYMBOL_GPL(fuse_get_req);
| 161 | +	struct fuse_conn *fc = req->fm->fc;
202 | 162 |
203 | | -struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
204 | | -					     unsigned npages)
205 | | -{
206 | | -	return __fuse_get_req(fc, npages, true);
207 | | -}
208 | | -EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
209 | | -
210 | | -/*
211 | | - * Return request in fuse_file->reserved_req. However that may
212 | | - * currently be in use. If that is the case, wait for it to become
213 | | - * available.
214 | | - */
215 | | -static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
216 | | -					 struct file *file)
217 | | -{
218 | | -	struct fuse_req *req = NULL;
219 | | -	struct fuse_file *ff = file->private_data;
220 | | -
221 | | -	do {
222 | | -		wait_event(fc->reserved_req_waitq, ff->reserved_req);
223 | | -		spin_lock(&fc->lock);
224 | | -		if (ff->reserved_req) {
225 | | -			req = ff->reserved_req;
226 | | -			ff->reserved_req = NULL;
227 | | -			req->stolen_file = get_file(file);
228 | | -		}
229 | | -		spin_unlock(&fc->lock);
230 | | -	} while (!req);
231 | | -
232 | | -	return req;
233 | | -}
234 | | -
235 | | -/*
236 | | - * Put stolen request back into fuse_file->reserved_req
237 | | - */
238 | | -static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
239 | | -{
240 | | -	struct file *file = req->stolen_file;
241 | | -	struct fuse_file *ff = file->private_data;
242 | | -
243 | | -	spin_lock(&fc->lock);
244 | | -	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
245 | | -	BUG_ON(ff->reserved_req);
246 | | -	ff->reserved_req = req;
247 | | -	wake_up_all(&fc->reserved_req_waitq);
248 | | -	spin_unlock(&fc->lock);
249 | | -	fput(file);
250 | | -}
251 | | -
252 | | -/*
253 | | - * Gets a requests for a file operation, always succeeds
254 | | - *
255 | | - * This is used for sending the FLUSH request, which must get to
256 | | - * userspace, due to POSIX locks which may need to be unlocked.
257 | | - *
258 | | - * If allocation fails due to OOM, use the reserved request in
259 | | - * fuse_file.
260 | | - *
261 | | - * This is very unlikely to deadlock accidentally, since the
262 | | - * filesystem should not have it's own file open. If deadlock is
263 | | - * intentional, it can still be broken by "aborting" the filesystem.
264 | | - */
265 | | -struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
266 | | -					     struct file *file)
267 | | -{
268 | | -	struct fuse_req *req;
269 | | -
270 | | -	atomic_inc(&fc->num_waiting);
271 | | -	wait_event(fc->blocked_waitq, fc->initialized);
272 | | -	/* Matches smp_wmb() in fuse_set_initialized() */
273 | | -	smp_rmb();
274 | | -	req = fuse_request_alloc(0);
275 | | -	if (!req)
276 | | -		req = get_reserved_req(fc, file);
277 | | -
278 | | -	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
279 | | -	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
280 | | -	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
281 | | -
282 | | -	__set_bit(FR_WAITING, &req->flags);
283 | | -	__clear_bit(FR_BACKGROUND, &req->flags);
284 | | -	return req;
285 | | -}
286 | | -
287 | | -void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
288 | | -{
289 | 163 | 	if (refcount_dec_and_test(&req->count)) {
290 | 164 | 		if (test_bit(FR_BACKGROUND, &req->flags)) {
291 | 165 | 			/*
292 | 166 | 			 * We get here in the unlikely case that a background
293 | 167 | 			 * request was allocated but not sent
294 | 168 | 			 */
295 | | -			spin_lock(&fc->lock);
| 169 | +			spin_lock(&fc->bg_lock);
296 | 170 | 			if (!fc->blocked)
297 | 171 | 				wake_up(&fc->blocked_waitq);
298 | | -			spin_unlock(&fc->lock);
| 172 | +			spin_unlock(&fc->bg_lock);
299 | 173 | 		}
300 | 174 |
301 | 175 | 		if (test_bit(FR_WAITING, &req->flags)) {
.. | ..
303 | 177 | 			fuse_drop_waiting(fc);
304 | 178 | 		}
305 | 179 |
306 | | -		if (req->stolen_file)
307 | | -			put_reserved_req(fc, req);
308 | | -		else
309 | | -			fuse_request_free(req);
| 180 | +		fuse_request_free(req);
310 | 181 | 	}
311 | 182 | }
312 | | -EXPORT_SYMBOL_GPL(fuse_put_request);
313 | 183 |
314 | | -static unsigned len_args(unsigned numargs, struct fuse_arg *args)
| 184 | +unsigned int fuse_len_args(unsigned int numargs, struct fuse_arg *args)
315 | 185 | {
316 | 186 | 	unsigned nbytes = 0;
317 | 187 | 	unsigned i;
.. | ..
321 | 191 |
322 | 192 | 	return nbytes;
323 | 193 | }
| 194 | +EXPORT_SYMBOL_GPL(fuse_len_args);
324 | 195 |
325 | | -static u64 fuse_get_unique(struct fuse_iqueue *fiq)
| 196 | +u64 fuse_get_unique(struct fuse_iqueue *fiq)
326 | 197 | {
327 | | -	return ++fiq->reqctr;
| 198 | +	fiq->reqctr += FUSE_REQ_ID_STEP;
| 199 | +	return fiq->reqctr;
| 200 | +}
| 201 | +EXPORT_SYMBOL_GPL(fuse_get_unique);
| 202 | +
| 203 | +static unsigned int fuse_req_hash(u64 unique)
| 204 | +{
| 205 | +	return hash_long(unique & ~FUSE_INT_REQ_BIT, FUSE_PQ_HASH_BITS);
328 | 206 | }
329 | 207 |
330 | | -static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
| 208 | +/**
| 209 | + * A new request is available, wake fiq->waitq
| 210 | + */
| 211 | +static void fuse_dev_wake_and_unlock(struct fuse_iqueue *fiq, bool sync)
| 212 | +__releases(fiq->lock)
| 213 | +{
| 214 | +	if (sync)
| 215 | +		wake_up_sync(&fiq->waitq);
| 216 | +	else
| 217 | +		wake_up(&fiq->waitq);
| 218 | +	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
| 219 | +	spin_unlock(&fiq->lock);
| 220 | +}
| 221 | +
| 222 | +const struct fuse_iqueue_ops fuse_dev_fiq_ops = {
| 223 | +	.wake_forget_and_unlock		= fuse_dev_wake_and_unlock,
| 224 | +	.wake_interrupt_and_unlock	= fuse_dev_wake_and_unlock,
| 225 | +	.wake_pending_and_unlock	= fuse_dev_wake_and_unlock,
| 226 | +};
| 227 | +EXPORT_SYMBOL_GPL(fuse_dev_fiq_ops);
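The new `fuse_iqueue_ops` indirection decouples "a request became available" from "wake a /dev/fuse reader", so a transport other than the character device can supply its own notification callbacks. A hypothetical sketch of such an ops table (names are illustrative, not from this patch; only the callback shape matters):

```c
/* Hypothetical alternate transport for the fuse_iqueue_ops hooks above. */
static void example_wake_and_unlock(struct fuse_iqueue *fiq, bool sync)
__releases(fiq->lock)
{
	/* e.g. kick the transport's own queue instead of fiq->waitq */
	spin_unlock(&fiq->lock);
}

static const struct fuse_iqueue_ops example_fiq_ops = {
	.wake_forget_and_unlock		= example_wake_and_unlock,
	.wake_interrupt_and_unlock	= example_wake_and_unlock,
	.wake_pending_and_unlock	= example_wake_and_unlock,
};
```

Note that every callback is entered with `fiq->lock` held and must return with it released, which the `__releases()` annotation documents for sparse.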
| 228 | +
| 229 | +static void queue_request_and_unlock(struct fuse_iqueue *fiq,
| 230 | +				     struct fuse_req *req, bool sync)
| 231 | +__releases(fiq->lock)
331 | 232 | {
332 | 233 | 	req->in.h.len = sizeof(struct fuse_in_header) +
333 | | -		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
| 234 | +		fuse_len_args(req->args->in_numargs,
| 235 | +			      (struct fuse_arg *) req->args->in_args);
334 | 236 | 	list_add_tail(&req->list, &fiq->pending);
335 | | -	wake_up(&fiq->waitq);
336 | | -	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
| 237 | +	fiq->ops->wake_pending_and_unlock(fiq, sync);
337 | 238 | }
338 | 239 |
339 | 240 | void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
.. | ..
348 | 249 | 	if (fiq->connected) {
349 | 250 | 		fiq->forget_list_tail->next = forget;
350 | 251 | 		fiq->forget_list_tail = forget;
351 | | -		wake_up(&fiq->waitq);
352 | | -		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
| 252 | +		fiq->ops->wake_forget_and_unlock(fiq, false);
353 | 253 | 	} else {
354 | 254 | 		kfree(forget);
| 255 | +		spin_unlock(&fiq->lock);
355 | 256 | 	}
356 | | -	spin_unlock(&fiq->lock);
357 | 257 | }
358 | 258 |
359 | 259 | static void flush_bg_queue(struct fuse_conn *fc)
360 | 260 | {
| 261 | +	struct fuse_iqueue *fiq = &fc->iq;
| 262 | +
361 | 263 | 	while (fc->active_background < fc->max_background &&
362 | 264 | 	       !list_empty(&fc->bg_queue)) {
363 | 265 | 		struct fuse_req *req;
364 | | -		struct fuse_iqueue *fiq = &fc->iq;
365 | 266 |
366 | | -		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
| 267 | +		req = list_first_entry(&fc->bg_queue, struct fuse_req, list);
367 | 268 | 		list_del(&req->list);
368 | 269 | 		fc->active_background++;
369 | 270 | 		spin_lock(&fiq->lock);
370 | 271 | 		req->in.h.unique = fuse_get_unique(fiq);
371 | | -		queue_request(fiq, req);
372 | | -		spin_unlock(&fiq->lock);
| 272 | +		queue_request_and_unlock(fiq, req, false);
373 | 273 | 	}
374 | 274 | }
375 | 275 |
.. | ..
381 | 281 |  * the 'end' callback is called if given, else the reference to the
382 | 282 |  * request is released
383 | 283 |  */
384 | | -static void request_end(struct fuse_conn *fc, struct fuse_req *req)
| 284 | +void fuse_request_end(struct fuse_req *req)
385 | 285 | {
| 286 | +	struct fuse_mount *fm = req->fm;
| 287 | +	struct fuse_conn *fc = fm->fc;
386 | 288 | 	struct fuse_iqueue *fiq = &fc->iq;
387 | 289 |
388 | 290 | 	if (test_and_set_bit(FR_FINISHED, &req->flags))
389 | 291 | 		goto put_request;
390 | 292 |
391 | | -	spin_lock(&fiq->lock);
392 | | -	list_del_init(&req->intr_entry);
393 | | -	spin_unlock(&fiq->lock);
| 293 | +	/*
| 294 | +	 * test_and_set_bit() implies smp_mb() between bit
| 295 | +	 * changing and below FR_INTERRUPTED check. Pairs with
| 296 | +	 * smp_mb() from queue_interrupt().
| 297 | +	 */
| 298 | +	if (test_bit(FR_INTERRUPTED, &req->flags)) {
| 299 | +		spin_lock(&fiq->lock);
| 300 | +		list_del_init(&req->intr_entry);
| 301 | +		spin_unlock(&fiq->lock);
| 302 | +	}
394 | 303 | 	WARN_ON(test_bit(FR_PENDING, &req->flags));
395 | 304 | 	WARN_ON(test_bit(FR_SENT, &req->flags));
396 | 305 | 	if (test_bit(FR_BACKGROUND, &req->flags)) {
397 | | -		spin_lock(&fc->lock);
| 306 | +		spin_lock(&fc->bg_lock);
398 | 307 | 		clear_bit(FR_BACKGROUND, &req->flags);
399 | 308 | 		if (fc->num_background == fc->max_background) {
400 | 309 | 			fc->blocked = 0;
.. | ..
410 | 319 | 			wake_up(&fc->blocked_waitq);
411 | 320 | 		}
412 | 321 |
413 | | -		if (fc->num_background == fc->congestion_threshold && fc->sb) {
414 | | -			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
415 | | -			clear_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
| 322 | +		if (fc->num_background == fc->congestion_threshold && fm->sb) {
| 323 | +			clear_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
| 324 | +			clear_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
416 | 325 | 		}
417 | 326 | 		fc->num_background--;
418 | 327 | 		fc->active_background--;
419 | 328 | 		flush_bg_queue(fc);
420 | | -		spin_unlock(&fc->lock);
| 329 | +		spin_unlock(&fc->bg_lock);
| 330 | +	} else {
| 331 | +		/* Wake up waiter sleeping in request_wait_answer() */
| 332 | +		wake_up(&req->waitq);
421 | 333 | 	}
422 | | -	wake_up(&req->waitq);
423 | | -	if (req->end)
424 | | -		req->end(fc, req);
425 | | -put_request:
426 | | -	fuse_put_request(fc, req);
427 | | -}
428 | 334 |
429 | | -static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
| 335 | +	if (test_bit(FR_ASYNC, &req->flags))
| 336 | +		req->args->end(fm, req->args, req->out.h.error);
| 337 | +put_request:
| 338 | +	fuse_put_request(req);
| 339 | +}
| 340 | +EXPORT_SYMBOL_GPL(fuse_request_end);
| 341 | +
| 342 | +static int queue_interrupt(struct fuse_req *req)
430 | 343 | {
| 344 | +	struct fuse_iqueue *fiq = &req->fm->fc->iq;
| 345 | +
431 | 346 | 	spin_lock(&fiq->lock);
432 | | -	if (test_bit(FR_FINISHED, &req->flags)) {
| 347 | +	/* Check that we've sent a request to interrupt this req */
| 348 | +	if (unlikely(!test_bit(FR_INTERRUPTED, &req->flags))) {
433 | 349 | 		spin_unlock(&fiq->lock);
434 | | -		return;
| 350 | +		return -EINVAL;
435 | 351 | 	}
| 352 | +
436 | 353 | 	if (list_empty(&req->intr_entry)) {
437 | 354 | 		list_add_tail(&req->intr_entry, &fiq->interrupts);
438 | | -		wake_up(&fiq->waitq);
| 355 | +		/*
| 356 | +		 * Pairs with smp_mb() implied by test_and_set_bit()
| 357 | +		 * from fuse_request_end().
| 358 | +		 */
| 359 | +		smp_mb();
| 360 | +		if (test_bit(FR_FINISHED, &req->flags)) {
| 361 | +			list_del_init(&req->intr_entry);
| 362 | +			spin_unlock(&fiq->lock);
| 363 | +			return 0;
| 364 | +		}
| 365 | +		fiq->ops->wake_interrupt_and_unlock(fiq, false);
| 366 | +	} else {
| 367 | +		spin_unlock(&fiq->lock);
439 | 368 | 	}
440 | | -	spin_unlock(&fiq->lock);
441 | | -	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
| 369 | +	return 0;
442 | 370 | }
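A sketch of the race the paired barriers above close, derived from the two functions (commentary only, not part of the patch):

```c
/*
 * queue_interrupt()                     fuse_request_end()
 * -----------------                     ------------------
 * list_add_tail(&req->intr_entry, ...)  test_and_set_bit(FR_FINISHED, ...)
 * smp_mb();                             (test_and_set_bit() implies smp_mb())
 * test_bit(FR_FINISHED)?                test_bit(FR_INTERRUPTED)?
 *
 * With both barriers in place at least one side must observe the other's
 * store: either queue_interrupt() sees FR_FINISHED and deletes the entry
 * itself, or fuse_request_end() sees FR_INTERRUPTED and takes fiq->lock to
 * delete it.  Without them both checks could read stale values, leaving a
 * dangling entry on fiq->interrupts after the request is freed.
 */
```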
443 | 371 |
444 | | -static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
| 372 | +static void request_wait_answer(struct fuse_req *req)
445 | 373 | {
| 374 | +	struct fuse_conn *fc = req->fm->fc;
446 | 375 | 	struct fuse_iqueue *fiq = &fc->iq;
447 | 376 | 	int err;
448 | 377 |
.. | ..
457 | 386 | 		/* matches barrier in fuse_dev_do_read() */
458 | 387 | 		smp_mb__after_atomic();
459 | 388 | 		if (test_bit(FR_SENT, &req->flags))
460 | | -			queue_interrupt(fiq, req);
| 389 | +			queue_interrupt(req);
461 | 390 | 	}
462 | 391 |
463 | 392 | 	if (!test_bit(FR_FORCE, &req->flags)) {
.. | ..
486 | 415 | 	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
487 | 416 | }
488 | 417 |
489 | | -static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
| 418 | +static void __fuse_request_send(struct fuse_req *req)
490 | 419 | {
491 | | -	struct fuse_iqueue *fiq = &fc->iq;
| 420 | +	struct fuse_iqueue *fiq = &req->fm->fc->iq;
492 | 421 |
493 | 422 | 	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
494 | 423 | 	spin_lock(&fiq->lock);
.. | ..
497 | 426 | 		req->out.h.error = -ENOTCONN;
498 | 427 | 	} else {
499 | 428 | 		req->in.h.unique = fuse_get_unique(fiq);
500 | | -		queue_request(fiq, req);
501 | 429 | 		/* acquire extra reference, since request is still needed
502 | | -		   after request_end() */
| 430 | +		   after fuse_request_end() */
503 | 431 | 		__fuse_get_request(req);
504 | | -		spin_unlock(&fiq->lock);
| 432 | +		queue_request_and_unlock(fiq, req, true);
505 | 433 |
506 | | -		request_wait_answer(fc, req);
507 | | -		/* Pairs with smp_wmb() in request_end() */
| 434 | +		request_wait_answer(req);
| 435 | +		/* Pairs with smp_wmb() in fuse_request_end() */
508 | 436 | 		smp_rmb();
509 | 437 | 	}
510 | 438 | }
511 | 439 |
512 | | -void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
513 | | -{
514 | | -	__set_bit(FR_ISREPLY, &req->flags);
515 | | -	if (!test_bit(FR_WAITING, &req->flags)) {
516 | | -		__set_bit(FR_WAITING, &req->flags);
517 | | -		atomic_inc(&fc->num_waiting);
518 | | -	}
519 | | -	__fuse_request_send(fc, req);
520 | | -}
521 | | -EXPORT_SYMBOL_GPL(fuse_request_send);
522 | | -
523 | 440 | static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
524 | 441 | {
525 | | -	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
526 | | -		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
| 442 | +	if (fc->minor < 4 && args->opcode == FUSE_STATFS)
| 443 | +		args->out_args[0].size = FUSE_COMPAT_STATFS_SIZE;
527 | 444 |
528 | 445 | 	if (fc->minor < 9) {
529 | | -		switch (args->in.h.opcode) {
| 446 | +		switch (args->opcode) {
530 | 447 | 		case FUSE_LOOKUP:
531 | 448 | 		case FUSE_CREATE:
532 | 449 | 		case FUSE_MKNOD:
533 | 450 | 		case FUSE_MKDIR:
534 | 451 | 		case FUSE_SYMLINK:
535 | 452 | 		case FUSE_LINK:
536 | | -			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
| 453 | +			args->out_args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
537 | 454 | 			break;
538 | 455 | 		case FUSE_GETATTR:
539 | 456 | 		case FUSE_SETATTR:
540 | | -			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
| 457 | +			args->out_args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
541 | 458 | 			break;
542 | 459 | 		}
543 | 460 | 	}
544 | 461 | 	if (fc->minor < 12) {
545 | | -		switch (args->in.h.opcode) {
| 462 | +		switch (args->opcode) {
546 | 463 | 		case FUSE_CREATE:
547 | | -			args->in.args[0].size = sizeof(struct fuse_open_in);
| 464 | +			args->in_args[0].size = sizeof(struct fuse_open_in);
548 | 465 | 			break;
549 | 466 | 		case FUSE_MKNOD:
550 | | -			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
| 467 | +			args->in_args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
551 | 468 | 			break;
552 | 469 | 		}
553 | 470 | 	}
554 | 471 | }
555 | 472 |
556 | | -ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
| 473 | +static void fuse_force_creds(struct fuse_req *req)
557 | 474 | {
| 475 | +	struct fuse_conn *fc = req->fm->fc;
| 476 | +
| 477 | +	req->in.h.uid = from_kuid_munged(fc->user_ns, current_fsuid());
| 478 | +	req->in.h.gid = from_kgid_munged(fc->user_ns, current_fsgid());
| 479 | +	req->in.h.pid = pid_nr_ns(task_pid(current), fc->pid_ns);
| 480 | +}
| 481 | +
| 482 | +static void fuse_args_to_req(struct fuse_req *req, struct fuse_args *args)
| 483 | +{
| 484 | +	req->in.h.opcode = args->opcode;
| 485 | +	req->in.h.nodeid = args->nodeid;
| 486 | +	req->args = args;
| 487 | +	if (args->end)
| 488 | +		__set_bit(FR_ASYNC, &req->flags);
| 489 | +}
| 490 | +
| 491 | +ssize_t fuse_simple_request(struct fuse_mount *fm, struct fuse_args *args)
| 492 | +{
| 493 | +	struct fuse_conn *fc = fm->fc;
558 | 494 | 	struct fuse_req *req;
559 | 495 | 	ssize_t ret;
560 | 496 |
561 | | -	req = fuse_get_req(fc, 0);
562 | | -	if (IS_ERR(req))
563 | | -		return PTR_ERR(req);
| 497 | +	if (args->force) {
| 498 | +		atomic_inc(&fc->num_waiting);
| 499 | +		req = fuse_request_alloc(fm, GFP_KERNEL | __GFP_NOFAIL);
| 500 | +
| 501 | +		if (!args->nocreds)
| 502 | +			fuse_force_creds(req);
| 503 | +
| 504 | +		__set_bit(FR_WAITING, &req->flags);
| 505 | +		__set_bit(FR_FORCE, &req->flags);
| 506 | +	} else {
| 507 | +		WARN_ON(args->nocreds);
| 508 | +		req = fuse_get_req(fm, false);
| 509 | +		if (IS_ERR(req))
| 510 | +			return PTR_ERR(req);
| 511 | +	}
564 | 512 |
565 | 513 | 	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
566 | 514 | 	fuse_adjust_compat(fc, args);
| 515 | +	fuse_args_to_req(req, args);
567 | 516 |
568 | | -	req->in.h.opcode = args->in.h.opcode;
569 | | -	req->in.h.nodeid = args->in.h.nodeid;
570 | | -	req->in.numargs = args->in.numargs;
571 | | -	memcpy(req->in.args, args->in.args,
572 | | -	       args->in.numargs * sizeof(struct fuse_in_arg));
573 | | -	req->out.argvar = args->out.argvar;
574 | | -	req->out.numargs = args->out.numargs;
575 | | -	memcpy(req->out.args, args->out.args,
576 | | -	       args->out.numargs * sizeof(struct fuse_arg));
577 | | -	req->out.canonical_path = args->out.canonical_path;
578 | | -	fuse_request_send(fc, req);
| 517 | +	if (!args->noreply)
| 518 | +		__set_bit(FR_ISREPLY, &req->flags);
| 519 | +	__fuse_request_send(req);
579 | 520 | 	ret = req->out.h.error;
580 | | -	if (!ret && args->out.argvar) {
581 | | -		BUG_ON(args->out.numargs != 1);
582 | | -		ret = req->out.args[0].size;
| 521 | +	if (!ret && args->out_argvar) {
| 522 | +		BUG_ON(args->out_numargs == 0);
| 523 | +		ret = args->out_args[args->out_numargs - 1].size;
583 | 524 | 	}
584 | | -	fuse_put_request(fc, req);
| 525 | +	fuse_put_request(req);
585 | 526 |
586 | 527 | 	return ret;
587 | 528 | }
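With the conversion complete, callers describe a request entirely through `struct fuse_args` instead of filling a raw `fuse_req`. A hedged usage sketch — `FUSE_ARGS()` and the GETATTR payload types come from elsewhere in the tree (fuse_i.h and the uapi header), not from this hunk:

```c
static int example_getattr(struct fuse_mount *fm, u64 nodeid,
			   struct fuse_attr_out *outarg)
{
	struct fuse_getattr_in inarg = {};
	FUSE_ARGS(args);	/* assumed on-stack zero initializer */

	args.opcode = FUSE_GETATTR;
	args.nodeid = nodeid;
	args.in_numargs = 1;
	args.in_args[0].size = sizeof(inarg);
	args.in_args[0].value = &inarg;
	args.out_numargs = 1;
	args.out_args[0].size = sizeof(*outarg);
	args.out_args[0].value = outarg;

	/* 0 on success, the reply's negative error code otherwise */
	return fuse_simple_request(fm, &args);
}
```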
588 | 529 |
589 | | -/*
590 | | - * Called under fc->lock
591 | | - *
592 | | - * fc->connected must have been checked previously
593 | | - */
594 | | -void fuse_request_send_background_locked(struct fuse_conn *fc,
595 | | -					 struct fuse_req *req)
| 530 | +static bool fuse_request_queue_background(struct fuse_req *req)
596 | 531 | {
597 | | -	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
| 532 | +	struct fuse_mount *fm = req->fm;
| 533 | +	struct fuse_conn *fc = fm->fc;
| 534 | +	bool queued = false;
| 535 | +
| 536 | +	WARN_ON(!test_bit(FR_BACKGROUND, &req->flags));
598 | 537 | 	if (!test_bit(FR_WAITING, &req->flags)) {
599 | 538 | 		__set_bit(FR_WAITING, &req->flags);
600 | 539 | 		atomic_inc(&fc->num_waiting);
601 | 540 | 	}
602 | 541 | 	__set_bit(FR_ISREPLY, &req->flags);
603 | | -	fc->num_background++;
604 | | -	if (fc->num_background == fc->max_background)
605 | | -		fc->blocked = 1;
606 | | -	if (fc->num_background == fc->congestion_threshold && fc->sb) {
607 | | -		set_bdi_congested(fc->sb->s_bdi, BLK_RW_SYNC);
608 | | -		set_bdi_congested(fc->sb->s_bdi, BLK_RW_ASYNC);
| 542 | +	spin_lock(&fc->bg_lock);
| 543 | +	if (likely(fc->connected)) {
| 544 | +		fc->num_background++;
| 545 | +		if (fc->num_background == fc->max_background)
| 546 | +			fc->blocked = 1;
| 547 | +		if (fc->num_background == fc->congestion_threshold && fm->sb) {
| 548 | +			set_bdi_congested(fm->sb->s_bdi, BLK_RW_SYNC);
| 549 | +			set_bdi_congested(fm->sb->s_bdi, BLK_RW_ASYNC);
| 550 | +		}
| 551 | +		list_add_tail(&req->list, &fc->bg_queue);
| 552 | +		flush_bg_queue(fc);
| 553 | +		queued = true;
609 | 554 | 	}
610 | | -	list_add_tail(&req->list, &fc->bg_queue);
611 | | -	flush_bg_queue(fc);
| 555 | +	spin_unlock(&fc->bg_lock);
| 556 | +
| 557 | +	return queued;
612 | 558 | }
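For reference, the background accounting that this patch moves from `fc->lock` to the new `fc->bg_lock`, as visible in the hunk above and in `fuse_request_end()`:

```c
/*
 * Serialized by fc->bg_lock after this patch (previously fc->lock):
 *  - fc->num_background / fc->active_background counters
 *  - fc->blocked and the fc->blocked_waitq wakeup decision
 *  - fc->bg_queue and flush_bg_queue()
 *  - the bdi congestion accounting against fm->sb
 */
```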
613 | 559 |
614 | | -void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
| 560 | +int fuse_simple_background(struct fuse_mount *fm, struct fuse_args *args,
| 561 | +			   gfp_t gfp_flags)
615 | 562 | {
616 | | -	BUG_ON(!req->end);
617 | | -	spin_lock(&fc->lock);
618 | | -	if (fc->connected) {
619 | | -		fuse_request_send_background_locked(fc, req);
620 | | -		spin_unlock(&fc->lock);
| 563 | +	struct fuse_req *req;
| 564 | +
| 565 | +	if (args->force) {
| 566 | +		WARN_ON(!args->nocreds);
| 567 | +		req = fuse_request_alloc(fm, gfp_flags);
| 568 | +		if (!req)
| 569 | +			return -ENOMEM;
| 570 | +		__set_bit(FR_BACKGROUND, &req->flags);
621 | 571 | 	} else {
622 | | -		spin_unlock(&fc->lock);
623 | | -		req->out.h.error = -ENOTCONN;
624 | | -		req->end(fc, req);
625 | | -		fuse_put_request(fc, req);
| 572 | +		WARN_ON(args->nocreds);
| 573 | +		req = fuse_get_req(fm, true);
| 574 | +		if (IS_ERR(req))
| 575 | +			return PTR_ERR(req);
626 | 576 | 	}
627 | | -}
628 | | -EXPORT_SYMBOL_GPL(fuse_request_send_background);
629 | 577 |
630 | | -static int fuse_request_send_notify_reply(struct fuse_conn *fc,
631 | | -					  struct fuse_req *req, u64 unique)
| 578 | +	fuse_args_to_req(req, args);
| 579 | +
| 580 | +	if (!fuse_request_queue_background(req)) {
| 581 | +		fuse_put_request(req);
| 582 | +		return -ENOTCONN;
| 583 | +	}
| 584 | +
| 585 | +	return 0;
| 586 | +}
| 587 | +EXPORT_SYMBOL_GPL(fuse_simple_background);
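`fuse_simple_background()` pairs naturally with an `end` callback: setting `args->end` makes `fuse_args_to_req()` raise FR_ASYNC, so `fuse_request_end()` will invoke the callback with the reply's error code once an answer (or an abort) arrives. A sketch under those assumptions:

```c
static void example_end(struct fuse_mount *fm, struct fuse_args *args,
			int error)
{
	/* runs from fuse_request_end(); release per-request state here */
}

static int example_send_async(struct fuse_mount *fm, struct fuse_args *args)
{
	args->end = example_end;	/* sets FR_ASYNC via fuse_args_to_req() */

	/* -ENOTCONN if the connection is gone, -ENOMEM if allocation fails */
	return fuse_simple_background(fm, args, GFP_KERNEL);
}
```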
| 588 | +
| 589 | +static int fuse_simple_notify_reply(struct fuse_mount *fm,
| 590 | +				    struct fuse_args *args, u64 unique)
632 | 591 | {
633 | | -	int err = -ENODEV;
634 | | -	struct fuse_iqueue *fiq = &fc->iq;
| 592 | +	struct fuse_req *req;
| 593 | +	struct fuse_iqueue *fiq = &fm->fc->iq;
| 594 | +	int err = 0;
| 595 | +
| 596 | +	req = fuse_get_req(fm, false);
| 597 | +	if (IS_ERR(req))
| 598 | +		return PTR_ERR(req);
635 | 599 |
636 | 600 | 	__clear_bit(FR_ISREPLY, &req->flags);
637 | 601 | 	req->in.h.unique = unique;
| 602 | +
| 603 | +	fuse_args_to_req(req, args);
| 604 | +
638 | 605 | 	spin_lock(&fiq->lock);
639 | 606 | 	if (fiq->connected) {
640 | | -		queue_request(fiq, req);
641 | | -		err = 0;
| 607 | +		queue_request_and_unlock(fiq, req, false);
| 608 | +	} else {
| 609 | +		err = -ENODEV;
| 610 | +		spin_unlock(&fiq->lock);
| 611 | +		fuse_put_request(req);
642 | 612 | 	}
643 | | -	spin_unlock(&fiq->lock);
644 | 613 |
645 | 614 | 	return err;
646 | | -}
647 | | -
648 | | -void fuse_force_forget(struct file *file, u64 nodeid)
649 | | -{
650 | | -	struct inode *inode = file_inode(file);
651 | | -	struct fuse_conn *fc = get_fuse_conn(inode);
652 | | -	struct fuse_req *req;
653 | | -	struct fuse_forget_in inarg;
654 | | -
655 | | -	memset(&inarg, 0, sizeof(inarg));
656 | | -	inarg.nlookup = 1;
657 | | -	req = fuse_get_req_nofail_nopages(fc, file);
658 | | -	req->in.h.opcode = FUSE_FORGET;
659 | | -	req->in.h.nodeid = nodeid;
660 | | -	req->in.numargs = 1;
661 | | -	req->in.args[0].size = sizeof(inarg);
662 | | -	req->in.args[0].value = &inarg;
663 | | -	__clear_bit(FR_ISREPLY, &req->flags);
664 | | -	__fuse_request_send(fc, req);
665 | | -	/* ignore errors */
666 | | -	fuse_put_request(fc, req);
667 | 615 | }
668 | 616 |
669 | 617 | /*
.. | ..
739 | 687 | 			flush_dcache_page(cs->pg);
740 | 688 | 			set_page_dirty_lock(cs->pg);
741 | 689 | 		}
742 | | -		put_page(cs->pg);
| 690 | +		/*
| 691 | +		 * The page could be a GUP page (see iov_iter_get_pages() in
| 692 | +		 * fuse_copy_fill()), so use put_user_page() to release it.
| 693 | +		 */
| 694 | +		put_user_page(cs->pg);
743 | 695 | 	}
744 | 696 | 	cs->pg = NULL;
745 | 697 | }
.. | ..
774 | 726 | 		cs->pipebufs++;
775 | 727 | 		cs->nr_segs--;
776 | 728 | 	} else {
777 | | -		if (cs->nr_segs == cs->pipe->buffers)
| 729 | +		if (cs->nr_segs >= cs->pipe->max_usage)
778 | 730 | 			return -EIO;
779 | 731 |
780 | 732 | 		page = alloc_page(GFP_HIGHUSER);
.. | ..
839 | 791 | 	    1 << PG_uptodate |
840 | 792 | 	    1 << PG_lru |
841 | 793 | 	    1 << PG_active |
| 794 | +	    1 << PG_workingset |
842 | 795 | 	    1 << PG_reclaim |
843 | 796 | 	    1 << PG_waiters))) {
844 | | -		printk(KERN_WARNING "fuse: trying to steal weird page\n");
845 | | -		printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
| 797 | +		dump_page(page, "fuse: trying to steal weird page");
846 | 798 | 		return 1;
847 | 799 | 	}
848 | 800 | 	return 0;
.. | ..
875 | 827 | 	if (cs->len != PAGE_SIZE)
876 | 828 | 		goto out_fallback;
877 | 829 |
878 | | -	if (pipe_buf_steal(cs->pipe, buf) != 0)
| 830 | +	if (!pipe_buf_try_steal(cs->pipe, buf))
879 | 831 | 		goto out_fallback;
880 | 832 |
881 | 833 | 	newpage = buf->page;
.. | ..
910 | 862 | 	get_page(newpage);
911 | 863 |
912 | 864 | 	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
913 | | -		lru_cache_add_file(newpage);
| 865 | +		lru_cache_add(newpage);
914 | 866 |
915 | 867 | 	/*
916 | 868 | 	 * Release while we have extra ref on stolen page. Otherwise
.. | ..
962 | 914 | 	struct pipe_buffer *buf;
963 | 915 | 	int err;
964 | 916 |
965 | | -	if (cs->nr_segs == cs->pipe->buffers)
| 917 | +	if (cs->nr_segs >= cs->pipe->max_usage)
966 | 918 | 		return -EIO;
967 | 919 |
968 | 920 | 	get_page(page);
.. | ..
1001 | 953 |
1002 | 954 | 	while (count) {
1003 | 955 | 		if (cs->write && cs->pipebufs && page) {
1004 | | -			return fuse_ref_page(cs, page, offset, count);
| 956 | +			/*
| 957 | +			 * Can't control lifetime of pipe buffers, so always
| 958 | +			 * copy user pages.
| 959 | +			 */
| 960 | +			if (cs->req->args->user_pages) {
| 961 | +				err = fuse_copy_fill(cs);
| 962 | +				if (err)
| 963 | +					return err;
| 964 | +			} else {
| 965 | +				return fuse_ref_page(cs, page, offset, count);
| 966 | +			}
1005 | 967 | 		} else if (!cs->len) {
1006 | 968 | 			if (cs->move_pages && page &&
1007 | 969 | 			    offset == 0 && count == PAGE_SIZE) {
.. | ..
1033 | 995 | {
1034 | 996 | 	unsigned i;
1035 | 997 | 	struct fuse_req *req = cs->req;
| 998 | +	struct fuse_args_pages *ap = container_of(req->args, typeof(*ap), args);
1036 | 999 |
1037 | | -	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
| 1000 | +
| 1001 | +	for (i = 0; i < ap->num_pages && (nbytes || zeroing); i++) {
1038 | 1002 | 		int err;
1039 | | -		unsigned offset = req->page_descs[i].offset;
1040 | | -		unsigned count = min(nbytes, req->page_descs[i].length);
| 1003 | +		unsigned int offset = ap->descs[i].offset;
| 1004 | +		unsigned int count = min(nbytes, ap->descs[i].length);
1041 | 1005 |
1042 | | -		err = fuse_copy_page(cs, &req->pages[i], offset, count,
1043 | | -				     zeroing);
| 1006 | +		err = fuse_copy_page(cs, &ap->pages[i], offset, count, zeroing);
1044 | 1007 | 		if (err)
1045 | 1008 | 			return err;
1046 | 1009 |
.. | ..
1111 | 1074 | 	int err;
1112 | 1075 |
1113 | 1076 | 	list_del_init(&req->intr_entry);
1114 | | -	req->intr_unique = fuse_get_unique(fiq);
1115 | 1077 | 	memset(&ih, 0, sizeof(ih));
1116 | 1078 | 	memset(&arg, 0, sizeof(arg));
1117 | 1079 | 	ih.len = reqsize;
1118 | 1080 | 	ih.opcode = FUSE_INTERRUPT;
1119 | | -	ih.unique = req->intr_unique;
| 1081 | +	ih.unique = (req->in.h.unique | FUSE_INT_REQ_BIT);
1120 | 1082 | 	arg.unique = req->in.h.unique;
1121 | 1083 |
1122 | 1084 | 	spin_unlock(&fiq->lock);
.. | ..
1131 | 1093 | 	return err ? err : reqsize;
1132 | 1094 | }
1133 | 1095 |
1134 | | -static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
1135 | | -					       unsigned max,
1136 | | -					       unsigned *countp)
| 1096 | +struct fuse_forget_link *fuse_dequeue_forget(struct fuse_iqueue *fiq,
| 1097 | +					     unsigned int max,
| 1098 | +					     unsigned int *countp)
1137 | 1099 | {
1138 | 1100 | 	struct fuse_forget_link *head = fiq->forget_list_head.next;
1139 | 1101 | 	struct fuse_forget_link **newhead = &head;
.. | ..
1152 | 1114 |
1153 | 1115 | 	return head;
1154 | 1116 | }
| 1117 | +EXPORT_SYMBOL(fuse_dequeue_forget);
1155 | 1118 |
1156 | 1119 | static int fuse_read_single_forget(struct fuse_iqueue *fiq,
1157 | 1120 | 				   struct fuse_copy_state *cs,
.. | ..
1159 | 1122 | __releases(fiq->lock)
1160 | 1123 | {
1161 | 1124 | 	int err;
1162 | | -	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
| 1125 | +	struct fuse_forget_link *forget = fuse_dequeue_forget(fiq, 1, NULL);
1163 | 1126 | 	struct fuse_forget_in arg = {
1164 | 1127 | 		.nlookup = forget->forget_one.nlookup,
1165 | 1128 | 	};
.. | ..
1207 | 1170 | 	}
1208 | 1171 |
1209 | 1172 | 	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1210 | | -	head = dequeue_forget(fiq, max_forgets, &count);
| 1173 | +	head = fuse_dequeue_forget(fiq, max_forgets, &count);
1211 | 1174 | 	spin_unlock(&fiq->lock);
1212 | 1175 |
1213 | 1176 | 	arg.count = count;
.. | ..
1252 | 1215 |  * the pending list and copies request data to userspace buffer. If
1253 | 1216 |  * no reply is needed (FORGET) or request has been aborted or there
1254 | 1217 |  * was an error during the copying then it's finished by calling
1255 | | - * request_end(). Otherwise add it to the processing list, and set
| 1218 | + * fuse_request_end(). Otherwise add it to the processing list, and set
1256 | 1219 |  * the 'sent' flag.
1257 | 1220 |  */
1258 | 1221 | static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
.. | ..
1263 | 1226 | 	struct fuse_iqueue *fiq = &fc->iq;
1264 | 1227 | 	struct fuse_pqueue *fpq = &fud->pq;
1265 | 1228 | 	struct fuse_req *req;
1266 | | -	struct fuse_in *in;
| 1229 | +	struct fuse_args *args;
1267 | 1230 | 	unsigned reqsize;
| 1231 | +	unsigned int hash;
| 1232 | +
| 1233 | +	/*
| 1234 | +	 * Require sane minimum read buffer - that has capacity for fixed part
| 1235 | +	 * of any request header + negotiated max_write room for data.
| 1236 | +	 *
| 1237 | +	 * Historically libfuse reserves 4K for fixed header room, but e.g.
| 1238 | +	 * GlusterFS reserves only 80 bytes
| 1239 | +	 *
| 1240 | +	 *	= `sizeof(fuse_in_header) + sizeof(fuse_write_in)`
| 1241 | +	 *
| 1242 | +	 * which is the absolute minimum any sane filesystem should be using
| 1243 | +	 * for header room.
| 1244 | +	 */
| 1245 | +	if (nbytes < max_t(size_t, FUSE_MIN_READ_BUFFER,
| 1246 | +			   sizeof(struct fuse_in_header) +
| 1247 | +			   sizeof(struct fuse_write_in) +
| 1248 | +			   fc->max_write))
| 1249 | +		return -EINVAL;
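For a userspace server, the new check is a hard floor on the buffer handed to read(2) on /dev/fuse. A daemon-side sketch of the same computation, using only uapi definitions (`FUSE_MIN_READ_BUFFER` and the two structs come from `<linux/fuse.h>`):

```c
#include <linux/fuse.h>	/* fuse_in_header, fuse_write_in, FUSE_MIN_READ_BUFFER */
#include <stddef.h>

/* Smallest read buffer the kernel will now accept; max_write is whatever
 * was negotiated at FUSE_INIT time. */
static size_t min_read_buffer(size_t max_write)
{
	size_t need = sizeof(struct fuse_in_header) +
		      sizeof(struct fuse_write_in) + max_write;

	return need > FUSE_MIN_READ_BUFFER ? need : FUSE_MIN_READ_BUFFER;
}
```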
1268 | 1250 |
1269 | 1251 |  restart:
1270 | 1252 | 	for (;;) {
.. | ..
1282 | 1264 | 	}
1283 | 1265 |
1284 | 1266 | 	if (!fiq->connected) {
1285 | | -		err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
| 1267 | +		err = fc->aborted ? -ECONNABORTED : -ENODEV;
1286 | 1268 | 		goto err_unlock;
1287 | 1269 | 	}
1288 | 1270 |
.. | ..
1305 | 1287 | 	list_del_init(&req->list);
1306 | 1288 | 	spin_unlock(&fiq->lock);
1307 | 1289 |
1308 | | -	in = &req->in;
1309 | | -	reqsize = in->h.len;
| 1290 | +	args = req->args;
| 1291 | +	reqsize = req->in.h.len;
1310 | 1292 |
1311 | 1293 | 	/* If request is too large, reply with an error and restart the read */
1312 | 1294 | 	if (nbytes < reqsize) {
1313 | 1295 | 		req->out.h.error = -EIO;
1314 | 1296 | 		/* SETXATTR is special, since it may contain too large data */
1315 | | -		if (in->h.opcode == FUSE_SETXATTR)
| 1297 | +		if (args->opcode == FUSE_SETXATTR)
1316 | 1298 | 			req->out.h.error = -E2BIG;
1317 | | -		request_end(fc, req);
| 1299 | +		fuse_request_end(req);
1318 | 1300 | 		goto restart;
1319 | 1301 | 	}
1320 | 1302 | 	spin_lock(&fpq->lock);
.. | ..
1330 | 1312 | 	list_add(&req->list, &fpq->io);
1331 | 1313 | 	spin_unlock(&fpq->lock);
1332 | 1314 | 	cs->req = req;
1333 | | -	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
| 1315 | +	err = fuse_copy_one(cs, &req->in.h, sizeof(req->in.h));
1334 | 1316 | 	if (!err)
1335 | | -		err = fuse_copy_args(cs, in->numargs, in->argpages,
1336 | | -				     (struct fuse_arg *) in->args, 0);
| 1317 | +		err = fuse_copy_args(cs, args->in_numargs, args->in_pages,
| 1318 | +				     (struct fuse_arg *) args->in_args, 0);
1337 | 1319 | 	fuse_copy_finish(cs);
1338 | 1320 | 	spin_lock(&fpq->lock);
1339 | 1321 | 	clear_bit(FR_LOCKED, &req->flags);
1340 | 1322 | 	if (!fpq->connected) {
1341 | | -		err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
| 1323 | +		err = fc->aborted ? -ECONNABORTED : -ENODEV;
1342 | 1324 | 		goto out_end;
1343 | 1325 | 	}
1344 | 1326 | 	if (err) {
.. | ..
1349 | 1331 | 		err = reqsize;
1350 | 1332 | 		goto out_end;
1351 | 1333 | 	}
1352 | | -	list_move_tail(&req->list, &fpq->processing);
| 1334 | +	hash = fuse_req_hash(req->in.h.unique);
| 1335 | +	list_move_tail(&req->list, &fpq->processing[hash]);
1353 | 1336 | 	__fuse_get_request(req);
1354 | 1337 | 	set_bit(FR_SENT, &req->flags);
1355 | 1338 | 	spin_unlock(&fpq->lock);
1356 | 1339 | 	/* matches barrier in request_wait_answer() */
1357 | 1340 | 	smp_mb__after_atomic();
1358 | 1341 | 	if (test_bit(FR_INTERRUPTED, &req->flags))
1359 | | -		queue_interrupt(fiq, req);
1360 | | -	fuse_put_request(fc, req);
| 1342 | +		queue_interrupt(req);
| 1343 | +	fuse_put_request(req);
1361 | 1344 |
1362 | 1345 | 	return reqsize;
.. | ..
1365 | 1348 | 	if (!test_bit(FR_PRIVATE, &req->flags))
1366 | 1349 | 		list_del_init(&req->list);
1367 | 1350 | 	spin_unlock(&fpq->lock);
1368 | | -	request_end(fc, req);
| 1351 | +	fuse_request_end(req);
1369 | 1352 | 	return err;
1370 | 1353 |
1371 | 1354 |  err_unlock:
.. | ..
1414 | 1397 | 	if (!fud)
1415 | 1398 | 		return -EPERM;
1416 | 1399 |
1417 | | -	bufs = kvmalloc_array(pipe->buffers, sizeof(struct pipe_buffer),
| 1400 | +	bufs = kvmalloc_array(pipe->max_usage, sizeof(struct pipe_buffer),
1418 | 1401 | 			      GFP_KERNEL);
1419 | 1402 | 	if (!bufs)
1420 | 1403 | 		return -ENOMEM;
.. | ..
1426 | 1409 | 	if (ret < 0)
1427 | 1410 | 		goto out;
1428 | 1411 |
1429 | | -	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
| 1412 | +	if (pipe_occupancy(pipe->head, pipe->tail) + cs.nr_segs > pipe->max_usage) {
1430 | 1413 | 		ret = -EIO;
1431 | 1414 | 		goto out;
1432 | 1415 | 	}
.. | ..
1488 | 1471 | 	fuse_copy_finish(cs);
1489 | 1472 |
1490 | 1473 | 	down_read(&fc->killsb);
1491 | | -	err = -ENOENT;
1492 | | -	if (fc->sb) {
1493 | | -		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1494 | | -					       outarg.off, outarg.len);
1495 | | -	}
| 1474 | +	err = fuse_reverse_inval_inode(fc, outarg.ino,
| 1475 | +				       outarg.off, outarg.len);
1496 | 1476 | 	up_read(&fc->killsb);
1497 | 1477 | 	return err;
1498 | 1478 |
.. | ..
1538 | 1518 | 	buf[outarg.namelen] = 0;
1539 | 1519 |
1540 | 1520 | 	down_read(&fc->killsb);
1541 | | -	err = -ENOENT;
1542 | | -	if (fc->sb)
1543 | | -		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
| 1521 | +	err = fuse_reverse_inval_entry(fc, outarg.parent, 0, &name);
1544 | 1522 | 	up_read(&fc->killsb);
1545 | 1523 | 	kfree(buf);
1546 | 1524 | 	return err;
.. | ..
1588 | 1566 | 	buf[outarg.namelen] = 0;
1589 | 1567 |
1590 | 1568 | 	down_read(&fc->killsb);
1591 | | -	err = -ENOENT;
1592 | | -	if (fc->sb)
1593 | | -		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1594 | | -					       outarg.child, &name);
| 1569 | +	err = fuse_reverse_inval_entry(fc, outarg.parent, outarg.child, &name);
1595 | 1570 | 	up_read(&fc->killsb);
1596 | 1571 | 	kfree(buf);
1597 | 1572 | 	return err;
.. | ..
1633 | 1608 | 	down_read(&fc->killsb);
1634 | 1609 |
1635 | 1610 | 	err = -ENOENT;
1636 | | -	if (!fc->sb)
1637 | | -		goto out_up_killsb;
1638 | | -
1639 | | -	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
| 1611 | +	inode = fuse_ilookup(fc, nodeid, NULL);
1640 | 1612 | 	if (!inode)
1641 | 1613 | 		goto out_up_killsb;
1642 | 1614 |
.. | ..
1688 | 1660 | 	return err;
1689 | 1661 | }
1690 | 1662 |
1691 | | -static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
| 1663 | +struct fuse_retrieve_args {
| 1664 | +	struct fuse_args_pages ap;
| 1665 | +	struct fuse_notify_retrieve_in inarg;
| 1666 | +};
| 1667 | +
| 1668 | +static void fuse_retrieve_end(struct fuse_mount *fm, struct fuse_args *args,
| 1669 | +			      int error)
1692 | 1670 | {
1693 | | -	release_pages(req->pages, req->num_pages);
| 1671 | +	struct fuse_retrieve_args *ra =
| 1672 | +		container_of(args, typeof(*ra), ap.args);
| 1673 | +
| 1674 | +	release_pages(ra->ap.pages, ra->ap.num_pages);
| 1675 | +	kfree(ra);
1694 | 1676 | }
1695 | 1677 |
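The retrieve path illustrates the pattern that replaces `req->misc` and `req->end`: op-specific state embeds the generic argument structure, and the `end` callback recovers the outer struct with `container_of()`. A generic sketch of the same shape (names are illustrative):

```c
struct example_args {
	struct fuse_args_pages ap;	/* generic args embedded up front */
	int private_state;		/* op-specific extras live alongside */
};

static void example_end(struct fuse_mount *fm, struct fuse_args *args,
			int error)
{
	struct example_args *ea = container_of(args, typeof(*ea), ap.args);

	kfree(ea);	/* the generic args go away with the outer struct */
}
```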
1696 | | -static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
| 1678 | +static int fuse_retrieve(struct fuse_mount *fm, struct inode *inode,
1697 | 1679 | 			 struct fuse_notify_retrieve_out *outarg)
1698 | 1680 | {
1699 | 1681 | 	int err;
1700 | 1682 | 	struct address_space *mapping = inode->i_mapping;
1701 | | -	struct fuse_req *req;
1702 | 1683 | 	pgoff_t index;
1703 | 1684 | 	loff_t file_size;
1704 | 1685 | 	unsigned int num;
1705 | 1686 | 	unsigned int offset;
1706 | 1687 | 	size_t total_len = 0;
1707 | | -	int num_pages;
| 1688 | +	unsigned int num_pages;
| 1689 | +	struct fuse_conn *fc = fm->fc;
| 1690 | +	struct fuse_retrieve_args *ra;
| 1691 | +	size_t args_size = sizeof(*ra);
| 1692 | +	struct fuse_args_pages *ap;
| 1693 | +	struct fuse_args *args;
1708 | 1694 |
1709 | 1695 | 	offset = outarg->offset & ~PAGE_MASK;
1710 | 1696 | 	file_size = i_size_read(inode);
.. | ..
1716 | 1702 | 		num = file_size - outarg->offset;
1717 | 1703 |
1718 | 1704 | 	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1719 | | -	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
| 1705 | +	num_pages = min(num_pages, fc->max_pages);
1720 | 1706 |
1721 | | -	req = fuse_get_req(fc, num_pages);
1722 | | -	if (IS_ERR(req))
1723 | | -		return PTR_ERR(req);
| 1707 | +	args_size += num_pages * (sizeof(ap->pages[0]) + sizeof(ap->descs[0]));
1724 | 1708 |
1725 | | -	req->in.h.opcode = FUSE_NOTIFY_REPLY;
1726 | | -	req->in.h.nodeid = outarg->nodeid;
1727 | | -	req->in.numargs = 2;
1728 | | -	req->in.argpages = 1;
1729 | | -	req->end = fuse_retrieve_end;
| 1709 | +	ra = kzalloc(args_size, GFP_KERNEL);
| 1710 | +	if (!ra)
| 1711 | +		return -ENOMEM;
| 1712 | +
| 1713 | +	ap = &ra->ap;
| 1714 | +	ap->pages = (void *) (ra + 1);
| 1715 | +	ap->descs = (void *) (ap->pages + num_pages);
| 1716 | +
| 1717 | +	args = &ap->args;
| 1718 | +	args->nodeid = outarg->nodeid;
| 1719 | +	args->opcode = FUSE_NOTIFY_REPLY;
| 1720 | +	args->in_numargs = 2;
| 1721 | +	args->in_pages = true;
| 1722 | +	args->end = fuse_retrieve_end;
1730 | 1723 |
1731 | 1724 | 	index = outarg->offset >> PAGE_SHIFT;
1732 | 1725 |
1733 | | -	while (num && req->num_pages < num_pages) {
| 1726 | +	while (num && ap->num_pages < num_pages) {
1734 | 1727 | 		struct page *page;
1735 | 1728 | 		unsigned int this_num;
1736 | 1729 |
.. | ..
1739 | 1732 | 			break;
1740 | 1733 |
1741 | 1734 | 		this_num = min_t(unsigned, num, PAGE_SIZE - offset);
1742 | | -		req->pages[req->num_pages] = page;
1743 | | -		req->page_descs[req->num_pages].offset = offset;
1744 | | -		req->page_descs[req->num_pages].length = this_num;
1745 | | -		req->num_pages++;
| 1735 | +		ap->pages[ap->num_pages] = page;
| 1736 | +		ap->descs[ap->num_pages].offset = offset;
| 1737 | +		ap->descs[ap->num_pages].length = this_num;
| 1738 | +		ap->num_pages++;
1746 | 1739 |
1747 | 1740 | 		offset = 0;
1748 | 1741 | 		num -= this_num;
1749 | 1742 | 		total_len += this_num;
1750 | 1743 | 		index++;
1751 | 1744 | 	}
1752 | | -	req->misc.retrieve_in.offset = outarg->offset;
1753 | | -	req->misc.retrieve_in.size = total_len;
1754 | | -	req->in.args[0].size = sizeof(req->misc.retrieve_in);
1755 | | -	req->in.args[0].value = &req->misc.retrieve_in;
1756 | | -	req->in.args[1].size = total_len;
| 1745 | +	ra->inarg.offset = outarg->offset;
| 1746 | +	ra->inarg.size = total_len;
| 1747 | +	args->in_args[0].size = sizeof(ra->inarg);
| 1748 | +	args->in_args[0].value = &ra->inarg;
| 1749 | +	args->in_args[1].size = total_len;
1757 | 1750 |
1758 | | -	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1759 | | -	if (err) {
1760 | | -		fuse_retrieve_end(fc, req);
1761 | | -		fuse_put_request(fc, req);
1762 | | -	}
| 1751 | +	err = fuse_simple_notify_reply(fm, args, outarg->notify_unique);
| 1752 | +	if (err)
| 1753 | +		fuse_retrieve_end(fm, args, err);
1763 | 1754 |
1764 | 1755 | 	return err;
1765 | 1756 | }
.. | ..
1768 | 1759 | 				    struct fuse_copy_state *cs)
1769 | 1760 | {
1770 | 1761 | 	struct fuse_notify_retrieve_out outarg;
| 1762 | +	struct fuse_mount *fm;
1771 | 1763 | 	struct inode *inode;
| 1764 | +	u64 nodeid;
1772 | 1765 | 	int err;
1773 | 1766 |
1774 | 1767 | 	err = -EINVAL;
.. | ..
1783 | 1776 |
1784 | 1777 | 	down_read(&fc->killsb);
1785 | 1778 | 	err = -ENOENT;
1786 | | -	if (fc->sb) {
1787 | | -		u64 nodeid = outarg.nodeid;
| 1779 | +	nodeid = outarg.nodeid;
1788 | 1780 |
1789 | | -		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1790 | | -		if (inode) {
1791 | | -			err = fuse_retrieve(fc, inode, &outarg);
1792 | | -			iput(inode);
1793 | | -		}
| 1781 | +	inode = fuse_ilookup(fc, nodeid, &fm);
| 1782 | +	if (inode) {
| 1783 | +		err = fuse_retrieve(fm, inode, &outarg);
| 1784 | +		iput(inode);
1794 | 1785 | 	}
1795 | 1786 | 	up_read(&fc->killsb);
1796 | 1787 |
.. | ..
1835 | 1826 | /* Look up request on processing list by unique ID */
1836 | 1827 | static struct fuse_req *request_find(struct fuse_pqueue *fpq, u64 unique)
1837 | 1828 | {
| 1829 | +	unsigned int hash = fuse_req_hash(unique);
1838 | 1830 | 	struct fuse_req *req;
1839 | 1831 |
1840 | | -	list_for_each_entry(req, &fpq->processing, list) {
1841 | | -		if (req->in.h.unique == unique || req->intr_unique == unique)
| 1832 | +	list_for_each_entry(req, &fpq->processing[hash], list) {
| 1833 | +		if (req->in.h.unique == unique)
1842 | 1834 | 			return req;
1843 | 1835 | 	}
1844 | 1836 | 	return NULL;
1845 | 1837 | }
1846 | 1838 |
1847 | | -static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
| 1839 | +static int copy_out_args(struct fuse_copy_state *cs, struct fuse_args *args,
1848 | 1840 | 			 unsigned nbytes)
1849 | 1841 | {
1850 | 1842 | 	unsigned reqsize = sizeof(struct fuse_out_header);
1851 | 1843 |
1852 | | -	if (out->h.error)
1853 | | -		return nbytes != reqsize ? -EINVAL : 0;
| 1844 | +	reqsize += fuse_len_args(args->out_numargs, args->out_args);
1854 | 1845 |
1855 | | -	reqsize += len_args(out->numargs, out->args);
1856 | | -
1857 | | -	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
| 1846 | +	if (reqsize < nbytes || (reqsize > nbytes && !args->out_argvar))
1858 | 1847 | 		return -EINVAL;
1859 | 1848 | 	else if (reqsize > nbytes) {
1860 | | -		struct fuse_arg *lastarg = &out->args[out->numargs-1];
| 1849 | +		struct fuse_arg *lastarg = &args->out_args[args->out_numargs-1];
1861 | 1850 | 		unsigned diffsize = reqsize - nbytes;
| 1851 | +
1862 | 1852 | 		if (diffsize > lastarg->size)
1863 | 1853 | 			return -EINVAL;
1864 | 1854 | 		lastarg->size -= diffsize;
1865 | 1855 | 	}
1866 | | -	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1867 | | -			      out->page_zeroing);
| 1856 | +	return fuse_copy_args(cs, args->out_numargs, args->out_pages,
| 1857 | +			      args->out_args, args->page_zeroing);
1868 | 1858 | }
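A sketch of the `out_argvar` convention that `copy_out_args()` enforces: the last output argument may legally come back shorter than the size the caller offered, and `fuse_simple_request()` then returns the actual length (per the `out_args[out_numargs - 1].size` hunk earlier). Illustrative caller:

```c
static ssize_t example_variable_reply(struct fuse_mount *fm,
				      struct fuse_args *args,
				      void *buf, size_t bufsize)
{
	args->out_argvar = true;
	args->out_numargs = 1;
	args->out_args[0].size = bufsize;	/* maximum we can accept */
	args->out_args[0].value = buf;

	/* >= 0: bytes the server actually returned; < 0: error code */
	return fuse_simple_request(fm, args);
}
```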
1869 | 1859 | |
---|
1870 | 1860 | /* |
---|
.. | .. |
---|
1872 | 1862 | * the write buffer. The request is then searched on the processing |
---|
1873 | 1863 | * list by the unique ID found in the header. If found, then remove |
---|
1874 | 1864 | * it from the list and copy the rest of the buffer to the request. |
---|
1875 | | - * The request is finished by calling request_end() |
---|
| 1865 | + * The request is finished by calling fuse_request_end(). |
---|
1876 | 1866 | */ |
---|
1877 | 1867 | static ssize_t fuse_dev_do_write(struct fuse_dev *fud, |
---|
1878 | 1868 | struct fuse_copy_state *cs, size_t nbytes) |
---|
.. | .. |
---|
1883 | 1873 | struct fuse_req *req; |
---|
1884 | 1874 | struct fuse_out_header oh; |
---|
1885 | 1875 | |
---|
| 1876 | + err = -EINVAL; |
---|
1886 | 1877 | if (nbytes < sizeof(struct fuse_out_header)) |
---|
1887 | | - return -EINVAL; |
---|
| 1878 | + goto out; |
---|
1888 | 1879 | |
---|
1889 | 1880 | err = fuse_copy_one(cs, &oh, sizeof(oh)); |
---|
1890 | 1881 | if (err) |
---|
1891 | | - goto err_finish; |
---|
| 1882 | + goto copy_finish; |
---|
1892 | 1883 | |
---|
1893 | 1884 | err = -EINVAL; |
---|
1894 | 1885 | if (oh.len != nbytes) |
---|
1895 | | - goto err_finish; |
---|
| 1886 | + goto copy_finish; |
---|
1896 | 1887 | |
---|
1897 | 1888 | /* |
---|
1898 | 1889 | * Zero oh.unique indicates unsolicited notification message |
---|
.. | .. |
---|
1900 | 1891 | */ |
---|
1901 | 1892 | if (!oh.unique) { |
---|
1902 | 1893 | err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs); |
---|
1903 | | - return err ? err : nbytes; |
---|
| 1894 | + goto out; |
---|
1904 | 1895 | } |
---|
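The zero-unique branch is the daemon-initiated direction: a notification is framed exactly like a reply, except that unique is 0 and the error field carries the FUSE_NOTIFY_* code. A userspace sketch using the poll-wakeup notification from the UAPI header (notify_poll_wakeup() is an illustrative wrapper name):

#include <stdint.h>
#include <sys/uio.h>
#include <linux/fuse.h>

/* Push an unsolicited poll-wakeup notification to the kernel:
 * unique == 0 marks it as a notification rather than a reply, and
 * .error carries the notification code. */
static int notify_poll_wakeup(int fuse_fd, uint64_t kh)
{
	struct fuse_notify_poll_wakeup_out outarg = { .kh = kh };
	struct fuse_out_header oh = {
		.len = sizeof(oh) + sizeof(outarg),
		.error = FUSE_NOTIFY_POLL,	/* notification code */
		.unique = 0,			/* not a reply */
	};
	struct iovec iov[2] = {
		{ .iov_base = &oh,     .iov_len = sizeof(oh) },
		{ .iov_base = &outarg, .iov_len = sizeof(outarg) },
	};

	return writev(fuse_fd, iov, 2) < 0 ? -1 : 0;
}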
1905 | 1896 | |
---|
1906 | 1897 | err = -EINVAL; |
---|
1907 | 1898 | if (oh.error <= -512 || oh.error > 0) |
---|
1908 | | - goto err_finish; |
---|
| 1899 | + goto copy_finish; |
---|
1909 | 1900 | |
---|
1910 | 1901 | spin_lock(&fpq->lock); |
---|
| 1902 | + req = NULL; |
---|
| 1903 | + if (fpq->connected) |
---|
| 1904 | + req = request_find(fpq, oh.unique & ~FUSE_INT_REQ_BIT); |
---|
| 1905 | + |
---|
1911 | 1906 | err = -ENOENT; |
---|
1912 | | - if (!fpq->connected) |
---|
1913 | | - goto err_unlock_pq; |
---|
| 1907 | + if (!req) { |
---|
| 1908 | + spin_unlock(&fpq->lock); |
---|
| 1909 | + goto copy_finish; |
---|
| 1910 | + } |
---|
1914 | 1911 | |
---|
1915 | | - req = request_find(fpq, oh.unique); |
---|
1916 | | - if (!req) |
---|
1917 | | - goto err_unlock_pq; |
---|
1918 | | - |
---|
1919 | | - /* Is it an interrupt reply? */ |
---|
1920 | | - if (req->intr_unique == oh.unique) { |
---|
| 1912 | + /* Is it an interrupt reply ID? */ |
---|
| 1913 | + if (oh.unique & FUSE_INT_REQ_BIT) { |
---|
1921 | 1914 | __fuse_get_request(req); |
---|
1922 | 1915 | spin_unlock(&fpq->lock); |
---|
1923 | 1916 | |
---|
1924 | | - err = -EINVAL; |
---|
1925 | | - if (nbytes != sizeof(struct fuse_out_header)) { |
---|
1926 | | - fuse_put_request(fc, req); |
---|
1927 | | - goto err_finish; |
---|
1928 | | - } |
---|
1929 | | - |
---|
1930 | | - if (oh.error == -ENOSYS) |
---|
| 1917 | + err = 0; |
---|
| 1918 | + if (nbytes != sizeof(struct fuse_out_header)) |
---|
| 1919 | + err = -EINVAL; |
---|
| 1920 | + else if (oh.error == -ENOSYS) |
---|
1931 | 1921 | fc->no_interrupt = 1; |
---|
1932 | 1922 | else if (oh.error == -EAGAIN) |
---|
1933 | | - queue_interrupt(&fc->iq, req); |
---|
1934 | | - fuse_put_request(fc, req); |
---|
| 1923 | + err = queue_interrupt(req); |
---|
1935 | 1924 | |
---|
1936 | | - fuse_copy_finish(cs); |
---|
1937 | | - return nbytes; |
---|
| 1925 | + fuse_put_request(req); |
---|
| 1926 | + |
---|
| 1927 | + goto copy_finish; |
---|
1938 | 1928 | } |
---|
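This branch replaces the old req->intr_unique match. Ordinary request IDs keep the low bit clear, and the kernel queues an interrupt under the request's ID with FUSE_INT_REQ_BIT set, so a single reply ID encodes both the target request and the fact that it answers the interrupt. A standalone model of the encode/decode (the bit's definition here is an assumption, mirroring its use above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FUSE_INT_REQ_BIT (1ULL << 0)	/* assumption: the ID's low bit */

int main(void)
{
	uint64_t unique = 14;				/* ordinary ID: low bit clear */
	uint64_t intr = unique | FUSE_INT_REQ_BIT;	/* ID the interrupt was sent under */

	bool is_intr = intr & FUSE_INT_REQ_BIT;		/* the branch test above */
	uint64_t lookup = intr & ~FUSE_INT_REQ_BIT;	/* key passed to request_find() */

	printf("is_intr=%d lookup=%llu\n", is_intr,
	       (unsigned long long)lookup);		/* is_intr=1 lookup=14 */
	return 0;
}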
1939 | 1929 | |
---|
1940 | 1930 | clear_bit(FR_SENT, &req->flags); |
---|
.. | .. |
---|
1943 | 1933 | set_bit(FR_LOCKED, &req->flags); |
---|
1944 | 1934 | spin_unlock(&fpq->lock); |
---|
1945 | 1935 | cs->req = req; |
---|
1946 | | - if (!req->out.page_replace) |
---|
| 1936 | + if (!req->args->page_replace) |
---|
1947 | 1937 | cs->move_pages = 0; |
---|
1948 | 1938 | |
---|
1949 | | - err = copy_out_args(cs, &req->out, nbytes); |
---|
| 1939 | + if (oh.error) |
---|
| 1940 | + err = nbytes != sizeof(oh) ? -EINVAL : 0; |
---|
| 1941 | + else |
---|
| 1942 | + err = copy_out_args(cs, req->args, nbytes); |
---|
1950 | 1943 | fuse_copy_finish(cs); |
---|
1951 | 1944 | |
---|
1952 | 1945 | if (!err && req->in.h.opcode == FUSE_CANONICAL_PATH) { |
---|
1953 | | - char *path = (char *)req->out.args[0].value; |
---|
| 1946 | + char *path = (char *)req->args->out_args[0].value; |
---|
1954 | 1947 | |
---|
1955 | | - path[req->out.args[0].size - 1] = 0; |
---|
| 1948 | + path[req->args->out_args[0].size - 1] = 0; |
---|
1956 | 1949 | if (req->out.h.error != -ENOSYS) |
---|
1957 | | - req->out.h.error = kern_path(path, 0, req->out.canonical_path); |
---|
| 1950 | + req->out.h.error = kern_path(path, 0, req->args->canonical_path); |
---|
1958 | 1951 | } |
---|
1959 | 1952 | |
---|
1960 | 1953 | spin_lock(&fpq->lock); |
---|
.. | .. |
---|
1967 | 1960 | list_del_init(&req->list); |
---|
1968 | 1961 | spin_unlock(&fpq->lock); |
---|
1969 | 1962 | |
---|
1970 | | - request_end(fc, req); |
---|
1971 | | - |
---|
| 1963 | + fuse_request_end(req); |
---|
| 1964 | +out: |
---|
1972 | 1965 | return err ? err : nbytes; |
---|
1973 | 1966 | |
---|
1974 | | - err_unlock_pq: |
---|
1975 | | - spin_unlock(&fpq->lock); |
---|
1976 | | - err_finish: |
---|
| 1967 | +copy_finish: |
---|
1977 | 1968 | fuse_copy_finish(cs); |
---|
1978 | | - return err; |
---|
| 1969 | + goto out; |
---|
1979 | 1970 | } |
---|
1980 | 1971 | |
---|
1981 | 1972 | static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from) |
---|
.. | .. |
---|
1998 | 1989 | struct file *out, loff_t *ppos, |
---|
1999 | 1990 | size_t len, unsigned int flags) |
---|
2000 | 1991 | { |
---|
| 1992 | + unsigned int head, tail, mask, count; |
---|
2001 | 1993 | unsigned nbuf; |
---|
2002 | 1994 | unsigned idx; |
---|
2003 | 1995 | struct pipe_buffer *bufs; |
---|
.. | .. |
---|
2012 | 2004 | |
---|
2013 | 2005 | pipe_lock(pipe); |
---|
2014 | 2006 | |
---|
2015 | | - bufs = kvmalloc_array(pipe->nrbufs, sizeof(struct pipe_buffer), |
---|
2016 | | - GFP_KERNEL); |
---|
| 2007 | + head = pipe->head; |
---|
| 2008 | + tail = pipe->tail; |
---|
| 2009 | + mask = pipe->ring_size - 1; |
---|
| 2010 | + count = head - tail; |
---|
| 2011 | + |
---|
| 2012 | + bufs = kvmalloc_array(count, sizeof(struct pipe_buffer), GFP_KERNEL); |
---|
2017 | 2013 | if (!bufs) { |
---|
2018 | 2014 | pipe_unlock(pipe); |
---|
2019 | 2015 | return -ENOMEM; |
---|
.. | .. |
---|
2021 | 2017 | |
---|
2022 | 2018 | nbuf = 0; |
---|
2023 | 2019 | rem = 0; |
---|
2024 | | - for (idx = 0; idx < pipe->nrbufs && rem < len; idx++) |
---|
2025 | | - rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len; |
---|
| 2020 | + for (idx = tail; idx != head && rem < len; idx++) |
---|
| 2021 | + rem += pipe->bufs[idx & mask].len; |
---|
2026 | 2022 | |
---|
2027 | 2023 | ret = -EINVAL; |
---|
2028 | 2024 | if (rem < len) |
---|
.. | .. |
---|
2033 | 2029 | struct pipe_buffer *ibuf; |
---|
2034 | 2030 | struct pipe_buffer *obuf; |
---|
2035 | 2031 | |
---|
2036 | | - BUG_ON(nbuf >= pipe->buffers); |
---|
2037 | | - BUG_ON(!pipe->nrbufs); |
---|
2038 | | - ibuf = &pipe->bufs[pipe->curbuf]; |
---|
| 2032 | + if (WARN_ON(nbuf >= count || tail == head)) |
---|
| 2033 | + goto out_free; |
---|
| 2034 | + |
---|
| 2035 | + ibuf = &pipe->bufs[tail & mask]; |
---|
2039 | 2036 | obuf = &bufs[nbuf]; |
---|
2040 | 2037 | |
---|
2041 | 2038 | if (rem >= ibuf->len) { |
---|
2042 | 2039 | *obuf = *ibuf; |
---|
2043 | 2040 | ibuf->ops = NULL; |
---|
2044 | | - pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1); |
---|
2045 | | - pipe->nrbufs--; |
---|
| 2041 | + tail++; |
---|
| 2042 | + pipe->tail = tail; |
---|
2046 | 2043 | } else { |
---|
2047 | 2044 | if (!pipe_buf_get(pipe, ibuf)) |
---|
2048 | 2045 | goto out_free; |
---|
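The splice path is converted from the old curbuf/nrbufs pair to the head/tail/mask scheme of the reworked pipe ring: both counters run freely and only the masked value indexes pipe->bufs, which keeps "head - tail" meaningful even across counter wraparound. A minimal standalone model of that indexing:

#include <stdio.h>

#define RING_SIZE 8u			/* power of two, like pipe->ring_size */

int main(void)
{
	unsigned int tail = 4294967294u;	/* UINT_MAX - 1: about to wrap */
	unsigned int head = 2u;			/* already wrapped past zero */
	unsigned int mask = RING_SIZE - 1;

	/* Occupancy stays correct through the wrap thanks to modular
	 * unsigned arithmetic. */
	printf("occupied: %u\n", head - tail);	/* prints 4 */
	for (unsigned int idx = tail; idx != head; idx++)
		printf("slot %u\n", idx & mask);
	return 0;
}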
.. | .. |
---|
2104 | 2101 | return mask; |
---|
2105 | 2102 | } |
---|
2106 | 2103 | |
---|
2107 | | -/* |
---|
2108 | | - * Abort all requests on the given list (pending or processing) |
---|
2109 | | - * |
---|
2110 | | - * This function releases and reacquires fc->lock |
---|
2111 | | - */ |
---|
2112 | | -static void end_requests(struct fuse_conn *fc, struct list_head *head) |
---|
| 2104 | +/* Abort all requests on the given list (pending or processing) */ |
---|
| 2105 | +static void end_requests(struct list_head *head) |
---|
2113 | 2106 | { |
---|
2114 | 2107 | while (!list_empty(head)) { |
---|
2115 | 2108 | struct fuse_req *req; |
---|
.. | .. |
---|
2117 | 2110 | req->out.h.error = -ECONNABORTED; |
---|
2118 | 2111 | clear_bit(FR_SENT, &req->flags); |
---|
2119 | 2112 | list_del_init(&req->list); |
---|
2120 | | - request_end(fc, req); |
---|
| 2113 | + fuse_request_end(req); |
---|
2121 | 2114 | } |
---|
2122 | 2115 | } |
---|
2123 | 2116 | |
---|
.. | .. |
---|
2145 | 2138 | * The same effect is usually achievable through killing the filesystem daemon |
---|
2146 | 2139 | * and all users of the filesystem. The exception is the combination of an |
---|
2147 | 2140 | * asynchronous request and the tricky deadlock (see |
---|
2148 | | - * Documentation/filesystems/fuse.txt). |
---|
| 2141 | + * Documentation/filesystems/fuse.rst). |
---|
2149 | 2142 | * |
---|
2150 | 2143 | * Aborting requests under I/O goes as follows: 1: Separate out unlocked |
---|
2151 | 2144 | * requests, they should be finished off immediately. Locked requests will be |
---|
.. | .. |
---|
2154 | 2147 | * is OK, the request will in that case be removed from the list before we touch |
---|
2155 | 2148 | * it. |
---|
2156 | 2149 | */ |
---|
2157 | | -void fuse_abort_conn(struct fuse_conn *fc, bool is_abort) |
---|
| 2150 | +void fuse_abort_conn(struct fuse_conn *fc) |
---|
2158 | 2151 | { |
---|
2159 | 2152 | struct fuse_iqueue *fiq = &fc->iq; |
---|
2160 | 2153 | |
---|
.. | .. |
---|
2163 | 2156 | struct fuse_dev *fud; |
---|
2164 | 2157 | struct fuse_req *req, *next; |
---|
2165 | 2158 | LIST_HEAD(to_end); |
---|
| 2159 | + unsigned int i; |
---|
2166 | 2160 | |
---|
| 2161 | + /* Background queuing checks fc->connected under bg_lock */ |
---|
| 2162 | + spin_lock(&fc->bg_lock); |
---|
2167 | 2163 | fc->connected = 0; |
---|
2168 | | - fc->blocked = 0; |
---|
2169 | | - fc->aborted = is_abort; |
---|
| 2164 | + spin_unlock(&fc->bg_lock); |
---|
| 2165 | + |
---|
2170 | 2166 | fuse_set_initialized(fc); |
---|
2171 | 2167 | list_for_each_entry(fud, &fc->devices, entry) { |
---|
2172 | 2168 | struct fuse_pqueue *fpq = &fud->pq; |
---|
.. | .. |
---|
2184 | 2180 | } |
---|
2185 | 2181 | spin_unlock(&req->waitq.lock); |
---|
2186 | 2182 | } |
---|
2187 | | - list_splice_tail_init(&fpq->processing, &to_end); |
---|
| 2183 | + for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) |
---|
| 2184 | + list_splice_tail_init(&fpq->processing[i], |
---|
| 2185 | + &to_end); |
---|
2188 | 2186 | spin_unlock(&fpq->lock); |
---|
2189 | 2187 | } |
---|
| 2188 | + spin_lock(&fc->bg_lock); |
---|
| 2189 | + fc->blocked = 0; |
---|
2190 | 2190 | fc->max_background = UINT_MAX; |
---|
2191 | 2191 | flush_bg_queue(fc); |
---|
| 2192 | + spin_unlock(&fc->bg_lock); |
---|
2192 | 2193 | |
---|
2193 | 2194 | spin_lock(&fiq->lock); |
---|
2194 | 2195 | fiq->connected = 0; |
---|
.. | .. |
---|
2196 | 2197 | clear_bit(FR_PENDING, &req->flags); |
---|
2197 | 2198 | list_splice_tail_init(&fiq->pending, &to_end); |
---|
2198 | 2199 | while (forget_pending(fiq)) |
---|
2199 | | - kfree(dequeue_forget(fiq, 1, NULL)); |
---|
| 2200 | + kfree(fuse_dequeue_forget(fiq, 1, NULL)); |
---|
2200 | 2201 | wake_up_all(&fiq->waitq); |
---|
2201 | 2202 | spin_unlock(&fiq->lock); |
---|
2202 | 2203 | kill_fasync(&fiq->fasync, SIGIO, POLL_IN); |
---|
.. | .. |
---|
2204 | 2205 | wake_up_all(&fc->blocked_waitq); |
---|
2205 | 2206 | spin_unlock(&fc->lock); |
---|
2206 | 2207 | |
---|
2207 | | - end_requests(fc, &to_end); |
---|
| 2208 | + end_requests(&to_end); |
---|
2208 | 2209 | } else { |
---|
2209 | 2210 | spin_unlock(&fc->lock); |
---|
2210 | 2211 | } |
---|
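fc->connected is now flipped under bg_lock because the background-queuing path tests the flag under that same lock; taking it here closes the window in which a new request could still be appended to bg_queue after the abort has spliced the queues for termination. The counterpart, fuse_request_queue_background() elsewhere in this file, looks roughly like the sketch below; the details are approximate and only the placement of the fc->connected test under bg_lock matters:

	/* Hedged sketch of the queuing side: the connected check and the
	 * bg_queue insertion are atomic with respect to the abort above. */
	spin_lock(&fc->bg_lock);
	if (likely(fc->connected)) {
		fc->num_background++;
		list_add_tail(&req->list, &fc->bg_queue);
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->bg_lock);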
.. | .. |
---|
2226 | 2227 | struct fuse_conn *fc = fud->fc; |
---|
2227 | 2228 | struct fuse_pqueue *fpq = &fud->pq; |
---|
2228 | 2229 | LIST_HEAD(to_end); |
---|
| 2230 | + unsigned int i; |
---|
2229 | 2231 | |
---|
2230 | 2232 | spin_lock(&fpq->lock); |
---|
2231 | 2233 | WARN_ON(!list_empty(&fpq->io)); |
---|
2232 | | - list_splice_init(&fpq->processing, &to_end); |
---|
| 2234 | + for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) |
---|
| 2235 | + list_splice_init(&fpq->processing[i], &to_end); |
---|
2233 | 2236 | spin_unlock(&fpq->lock); |
---|
2234 | 2237 | |
---|
2235 | | - end_requests(fc, &to_end); |
---|
| 2238 | + end_requests(&to_end); |
---|
2236 | 2239 | |
---|
2237 | 2240 | /* Are we the last open device? */ |
---|
2238 | 2241 | if (atomic_dec_and_test(&fc->dev_count)) { |
---|
2239 | 2242 | WARN_ON(fc->iq.fasync != NULL); |
---|
2240 | | - fuse_abort_conn(fc, false); |
---|
| 2243 | + fuse_abort_conn(fc); |
---|
2241 | 2244 | } |
---|
2242 | 2245 | fuse_dev_free(fud); |
---|
2243 | 2246 | } |
---|
.. | .. |
---|
2263 | 2266 | if (new->private_data) |
---|
2264 | 2267 | return -EINVAL; |
---|
2265 | 2268 | |
---|
2266 | | - fud = fuse_dev_alloc(fc); |
---|
| 2269 | + fud = fuse_dev_alloc_install(fc); |
---|
2267 | 2270 | if (!fud) |
---|
2268 | 2271 | return -ENOMEM; |
---|
2269 | 2272 | |
---|
.. | .. |
---|
2276 | 2279 | static long fuse_dev_ioctl(struct file *file, unsigned int cmd, |
---|
2277 | 2280 | unsigned long arg) |
---|
2278 | 2281 | { |
---|
2279 | | - int err = -ENOTTY; |
---|
| 2282 | + int res; |
---|
| 2283 | + int oldfd; |
---|
| 2284 | + struct fuse_dev *fud = NULL; |
---|
2280 | 2285 | |
---|
2281 | | - if (cmd == FUSE_DEV_IOC_CLONE) { |
---|
2282 | | - int oldfd; |
---|
2283 | | - |
---|
2284 | | - err = -EFAULT; |
---|
2285 | | - if (!get_user(oldfd, (__u32 __user *) arg)) { |
---|
| 2286 | + switch (cmd) { |
---|
| 2287 | + case FUSE_DEV_IOC_CLONE: |
---|
| 2288 | + res = -EFAULT; |
---|
| 2289 | + if (!get_user(oldfd, (__u32 __user *)arg)) { |
---|
2286 | 2290 | struct file *old = fget(oldfd); |
---|
2287 | 2291 | |
---|
2288 | | - err = -EINVAL; |
---|
| 2292 | + res = -EINVAL; |
---|
2289 | 2293 | if (old) { |
---|
2290 | | - struct fuse_dev *fud = NULL; |
---|
2291 | | - |
---|
2292 | 2294 | /* |
---|
2293 | 2295 | * Check against file->f_op because CUSE |
---|
2294 | 2296 | * uses the same ioctl handler. |
---|
2295 | 2297 | */ |
---|
2296 | 2298 | if (old->f_op == file->f_op && |
---|
2297 | | - old->f_cred->user_ns == file->f_cred->user_ns) |
---|
| 2299 | + old->f_cred->user_ns == |
---|
| 2300 | + file->f_cred->user_ns) |
---|
2298 | 2301 | fud = fuse_get_dev(old); |
---|
2299 | 2302 | |
---|
2300 | 2303 | if (fud) { |
---|
2301 | 2304 | mutex_lock(&fuse_mutex); |
---|
2302 | | - err = fuse_device_clone(fud->fc, file); |
---|
| 2305 | + res = fuse_device_clone(fud->fc, file); |
---|
2303 | 2306 | mutex_unlock(&fuse_mutex); |
---|
2304 | 2307 | } |
---|
2305 | 2308 | fput(old); |
---|
2306 | 2309 | } |
---|
2307 | 2310 | } |
---|
| 2311 | + break; |
---|
| 2312 | + case FUSE_DEV_IOC_PASSTHROUGH_OPEN: |
---|
| 2313 | + res = -EFAULT; |
---|
| 2314 | + if (!get_user(oldfd, (__u32 __user *)arg)) { |
---|
| 2315 | + res = -EINVAL; |
---|
| 2316 | + fud = fuse_get_dev(file); |
---|
| 2317 | + if (fud) |
---|
| 2318 | + res = fuse_passthrough_open(fud, oldfd); |
---|
| 2319 | + } |
---|
| 2320 | + break; |
---|
| 2321 | + default: |
---|
| 2322 | + res = -ENOTTY; |
---|
| 2323 | + break; |
---|
2308 | 2324 | } |
---|
2309 | | - return err; |
---|
| 2325 | + return res; |
---|
2310 | 2326 | } |
---|
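The ioctl handler becomes a switch so a second command can share the argument-fetching boilerplate; FUSE_DEV_IOC_CLONE keeps its old behaviour of binding a freshly opened /dev/fuse fd to an existing session. From userspace the clone sequence looks like this (clone_fuse_fd() is an illustrative wrapper; FUSE_DEV_IOC_CLONE itself comes from <linux/fuse.h>):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fuse.h>		/* FUSE_DEV_IOC_CLONE */

/* Open a fresh /dev/fuse fd and attach it to the session that
 * 'session_fd' belongs to, giving the daemon another request queue. */
int clone_fuse_fd(int session_fd)
{
	uint32_t oldfd = session_fd;
	int clone_fd = open("/dev/fuse", O_RDWR | O_CLOEXEC);

	if (clone_fd < 0)
		return -1;
	if (ioctl(clone_fd, FUSE_DEV_IOC_CLONE, &oldfd) == -1) {
		close(clone_fd);
		return -1;
	}
	return clone_fd;
}

Each cloned fd gets its own fuse_dev, and with it its own processing queue, which is what lets multi-threaded daemons spread request handling across fds.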
2311 | 2327 | |
---|
2312 | 2328 | const struct file_operations fuse_dev_operations = { |
---|
.. | .. |
---|
2321 | 2337 | .release = fuse_dev_release, |
---|
2322 | 2338 | .fasync = fuse_dev_fasync, |
---|
2323 | 2339 | .unlocked_ioctl = fuse_dev_ioctl, |
---|
2324 | | - .compat_ioctl = fuse_dev_ioctl, |
---|
| 2340 | + .compat_ioctl = compat_ptr_ioctl, |
---|
2325 | 2341 | }; |
---|
2326 | 2342 | EXPORT_SYMBOL_GPL(fuse_dev_operations); |
---|
2327 | 2343 | |
---|