fuse: allow interrupt queuing without fc->lock
fs/fuse/dev.c
1 /*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7 */
8
9 #include "fuse_i.h"
10
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/poll.h>
14 #include <linux/uio.h>
15 #include <linux/miscdevice.h>
16 #include <linux/pagemap.h>
17 #include <linux/file.h>
18 #include <linux/slab.h>
19 #include <linux/pipe_fs_i.h>
20 #include <linux/swap.h>
21 #include <linux/splice.h>
22
23 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
24 MODULE_ALIAS("devname:fuse");
25
26 static struct kmem_cache *fuse_req_cachep;
27
28 static struct fuse_conn *fuse_get_conn(struct file *file)
29 {
30 /*
31 * Lockless access is OK, because file->private_data is set
32 * once during mount and is valid until the file is released.
33 */
34 return file->private_data;
35 }
36
37 static void fuse_request_init(struct fuse_req *req, struct page **pages,
38 struct fuse_page_desc *page_descs,
39 unsigned npages)
40 {
41 memset(req, 0, sizeof(*req));
42 memset(pages, 0, sizeof(*pages) * npages);
43 memset(page_descs, 0, sizeof(*page_descs) * npages);
44 INIT_LIST_HEAD(&req->list);
45 INIT_LIST_HEAD(&req->intr_entry);
46 init_waitqueue_head(&req->waitq);
47 atomic_set(&req->count, 1);
48 req->pages = pages;
49 req->page_descs = page_descs;
50 req->max_pages = npages;
51 __set_bit(FR_PENDING, &req->flags);
52 }
53
54 static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
55 {
56 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
57 if (req) {
58 struct page **pages;
59 struct fuse_page_desc *page_descs;
60
61 if (npages <= FUSE_REQ_INLINE_PAGES) {
62 pages = req->inline_pages;
63 page_descs = req->inline_page_descs;
64 } else {
65 pages = kmalloc(sizeof(struct page *) * npages, flags);
66 page_descs = kmalloc(sizeof(struct fuse_page_desc) *
67 npages, flags);
68 }
69
70 if (!pages || !page_descs) {
71 kfree(pages);
72 kfree(page_descs);
73 kmem_cache_free(fuse_req_cachep, req);
74 return NULL;
75 }
76
77 fuse_request_init(req, pages, page_descs, npages);
78 }
79 return req;
80 }
81
82 struct fuse_req *fuse_request_alloc(unsigned npages)
83 {
84 return __fuse_request_alloc(npages, GFP_KERNEL);
85 }
86 EXPORT_SYMBOL_GPL(fuse_request_alloc);
87
88 struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
89 {
90 return __fuse_request_alloc(npages, GFP_NOFS);
91 }
92
93 void fuse_request_free(struct fuse_req *req)
94 {
95 if (req->pages != req->inline_pages) {
96 kfree(req->pages);
97 kfree(req->page_descs);
98 }
99 kmem_cache_free(fuse_req_cachep, req);
100 }
101
102 static void block_sigs(sigset_t *oldset)
103 {
104 sigset_t mask;
105
106 siginitsetinv(&mask, sigmask(SIGKILL));
107 sigprocmask(SIG_BLOCK, &mask, oldset);
108 }
109
110 static void restore_sigs(sigset_t *oldset)
111 {
112 sigprocmask(SIG_SETMASK, oldset, NULL);
113 }
114
115 void __fuse_get_request(struct fuse_req *req)
116 {
117 atomic_inc(&req->count);
118 }
119
120 /* Must be called with > 1 refcount */
121 static void __fuse_put_request(struct fuse_req *req)
122 {
123 BUG_ON(atomic_read(&req->count) < 2);
124 atomic_dec(&req->count);
125 }
126
127 static void fuse_req_init_context(struct fuse_req *req)
128 {
129 req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
130 req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
131 req->in.h.pid = current->pid;
132 }
133
134 void fuse_set_initialized(struct fuse_conn *fc)
135 {
136 /* Make sure stores before this are seen on another CPU */
137 smp_wmb();
138 fc->initialized = 1;
139 }
140
141 static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
142 {
143 return !fc->initialized || (for_background && fc->blocked);
144 }
145
146 static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
147 bool for_background)
148 {
149 struct fuse_req *req;
150 int err;
151 atomic_inc(&fc->num_waiting);
152
153 if (fuse_block_alloc(fc, for_background)) {
154 sigset_t oldset;
155 int intr;
156
157 block_sigs(&oldset);
158 intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
159 !fuse_block_alloc(fc, for_background));
160 restore_sigs(&oldset);
161 err = -EINTR;
162 if (intr)
163 goto out;
164 }
165 /* Matches smp_wmb() in fuse_set_initialized() */
166 smp_rmb();
167
168 err = -ENOTCONN;
169 if (!fc->connected)
170 goto out;
171
172 err = -ECONNREFUSED;
173 if (fc->conn_error)
174 goto out;
175
176 req = fuse_request_alloc(npages);
177 err = -ENOMEM;
178 if (!req) {
179 if (for_background)
180 wake_up(&fc->blocked_waitq);
181 goto out;
182 }
183
184 fuse_req_init_context(req);
185 __set_bit(FR_WAITING, &req->flags);
186 if (for_background)
187 __set_bit(FR_BACKGROUND, &req->flags);
188
189 return req;
190
191 out:
192 atomic_dec(&fc->num_waiting);
193 return ERR_PTR(err);
194 }
195
196 struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
197 {
198 return __fuse_get_req(fc, npages, false);
199 }
200 EXPORT_SYMBOL_GPL(fuse_get_req);
201
202 struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
203 unsigned npages)
204 {
205 return __fuse_get_req(fc, npages, true);
206 }
207 EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
208
209 /*
210 * Return request in fuse_file->reserved_req. However that may
211 * currently be in use. If that is the case, wait for it to become
212 * available.
213 */
214 static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
215 struct file *file)
216 {
217 struct fuse_req *req = NULL;
218 struct fuse_file *ff = file->private_data;
219
220 do {
221 wait_event(fc->reserved_req_waitq, ff->reserved_req);
222 spin_lock(&fc->lock);
223 if (ff->reserved_req) {
224 req = ff->reserved_req;
225 ff->reserved_req = NULL;
226 req->stolen_file = get_file(file);
227 }
228 spin_unlock(&fc->lock);
229 } while (!req);
230
231 return req;
232 }
233
234 /*
235 * Put stolen request back into fuse_file->reserved_req
236 */
237 static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
238 {
239 struct file *file = req->stolen_file;
240 struct fuse_file *ff = file->private_data;
241
242 spin_lock(&fc->lock);
243 fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
244 BUG_ON(ff->reserved_req);
245 ff->reserved_req = req;
246 wake_up_all(&fc->reserved_req_waitq);
247 spin_unlock(&fc->lock);
248 fput(file);
249 }
250
251 /*
252 * Gets a request for a file operation, always succeeds
253 *
254 * This is used for sending the FLUSH request, which must get to
255 * userspace, due to POSIX locks which may need to be unlocked.
256 *
257 * If allocation fails due to OOM, use the reserved request in
258 * fuse_file.
259 *
260 * This is very unlikely to deadlock accidentally, since the
261 * filesystem should not have its own file open. If deadlock is
262 * intentional, it can still be broken by "aborting" the filesystem.
263 */
264 struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
265 struct file *file)
266 {
267 struct fuse_req *req;
268
269 atomic_inc(&fc->num_waiting);
270 wait_event(fc->blocked_waitq, fc->initialized);
271 /* Matches smp_wmb() in fuse_set_initialized() */
272 smp_rmb();
273 req = fuse_request_alloc(0);
274 if (!req)
275 req = get_reserved_req(fc, file);
276
277 fuse_req_init_context(req);
278 __set_bit(FR_WAITING, &req->flags);
279 __clear_bit(FR_BACKGROUND, &req->flags);
280 return req;
281 }
282
283 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
284 {
285 if (atomic_dec_and_test(&req->count)) {
286 if (test_bit(FR_BACKGROUND, &req->flags)) {
287 /*
288 * We get here in the unlikely case that a background
289 * request was allocated but not sent
290 */
291 spin_lock(&fc->lock);
292 if (!fc->blocked)
293 wake_up(&fc->blocked_waitq);
294 spin_unlock(&fc->lock);
295 }
296
297 if (test_bit(FR_WAITING, &req->flags)) {
298 __clear_bit(FR_WAITING, &req->flags);
299 atomic_dec(&fc->num_waiting);
300 }
301
302 if (req->stolen_file)
303 put_reserved_req(fc, req);
304 else
305 fuse_request_free(req);
306 }
307 }
308 EXPORT_SYMBOL_GPL(fuse_put_request);
309
310 static unsigned len_args(unsigned numargs, struct fuse_arg *args)
311 {
312 unsigned nbytes = 0;
313 unsigned i;
314
315 for (i = 0; i < numargs; i++)
316 nbytes += args[i].size;
317
318 return nbytes;
319 }
320
321 static u64 fuse_get_unique(struct fuse_iqueue *fiq)
322 {
323 return ++fiq->reqctr;
324 }
325
326 static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
327 {
328 req->in.h.len = sizeof(struct fuse_in_header) +
329 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
330 list_add_tail(&req->list, &fiq->pending);
331 wake_up_locked(&fiq->waitq);
332 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
333 }
334
335 void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
336 u64 nodeid, u64 nlookup)
337 {
338 struct fuse_iqueue *fiq = &fc->iq;
339
340 forget->forget_one.nodeid = nodeid;
341 forget->forget_one.nlookup = nlookup;
342
343 spin_lock(&fc->lock);
344 spin_lock(&fiq->waitq.lock);
345 if (fiq->connected) {
346 fiq->forget_list_tail->next = forget;
347 fiq->forget_list_tail = forget;
348 wake_up_locked(&fiq->waitq);
349 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
350 } else {
351 kfree(forget);
352 }
353 spin_unlock(&fiq->waitq.lock);
354 spin_unlock(&fc->lock);
355 }
356
357 static void flush_bg_queue(struct fuse_conn *fc)
358 {
359 while (fc->active_background < fc->max_background &&
360 !list_empty(&fc->bg_queue)) {
361 struct fuse_req *req;
362 struct fuse_iqueue *fiq = &fc->iq;
363
364 req = list_entry(fc->bg_queue.next, struct fuse_req, list);
365 list_del(&req->list);
366 fc->active_background++;
367 spin_lock(&fiq->waitq.lock);
368 req->in.h.unique = fuse_get_unique(fiq);
369 queue_request(fiq, req);
370 spin_unlock(&fiq->waitq.lock);
371 }
372 }
373
374 /*
375 * This function is called when a request is finished. Either a reply
376 * has arrived or it was aborted (and not yet sent) or some error
377 * occurred during communication with userspace, or the device file
378 * was closed. The requester thread is woken up (if still waiting),
379 * the 'end' callback is called if given, else the reference to the
380 * request is released
381 *
382 * Called with fc->lock, unlocks it
383 */
384 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
385 __releases(fc->lock)
386 {
387 struct fuse_iqueue *fiq = &fc->iq;
388 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
389 req->end = NULL;
390 list_del_init(&req->list);
391 spin_lock(&fiq->waitq.lock);
392 list_del_init(&req->intr_entry);
393 spin_unlock(&fiq->waitq.lock);
394 WARN_ON(test_bit(FR_PENDING, &req->flags));
395 WARN_ON(test_bit(FR_SENT, &req->flags));
396 smp_wmb();
397 set_bit(FR_FINISHED, &req->flags);
398 if (test_bit(FR_BACKGROUND, &req->flags)) {
399 clear_bit(FR_BACKGROUND, &req->flags);
400 if (fc->num_background == fc->max_background)
401 fc->blocked = 0;
402
403 /* Wake up next waiter, if any */
404 if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
405 wake_up(&fc->blocked_waitq);
406
407 if (fc->num_background == fc->congestion_threshold &&
408 fc->connected && fc->bdi_initialized) {
409 clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
410 clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
411 }
412 fc->num_background--;
413 fc->active_background--;
414 flush_bg_queue(fc);
415 }
416 spin_unlock(&fc->lock);
417 wake_up(&req->waitq);
418 if (end)
419 end(fc, req);
420 fuse_put_request(fc, req);
421 }
422
423 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
424 {
425 spin_lock(&fiq->waitq.lock);
426 if (list_empty(&req->intr_entry)) {
427 list_add_tail(&req->intr_entry, &fiq->interrupts);
428 wake_up_locked(&fiq->waitq);
429 }
430 spin_unlock(&fiq->waitq.lock);
431 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
432 }
433
434 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
435 {
436 struct fuse_iqueue *fiq = &fc->iq;
437 int err;
438
439 if (!fc->no_interrupt) {
440 /* Any signal may interrupt this */
441 err = wait_event_interruptible(req->waitq,
442 test_bit(FR_FINISHED, &req->flags));
443 if (!err)
444 return;
445
446 spin_lock(&fc->lock);
447 set_bit(FR_INTERRUPTED, &req->flags);
448 /* matches barrier in fuse_dev_do_read() */
449 smp_mb__after_atomic();
450 if (test_bit(FR_SENT, &req->flags))
451 queue_interrupt(fiq, req);
452 spin_unlock(&fc->lock);
453 }
454
455 if (!test_bit(FR_FORCE, &req->flags)) {
456 sigset_t oldset;
457
458 /* Only fatal signals may interrupt this */
459 block_sigs(&oldset);
460 err = wait_event_interruptible(req->waitq,
461 test_bit(FR_FINISHED, &req->flags));
462 restore_sigs(&oldset);
463
464 if (!err)
465 return;
466
467 spin_lock(&fc->lock);
468 spin_lock(&fiq->waitq.lock);
469 /* Request is not yet in userspace, bail out */
470 if (test_bit(FR_PENDING, &req->flags)) {
471 list_del(&req->list);
472 spin_unlock(&fiq->waitq.lock);
473 spin_unlock(&fc->lock);
474 __fuse_put_request(req);
475 req->out.h.error = -EINTR;
476 return;
477 }
478 spin_unlock(&fiq->waitq.lock);
479 spin_unlock(&fc->lock);
480 }
481
482 /*
483 * Either request is already in userspace, or it was forced.
484 * Wait it out.
485 */
486 wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
487 }
488
489 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
490 {
491 struct fuse_iqueue *fiq = &fc->iq;
492
493 BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
494 spin_lock(&fc->lock);
495 spin_lock(&fiq->waitq.lock);
496 if (!fiq->connected) {
497 spin_unlock(&fiq->waitq.lock);
498 spin_unlock(&fc->lock);
499 req->out.h.error = -ENOTCONN;
500 } else {
501 req->in.h.unique = fuse_get_unique(fiq);
502 queue_request(fiq, req);
503 /* acquire extra reference, since request is still needed
504 after request_end() */
505 __fuse_get_request(req);
506 spin_unlock(&fiq->waitq.lock);
507 spin_unlock(&fc->lock);
508
509 request_wait_answer(fc, req);
510 /* Pairs with smp_wmb() in request_end() */
511 smp_rmb();
512 }
513 }
514
515 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
516 {
517 __set_bit(FR_ISREPLY, &req->flags);
518 if (!test_bit(FR_WAITING, &req->flags)) {
519 __set_bit(FR_WAITING, &req->flags);
520 atomic_inc(&fc->num_waiting);
521 }
522 __fuse_request_send(fc, req);
523 }
524 EXPORT_SYMBOL_GPL(fuse_request_send);
525
526 static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
527 {
528 if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
529 args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
530
531 if (fc->minor < 9) {
532 switch (args->in.h.opcode) {
533 case FUSE_LOOKUP:
534 case FUSE_CREATE:
535 case FUSE_MKNOD:
536 case FUSE_MKDIR:
537 case FUSE_SYMLINK:
538 case FUSE_LINK:
539 args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
540 break;
541 case FUSE_GETATTR:
542 case FUSE_SETATTR:
543 args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
544 break;
545 }
546 }
547 if (fc->minor < 12) {
548 switch (args->in.h.opcode) {
549 case FUSE_CREATE:
550 args->in.args[0].size = sizeof(struct fuse_open_in);
551 break;
552 case FUSE_MKNOD:
553 args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
554 break;
555 }
556 }
557 }
558
559 ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
560 {
561 struct fuse_req *req;
562 ssize_t ret;
563
564 req = fuse_get_req(fc, 0);
565 if (IS_ERR(req))
566 return PTR_ERR(req);
567
568 /* Needs to be done after fuse_get_req() so that fc->minor is valid */
569 fuse_adjust_compat(fc, args);
570
571 req->in.h.opcode = args->in.h.opcode;
572 req->in.h.nodeid = args->in.h.nodeid;
573 req->in.numargs = args->in.numargs;
574 memcpy(req->in.args, args->in.args,
575 args->in.numargs * sizeof(struct fuse_in_arg));
576 req->out.argvar = args->out.argvar;
577 req->out.numargs = args->out.numargs;
578 memcpy(req->out.args, args->out.args,
579 args->out.numargs * sizeof(struct fuse_arg));
580 fuse_request_send(fc, req);
581 ret = req->out.h.error;
582 if (!ret && args->out.argvar) {
583 BUG_ON(args->out.numargs != 1);
584 ret = req->out.args[0].size;
585 }
586 fuse_put_request(fc, req);
587
588 return ret;
589 }
590
591 /*
592 * Called under fc->lock
593 *
594 * fc->connected must have been checked previously
595 */
596 void fuse_request_send_background_locked(struct fuse_conn *fc,
597 struct fuse_req *req)
598 {
599 BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
600 if (!test_bit(FR_WAITING, &req->flags)) {
601 __set_bit(FR_WAITING, &req->flags);
602 atomic_inc(&fc->num_waiting);
603 }
604 __set_bit(FR_ISREPLY, &req->flags);
605 fc->num_background++;
606 if (fc->num_background == fc->max_background)
607 fc->blocked = 1;
608 if (fc->num_background == fc->congestion_threshold &&
609 fc->bdi_initialized) {
610 set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
611 set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
612 }
613 list_add_tail(&req->list, &fc->bg_queue);
614 flush_bg_queue(fc);
615 }
616
617 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
618 {
619 BUG_ON(!req->end);
620 spin_lock(&fc->lock);
621 if (fc->connected) {
622 fuse_request_send_background_locked(fc, req);
623 spin_unlock(&fc->lock);
624 } else {
625 spin_unlock(&fc->lock);
626 req->out.h.error = -ENOTCONN;
627 req->end(fc, req);
628 fuse_put_request(fc, req);
629 }
630 }
631 EXPORT_SYMBOL_GPL(fuse_request_send_background);
632
633 static int fuse_request_send_notify_reply(struct fuse_conn *fc,
634 struct fuse_req *req, u64 unique)
635 {
636 int err = -ENODEV;
637 struct fuse_iqueue *fiq = &fc->iq;
638
639 __clear_bit(FR_ISREPLY, &req->flags);
640 req->in.h.unique = unique;
641 spin_lock(&fc->lock);
642 spin_lock(&fiq->waitq.lock);
643 if (fiq->connected) {
644 queue_request(fiq, req);
645 err = 0;
646 }
647 spin_unlock(&fiq->waitq.lock);
648 spin_unlock(&fc->lock);
649
650 return err;
651 }
652
653 void fuse_force_forget(struct file *file, u64 nodeid)
654 {
655 struct inode *inode = file_inode(file);
656 struct fuse_conn *fc = get_fuse_conn(inode);
657 struct fuse_req *req;
658 struct fuse_forget_in inarg;
659
660 memset(&inarg, 0, sizeof(inarg));
661 inarg.nlookup = 1;
662 req = fuse_get_req_nofail_nopages(fc, file);
663 req->in.h.opcode = FUSE_FORGET;
664 req->in.h.nodeid = nodeid;
665 req->in.numargs = 1;
666 req->in.args[0].size = sizeof(inarg);
667 req->in.args[0].value = &inarg;
668 __clear_bit(FR_ISREPLY, &req->flags);
669 __fuse_request_send(fc, req);
670 /* ignore errors */
671 fuse_put_request(fc, req);
672 }
673
674 /*
675 * Lock the request. Up to the next unlock_request() there mustn't be
676 * anything that could cause a page-fault. If the request was already
677 * aborted bail out.
678 */
679 static int lock_request(struct fuse_req *req)
680 {
681 int err = 0;
682 if (req) {
683 spin_lock(&req->waitq.lock);
684 if (test_bit(FR_ABORTED, &req->flags))
685 err = -ENOENT;
686 else
687 set_bit(FR_LOCKED, &req->flags);
688 spin_unlock(&req->waitq.lock);
689 }
690 return err;
691 }
692
693 /*
694 * Unlock request. If it was aborted while locked, caller is responsible
695 * for unlocking and ending the request.
696 */
697 static int unlock_request(struct fuse_req *req)
698 {
699 int err = 0;
700 if (req) {
701 spin_lock(&req->waitq.lock);
702 if (test_bit(FR_ABORTED, &req->flags))
703 err = -ENOENT;
704 else
705 clear_bit(FR_LOCKED, &req->flags);
706 spin_unlock(&req->waitq.lock);
707 }
708 return err;
709 }
710
711 struct fuse_copy_state {
712 int write;
713 struct fuse_req *req;
714 struct iov_iter *iter;
715 struct pipe_buffer *pipebufs;
716 struct pipe_buffer *currbuf;
717 struct pipe_inode_info *pipe;
718 unsigned long nr_segs;
719 struct page *pg;
720 unsigned len;
721 unsigned offset;
722 unsigned move_pages:1;
723 };
724
725 static void fuse_copy_init(struct fuse_copy_state *cs, int write,
726 struct iov_iter *iter)
727 {
728 memset(cs, 0, sizeof(*cs));
729 cs->write = write;
730 cs->iter = iter;
731 }
732
733 /* Unmap and put previous page of userspace buffer */
734 static void fuse_copy_finish(struct fuse_copy_state *cs)
735 {
736 if (cs->currbuf) {
737 struct pipe_buffer *buf = cs->currbuf;
738
739 if (cs->write)
740 buf->len = PAGE_SIZE - cs->len;
741 cs->currbuf = NULL;
742 } else if (cs->pg) {
743 if (cs->write) {
744 flush_dcache_page(cs->pg);
745 set_page_dirty_lock(cs->pg);
746 }
747 put_page(cs->pg);
748 }
749 cs->pg = NULL;
750 }
751
752 /*
753 * Get another pagefull of userspace buffer, and map it to kernel
754 * address space, and lock request
755 */
756 static int fuse_copy_fill(struct fuse_copy_state *cs)
757 {
758 struct page *page;
759 int err;
760
761 err = unlock_request(cs->req);
762 if (err)
763 return err;
764
765 fuse_copy_finish(cs);
766 if (cs->pipebufs) {
767 struct pipe_buffer *buf = cs->pipebufs;
768
769 if (!cs->write) {
770 err = buf->ops->confirm(cs->pipe, buf);
771 if (err)
772 return err;
773
774 BUG_ON(!cs->nr_segs);
775 cs->currbuf = buf;
776 cs->pg = buf->page;
777 cs->offset = buf->offset;
778 cs->len = buf->len;
779 cs->pipebufs++;
780 cs->nr_segs--;
781 } else {
782 if (cs->nr_segs == cs->pipe->buffers)
783 return -EIO;
784
785 page = alloc_page(GFP_HIGHUSER);
786 if (!page)
787 return -ENOMEM;
788
789 buf->page = page;
790 buf->offset = 0;
791 buf->len = 0;
792
793 cs->currbuf = buf;
794 cs->pg = page;
795 cs->offset = 0;
796 cs->len = PAGE_SIZE;
797 cs->pipebufs++;
798 cs->nr_segs++;
799 }
800 } else {
801 size_t off;
802 err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
803 if (err < 0)
804 return err;
805 BUG_ON(!err);
806 cs->len = err;
807 cs->offset = off;
808 cs->pg = page;
810 iov_iter_advance(cs->iter, err);
811 }
812
813 return lock_request(cs->req);
814 }
815
816 /* Do as much copy to/from userspace buffer as we can */
817 static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
818 {
819 unsigned ncpy = min(*size, cs->len);
820 if (val) {
821 void *pgaddr = kmap_atomic(cs->pg);
822 void *buf = pgaddr + cs->offset;
823
824 if (cs->write)
825 memcpy(buf, *val, ncpy);
826 else
827 memcpy(*val, buf, ncpy);
828
829 kunmap_atomic(pgaddr);
830 *val += ncpy;
831 }
832 *size -= ncpy;
833 cs->len -= ncpy;
834 cs->offset += ncpy;
835 return ncpy;
836 }
837
838 static int fuse_check_page(struct page *page)
839 {
840 if (page_mapcount(page) ||
841 page->mapping != NULL ||
842 page_count(page) != 1 ||
843 (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
844 ~(1 << PG_locked |
845 1 << PG_referenced |
846 1 << PG_uptodate |
847 1 << PG_lru |
848 1 << PG_active |
849 1 << PG_reclaim))) {
850 printk(KERN_WARNING "fuse: trying to steal weird page\n");
851 printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
852 return 1;
853 }
854 return 0;
855 }
856
857 static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
858 {
859 int err;
860 struct page *oldpage = *pagep;
861 struct page *newpage;
862 struct pipe_buffer *buf = cs->pipebufs;
863
864 err = unlock_request(cs->req);
865 if (err)
866 return err;
867
868 fuse_copy_finish(cs);
869
870 err = buf->ops->confirm(cs->pipe, buf);
871 if (err)
872 return err;
873
874 BUG_ON(!cs->nr_segs);
875 cs->currbuf = buf;
876 cs->len = buf->len;
877 cs->pipebufs++;
878 cs->nr_segs--;
879
880 if (cs->len != PAGE_SIZE)
881 goto out_fallback;
882
883 if (buf->ops->steal(cs->pipe, buf) != 0)
884 goto out_fallback;
885
886 newpage = buf->page;
887
888 if (!PageUptodate(newpage))
889 SetPageUptodate(newpage);
890
891 ClearPageMappedToDisk(newpage);
892
893 if (fuse_check_page(newpage) != 0)
894 goto out_fallback_unlock;
895
896 /*
897 * This is a new and locked page, it shouldn't be mapped or
898 * have any special flags on it
899 */
900 if (WARN_ON(page_mapped(oldpage)))
901 goto out_fallback_unlock;
902 if (WARN_ON(page_has_private(oldpage)))
903 goto out_fallback_unlock;
904 if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
905 goto out_fallback_unlock;
906 if (WARN_ON(PageMlocked(oldpage)))
907 goto out_fallback_unlock;
908
909 err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
910 if (err) {
911 unlock_page(newpage);
912 return err;
913 }
914
915 page_cache_get(newpage);
916
917 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
918 lru_cache_add_file(newpage);
919
920 err = 0;
921 spin_lock(&cs->req->waitq.lock);
922 if (test_bit(FR_ABORTED, &cs->req->flags))
923 err = -ENOENT;
924 else
925 *pagep = newpage;
926 spin_unlock(&cs->req->waitq.lock);
927
928 if (err) {
929 unlock_page(newpage);
930 page_cache_release(newpage);
931 return err;
932 }
933
934 unlock_page(oldpage);
935 page_cache_release(oldpage);
936 cs->len = 0;
937
938 return 0;
939
940 out_fallback_unlock:
941 unlock_page(newpage);
942 out_fallback:
943 cs->pg = buf->page;
944 cs->offset = buf->offset;
945
946 err = lock_request(cs->req);
947 if (err)
948 return err;
949
950 return 1;
951 }
952
953 static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
954 unsigned offset, unsigned count)
955 {
956 struct pipe_buffer *buf;
957 int err;
958
959 if (cs->nr_segs == cs->pipe->buffers)
960 return -EIO;
961
962 err = unlock_request(cs->req);
963 if (err)
964 return err;
965
966 fuse_copy_finish(cs);
967
968 buf = cs->pipebufs;
969 page_cache_get(page);
970 buf->page = page;
971 buf->offset = offset;
972 buf->len = count;
973
974 cs->pipebufs++;
975 cs->nr_segs++;
976 cs->len = 0;
977
978 return 0;
979 }
980
981 /*
982 * Copy a page in the request to/from the userspace buffer. Must be
983 * done atomically
984 */
985 static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
986 unsigned offset, unsigned count, int zeroing)
987 {
988 int err;
989 struct page *page = *pagep;
990
991 if (page && zeroing && count < PAGE_SIZE)
992 clear_highpage(page);
993
994 while (count) {
995 if (cs->write && cs->pipebufs && page) {
996 return fuse_ref_page(cs, page, offset, count);
997 } else if (!cs->len) {
998 if (cs->move_pages && page &&
999 offset == 0 && count == PAGE_SIZE) {
1000 err = fuse_try_move_page(cs, pagep);
1001 if (err <= 0)
1002 return err;
1003 } else {
1004 err = fuse_copy_fill(cs);
1005 if (err)
1006 return err;
1007 }
1008 }
1009 if (page) {
1010 void *mapaddr = kmap_atomic(page);
1011 void *buf = mapaddr + offset;
1012 offset += fuse_copy_do(cs, &buf, &count);
1013 kunmap_atomic(mapaddr);
1014 } else
1015 offset += fuse_copy_do(cs, NULL, &count);
1016 }
1017 if (page && !cs->write)
1018 flush_dcache_page(page);
1019 return 0;
1020 }
1021
1022 /* Copy pages in the request to/from userspace buffer */
1023 static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
1024 int zeroing)
1025 {
1026 unsigned i;
1027 struct fuse_req *req = cs->req;
1028
1029 for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
1030 int err;
1031 unsigned offset = req->page_descs[i].offset;
1032 unsigned count = min(nbytes, req->page_descs[i].length);
1033
1034 err = fuse_copy_page(cs, &req->pages[i], offset, count,
1035 zeroing);
1036 if (err)
1037 return err;
1038
1039 nbytes -= count;
1040 }
1041 return 0;
1042 }
1043
1044 /* Copy a single argument in the request to/from userspace buffer */
1045 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
1046 {
1047 while (size) {
1048 if (!cs->len) {
1049 int err = fuse_copy_fill(cs);
1050 if (err)
1051 return err;
1052 }
1053 fuse_copy_do(cs, &val, &size);
1054 }
1055 return 0;
1056 }
1057
1058 /* Copy request arguments to/from userspace buffer */
1059 static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
1060 unsigned argpages, struct fuse_arg *args,
1061 int zeroing)
1062 {
1063 int err = 0;
1064 unsigned i;
1065
1066 for (i = 0; !err && i < numargs; i++) {
1067 struct fuse_arg *arg = &args[i];
1068 if (i == numargs - 1 && argpages)
1069 err = fuse_copy_pages(cs, arg->size, zeroing);
1070 else
1071 err = fuse_copy_one(cs, arg->value, arg->size);
1072 }
1073 return err;
1074 }
1075
1076 static int forget_pending(struct fuse_iqueue *fiq)
1077 {
1078 return fiq->forget_list_head.next != NULL;
1079 }
1080
1081 static int request_pending(struct fuse_iqueue *fiq)
1082 {
1083 return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
1084 forget_pending(fiq);
1085 }
1086
1087 /* Wait until a request is available on the pending list */
1088 static void request_wait(struct fuse_conn *fc)
1089 __releases(fc->iq.waitq.lock)
1090 __releases(fc->lock)
1091 __acquires(fc->lock)
1092 __acquires(fc->iq.waitq.lock)
1093 {
1094 struct fuse_iqueue *fiq = &fc->iq;
1095 DECLARE_WAITQUEUE(wait, current);
1096
1097 add_wait_queue_exclusive(&fiq->waitq, &wait);
1098 while (fiq->connected && !request_pending(fiq)) {
1099 set_current_state(TASK_INTERRUPTIBLE);
1100 if (signal_pending(current))
1101 break;
1102
1103 spin_unlock(&fiq->waitq.lock);
1104 spin_unlock(&fc->lock);
1105 schedule();
1106 spin_lock(&fc->lock);
1107 spin_lock(&fiq->waitq.lock);
1108 }
1109 set_current_state(TASK_RUNNING);
1110 remove_wait_queue(&fiq->waitq, &wait);
1111 }
1112
1113 /*
1114 * Transfer an interrupt request to userspace
1115 *
1116 * Unlike other requests this is assembled on demand, without a need
1117 * to allocate a separate fuse_req structure.
1118 *
1119 * Called with fc->lock held, releases it
1120 */
1121 static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
1122 size_t nbytes, struct fuse_req *req)
1123 __releases(fc->iq.waitq.lock)
1124 __releases(fc->lock)
1125 {
1126 struct fuse_iqueue *fiq = &fc->iq;
1127 struct fuse_in_header ih;
1128 struct fuse_interrupt_in arg;
1129 unsigned reqsize = sizeof(ih) + sizeof(arg);
1130 int err;
1131
1132 list_del_init(&req->intr_entry);
1133 req->intr_unique = fuse_get_unique(fiq);
1134 memset(&ih, 0, sizeof(ih));
1135 memset(&arg, 0, sizeof(arg));
1136 ih.len = reqsize;
1137 ih.opcode = FUSE_INTERRUPT;
1138 ih.unique = req->intr_unique;
1139 arg.unique = req->in.h.unique;
1140
1141 spin_unlock(&fiq->waitq.lock);
1142 spin_unlock(&fc->lock);
1143 if (nbytes < reqsize)
1144 return -EINVAL;
1145
1146 err = fuse_copy_one(cs, &ih, sizeof(ih));
1147 if (!err)
1148 err = fuse_copy_one(cs, &arg, sizeof(arg));
1149 fuse_copy_finish(cs);
1150
1151 return err ? err : reqsize;
1152 }
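
As an illustration of the protocol assembled above, here is a hedged sketch of how a daemon might consume the FUSE_INTERRUPT message; request_is_known() and cancel_request() are hypothetical daemon bookkeeping, not a real API. The reply must be a bare header, and -EAGAIN asks the kernel to requeue the interrupt (see the intr_unique branch in fuse_dev_do_write() further down).

    #include <errno.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <linux/fuse.h>

    /* hypothetical daemon-side bookkeeping, not a real API */
    extern int request_is_known(uint64_t unique);
    extern void cancel_request(uint64_t unique);

    static void handle_interrupt(int devfd, const struct fuse_in_header *in,
                                 const struct fuse_interrupt_in *arg)
    {
        if (!request_is_known(arg->unique)) {
            /* Original request not seen yet: reply -EAGAIN so the
             * kernel queues the interrupt again.  oh.unique is the
             * interrupt's own ID (ih.unique above), while arg->unique
             * names the request being interrupted. */
            struct fuse_out_header oh = {
                .len    = sizeof(oh),
                .error  = -EAGAIN,
                .unique = in->unique,
            };
            write(devfd, &oh, sizeof(oh));
            return;
        }
        cancel_request(arg->unique);  /* hypothetical cancellation */
    }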
1153
1154 static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
1155 unsigned max,
1156 unsigned *countp)
1157 {
1158 struct fuse_forget_link *head = fiq->forget_list_head.next;
1159 struct fuse_forget_link **newhead = &head;
1160 unsigned count;
1161
1162 for (count = 0; *newhead != NULL && count < max; count++)
1163 newhead = &(*newhead)->next;
1164
1165 fiq->forget_list_head.next = *newhead;
1166 *newhead = NULL;
1167 if (fiq->forget_list_head.next == NULL)
1168 fiq->forget_list_tail = &fiq->forget_list_head;
1169
1170 if (countp != NULL)
1171 *countp = count;
1172
1173 return head;
1174 }
1175
1176 static int fuse_read_single_forget(struct fuse_conn *fc,
1177 struct fuse_copy_state *cs,
1178 size_t nbytes)
1179 __releases(fc->iq.waitq.lock)
1180 __releases(fc->lock)
1181 {
1182 int err;
1183 struct fuse_iqueue *fiq = &fc->iq;
1184 struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
1185 struct fuse_forget_in arg = {
1186 .nlookup = forget->forget_one.nlookup,
1187 };
1188 struct fuse_in_header ih = {
1189 .opcode = FUSE_FORGET,
1190 .nodeid = forget->forget_one.nodeid,
1191 .unique = fuse_get_unique(fiq),
1192 .len = sizeof(ih) + sizeof(arg),
1193 };
1194
1195 spin_unlock(&fiq->waitq.lock);
1196 spin_unlock(&fc->lock);
1197 kfree(forget);
1198 if (nbytes < ih.len)
1199 return -EINVAL;
1200
1201 err = fuse_copy_one(cs, &ih, sizeof(ih));
1202 if (!err)
1203 err = fuse_copy_one(cs, &arg, sizeof(arg));
1204 fuse_copy_finish(cs);
1205
1206 if (err)
1207 return err;
1208
1209 return ih.len;
1210 }
1211
1212 static int fuse_read_batch_forget(struct fuse_conn *fc,
1213 struct fuse_copy_state *cs, size_t nbytes)
1214 __releases(fc->iq.waitq.lock)
1215 __releases(fc->lock)
1216 {
1217 int err;
1218 unsigned max_forgets;
1219 unsigned count;
1220 struct fuse_forget_link *head;
1221 struct fuse_iqueue *fiq = &fc->iq;
1222 struct fuse_batch_forget_in arg = { .count = 0 };
1223 struct fuse_in_header ih = {
1224 .opcode = FUSE_BATCH_FORGET,
1225 .unique = fuse_get_unique(fiq),
1226 .len = sizeof(ih) + sizeof(arg),
1227 };
1228
1229 if (nbytes < ih.len) {
1230 spin_unlock(&fiq->waitq.lock);
1231 spin_unlock(&fc->lock);
1232 return -EINVAL;
1233 }
1234
1235 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1236 head = dequeue_forget(fiq, max_forgets, &count);
1237 spin_unlock(&fiq->waitq.lock);
1238 spin_unlock(&fc->lock);
1239
1240 arg.count = count;
1241 ih.len += count * sizeof(struct fuse_forget_one);
1242 err = fuse_copy_one(cs, &ih, sizeof(ih));
1243 if (!err)
1244 err = fuse_copy_one(cs, &arg, sizeof(arg));
1245
1246 while (head) {
1247 struct fuse_forget_link *forget = head;
1248
1249 if (!err) {
1250 err = fuse_copy_one(cs, &forget->forget_one,
1251 sizeof(forget->forget_one));
1252 }
1253 head = forget->next;
1254 kfree(forget);
1255 }
1256
1257 fuse_copy_finish(cs);
1258
1259 if (err)
1260 return err;
1261
1262 return ih.len;
1263 }
1264
1265 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
1266 size_t nbytes)
1267 __releases(fc->iq.waitq.lock)
1268 __releases(fc->lock)
1269 {
1270 struct fuse_iqueue *fiq = &fc->iq;
1271
1272 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
1273 return fuse_read_single_forget(fc, cs, nbytes);
1274 else
1275 return fuse_read_batch_forget(fc, cs, nbytes);
1276 }
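
A hedged sketch of the matching daemon-side parsing; drop_nlookup() is a hypothetical helper standing in for whatever inode table the daemon keeps. Note that FUSE_BATCH_FORGET is only generated when the daemon advertised minor >= 16 (see the fc->minor check above), and that forgets expect no reply.

    #include <stdint.h>
    #include <linux/fuse.h>

    /* hypothetical: drop nlookup references from the daemon's inode table */
    extern void drop_nlookup(uint64_t nodeid, uint64_t nlookup);

    /* 'payload' points just past the fuse_in_header of a
     * FUSE_BATCH_FORGET message read from the device */
    static void handle_batch_forget(const void *payload)
    {
        const struct fuse_batch_forget_in *arg = payload;
        const struct fuse_forget_one *one =
            (const struct fuse_forget_one *)(arg + 1);
        uint32_t i;

        for (i = 0; i < arg->count; i++)
            drop_nlookup(one[i].nodeid, one[i].nlookup);
        /* forgets carry no reply: do not write() anything back */
    }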
1277
1278 /*
1279 * Read a single request into the userspace filesystem's buffer. This
1280 * function waits until a request is available, then removes it from
1281 * the pending list and copies request data to userspace buffer. If
1282 * no reply is needed (FORGET) or request has been aborted or there
1283 * was an error during the copying then it's finished by calling
1284 * request_end(). Otherwise add it to the processing list, and set
1285 * the 'sent' flag.
1286 */
1287 static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
1288 struct fuse_copy_state *cs, size_t nbytes)
1289 {
1290 int err;
1291 struct fuse_iqueue *fiq = &fc->iq;
1292 struct fuse_req *req;
1293 struct fuse_in *in;
1294 unsigned reqsize;
1295
1296 restart:
1297 spin_lock(&fc->lock);
1298 spin_lock(&fiq->waitq.lock);
1299 err = -EAGAIN;
1300 if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
1301 !request_pending(fiq))
1302 goto err_unlock;
1303
1304 request_wait(fc);
1305 err = -ENODEV;
1306 if (!fiq->connected)
1307 goto err_unlock;
1308 err = -ERESTARTSYS;
1309 if (!request_pending(fiq))
1310 goto err_unlock;
1311
1312 if (!list_empty(&fiq->interrupts)) {
1313 req = list_entry(fiq->interrupts.next, struct fuse_req,
1314 intr_entry);
1315 return fuse_read_interrupt(fc, cs, nbytes, req);
1316 }
1317
1318 if (forget_pending(fiq)) {
1319 if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
1320 return fuse_read_forget(fc, cs, nbytes);
1321
1322 if (fiq->forget_batch <= -8)
1323 fiq->forget_batch = 16;
1324 }
1325
1326 req = list_entry(fiq->pending.next, struct fuse_req, list);
1327 clear_bit(FR_PENDING, &req->flags);
1328 list_del_init(&req->list);
1329 spin_unlock(&fiq->waitq.lock);
1330
1331 list_add(&req->list, &fc->io);
1332
1333 in = &req->in;
1334 reqsize = in->h.len;
1335 /* If request is too large, reply with an error and restart the read */
1336 if (nbytes < reqsize) {
1337 req->out.h.error = -EIO;
1338 /* SETXATTR is special, since its data may be too large */
1339 if (in->h.opcode == FUSE_SETXATTR)
1340 req->out.h.error = -E2BIG;
1341 request_end(fc, req);
1342 goto restart;
1343 }
1344 spin_unlock(&fc->lock);
1345 cs->req = req;
1346 err = fuse_copy_one(cs, &in->h, sizeof(in->h));
1347 if (!err)
1348 err = fuse_copy_args(cs, in->numargs, in->argpages,
1349 (struct fuse_arg *) in->args, 0);
1350 fuse_copy_finish(cs);
1351 spin_lock(&fc->lock);
1352 clear_bit(FR_LOCKED, &req->flags);
1353 if (!fc->connected) {
1354 request_end(fc, req);
1355 return -ENODEV;
1356 }
1357 if (err) {
1358 req->out.h.error = -EIO;
1359 request_end(fc, req);
1360 return err;
1361 }
1362 if (!test_bit(FR_ISREPLY, &req->flags)) {
1363 request_end(fc, req);
1364 } else {
1365 list_move_tail(&req->list, &fc->processing);
1366 set_bit(FR_SENT, &req->flags);
1367 /* matches barrier in request_wait_answer() */
1368 smp_mb__after_atomic();
1369 if (test_bit(FR_INTERRUPTED, &req->flags))
1370 queue_interrupt(fiq, req);
1371 spin_unlock(&fc->lock);
1372 }
1373 return reqsize;
1374
1375 err_unlock:
1376 spin_unlock(&fiq->waitq.lock);
1377 spin_unlock(&fc->lock);
1378 return err;
1379 }
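
For context, a rough sketch of the userspace side of fuse_dev_do_read() follows; the names and buffer sizing are illustrative, not part of this file. One read() on the device returns exactly one message beginning with a struct fuse_in_header; if the buffer is smaller than a pending request, the kernel fails that request with -EIO (or -E2BIG for SETXATTR) and restarts, as at the 'restart' label above.

    #include <errno.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <linux/fuse.h>

    #define DAEMON_BUF_SIZE (1024 * 1024)  /* comfortably above FUSE_MIN_READ_BUFFER */

    static void daemon_read_loop(int devfd)
    {
        static char buf[DAEMON_BUF_SIZE];

        for (;;) {
            ssize_t n = read(devfd, buf, sizeof(buf));

            if (n < 0) {
                if (errno == EINTR || errno == EAGAIN)
                    continue;  /* signal, or O_NONBLOCK with empty queue */
                return;        /* e.g. ENODEV: connection aborted */
            }

            struct fuse_in_header *in = (struct fuse_in_header *)buf;

            printf("opcode=%u unique=%llu nodeid=%llu len=%u\n",
                   in->opcode, (unsigned long long)in->unique,
                   (unsigned long long)in->nodeid, in->len);
            /* ...dispatch on in->opcode and write() a reply... */
        }
    }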
1380
1381 static int fuse_dev_open(struct inode *inode, struct file *file)
1382 {
1383 /*
1384 * The fuse device's file's private_data is used to hold
1385 * the fuse_conn(ection) when it is mounted, and is used to
1386 * keep track of whether the file has been mounted already.
1387 */
1388 file->private_data = NULL;
1389 return 0;
1390 }
1391
1392 static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
1393 {
1394 struct fuse_copy_state cs;
1395 struct file *file = iocb->ki_filp;
1396 struct fuse_conn *fc = fuse_get_conn(file);
1397 if (!fc)
1398 return -EPERM;
1399
1400 if (!iter_is_iovec(to))
1401 return -EINVAL;
1402
1403 fuse_copy_init(&cs, 1, to);
1404
1405 return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to));
1406 }
1407
1408 static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1409 struct pipe_inode_info *pipe,
1410 size_t len, unsigned int flags)
1411 {
1412 int ret;
1413 int page_nr = 0;
1414 int do_wakeup = 0;
1415 struct pipe_buffer *bufs;
1416 struct fuse_copy_state cs;
1417 struct fuse_conn *fc = fuse_get_conn(in);
1418 if (!fc)
1419 return -EPERM;
1420
1421 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
1422 if (!bufs)
1423 return -ENOMEM;
1424
1425 fuse_copy_init(&cs, 1, NULL);
1426 cs.pipebufs = bufs;
1427 cs.pipe = pipe;
1428 ret = fuse_dev_do_read(fc, in, &cs, len);
1429 if (ret < 0)
1430 goto out;
1431
1432 ret = 0;
1433 pipe_lock(pipe);
1434
1435 if (!pipe->readers) {
1436 send_sig(SIGPIPE, current, 0);
1437 if (!ret)
1438 ret = -EPIPE;
1439 goto out_unlock;
1440 }
1441
1442 if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1443 ret = -EIO;
1444 goto out_unlock;
1445 }
1446
1447 while (page_nr < cs.nr_segs) {
1448 int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
1449 struct pipe_buffer *buf = pipe->bufs + newbuf;
1450
1451 buf->page = bufs[page_nr].page;
1452 buf->offset = bufs[page_nr].offset;
1453 buf->len = bufs[page_nr].len;
1454 /*
1455 * Need to be careful about this. Having buf->ops in module
1456 * code can Oops if the buffer persists after module unload.
1457 */
1458 buf->ops = &nosteal_pipe_buf_ops;
1459
1460 pipe->nrbufs++;
1461 page_nr++;
1462 ret += buf->len;
1463
1464 if (pipe->files)
1465 do_wakeup = 1;
1466 }
1467
1468 out_unlock:
1469 pipe_unlock(pipe);
1470
1471 if (do_wakeup) {
1472 smp_mb();
1473 if (waitqueue_active(&pipe->wait))
1474 wake_up_interruptible(&pipe->wait);
1475 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
1476 }
1477
1478 out:
1479 for (; page_nr < cs.nr_segs; page_nr++)
1480 page_cache_release(bufs[page_nr].page);
1481
1482 kfree(bufs);
1483 return ret;
1484 }
1485
1486 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1487 struct fuse_copy_state *cs)
1488 {
1489 struct fuse_notify_poll_wakeup_out outarg;
1490 int err = -EINVAL;
1491
1492 if (size != sizeof(outarg))
1493 goto err;
1494
1495 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1496 if (err)
1497 goto err;
1498
1499 fuse_copy_finish(cs);
1500 return fuse_notify_poll_wakeup(fc, &outarg);
1501
1502 err:
1503 fuse_copy_finish(cs);
1504 return err;
1505 }
1506
1507 static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1508 struct fuse_copy_state *cs)
1509 {
1510 struct fuse_notify_inval_inode_out outarg;
1511 int err = -EINVAL;
1512
1513 if (size != sizeof(outarg))
1514 goto err;
1515
1516 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1517 if (err)
1518 goto err;
1519 fuse_copy_finish(cs);
1520
1521 down_read(&fc->killsb);
1522 err = -ENOENT;
1523 if (fc->sb) {
1524 err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1525 outarg.off, outarg.len);
1526 }
1527 up_read(&fc->killsb);
1528 return err;
1529
1530 err:
1531 fuse_copy_finish(cs);
1532 return err;
1533 }
1534
1535 static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1536 struct fuse_copy_state *cs)
1537 {
1538 struct fuse_notify_inval_entry_out outarg;
1539 int err = -ENOMEM;
1540 char *buf;
1541 struct qstr name;
1542
1543 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1544 if (!buf)
1545 goto err;
1546
1547 err = -EINVAL;
1548 if (size < sizeof(outarg))
1549 goto err;
1550
1551 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1552 if (err)
1553 goto err;
1554
1555 err = -ENAMETOOLONG;
1556 if (outarg.namelen > FUSE_NAME_MAX)
1557 goto err;
1558
1559 err = -EINVAL;
1560 if (size != sizeof(outarg) + outarg.namelen + 1)
1561 goto err;
1562
1563 name.name = buf;
1564 name.len = outarg.namelen;
1565 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1566 if (err)
1567 goto err;
1568 fuse_copy_finish(cs);
1569 buf[outarg.namelen] = 0;
1570 name.hash = full_name_hash(name.name, name.len);
1571
1572 down_read(&fc->killsb);
1573 err = -ENOENT;
1574 if (fc->sb)
1575 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
1576 up_read(&fc->killsb);
1577 kfree(buf);
1578 return err;
1579
1580 err:
1581 kfree(buf);
1582 fuse_copy_finish(cs);
1583 return err;
1584 }
1585
1586 static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1587 struct fuse_copy_state *cs)
1588 {
1589 struct fuse_notify_delete_out outarg;
1590 int err = -ENOMEM;
1591 char *buf;
1592 struct qstr name;
1593
1594 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1595 if (!buf)
1596 goto err;
1597
1598 err = -EINVAL;
1599 if (size < sizeof(outarg))
1600 goto err;
1601
1602 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1603 if (err)
1604 goto err;
1605
1606 err = -ENAMETOOLONG;
1607 if (outarg.namelen > FUSE_NAME_MAX)
1608 goto err;
1609
1610 err = -EINVAL;
1611 if (size != sizeof(outarg) + outarg.namelen + 1)
1612 goto err;
1613
1614 name.name = buf;
1615 name.len = outarg.namelen;
1616 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1617 if (err)
1618 goto err;
1619 fuse_copy_finish(cs);
1620 buf[outarg.namelen] = 0;
1621 name.hash = full_name_hash(name.name, name.len);
1622
1623 down_read(&fc->killsb);
1624 err = -ENOENT;
1625 if (fc->sb)
1626 err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1627 outarg.child, &name);
1628 up_read(&fc->killsb);
1629 kfree(buf);
1630 return err;
1631
1632 err:
1633 kfree(buf);
1634 fuse_copy_finish(cs);
1635 return err;
1636 }
1637
1638 static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1639 struct fuse_copy_state *cs)
1640 {
1641 struct fuse_notify_store_out outarg;
1642 struct inode *inode;
1643 struct address_space *mapping;
1644 u64 nodeid;
1645 int err;
1646 pgoff_t index;
1647 unsigned int offset;
1648 unsigned int num;
1649 loff_t file_size;
1650 loff_t end;
1651
1652 err = -EINVAL;
1653 if (size < sizeof(outarg))
1654 goto out_finish;
1655
1656 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1657 if (err)
1658 goto out_finish;
1659
1660 err = -EINVAL;
1661 if (size - sizeof(outarg) != outarg.size)
1662 goto out_finish;
1663
1664 nodeid = outarg.nodeid;
1665
1666 down_read(&fc->killsb);
1667
1668 err = -ENOENT;
1669 if (!fc->sb)
1670 goto out_up_killsb;
1671
1672 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1673 if (!inode)
1674 goto out_up_killsb;
1675
1676 mapping = inode->i_mapping;
1677 index = outarg.offset >> PAGE_CACHE_SHIFT;
1678 offset = outarg.offset & ~PAGE_CACHE_MASK;
1679 file_size = i_size_read(inode);
1680 end = outarg.offset + outarg.size;
1681 if (end > file_size) {
1682 file_size = end;
1683 fuse_write_update_size(inode, file_size);
1684 }
1685
1686 num = outarg.size;
1687 while (num) {
1688 struct page *page;
1689 unsigned int this_num;
1690
1691 err = -ENOMEM;
1692 page = find_or_create_page(mapping, index,
1693 mapping_gfp_mask(mapping));
1694 if (!page)
1695 goto out_iput;
1696
1697 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1698 err = fuse_copy_page(cs, &page, offset, this_num, 0);
1699 if (!err && offset == 0 &&
1700 (this_num == PAGE_CACHE_SIZE || file_size == end))
1701 SetPageUptodate(page);
1702 unlock_page(page);
1703 page_cache_release(page);
1704
1705 if (err)
1706 goto out_iput;
1707
1708 num -= this_num;
1709 offset = 0;
1710 index++;
1711 }
1712
1713 err = 0;
1714
1715 out_iput:
1716 iput(inode);
1717 out_up_killsb:
1718 up_read(&fc->killsb);
1719 out_finish:
1720 fuse_copy_finish(cs);
1721 return err;
1722 }
1723
1724 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
1725 {
1726 release_pages(req->pages, req->num_pages, false);
1727 }
1728
1729 static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1730 struct fuse_notify_retrieve_out *outarg)
1731 {
1732 int err;
1733 struct address_space *mapping = inode->i_mapping;
1734 struct fuse_req *req;
1735 pgoff_t index;
1736 loff_t file_size;
1737 unsigned int num;
1738 unsigned int offset;
1739 size_t total_len = 0;
1740 int num_pages;
1741
1742 offset = outarg->offset & ~PAGE_CACHE_MASK;
1743 file_size = i_size_read(inode);
1744
1745 num = outarg->size;
1746 if (outarg->offset > file_size)
1747 num = 0;
1748 else if (outarg->offset + num > file_size)
1749 num = file_size - outarg->offset;
1750
1751 num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
1752 num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);
1753
1754 req = fuse_get_req(fc, num_pages);
1755 if (IS_ERR(req))
1756 return PTR_ERR(req);
1757
1758 req->in.h.opcode = FUSE_NOTIFY_REPLY;
1759 req->in.h.nodeid = outarg->nodeid;
1760 req->in.numargs = 2;
1761 req->in.argpages = 1;
1762 req->page_descs[0].offset = offset;
1763 req->end = fuse_retrieve_end;
1764
1765 index = outarg->offset >> PAGE_CACHE_SHIFT;
1766
1767 while (num && req->num_pages < num_pages) {
1768 struct page *page;
1769 unsigned int this_num;
1770
1771 page = find_get_page(mapping, index);
1772 if (!page)
1773 break;
1774
1775 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1776 req->pages[req->num_pages] = page;
1777 req->page_descs[req->num_pages].length = this_num;
1778 req->num_pages++;
1779
1780 offset = 0;
1781 num -= this_num;
1782 total_len += this_num;
1783 index++;
1784 }
1785 req->misc.retrieve_in.offset = outarg->offset;
1786 req->misc.retrieve_in.size = total_len;
1787 req->in.args[0].size = sizeof(req->misc.retrieve_in);
1788 req->in.args[0].value = &req->misc.retrieve_in;
1789 req->in.args[1].size = total_len;
1790
1791 err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1792 if (err)
1793 fuse_retrieve_end(fc, req);
1794
1795 return err;
1796 }
1797
1798 static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1799 struct fuse_copy_state *cs)
1800 {
1801 struct fuse_notify_retrieve_out outarg;
1802 struct inode *inode;
1803 int err;
1804
1805 err = -EINVAL;
1806 if (size != sizeof(outarg))
1807 goto copy_finish;
1808
1809 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1810 if (err)
1811 goto copy_finish;
1812
1813 fuse_copy_finish(cs);
1814
1815 down_read(&fc->killsb);
1816 err = -ENOENT;
1817 if (fc->sb) {
1818 u64 nodeid = outarg.nodeid;
1819
1820 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1821 if (inode) {
1822 err = fuse_retrieve(fc, inode, &outarg);
1823 iput(inode);
1824 }
1825 }
1826 up_read(&fc->killsb);
1827
1828 return err;
1829
1830 copy_finish:
1831 fuse_copy_finish(cs);
1832 return err;
1833 }
1834
1835 static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1836 unsigned int size, struct fuse_copy_state *cs)
1837 {
1838 /* Don't try to move pages (yet) */
1839 cs->move_pages = 0;
1840
1841 switch (code) {
1842 case FUSE_NOTIFY_POLL:
1843 return fuse_notify_poll(fc, size, cs);
1844
1845 case FUSE_NOTIFY_INVAL_INODE:
1846 return fuse_notify_inval_inode(fc, size, cs);
1847
1848 case FUSE_NOTIFY_INVAL_ENTRY:
1849 return fuse_notify_inval_entry(fc, size, cs);
1850
1851 case FUSE_NOTIFY_STORE:
1852 return fuse_notify_store(fc, size, cs);
1853
1854 case FUSE_NOTIFY_RETRIEVE:
1855 return fuse_notify_retrieve(fc, size, cs);
1856
1857 case FUSE_NOTIFY_DELETE:
1858 return fuse_notify_delete(fc, size, cs);
1859
1860 default:
1861 fuse_copy_finish(cs);
1862 return -EINVAL;
1863 }
1864 }
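
Notifications flow the other way: the daemon writes a message with a zero unique field and the notification code in the error field, as decoded in fuse_dev_do_write() below. A minimal hedged sketch for FUSE_NOTIFY_INVAL_ENTRY, matching the size and NUL-termination checks in fuse_notify_inval_entry() above:

    #include <stdint.h>
    #include <string.h>
    #include <sys/uio.h>
    #include <linux/fuse.h>

    static int notify_inval_entry(int devfd, uint64_t parent, const char *name)
    {
        size_t namelen = strlen(name);  /* kernel enforces FUSE_NAME_MAX */
        struct fuse_notify_inval_entry_out out = {
            .parent  = parent,
            .namelen = (uint32_t)namelen,
        };
        struct fuse_out_header oh = {
            .len    = sizeof(oh) + sizeof(out) + namelen + 1,
            .error  = FUSE_NOTIFY_INVAL_ENTRY,  /* code rides in 'error' */
            .unique = 0,                        /* zero marks a notification */
        };
        struct iovec iov[3] = {
            { .iov_base = &oh,          .iov_len = sizeof(oh) },
            { .iov_base = &out,         .iov_len = sizeof(out) },
            { .iov_base = (void *)name, .iov_len = namelen + 1 },
        };

        return writev(devfd, iov, 3) < 0 ? -1 : 0;
    }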
1865
1866 /* Look up request on processing list by unique ID */
1867 static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
1868 {
1869 struct fuse_req *req;
1870
1871 list_for_each_entry(req, &fc->processing, list) {
1872 if (req->in.h.unique == unique || req->intr_unique == unique)
1873 return req;
1874 }
1875 return NULL;
1876 }
1877
1878 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1879 unsigned nbytes)
1880 {
1881 unsigned reqsize = sizeof(struct fuse_out_header);
1882
1883 if (out->h.error)
1884 return nbytes != reqsize ? -EINVAL : 0;
1885
1886 reqsize += len_args(out->numargs, out->args);
1887
1888 if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1889 return -EINVAL;
1890 else if (reqsize > nbytes) {
1891 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1892 unsigned diffsize = reqsize - nbytes;
1893 if (diffsize > lastarg->size)
1894 return -EINVAL;
1895 lastarg->size -= diffsize;
1896 }
1897 return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1898 out->page_zeroing);
1899 }
1900
1901 /*
1902 * Write a single reply to a request. First the header is copied from
1903 * the write buffer. The request is then searched on the processing
1904 * list by the unique ID found in the header. If found, then remove
1905 * it from the list and copy the rest of the buffer to the request.
1906 * The request is finished by calling request_end()
1907 */
1908 static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
1909 struct fuse_copy_state *cs, size_t nbytes)
1910 {
1911 int err;
1912 struct fuse_req *req;
1913 struct fuse_out_header oh;
1914
1915 if (nbytes < sizeof(struct fuse_out_header))
1916 return -EINVAL;
1917
1918 err = fuse_copy_one(cs, &oh, sizeof(oh));
1919 if (err)
1920 goto err_finish;
1921
1922 err = -EINVAL;
1923 if (oh.len != nbytes)
1924 goto err_finish;
1925
1926 /*
1927 * Zero oh.unique indicates unsolicited notification message
1928 * and error contains notification code.
1929 */
1930 if (!oh.unique) {
1931 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
1932 return err ? err : nbytes;
1933 }
1934
1935 err = -EINVAL;
1936 if (oh.error <= -1000 || oh.error > 0)
1937 goto err_finish;
1938
1939 spin_lock(&fc->lock);
1940 err = -ENOENT;
1941 if (!fc->connected)
1942 goto err_unlock;
1943
1944 req = request_find(fc, oh.unique);
1945 if (!req)
1946 goto err_unlock;
1947
1948 /* Is it an interrupt reply? */
1949 if (req->intr_unique == oh.unique) {
1950 err = -EINVAL;
1951 if (nbytes != sizeof(struct fuse_out_header))
1952 goto err_unlock;
1953
1954 if (oh.error == -ENOSYS)
1955 fc->no_interrupt = 1;
1956 else if (oh.error == -EAGAIN)
1957 queue_interrupt(&fc->iq, req);
1958
1959 spin_unlock(&fc->lock);
1960 fuse_copy_finish(cs);
1961 return nbytes;
1962 }
1963
1964 clear_bit(FR_SENT, &req->flags);
1965 list_move(&req->list, &fc->io);
1966 req->out.h = oh;
1967 set_bit(FR_LOCKED, &req->flags);
1968 cs->req = req;
1969 if (!req->out.page_replace)
1970 cs->move_pages = 0;
1971 spin_unlock(&fc->lock);
1972
1973 err = copy_out_args(cs, &req->out, nbytes);
1974 fuse_copy_finish(cs);
1975
1976 spin_lock(&fc->lock);
1977 clear_bit(FR_LOCKED, &req->flags);
1978 if (!fc->connected)
1979 err = -ENOENT;
1980 else if (err)
1981 req->out.h.error = -EIO;
1982 request_end(fc, req);
1983
1984 return err ? err : nbytes;
1985
1986 err_unlock:
1987 spin_unlock(&fc->lock);
1988 err_finish:
1989 fuse_copy_finish(cs);
1990 return err;
1991 }
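
The mirror image on the daemon side might look like this hedged sketch: one writev() carries a struct fuse_out_header plus the reply payload, with oh.unique echoed from the request, oh.error either zero or a negative errno in (-1000, 0], and oh.len equal to the total byte count — exactly the invariants checked above.

    #include <stdint.h>
    #include <sys/uio.h>
    #include <unistd.h>
    #include <linux/fuse.h>

    /* on error, no payload is sent */
    static int daemon_reply(int devfd, uint64_t unique, int error,
                            const void *arg, size_t argsize)
    {
        struct fuse_out_header oh = {
            .len    = sizeof(oh) + (error ? 0 : argsize),
            .error  = error,
            .unique = unique,  /* copied from the request's fuse_in_header */
        };
        struct iovec iov[2] = {
            { .iov_base = &oh,         .iov_len = sizeof(oh) },
            { .iov_base = (void *)arg, .iov_len = argsize },
        };

        /* oh.len must equal the byte count of this write, or the kernel
         * rejects the reply with -EINVAL (see the oh.len check above) */
        return writev(devfd, iov, error ? 1 : 2) < 0 ? -1 : 0;
    }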
1992
1993 static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
1994 {
1995 struct fuse_copy_state cs;
1996 struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1997 if (!fc)
1998 return -EPERM;
1999
2000 if (!iter_is_iovec(from))
2001 return -EINVAL;
2002
2003 fuse_copy_init(&cs, 0, from);
2004
2005 return fuse_dev_do_write(fc, &cs, iov_iter_count(from));
2006 }
2007
2008 static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
2009 struct file *out, loff_t *ppos,
2010 size_t len, unsigned int flags)
2011 {
2012 unsigned nbuf;
2013 unsigned idx;
2014 struct pipe_buffer *bufs;
2015 struct fuse_copy_state cs;
2016 struct fuse_conn *fc;
2017 size_t rem;
2018 ssize_t ret;
2019
2020 fc = fuse_get_conn(out);
2021 if (!fc)
2022 return -EPERM;
2023
2024 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
2025 if (!bufs)
2026 return -ENOMEM;
2027
2028 pipe_lock(pipe);
2029 nbuf = 0;
2030 rem = 0;
2031 for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
2032 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
2033
2034 ret = -EINVAL;
2035 if (rem < len) {
2036 pipe_unlock(pipe);
2037 goto out;
2038 }
2039
2040 rem = len;
2041 while (rem) {
2042 struct pipe_buffer *ibuf;
2043 struct pipe_buffer *obuf;
2044
2045 BUG_ON(nbuf >= pipe->buffers);
2046 BUG_ON(!pipe->nrbufs);
2047 ibuf = &pipe->bufs[pipe->curbuf];
2048 obuf = &bufs[nbuf];
2049
2050 if (rem >= ibuf->len) {
2051 *obuf = *ibuf;
2052 ibuf->ops = NULL;
2053 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
2054 pipe->nrbufs--;
2055 } else {
2056 ibuf->ops->get(pipe, ibuf);
2057 *obuf = *ibuf;
2058 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
2059 obuf->len = rem;
2060 ibuf->offset += obuf->len;
2061 ibuf->len -= obuf->len;
2062 }
2063 nbuf++;
2064 rem -= obuf->len;
2065 }
2066 pipe_unlock(pipe);
2067
2068 fuse_copy_init(&cs, 0, NULL);
2069 cs.pipebufs = bufs;
2070 cs.nr_segs = nbuf;
2071 cs.pipe = pipe;
2072
2073 if (flags & SPLICE_F_MOVE)
2074 cs.move_pages = 1;
2075
2076 ret = fuse_dev_do_write(fc, &cs, len);
2077
2078 for (idx = 0; idx < nbuf; idx++) {
2079 struct pipe_buffer *buf = &bufs[idx];
2080 buf->ops->release(pipe, buf);
2081 }
2082 out:
2083 kfree(bufs);
2084 return ret;
2085 }
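
A hedged usage sketch from the daemon side, assuming the complete reply (fuse_out_header plus data) is already in a pipe, for example via vmsplice(); SPLICE_F_MOVE is what sets cs.move_pages above, letting whole pages be stolen by fuse_try_move_page() instead of copied.

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    static ssize_t splice_reply(int fuse_fd, int pipe_rd, size_t reply_len)
    {
        /* the kernel silently falls back to copying pages it can't steal */
        return splice(pipe_rd, NULL, fuse_fd, NULL, reply_len, SPLICE_F_MOVE);
    }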
2086
2087 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
2088 {
2089 unsigned mask = POLLOUT | POLLWRNORM;
2090 struct fuse_iqueue *fiq;
2091 struct fuse_conn *fc = fuse_get_conn(file);
2092 if (!fc)
2093 return POLLERR;
2094
2095 fiq = &fc->iq;
2096 poll_wait(file, &fiq->waitq, wait);
2097
2098 spin_lock(&fc->lock);
2099 spin_lock(&fiq->waitq.lock);
2100 if (!fiq->connected)
2101 mask = POLLERR;
2102 else if (request_pending(fiq))
2103 mask |= POLLIN | POLLRDNORM;
2104 spin_unlock(&fiq->waitq.lock);
2105 spin_unlock(&fc->lock);
2106
2107 return mask;
2108 }
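
A short sketch of the daemon side, multiplexing the device with poll(); the semantics match fuse_dev_poll() above.

    #include <poll.h>

    /* POLLIN: a request, forget or interrupt is pending;
     * POLLERR: not mounted, or the connection is dead */
    static int wait_for_request(int devfd, int timeout_ms)
    {
        struct pollfd pfd = { .fd = devfd, .events = POLLIN };
        int n = poll(&pfd, 1, timeout_ms);

        if (n > 0 && (pfd.revents & POLLERR))
            return -1;
        return n;  /* >0 readable, 0 timeout, <0 error */
    }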
2109
2110 /*
2111 * Abort all requests on the given list (pending or processing)
2112 *
2113 * This function releases and reacquires fc->lock
2114 */
2115 static void end_requests(struct fuse_conn *fc, struct list_head *head)
2116 __releases(fc->lock)
2117 __acquires(fc->lock)
2118 {
2119 while (!list_empty(head)) {
2120 struct fuse_req *req;
2121 req = list_entry(head->next, struct fuse_req, list);
2122 req->out.h.error = -ECONNABORTED;
2123 clear_bit(FR_PENDING, &req->flags);
2124 clear_bit(FR_SENT, &req->flags);
2125 request_end(fc, req);
2126 spin_lock(&fc->lock);
2127 }
2128 }
2129
2130 static void end_polls(struct fuse_conn *fc)
2131 {
2132 struct rb_node *p;
2133
2134 p = rb_first(&fc->polled_files);
2135
2136 while (p) {
2137 struct fuse_file *ff;
2138 ff = rb_entry(p, struct fuse_file, polled_node);
2139 wake_up_interruptible_all(&ff->poll_wait);
2140
2141 p = rb_next(p);
2142 }
2143 }
2144
2145 /*
2146 * Abort all requests.
2147 *
2148 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
2149 * filesystem.
2150 *
2151 * The same effect is usually achievable through killing the filesystem daemon
2152 * and all users of the filesystem. The exception is the combination of an
2153 * asynchronous request and the tricky deadlock (see
2154 * Documentation/filesystems/fuse.txt).
2155 *
2156 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
2157 * requests, they should be finished off immediately. Locked requests will be
2158 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
2159 * requests. It is possible that some request will finish before we can. This
2160 * is OK, the request will in that case be removed from the list before we touch
2161 * it.
2162 */
2163 void fuse_abort_conn(struct fuse_conn *fc)
2164 {
2165 struct fuse_iqueue *fiq = &fc->iq;
2166
2167 spin_lock(&fc->lock);
2168 if (fc->connected) {
2169 struct fuse_req *req, *next;
2170 LIST_HEAD(to_end1);
2171 LIST_HEAD(to_end2);
2172
2173 fc->connected = 0;
2174 fc->blocked = 0;
2175 fuse_set_initialized(fc);
2176 list_for_each_entry_safe(req, next, &fc->io, list) {
2177 req->out.h.error = -ECONNABORTED;
2178 spin_lock(&req->waitq.lock);
2179 set_bit(FR_ABORTED, &req->flags);
2180 if (!test_bit(FR_LOCKED, &req->flags))
2181 list_move(&req->list, &to_end1);
2182 spin_unlock(&req->waitq.lock);
2183 }
2184 fc->max_background = UINT_MAX;
2185 flush_bg_queue(fc);
2186
2187 spin_lock(&fiq->waitq.lock);
2188 fiq->connected = 0;
2189 list_splice_init(&fiq->pending, &to_end2);
2190 while (forget_pending(fiq))
2191 kfree(dequeue_forget(fiq, 1, NULL));
2192 wake_up_all_locked(&fiq->waitq);
2193 spin_unlock(&fiq->waitq.lock);
2194 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
2195
2196 list_splice_init(&fc->processing, &to_end2);
2197 while (!list_empty(&to_end1)) {
2198 req = list_first_entry(&to_end1, struct fuse_req, list);
2199 __fuse_get_request(req);
2200 request_end(fc, req);
2201 spin_lock(&fc->lock);
2202 }
2203 end_requests(fc, &to_end2);
2204 end_polls(fc);
2205 wake_up_all(&fc->blocked_waitq);
2206 }
2207 spin_unlock(&fc->lock);
2208 }
2209 EXPORT_SYMBOL_GPL(fuse_abort_conn);
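
Besides fuse_dev_release(), this abort path is reachable from userspace through the fusectl filesystem, as Documentation/filesystems/fuse.txt describes. A hedged sketch, assuming fusectl is mounted (mount -t fusectl none /sys/fs/fuse/connections) and conn_id is the connection's device number (e.g. the st_dev of the mount point):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int abort_fuse_conn(unsigned long conn_id)
    {
        char path[64];
        int fd;

        snprintf(path, sizeof(path),
                 "/sys/fs/fuse/connections/%lu/abort", conn_id);
        fd = open(path, O_WRONLY);
        if (fd < 0)
            return -1;
        write(fd, "1", 1);  /* any write triggers fuse_abort_conn() */
        close(fd);
        return 0;
    }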
2210
2211 int fuse_dev_release(struct inode *inode, struct file *file)
2212 {
2213 struct fuse_conn *fc = fuse_get_conn(file);
2214 if (fc) {
2215 WARN_ON(!list_empty(&fc->io));
2216 WARN_ON(fc->iq.fasync != NULL);
2217 fuse_abort_conn(fc);
2218 fuse_conn_put(fc);
2219 }
2220
2221 return 0;
2222 }
2223 EXPORT_SYMBOL_GPL(fuse_dev_release);
2224
2225 static int fuse_dev_fasync(int fd, struct file *file, int on)
2226 {
2227 struct fuse_conn *fc = fuse_get_conn(file);
2228 if (!fc)
2229 return -EPERM;
2230
2231 /* No locking - fasync_helper does its own locking */
2232 return fasync_helper(fd, file, on, &fc->iq.fasync);
2233 }
2234
2235 const struct file_operations fuse_dev_operations = {
2236 .owner = THIS_MODULE,
2237 .open = fuse_dev_open,
2238 .llseek = no_llseek,
2239 .read_iter = fuse_dev_read,
2240 .splice_read = fuse_dev_splice_read,
2241 .write_iter = fuse_dev_write,
2242 .splice_write = fuse_dev_splice_write,
2243 .poll = fuse_dev_poll,
2244 .release = fuse_dev_release,
2245 .fasync = fuse_dev_fasync,
2246 };
2247 EXPORT_SYMBOL_GPL(fuse_dev_operations);
2248
2249 static struct miscdevice fuse_miscdevice = {
2250 .minor = FUSE_MINOR,
2251 .name = "fuse",
2252 .fops = &fuse_dev_operations,
2253 };
2254
2255 int __init fuse_dev_init(void)
2256 {
2257 int err = -ENOMEM;
2258 fuse_req_cachep = kmem_cache_create("fuse_request",
2259 sizeof(struct fuse_req),
2260 0, 0, NULL);
2261 if (!fuse_req_cachep)
2262 goto out;
2263
2264 err = misc_register(&fuse_miscdevice);
2265 if (err)
2266 goto out_cache_clean;
2267
2268 return 0;
2269
2270 out_cache_clean:
2271 kmem_cache_destroy(fuse_req_cachep);
2272 out:
2273 return err;
2274 }
2275
2276 void fuse_dev_cleanup(void)
2277 {
2278 misc_deregister(&fuse_miscdevice);
2279 kmem_cache_destroy(fuse_req_cachep);
2280 }