[PATCH] FUSE - mount options
[deliverable/linux.git] fs/fuse/dev.c
1 /*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2005 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7 */
8
9 #include "fuse_i.h"
10
11 #include <linux/init.h>
12 #include <linux/module.h>
13 #include <linux/poll.h>
14 #include <linux/uio.h>
15 #include <linux/miscdevice.h>
16 #include <linux/pagemap.h>
17 #include <linux/file.h>
18 #include <linux/slab.h>
19
20 MODULE_ALIAS_MISCDEV(FUSE_MINOR);
21
22 static kmem_cache_t *fuse_req_cachep;
23
24 static inline struct fuse_conn *fuse_get_conn(struct file *file)
25 {
26 struct fuse_conn *fc;
27 spin_lock(&fuse_lock);
28 fc = file->private_data;
29 if (fc && !fc->mounted)
30 fc = NULL;
31 spin_unlock(&fuse_lock);
32 return fc;
33 }
34
35 static inline void fuse_request_init(struct fuse_req *req)
36 {
37 memset(req, 0, sizeof(*req));
38 INIT_LIST_HEAD(&req->list);
39 init_waitqueue_head(&req->waitq);
40 atomic_set(&req->count, 1);
41 }
42
43 struct fuse_req *fuse_request_alloc(void)
44 {
45 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
46 if (req)
47 fuse_request_init(req);
48 return req;
49 }
50
51 void fuse_request_free(struct fuse_req *req)
52 {
53 kmem_cache_free(fuse_req_cachep, req);
54 }
55
56 static inline void block_sigs(sigset_t *oldset)
57 {
58 sigset_t mask;
59
60 siginitsetinv(&mask, sigmask(SIGKILL));
61 sigprocmask(SIG_BLOCK, &mask, oldset);
62 }
63
64 static inline void restore_sigs(sigset_t *oldset)
65 {
66 sigprocmask(SIG_SETMASK, oldset, NULL);
67 }
68
69 void fuse_reset_request(struct fuse_req *req)
70 {
71 int preallocated = req->preallocated;
72 BUG_ON(atomic_read(&req->count) != 1);
73 fuse_request_init(req);
74 req->preallocated = preallocated;
75 }
76
77 static void __fuse_get_request(struct fuse_req *req)
78 {
79 atomic_inc(&req->count);
80 }
81
82 /* Must be called with > 1 refcount */
83 static void __fuse_put_request(struct fuse_req *req)
84 {
85 BUG_ON(atomic_read(&req->count) < 2);
86 atomic_dec(&req->count);
87 }
88
89 static struct fuse_req *do_get_request(struct fuse_conn *fc)
90 {
91 struct fuse_req *req;
92
93 spin_lock(&fuse_lock);
94 BUG_ON(list_empty(&fc->unused_list));
95 req = list_entry(fc->unused_list.next, struct fuse_req, list);
96 list_del_init(&req->list);
97 spin_unlock(&fuse_lock);
98 fuse_request_init(req);
99 req->preallocated = 1;
100 req->in.h.uid = current->fsuid;
101 req->in.h.gid = current->fsgid;
102 req->in.h.pid = current->pid;
103 return req;
104 }
105
106 struct fuse_req *fuse_get_request(struct fuse_conn *fc)
107 {
108 if (down_interruptible(&fc->outstanding_sem))
109 return NULL;
110 return do_get_request(fc);
111 }
112
113 /*
114 * Non-interruptible version of the above function is for operations
115 * which can't legally return -ERESTART{SYS,NOINTR}. This can still
116 * return NULL, but only in case the signal is SIGKILL.
117 */
118 struct fuse_req *fuse_get_request_nonint(struct fuse_conn *fc)
119 {
120 int intr;
121 sigset_t oldset;
122
123 block_sigs(&oldset);
124 intr = down_interruptible(&fc->outstanding_sem);
125 restore_sigs(&oldset);
126 return intr ? NULL : do_get_request(fc);
127 }
128
129 static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
130 {
131 spin_lock(&fuse_lock);
132 if (req->preallocated)
133 list_add(&req->list, &fc->unused_list);
134 else
135 fuse_request_free(req);
136
137 /* If we are in debt decrease that first */
138 if (fc->outstanding_debt)
139 fc->outstanding_debt--;
140 else
141 up(&fc->outstanding_sem);
142 spin_unlock(&fuse_lock);
143 }
144
145 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
146 {
147 if (atomic_dec_and_test(&req->count))
148 fuse_putback_request(fc, req);
149 }
150
151 void fuse_release_background(struct fuse_req *req)
152 {
153 iput(req->inode);
154 iput(req->inode2);
155 if (req->file)
156 fput(req->file);
157 spin_lock(&fuse_lock);
158 list_del(&req->bg_entry);
159 spin_unlock(&fuse_lock);
160 }
161
162 /*
163 * This function is called when a request is finished. Either a reply
164 * has arrived or it was interrupted (and not yet sent) or some error
165 * occurred during communication with userspace, or the device file was
166 * closed. It decreases the reference count for the request. In case
167 * of a background request the references to the stored objects are
168 * released. The requester thread is woken up (if still waiting), and
169 * finally the request is either freed or put on the unused_list.
170 *
171 * Called with fuse_lock, unlocks it
172 */
173 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
174 {
175 int putback;
176 req->finished = 1;
177 putback = atomic_dec_and_test(&req->count);
178 spin_unlock(&fuse_lock);
179 if (req->background) {
180 down_read(&fc->sbput_sem);
181 if (fc->mounted)
182 fuse_release_background(req);
183 up_read(&fc->sbput_sem);
184 }
185 wake_up(&req->waitq);
186 if (req->in.h.opcode == FUSE_INIT) {
187 int i;
188
189 if (req->misc.init_in_out.major != FUSE_KERNEL_VERSION)
190 fc->conn_error = 1;
191
192 /* After the INIT reply is received other requests can go
193 out. So do (FUSE_MAX_OUTSTANDING - 1) up()s on
194 outstanding_sem. The last up() is done in
195 fuse_putback_request() */
196 for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
197 up(&fc->outstanding_sem);
198 }
199 if (putback)
200 fuse_putback_request(fc, req);
201 }
202
203 /*
204 * Unfortunately request interruption does not just solve the deadlock
205 * problem, it causes problems too. These stem from the fact that an
206 * interrupted request continues to be processed in userspace, while
207 * all the locks and object references (inode and file) held during
208 * the operation are released.
209 *
210 * To release the locks is exactly why there's a need to interrupt the
211 * request, so there's not a lot that can be done about this, except
212 * introduce additional locking in userspace.
213 *
214 * More important is to keep inode and file references until userspace
215 * has replied, otherwise FORGET and RELEASE could be sent while the
216 * inode/file is still used by the filesystem.
217 *
218 * For this reason the concept of "background" request is introduced.
219 * An interrupted request is backgrounded if it has already been sent
220 * to userspace. Backgrounding involves getting an extra reference to
221 * inode(s) or file used in the request, and adding the request to
222 * fc->background list. When a reply is received for a background
223 * request, the object references are released, and the request is
224 * removed from the list. If the filesystem is unmounted while there
225 * are still background requests, the list is walked and references
226 * are released as if a reply was received.
227 *
228 * There's one more use for a background request. The RELEASE message is
229 * always sent as background, since it doesn't return an error or
230 * data.
231 */
232 static void background_request(struct fuse_conn *fc, struct fuse_req *req)
233 {
234 req->background = 1;
235 list_add(&req->bg_entry, &fc->background);
236 if (req->inode)
237 req->inode = igrab(req->inode);
238 if (req->inode2)
239 req->inode2 = igrab(req->inode2);
240 if (req->file)
241 get_file(req->file);
242 }
243
244 static int request_wait_answer_nonint(struct fuse_req *req)
245 {
246 int err;
247 sigset_t oldset;
248 block_sigs(&oldset);
249 err = wait_event_interruptible(req->waitq, req->finished);
250 restore_sigs(&oldset);
251 return err;
252 }
253
254 /* Called with fuse_lock held. Releases, and then reacquires it. */
255 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req,
256 int interruptible)
257 {
258 int intr;
259
260 spin_unlock(&fuse_lock);
261 if (interruptible)
262 intr = wait_event_interruptible(req->waitq, req->finished);
263 else
264 intr = request_wait_answer_nonint(req);
265 spin_lock(&fuse_lock);
266 if (intr && interruptible && req->sent) {
267 /* If request is already in userspace, only allow KILL
268 signal to interrupt */
269 spin_unlock(&fuse_lock);
270 intr = request_wait_answer_nonint(req);
271 spin_lock(&fuse_lock);
272 }
273 if (!intr)
274 return;
275
276 if (!interruptible || req->sent)
277 req->out.h.error = -EINTR;
278 else
279 req->out.h.error = -ERESTARTNOINTR;
280
281 req->interrupted = 1;
282 if (req->locked) {
283 /* This is uninterruptible sleep, because data is
284 being copied to/from the buffers of req. During
285 locked state, there mustn't be any filesystem
286 operation (e.g. page fault), since that could lead
287 to deadlock */
288 spin_unlock(&fuse_lock);
289 wait_event(req->waitq, !req->locked);
290 spin_lock(&fuse_lock);
291 }
292 if (!req->sent && !list_empty(&req->list)) {
293 list_del(&req->list);
294 __fuse_put_request(req);
295 } else if (!req->finished && req->sent)
296 background_request(fc, req);
297 }
298
299 static unsigned len_args(unsigned numargs, struct fuse_arg *args)
300 {
301 unsigned nbytes = 0;
302 unsigned i;
303
304 for (i = 0; i < numargs; i++)
305 nbytes += args[i].size;
306
307 return nbytes;
308 }
309
310 static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
311 {
312 fc->reqctr++;
313 /* zero is special */
314 if (fc->reqctr == 0)
315 fc->reqctr = 1;
316 req->in.h.unique = fc->reqctr;
317 req->in.h.len = sizeof(struct fuse_in_header) +
318 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
319 if (!req->preallocated) {
320 /* If the request is not preallocated (either FORGET or
321 RELEASE), still decrease outstanding_sem, so the
322 user can't open an unbounded number of files while
323 not processing the RELEASE requests. However, for
324 efficiency do it without blocking, so if down()
325 would block, just increase the debt instead */
326 if (down_trylock(&fc->outstanding_sem))
327 fc->outstanding_debt++;
328 }
329 list_add_tail(&req->list, &fc->pending);
330 wake_up(&fc->waitq);
331 }
332
333 static void request_send_wait(struct fuse_conn *fc, struct fuse_req *req,
334 int interruptible)
335 {
336 req->isreply = 1;
337 spin_lock(&fuse_lock);
338 if (!fc->connected)
339 req->out.h.error = -ENOTCONN;
340 else if (fc->conn_error)
341 req->out.h.error = -ECONNREFUSED;
342 else {
343 queue_request(fc, req);
344 /* acquire extra reference, since request is still needed
345 after request_end() */
346 __fuse_get_request(req);
347
348 request_wait_answer(fc, req, interruptible);
349 }
350 spin_unlock(&fuse_lock);
351 }
352
353 void request_send(struct fuse_conn *fc, struct fuse_req *req)
354 {
355 request_send_wait(fc, req, 1);
356 }
357
358 /*
359 * Non-interruptible version of the above function is for operations
360 * which can't legally return -ERESTART{SYS,NOINTR}. This can still
361 * be interrupted but only with SIGKILL.
362 */
363 void request_send_nonint(struct fuse_conn *fc, struct fuse_req *req)
364 {
365 request_send_wait(fc, req, 0);
366 }
367
368 static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
369 {
370 spin_lock(&fuse_lock);
371 if (fc->connected) {
372 queue_request(fc, req);
373 spin_unlock(&fuse_lock);
374 } else {
375 req->out.h.error = -ENOTCONN;
376 request_end(fc, req);
377 }
378 }
379
380 void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
381 {
382 req->isreply = 0;
383 request_send_nowait(fc, req);
384 }
385
386 void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
387 {
388 req->isreply = 1;
389 spin_lock(&fuse_lock);
390 background_request(fc, req);
391 spin_unlock(&fuse_lock);
392 request_send_nowait(fc, req);
393 }
394
395 void fuse_send_init(struct fuse_conn *fc)
396 {
397 /* This is called from fuse_read_super() so there's guaranteed
398 to be a request available */
399 struct fuse_req *req = do_get_request(fc);
400 struct fuse_init_in_out *arg = &req->misc.init_in_out;
401 arg->major = FUSE_KERNEL_VERSION;
402 arg->minor = FUSE_KERNEL_MINOR_VERSION;
403 req->in.h.opcode = FUSE_INIT;
404 req->in.numargs = 1;
405 req->in.args[0].size = sizeof(*arg);
406 req->in.args[0].value = arg;
407 req->out.numargs = 1;
408 req->out.args[0].size = sizeof(*arg);
409 req->out.args[0].value = arg;
410 request_send_background(fc, req);
411 }
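/*
 * Illustrative userspace sketch, not part of dev.c: one way a daemon
 * might answer the FUSE_INIT request queued by fuse_send_init() above.
 * It assumes the fuse_in_header, fuse_out_header and fuse_init_in_out
 * layouts exported to userspace for this protocol version (included
 * here as <linux/fuse.h>), and "devfd" is a hypothetical descriptor
 * for the opened /dev/fuse device.
 */
#include <string.h>
#include <unistd.h>
#include <linux/fuse.h>

static void reply_to_init(int devfd, const struct fuse_in_header *in,
			  const struct fuse_init_in_out *arg)
{
	struct {
		struct fuse_out_header h;
		struct fuse_init_in_out i;
	} out;

	memset(&out, 0, sizeof(out));
	out.h.len = sizeof(out);	/* must match the number of bytes written */
	out.h.error = 0;
	out.h.unique = in->unique;	/* echo the request ID back */
	out.i.major = arg->major;	/* a major mismatch sets fc->conn_error */
	out.i.minor = arg->minor;
	write(devfd, &out, sizeof(out));
}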
412
413 /*
414 * Lock the request. Up to the next unlock_request() there mustn't be
415 * anything that could cause a page-fault. If the request was already
416 * interrupted bail out.
417 */
418 static inline int lock_request(struct fuse_req *req)
419 {
420 int err = 0;
421 if (req) {
422 spin_lock(&fuse_lock);
423 if (req->interrupted)
424 err = -ENOENT;
425 else
426 req->locked = 1;
427 spin_unlock(&fuse_lock);
428 }
429 return err;
430 }
431
432 /*
433 * Unlock request. If it was interrupted while locked, the
434 * requester thread is currently waiting for it to be unlocked, so
435 * wake it up.
436 */
437 static inline void unlock_request(struct fuse_req *req)
438 {
439 if (req) {
440 spin_lock(&fuse_lock);
441 req->locked = 0;
442 if (req->interrupted)
443 wake_up(&req->waitq);
444 spin_unlock(&fuse_lock);
445 }
446 }
447
448 struct fuse_copy_state {
449 int write;
450 struct fuse_req *req;
451 const struct iovec *iov;
452 unsigned long nr_segs;
453 unsigned long seglen;
454 unsigned long addr;
455 struct page *pg;
456 void *mapaddr;
457 void *buf;
458 unsigned len;
459 };
460
461 static void fuse_copy_init(struct fuse_copy_state *cs, int write,
462 struct fuse_req *req, const struct iovec *iov,
463 unsigned long nr_segs)
464 {
465 memset(cs, 0, sizeof(*cs));
466 cs->write = write;
467 cs->req = req;
468 cs->iov = iov;
469 cs->nr_segs = nr_segs;
470 }
471
472 /* Unmap and put previous page of userspace buffer */
473 static inline void fuse_copy_finish(struct fuse_copy_state *cs)
474 {
475 if (cs->mapaddr) {
476 kunmap_atomic(cs->mapaddr, KM_USER0);
477 if (cs->write) {
478 flush_dcache_page(cs->pg);
479 set_page_dirty_lock(cs->pg);
480 }
481 put_page(cs->pg);
482 cs->mapaddr = NULL;
483 }
484 }
485
486 /*
487 * Get another pageful of the userspace buffer, map it to kernel
488 * address space, and lock the request
489 */
490 static int fuse_copy_fill(struct fuse_copy_state *cs)
491 {
492 unsigned long offset;
493 int err;
494
495 unlock_request(cs->req);
496 fuse_copy_finish(cs);
497 if (!cs->seglen) {
498 BUG_ON(!cs->nr_segs);
499 cs->seglen = cs->iov[0].iov_len;
500 cs->addr = (unsigned long) cs->iov[0].iov_base;
501 cs->iov++;
502 cs->nr_segs--;
503 }
504 down_read(&current->mm->mmap_sem);
505 err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
506 &cs->pg, NULL);
507 up_read(&current->mm->mmap_sem);
508 if (err < 0)
509 return err;
510 BUG_ON(err != 1);
511 offset = cs->addr % PAGE_SIZE;
512 cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
513 cs->buf = cs->mapaddr + offset;
514 cs->len = min(PAGE_SIZE - offset, cs->seglen);
515 cs->seglen -= cs->len;
516 cs->addr += cs->len;
517
518 return lock_request(cs->req);
519 }
520
521 /* Do as much copy to/from userspace buffer as we can */
522 static inline int fuse_copy_do(struct fuse_copy_state *cs, void **val,
523 unsigned *size)
524 {
525 unsigned ncpy = min(*size, cs->len);
526 if (val) {
527 if (cs->write)
528 memcpy(cs->buf, *val, ncpy);
529 else
530 memcpy(*val, cs->buf, ncpy);
531 *val += ncpy;
532 }
533 *size -= ncpy;
534 cs->len -= ncpy;
535 cs->buf += ncpy;
536 return ncpy;
537 }
538
539 /*
540 * Copy a page in the request to/from the userspace buffer. Must be
541 * done atomically
542 */
543 static inline int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
544 unsigned offset, unsigned count, int zeroing)
545 {
546 if (page && zeroing && count < PAGE_SIZE) {
547 void *mapaddr = kmap_atomic(page, KM_USER1);
548 memset(mapaddr, 0, PAGE_SIZE);
549 kunmap_atomic(mapaddr, KM_USER1);
550 }
551 while (count) {
552 int err;
553 if (!cs->len && (err = fuse_copy_fill(cs)))
554 return err;
555 if (page) {
556 void *mapaddr = kmap_atomic(page, KM_USER1);
557 void *buf = mapaddr + offset;
558 offset += fuse_copy_do(cs, &buf, &count);
559 kunmap_atomic(mapaddr, KM_USER1);
560 } else
561 offset += fuse_copy_do(cs, NULL, &count);
562 }
563 if (page && !cs->write)
564 flush_dcache_page(page);
565 return 0;
566 }
567
568 /* Copy pages in the request to/from userspace buffer */
569 static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
570 int zeroing)
571 {
572 unsigned i;
573 struct fuse_req *req = cs->req;
574 unsigned offset = req->page_offset;
575 unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);
576
577 for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
578 struct page *page = req->pages[i];
579 int err = fuse_copy_page(cs, page, offset, count, zeroing);
580 if (err)
581 return err;
582
583 nbytes -= count;
584 count = min(nbytes, (unsigned) PAGE_SIZE);
585 offset = 0;
586 }
587 return 0;
588 }
589
590 /* Copy a single argument in the request to/from userspace buffer */
591 static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
592 {
593 while (size) {
594 int err;
595 if (!cs->len && (err = fuse_copy_fill(cs)))
596 return err;
597 fuse_copy_do(cs, &val, &size);
598 }
599 return 0;
600 }
601
602 /* Copy request arguments to/from userspace buffer */
603 static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
604 unsigned argpages, struct fuse_arg *args,
605 int zeroing)
606 {
607 int err = 0;
608 unsigned i;
609
610 for (i = 0; !err && i < numargs; i++) {
611 struct fuse_arg *arg = &args[i];
612 if (i == numargs - 1 && argpages)
613 err = fuse_copy_pages(cs, arg->size, zeroing);
614 else
615 err = fuse_copy_one(cs, arg->value, arg->size);
616 }
617 return err;
618 }
619
620 /* Wait until a request is available on the pending list */
621 static void request_wait(struct fuse_conn *fc)
622 {
623 DECLARE_WAITQUEUE(wait, current);
624
625 add_wait_queue_exclusive(&fc->waitq, &wait);
626 while (fc->mounted && list_empty(&fc->pending)) {
627 set_current_state(TASK_INTERRUPTIBLE);
628 if (signal_pending(current))
629 break;
630
631 spin_unlock(&fuse_lock);
632 schedule();
633 spin_lock(&fuse_lock);
634 }
635 set_current_state(TASK_RUNNING);
636 remove_wait_queue(&fc->waitq, &wait);
637 }
638
639 /*
640 * Read a single request into the userspace filesystem's buffer. This
641 * function waits until a request is available, then removes it from
642 * the pending list and copies the request data to the userspace
643 * buffer. If no reply is needed (FORGET) or the request has been
644 * interrupted or there was an error during the copying, then it is
645 * finished by calling request_end(). Otherwise add it to the
646 * processing list, and set the 'sent' flag.
647 */
648 static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
649 unsigned long nr_segs, loff_t *off)
650 {
651 int err;
652 struct fuse_conn *fc;
653 struct fuse_req *req;
654 struct fuse_in *in;
655 struct fuse_copy_state cs;
656 unsigned reqsize;
657
658 spin_lock(&fuse_lock);
659 fc = file->private_data;
660 err = -EPERM;
661 if (!fc)
662 goto err_unlock;
663 request_wait(fc);
664 err = -ENODEV;
665 if (!fc->mounted)
666 goto err_unlock;
667 err = -ERESTARTSYS;
668 if (list_empty(&fc->pending))
669 goto err_unlock;
670
671 req = list_entry(fc->pending.next, struct fuse_req, list);
672 list_del_init(&req->list);
673 spin_unlock(&fuse_lock);
674
675 in = &req->in;
676 reqsize = req->in.h.len;
677 fuse_copy_init(&cs, 1, req, iov, nr_segs);
678 err = -EINVAL;
679 if (iov_length(iov, nr_segs) >= reqsize) {
680 err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
681 if (!err)
682 err = fuse_copy_args(&cs, in->numargs, in->argpages,
683 (struct fuse_arg *) in->args, 0);
684 }
685 fuse_copy_finish(&cs);
686
687 spin_lock(&fuse_lock);
688 req->locked = 0;
689 if (!err && req->interrupted)
690 err = -ENOENT;
691 if (err) {
692 if (!req->interrupted)
693 req->out.h.error = -EIO;
694 request_end(fc, req);
695 return err;
696 }
697 if (!req->isreply)
698 request_end(fc, req);
699 else {
700 req->sent = 1;
701 list_add_tail(&req->list, &fc->processing);
702 spin_unlock(&fuse_lock);
703 }
704 return reqsize;
705
706 err_unlock:
707 spin_unlock(&fuse_lock);
708 return err;
709 }
710
711 static ssize_t fuse_dev_read(struct file *file, char __user *buf,
712 size_t nbytes, loff_t *off)
713 {
714 struct iovec iov;
715 iov.iov_len = nbytes;
716 iov.iov_base = buf;
717 return fuse_dev_readv(file, &iov, 1, off);
718 }
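/*
 * Illustrative userspace sketch, not part of dev.c: the read side of a
 * daemon loop matching the behaviour above. One read() returns exactly
 * one request, and a buffer smaller than the request fails with EINVAL.
 * Assumes the fuse_in_header layout for this protocol version; "devfd"
 * is a hypothetical descriptor for the opened /dev/fuse device.
 */
#include <string.h>
#include <unistd.h>
#include <linux/fuse.h>

static int read_one_request(int devfd, void *buf, size_t bufsize)
{
	struct fuse_in_header in;
	ssize_t n = read(devfd, buf, bufsize);	/* blocks until a request is pending */

	if (n < (ssize_t) sizeof(in))
		return -1;			/* error, or truncated header */
	memcpy(&in, buf, sizeof(in));
	/* in.opcode selects the operation; in.unique keys the later reply */
	return (size_t) n == in.len ? 0 : -1;
}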
719
720 /* Look up request on processing list by unique ID */
721 static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
722 {
723 struct list_head *entry;
724
725 list_for_each(entry, &fc->processing) {
726 struct fuse_req *req;
727 req = list_entry(entry, struct fuse_req, list);
728 if (req->in.h.unique == unique)
729 return req;
730 }
731 return NULL;
732 }
733
734 /* fget() needs to be done in this context */
735 static void process_getdir(struct fuse_req *req)
736 {
737 struct fuse_getdir_out_i *arg = req->out.args[0].value;
738 arg->file = fget(arg->fd);
739 }
740
741 static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
742 unsigned nbytes)
743 {
744 unsigned reqsize = sizeof(struct fuse_out_header);
745
746 if (out->h.error)
747 return nbytes != reqsize ? -EINVAL : 0;
748
749 reqsize += len_args(out->numargs, out->args);
750
751 if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
752 return -EINVAL;
753 else if (reqsize > nbytes) {
754 struct fuse_arg *lastarg = &out->args[out->numargs-1];
755 unsigned diffsize = reqsize - nbytes;
756 if (diffsize > lastarg->size)
757 return -EINVAL;
758 lastarg->size -= diffsize;
759 }
760 return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
761 out->page_zeroing);
762 }
763
764 /*
765 * Write a single reply to a request. First the header is copied from
766 * the write buffer. The request is then searched on the processing
767 * list by the unique ID found in the header. If found, then remove
768 * it from the list and copy the rest of the buffer to the request.
769 * The request is finished by calling request_end()
770 */
771 static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
772 unsigned long nr_segs, loff_t *off)
773 {
774 int err;
775 unsigned nbytes = iov_length(iov, nr_segs);
776 struct fuse_req *req;
777 struct fuse_out_header oh;
778 struct fuse_copy_state cs;
779 struct fuse_conn *fc = fuse_get_conn(file);
780 if (!fc)
781 return -ENODEV;
782
783 fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
784 if (nbytes < sizeof(struct fuse_out_header))
785 return -EINVAL;
786
787 err = fuse_copy_one(&cs, &oh, sizeof(oh));
788 if (err)
789 goto err_finish;
790 err = -EINVAL;
791 if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
792 oh.len != nbytes)
793 goto err_finish;
794
795 spin_lock(&fuse_lock);
796 req = request_find(fc, oh.unique);
797 err = -EINVAL;
798 if (!req)
799 goto err_unlock;
800
801 list_del_init(&req->list);
802 if (req->interrupted) {
803 request_end(fc, req);
804 fuse_copy_finish(&cs);
805 return -ENOENT;
806 }
807 req->out.h = oh;
808 req->locked = 1;
809 cs.req = req;
810 spin_unlock(&fuse_lock);
811
812 err = copy_out_args(&cs, &req->out, nbytes);
813 fuse_copy_finish(&cs);
814
815 spin_lock(&fuse_lock);
816 req->locked = 0;
817 if (!err) {
818 if (req->interrupted)
819 err = -ENOENT;
820 else if (req->in.h.opcode == FUSE_GETDIR && !oh.error)
821 process_getdir(req);
822 } else if (!req->interrupted)
823 req->out.h.error = -EIO;
824 request_end(fc, req);
825
826 return err ? err : nbytes;
827
828 err_unlock:
829 spin_unlock(&fuse_lock);
830 err_finish:
831 fuse_copy_finish(&cs);
832 return err;
833 }
834
835 static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
836 size_t nbytes, loff_t *off)
837 {
838 struct iovec iov;
839 iov.iov_len = nbytes;
840 iov.iov_base = (char __user *) buf;
841 return fuse_dev_writev(file, &iov, 1, off);
842 }
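/*
 * Illustrative userspace sketch, not part of dev.c: sending an
 * error-only reply. The checks in fuse_dev_writev() require a non-zero
 * unique ID, an error value in (-1000, 0], and oh.len equal to the
 * number of bytes written. "devfd" is a hypothetical descriptor for
 * the opened /dev/fuse device.
 */
#include <unistd.h>
#include <linux/types.h>
#include <linux/fuse.h>

static void reply_error(int devfd, __u64 unique, int err)
{
	struct fuse_out_header oh;

	oh.len = sizeof(oh);	/* an error reply carries no payload */
	oh.error = err;		/* e.g. -ENOENT; must be <= 0 and > -1000 */
	oh.unique = unique;	/* copied from the matching request */
	write(devfd, &oh, sizeof(oh));
}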
843
844 static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
845 {
846 struct fuse_conn *fc = fuse_get_conn(file);
847 unsigned mask = POLLOUT | POLLWRNORM;
848
849 if (!fc)
850 return -ENODEV;
851
852 poll_wait(file, &fc->waitq, wait);
853
854 spin_lock(&fuse_lock);
855 if (!list_empty(&fc->pending))
856 mask |= POLLIN | POLLRDNORM;
857 spin_unlock(&fuse_lock);
858
859 return mask;
860 }
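/*
 * Illustrative userspace sketch, not part of dev.c: waiting for work
 * with poll() instead of a blocking read. fuse_dev_poll() always
 * reports the device writable and raises POLLIN only while the pending
 * list is non-empty. "devfd" is a hypothetical descriptor for the
 * opened /dev/fuse device.
 */
#include <poll.h>

static int wait_for_request(int devfd)
{
	struct pollfd pfd = { .fd = devfd, .events = POLLIN };

	/* blocks until a request is pending; > 0 means POLLIN, -1 is an error */
	return poll(&pfd, 1, -1);
}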
861
862 /* Abort all requests on the given list (pending or processing) */
863 static void end_requests(struct fuse_conn *fc, struct list_head *head)
864 {
865 while (!list_empty(head)) {
866 struct fuse_req *req;
867 req = list_entry(head->next, struct fuse_req, list);
868 list_del_init(&req->list);
869 req->out.h.error = -ECONNABORTED;
870 request_end(fc, req);
871 spin_lock(&fuse_lock);
872 }
873 }
874
875 static int fuse_dev_release(struct inode *inode, struct file *file)
876 {
877 struct fuse_conn *fc;
878
879 spin_lock(&fuse_lock);
880 fc = file->private_data;
881 if (fc) {
882 fc->connected = 0;
883 end_requests(fc, &fc->pending);
884 end_requests(fc, &fc->processing);
885 fuse_release_conn(fc);
886 }
887 spin_unlock(&fuse_lock);
888 return 0;
889 }
890
891 struct file_operations fuse_dev_operations = {
892 .owner = THIS_MODULE,
893 .llseek = no_llseek,
894 .read = fuse_dev_read,
895 .readv = fuse_dev_readv,
896 .write = fuse_dev_write,
897 .writev = fuse_dev_writev,
898 .poll = fuse_dev_poll,
899 .release = fuse_dev_release,
900 };
901
902 static struct miscdevice fuse_miscdevice = {
903 .minor = FUSE_MINOR,
904 .name = "fuse",
905 .fops = &fuse_dev_operations,
906 };
907
908 int __init fuse_dev_init(void)
909 {
910 int err = -ENOMEM;
911 fuse_req_cachep = kmem_cache_create("fuse_request",
912 sizeof(struct fuse_req),
913 0, 0, NULL, NULL);
914 if (!fuse_req_cachep)
915 goto out;
916
917 err = misc_register(&fuse_miscdevice);
918 if (err)
919 goto out_cache_clean;
920
921 return 0;
922
923 out_cache_clean:
924 kmem_cache_destroy(fuse_req_cachep);
925 out:
926 return err;
927 }
928
929 void fuse_dev_cleanup(void)
930 {
931 misc_deregister(&fuse_miscdevice);
932 kmem_cache_destroy(fuse_req_cachep);
933 }