[PATCH] fuse: introduce unified request state
fs/fuse/dev.c
/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2005  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;

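/*
 * Return the connection associated with the device file, or NULL if
 * the filesystem backing it is no longer mounted
 */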
static struct fuse_conn *fuse_get_conn(struct file *file)
{
        struct fuse_conn *fc;
        spin_lock(&fuse_lock);
        fc = file->private_data;
        if (fc && !fc->mounted)
                fc = NULL;
        spin_unlock(&fuse_lock);
        return fc;
}

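/* Initialize a request to a known clean state with a single reference */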
static void fuse_request_init(struct fuse_req *req)
{
        memset(req, 0, sizeof(*req));
        INIT_LIST_HEAD(&req->list);
        init_waitqueue_head(&req->waitq);
        atomic_set(&req->count, 1);
}

struct fuse_req *fuse_request_alloc(void)
{
        struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
        if (req)
                fuse_request_init(req);
        return req;
}

void fuse_request_free(struct fuse_req *req)
{
        kmem_cache_free(fuse_req_cachep, req);
}

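/* Block all signals except SIGKILL while waiting for a request slot */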
static void block_sigs(sigset_t *oldset)
{
        sigset_t mask;

        siginitsetinv(&mask, sigmask(SIGKILL));
        sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
        sigprocmask(SIG_SETMASK, oldset, NULL);
}

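/* Reinitialize a request for reuse, preserving only the preallocated flag */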
void fuse_reset_request(struct fuse_req *req)
{
        int preallocated = req->preallocated;
        BUG_ON(atomic_read(&req->count) != 1);
        fuse_request_init(req);
        req->preallocated = preallocated;
}

static void __fuse_get_request(struct fuse_req *req)
{
        atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
        BUG_ON(atomic_read(&req->count) < 2);
        atomic_dec(&req->count);
}

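/*
 * Take a preallocated request off the unused list and fill in the
 * credentials of the current task
 */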
static struct fuse_req *do_get_request(struct fuse_conn *fc)
{
        struct fuse_req *req;

        spin_lock(&fuse_lock);
        BUG_ON(list_empty(&fc->unused_list));
        req = list_entry(fc->unused_list.next, struct fuse_req, list);
        list_del_init(&req->list);
        spin_unlock(&fuse_lock);
        fuse_request_init(req);
        req->preallocated = 1;
        req->in.h.uid = current->fsuid;
        req->in.h.gid = current->fsgid;
        req->in.h.pid = current->pid;
        return req;
}

/* This can return NULL, but only if it is interrupted by a SIGKILL */
struct fuse_req *fuse_get_request(struct fuse_conn *fc)
{
        int intr;
        sigset_t oldset;

        block_sigs(&oldset);
        intr = down_interruptible(&fc->outstanding_sem);
        restore_sigs(&oldset);
        return intr ? NULL : do_get_request(fc);
}

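/*
 * Return a request to the unused list (or free it if it wasn't
 * preallocated) and release one slot of the outstanding counter
 */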
static void fuse_putback_request(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fuse_lock);
        if (req->preallocated)
                list_add(&req->list, &fc->unused_list);
        else
                fuse_request_free(req);

        /* If we are in debt decrease that first */
        if (fc->outstanding_debt)
                fc->outstanding_debt--;
        else
                up(&fc->outstanding_sem);
        spin_unlock(&fuse_lock);
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
        if (atomic_dec_and_test(&req->count))
                fuse_putback_request(fc, req);
}

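/*
 * Drop the inode and file references taken for a background request
 * and remove it from the connection's background list
 */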
void fuse_release_background(struct fuse_req *req)
{
        iput(req->inode);
        iput(req->inode2);
        if (req->file)
                fput(req->file);
        spin_lock(&fuse_lock);
        list_del(&req->bg_entry);
        spin_unlock(&fuse_lock);
}

static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
{
        int i;
        struct fuse_init_out *arg = &req->misc.init_out;

        if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
                fc->conn_error = 1;
        else {
                fc->minor = arg->minor;
                fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
        }

        /* After the INIT reply is received other requests can go
           out.  So do (FUSE_MAX_OUTSTANDING - 1) up()s on
           outstanding_sem.  The last up() is done in
           fuse_putback_request() */
        for (i = 1; i < FUSE_MAX_OUTSTANDING; i++)
                up(&fc->outstanding_sem);
}

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), and finally the reference to the request is
 * released.
 *
 * Called with fuse_lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
        req->state = FUSE_REQ_FINISHED;
        spin_unlock(&fuse_lock);
        if (req->background) {
                down_read(&fc->sbput_sem);
                if (fc->mounted)
                        fuse_release_background(req);
                up_read(&fc->sbput_sem);
        }
        wake_up(&req->waitq);
        if (req->in.h.opcode == FUSE_INIT)
                process_init_reply(fc, req);
        else if (req->in.h.opcode == FUSE_RELEASE && req->inode == NULL) {
                /* Special case for failed iget in CREATE */
                u64 nodeid = req->in.h.nodeid;
                fuse_reset_request(req);
                fuse_send_forget(fc, req, nodeid, 1);
                return;
        }
        fuse_put_request(fc, req);
}

/*
 * Unfortunately request interruption does not just solve the deadlock
 * problem, it causes problems too.  These stem from the fact that an
 * interrupted request continues to be processed in userspace, while
 * all the locks and object references (inode and file) held during
 * the operation are released.
 *
 * To release the locks is exactly why there's a need to interrupt the
 * request, so there's not a lot that can be done about this, except
 * introduce additional locking in userspace.
 *
 * More important is to keep inode and file references until userspace
 * has replied, otherwise FORGET and RELEASE could be sent while the
 * inode/file is still used by the filesystem.
 *
 * For this reason the concept of "background" request is introduced.
 * An interrupted request is backgrounded if it has already been sent
 * to userspace.  Backgrounding involves getting an extra reference to
 * the inode(s) or file used in the request, and adding the request to
 * the fc->background list.  When a reply is received for a background
 * request, the object references are released, and the request is
 * removed from the list.  If the filesystem is unmounted while there
 * are still background requests, the list is walked and references
 * are released as if a reply was received.
 *
 * There's one more use for a background request.  The RELEASE message
 * is always sent as background, since it doesn't return an error or
 * data.
 */
static void background_request(struct fuse_conn *fc, struct fuse_req *req)
{
        req->background = 1;
        list_add(&req->bg_entry, &fc->background);
        if (req->inode)
                req->inode = igrab(req->inode);
        if (req->inode2)
                req->inode2 = igrab(req->inode2);
        if (req->file)
                get_file(req->file);
}

/* Called with fuse_lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
        sigset_t oldset;

        spin_unlock(&fuse_lock);
        block_sigs(&oldset);
        wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
        restore_sigs(&oldset);
        spin_lock(&fuse_lock);
        if (req->state == FUSE_REQ_FINISHED)
                return;

        req->out.h.error = -EINTR;
        req->interrupted = 1;
        if (req->locked) {
                /* This is uninterruptible sleep, because data is
                   being copied to/from the buffers of req.  During
                   locked state, there mustn't be any filesystem
                   operation (e.g. page fault), since that could lead
                   to deadlock */
                spin_unlock(&fuse_lock);
                wait_event(req->waitq, !req->locked);
                spin_lock(&fuse_lock);
        }
        if (req->state == FUSE_REQ_PENDING) {
                list_del(&req->list);
                __fuse_put_request(req);
        } else if (req->state == FUSE_REQ_SENT)
                background_request(fc, req);
}

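/* Total length in bytes of an argument array */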
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
        unsigned nbytes = 0;
        unsigned i;

        for (i = 0; i < numargs; i++)
                nbytes += args[i].size;

        return nbytes;
}

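/*
 * Assign a unique ID and total length to the request and add it to the
 * pending list, then wake up a reader.  Called with fuse_lock held.
 */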
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
        fc->reqctr++;
        /* zero is special */
        if (fc->reqctr == 0)
                fc->reqctr = 1;
        req->in.h.unique = fc->reqctr;
        req->in.h.len = sizeof(struct fuse_in_header) +
                len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
        if (!req->preallocated) {
                /* If request is not preallocated (either FORGET or
                   RELEASE), then still decrease outstanding_sem, so
                   user can't open infinite number of files while not
                   processing the RELEASE requests.  However for
                   efficiency do it without blocking, so if down()
                   would block, just increase the debt instead */
                if (down_trylock(&fc->outstanding_sem))
                        fc->outstanding_debt++;
        }
        list_add_tail(&req->list, &fc->pending);
        req->state = FUSE_REQ_PENDING;
        wake_up(&fc->waitq);
}

/*
 * This can only be interrupted by a SIGKILL
 */
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fuse_lock);
        if (!fc->connected)
                req->out.h.error = -ENOTCONN;
        else if (fc->conn_error)
                req->out.h.error = -ECONNREFUSED;
        else {
                queue_request(fc, req);
                /* acquire extra reference, since request is still needed
                   after request_end() */
                __fuse_get_request(req);

                request_wait_answer(fc, req);
        }
        spin_unlock(&fuse_lock);
}

static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
        spin_lock(&fuse_lock);
        if (fc->connected) {
                queue_request(fc, req);
                spin_unlock(&fuse_lock);
        } else {
                req->out.h.error = -ENOTCONN;
                request_end(fc, req);
        }
}

void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 0;
        request_send_nowait(fc, req);
}

void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
        req->isreply = 1;
        spin_lock(&fuse_lock);
        background_request(fc, req);
        spin_unlock(&fuse_lock);
        request_send_nowait(fc, req);
}

void fuse_send_init(struct fuse_conn *fc)
{
        /* This is called from fuse_read_super() so there's guaranteed
           to be exactly one request available */
        struct fuse_req *req = fuse_get_request(fc);
        struct fuse_init_in *arg = &req->misc.init_in;
        arg->major = FUSE_KERNEL_VERSION;
        arg->minor = FUSE_KERNEL_MINOR_VERSION;
        req->in.h.opcode = FUSE_INIT;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
        req->in.args[0].value = arg;
        req->out.numargs = 1;
        /* Variable length argument used for backward compatibility
           with interface version < 7.5.  Rest of init_out is zeroed
           by do_get_request(), so a short reply is not a problem */
        req->out.argvar = 1;
        req->out.args[0].size = sizeof(struct fuse_init_out);
        req->out.args[0].value = &req->misc.init_out;
        request_send_background(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * interrupted bail out.
 */
static int lock_request(struct fuse_req *req)
{
        int err = 0;
        if (req) {
                spin_lock(&fuse_lock);
                if (req->interrupted)
                        err = -ENOENT;
                else
                        req->locked = 1;
                spin_unlock(&fuse_lock);
        }
        return err;
}

/*
 * Unlock request.  If it was interrupted while locked, the requester
 * thread is currently waiting for it to be unlocked, so wake it up.
 */
static void unlock_request(struct fuse_req *req)
{
        if (req) {
                spin_lock(&fuse_lock);
                req->locked = 0;
                if (req->interrupted)
                        wake_up(&req->waitq);
                spin_unlock(&fuse_lock);
        }
}

struct fuse_copy_state {
        int write;
        struct fuse_req *req;
        const struct iovec *iov;
        unsigned long nr_segs;
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
        void *mapaddr;
        void *buf;
        unsigned len;
};

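/*
 * Initialize the copy state.  'write' is nonzero when copying to the
 * userspace buffer (device read) and zero when copying from it
 * (device write).
 */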
static void fuse_copy_init(struct fuse_copy_state *cs, int write,
                           struct fuse_req *req, const struct iovec *iov,
                           unsigned long nr_segs)
{
        memset(cs, 0, sizeof(*cs));
        cs->write = write;
        cs->req = req;
        cs->iov = iov;
        cs->nr_segs = nr_segs;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
        if (cs->mapaddr) {
                kunmap_atomic(cs->mapaddr, KM_USER0);
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
                cs->mapaddr = NULL;
        }
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
        unsigned long offset;
        int err;

        unlock_request(cs->req);
        fuse_copy_finish(cs);
        if (!cs->seglen) {
                BUG_ON(!cs->nr_segs);
                cs->seglen = cs->iov[0].iov_len;
                cs->addr = (unsigned long) cs->iov[0].iov_base;
                cs->iov++;
                cs->nr_segs--;
        }
        down_read(&current->mm->mmap_sem);
        err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
                             &cs->pg, NULL);
        up_read(&current->mm->mmap_sem);
        if (err < 0)
                return err;
        BUG_ON(err != 1);
        offset = cs->addr % PAGE_SIZE;
        cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
        cs->buf = cs->mapaddr + offset;
        cs->len = min(PAGE_SIZE - offset, cs->seglen);
        cs->seglen -= cs->len;
        cs->addr += cs->len;

        return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
        unsigned ncpy = min(*size, cs->len);
        if (val) {
                if (cs->write)
                        memcpy(cs->buf, *val, ncpy);
                else
                        memcpy(*val, cs->buf, ncpy);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
        cs->buf += ncpy;
        return ncpy;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
                          unsigned offset, unsigned count, int zeroing)
{
        if (page && zeroing && count < PAGE_SIZE) {
                void *mapaddr = kmap_atomic(page, KM_USER1);
                memset(mapaddr, 0, PAGE_SIZE);
                kunmap_atomic(mapaddr, KM_USER1);
        }
        while (count) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                if (page) {
                        void *mapaddr = kmap_atomic(page, KM_USER1);
                        void *buf = mapaddr + offset;
                        offset += fuse_copy_do(cs, &buf, &count);
                        kunmap_atomic(mapaddr, KM_USER1);
                } else
                        offset += fuse_copy_do(cs, NULL, &count);
        }
        if (page && !cs->write)
                flush_dcache_page(page);
        return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
                           int zeroing)
{
        unsigned i;
        struct fuse_req *req = cs->req;
        unsigned offset = req->page_offset;
        unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

        for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
                struct page *page = req->pages[i];
                int err = fuse_copy_page(cs, page, offset, count, zeroing);
                if (err)
                        return err;

                nbytes -= count;
                count = min(nbytes, (unsigned) PAGE_SIZE);
                offset = 0;
        }
        return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
        while (size) {
                int err;
                if (!cs->len && (err = fuse_copy_fill(cs)))
                        return err;
                fuse_copy_do(cs, &val, &size);
        }
        return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
                          unsigned argpages, struct fuse_arg *args,
                          int zeroing)
{
        int err = 0;
        unsigned i;

        for (i = 0; !err && i < numargs; i++) {
                struct fuse_arg *arg = &args[i];
                if (i == numargs - 1 && argpages)
                        err = fuse_copy_pages(cs, arg->size, zeroing);
                else
                        err = fuse_copy_one(cs, arg->value, arg->size);
        }
        return err;
}

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
        DECLARE_WAITQUEUE(wait, current);

        add_wait_queue_exclusive(&fc->waitq, &wait);
        while (fc->mounted && list_empty(&fc->pending)) {
                set_current_state(TASK_INTERRUPTIBLE);
                if (signal_pending(current))
                        break;

                spin_unlock(&fuse_lock);
                schedule();
                spin_lock(&fuse_lock);
        }
        set_current_state(TASK_RUNNING);
        remove_wait_queue(&fc->waitq, &wait);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies the request data to the userspace
 * buffer.  If no reply is needed (FORGET) or the request has been
 * interrupted or there was an error during the copying then it's
 * finished by calling request_end().  Otherwise add it to the
 * processing list and set the state to FUSE_REQ_SENT.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
                              unsigned long nr_segs, loff_t *off)
{
        int err;
        struct fuse_conn *fc;
        struct fuse_req *req;
        struct fuse_in *in;
        struct fuse_copy_state cs;
        unsigned reqsize;

 restart:
        spin_lock(&fuse_lock);
        fc = file->private_data;
        err = -EPERM;
        if (!fc)
                goto err_unlock;
        request_wait(fc);
        err = -ENODEV;
        if (!fc->mounted)
                goto err_unlock;
        err = -ERESTARTSYS;
        if (list_empty(&fc->pending))
                goto err_unlock;

        req = list_entry(fc->pending.next, struct fuse_req, list);
        req->state = FUSE_REQ_READING;
        list_del_init(&req->list);

        in = &req->in;
        reqsize = in->h.len;
        /* If request is too large, reply with an error and restart the read */
        if (iov_length(iov, nr_segs) < reqsize) {
                req->out.h.error = -EIO;
                /* SETXATTR is special, since it may contain too large data */
                if (in->h.opcode == FUSE_SETXATTR)
                        req->out.h.error = -E2BIG;
                request_end(fc, req);
                goto restart;
        }
        spin_unlock(&fuse_lock);
        fuse_copy_init(&cs, 1, req, iov, nr_segs);
        err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
        if (!err)
                err = fuse_copy_args(&cs, in->numargs, in->argpages,
                                     (struct fuse_arg *) in->args, 0);
        fuse_copy_finish(&cs);
        spin_lock(&fuse_lock);
        req->locked = 0;
        if (!err && req->interrupted)
                err = -ENOENT;
        if (err) {
                if (!req->interrupted)
                        req->out.h.error = -EIO;
                request_end(fc, req);
                return err;
        }
        if (!req->isreply)
                request_end(fc, req);
        else {
                req->state = FUSE_REQ_SENT;
                list_add_tail(&req->list, &fc->processing);
                spin_unlock(&fuse_lock);
        }
        return reqsize;

 err_unlock:
        spin_unlock(&fuse_lock);
        return err;
}

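/* read(2) entry point: wrap the buffer in an iovec and use the vectored path */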
static ssize_t fuse_dev_read(struct file *file, char __user *buf,
                             size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = buf;
        return fuse_dev_readv(file, &iov, 1, off);
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
        struct list_head *entry;

        list_for_each(entry, &fc->processing) {
                struct fuse_req *req;
                req = list_entry(entry, struct fuse_req, list);
                if (req->in.h.unique == unique)
                        return req;
        }
        return NULL;
}

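/*
 * Copy the reply arguments from the userspace buffer into the request,
 * checking that the reply size matches what the request expects
 */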
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
                         unsigned nbytes)
{
        unsigned reqsize = sizeof(struct fuse_out_header);

        if (out->h.error)
                return nbytes != reqsize ? -EINVAL : 0;

        reqsize += len_args(out->numargs, out->args);

        if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
                return -EINVAL;
        else if (reqsize > nbytes) {
                struct fuse_arg *lastarg = &out->args[out->numargs-1];
                unsigned diffsize = reqsize - nbytes;
                if (diffsize > lastarg->size)
                        return -EINVAL;
                lastarg->size -= diffsize;
        }
        return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
                              out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
                               unsigned long nr_segs, loff_t *off)
{
        int err;
        unsigned nbytes = iov_length(iov, nr_segs);
        struct fuse_req *req;
        struct fuse_out_header oh;
        struct fuse_copy_state cs;
        struct fuse_conn *fc = fuse_get_conn(file);
        if (!fc)
                return -ENODEV;

        fuse_copy_init(&cs, 0, NULL, iov, nr_segs);
        if (nbytes < sizeof(struct fuse_out_header))
                return -EINVAL;

        err = fuse_copy_one(&cs, &oh, sizeof(oh));
        if (err)
                goto err_finish;
        err = -EINVAL;
        if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
            oh.len != nbytes)
                goto err_finish;

        spin_lock(&fuse_lock);
        req = request_find(fc, oh.unique);
        err = -EINVAL;
        if (!req)
                goto err_unlock;

        list_del_init(&req->list);
        if (req->interrupted) {
                spin_unlock(&fuse_lock);
                fuse_copy_finish(&cs);
                spin_lock(&fuse_lock);
                request_end(fc, req);
                return -ENOENT;
        }
        req->out.h = oh;
        req->locked = 1;
        cs.req = req;
        spin_unlock(&fuse_lock);

        err = copy_out_args(&cs, &req->out, nbytes);
        fuse_copy_finish(&cs);

        spin_lock(&fuse_lock);
        req->locked = 0;
        if (!err) {
                if (req->interrupted)
                        err = -ENOENT;
        } else if (!req->interrupted)
                req->out.h.error = -EIO;
        request_end(fc, req);

        return err ? err : nbytes;

 err_unlock:
        spin_unlock(&fuse_lock);
 err_finish:
        fuse_copy_finish(&cs);
        return err;
}

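/* write(2) entry point: wrap the buffer in an iovec and use the vectored path */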
static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
                              size_t nbytes, loff_t *off)
{
        struct iovec iov;
        iov.iov_len = nbytes;
        iov.iov_base = (char __user *) buf;
        return fuse_dev_writev(file, &iov, 1, off);
}

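/*
 * The device is always writable; it is readable when there is at least
 * one request on the pending list
 */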
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
        struct fuse_conn *fc = fuse_get_conn(file);
        unsigned mask = POLLOUT | POLLWRNORM;

        if (!fc)
                return -ENODEV;

        poll_wait(file, &fc->waitq, wait);

        spin_lock(&fuse_lock);
        if (!list_empty(&fc->pending))
                mask |= POLLIN | POLLRDNORM;
        spin_unlock(&fuse_lock);

        return mask;
}

/* Abort all requests on the given list (pending or processing) */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
        while (!list_empty(head)) {
                struct fuse_req *req;
                req = list_entry(head->next, struct fuse_req, list);
                list_del_init(&req->list);
                req->out.h.error = -ECONNABORTED;
                request_end(fc, req);
                spin_lock(&fuse_lock);
        }
}

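/*
 * Mark the connection as no longer connected and abort all outstanding
 * requests when the device file is closed
 */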
static int fuse_dev_release(struct inode *inode, struct file *file)
{
        struct fuse_conn *fc;

        spin_lock(&fuse_lock);
        fc = file->private_data;
        if (fc) {
                fc->connected = 0;
                end_requests(fc, &fc->pending);
                end_requests(fc, &fc->processing);
                fuse_release_conn(fc);
        }
        spin_unlock(&fuse_lock);
        return 0;
}

struct file_operations fuse_dev_operations = {
        .owner   = THIS_MODULE,
        .llseek  = no_llseek,
        .read    = fuse_dev_read,
        .readv   = fuse_dev_readv,
        .write   = fuse_dev_write,
        .writev  = fuse_dev_writev,
        .poll    = fuse_dev_poll,
        .release = fuse_dev_release,
};

static struct miscdevice fuse_miscdevice = {
        .minor = FUSE_MINOR,
        .name  = "fuse",
        .fops  = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
        int err = -ENOMEM;
        fuse_req_cachep = kmem_cache_create("fuse_request",
                                            sizeof(struct fuse_req),
                                            0, 0, NULL, NULL);
        if (!fuse_req_cachep)
                goto out;

        err = misc_register(&fuse_miscdevice);
        if (err)
                goto out_cache_clean;

        return 0;

 out_cache_clean:
        kmem_cache_destroy(fuse_req_cachep);
 out:
        return err;
}

void fuse_dev_cleanup(void)
{
        misc_deregister(&fuse_miscdevice);
        kmem_cache_destroy(fuse_req_cachep);
}