/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2006  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);

static kmem_cache_t *fuse_req_cachep;
static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}
static void fuse_request_init(struct fuse_req *req)
{
	memset(req, 0, sizeof(*req));
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
}
struct fuse_req *fuse_request_alloc(void)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
	if (req)
		fuse_request_init(req);
	return req;
}
void fuse_request_free(struct fuse_req *req)
{
	kmem_cache_free(fuse_req_cachep, req);
}
static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}
static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}
static void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}
/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}
static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = current->fsuid;
	req->in.h.gid = current->fsgid;
	req->in.h.pid = current->pid;
}
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
	struct fuse_req *req;
	sigset_t oldset;
	int intr;
	int err;

	atomic_inc(&fc->num_waiting);
	block_sigs(&oldset);
	intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
	restore_sigs(&oldset);
	err = -EINTR;
	if (intr)
		goto out;

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc();
	err = -ENOMEM;
	if (!req)
		goto out;

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}
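/*
 * Usage sketch (illustrative only, not part of the original source):
 * a caller owns exactly one reference on the returned request and
 * must drop it with fuse_put_request() after consuming the answer.
 * FUSE_GETATTR is just an example opcode here.
 *
 *	struct fuse_req *req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->in.h.opcode = FUSE_GETATTR;
 *	...
 *	request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */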
/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->blocked_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			get_file(file);
			req->stolen_file = file;
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}
/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up(&fc->blocked_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}
/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, !fc->blocked);
	req = fuse_request_alloc();
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	req->waiting = 1;
	return req;
}
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (req->waiting)
			atomic_dec(&fc->num_waiting);

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del(&req->list);
	list_del(&req->intr_entry);
	req->state = FUSE_REQ_FINISHED;
	if (req->background) {
		if (fc->num_background == FUSE_MAX_BACKGROUND) {
			fc->blocked = 0;
			wake_up_all(&fc->blocked_waitq);
		}
		fc->num_background--;
	}
	spin_unlock(&fc->lock);
	dput(req->dentry);
	mntput(req->vfsmount);
	if (req->file)
		fput(req->file);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	else
		fuse_put_request(fc, req);
}
static void wait_answer_interruptible(struct fuse_conn *fc,
				      struct fuse_req *req)
{
	if (signal_pending(current))
		return;

	spin_unlock(&fc->lock);
	wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
	spin_lock(&fc->lock);
}
static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
{
	list_add_tail(&req->intr_entry, &fc->interrupts);
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
/* Called with fc->lock held.  Releases, and then reacquires it. */
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		wait_answer_interruptible(fc, req);

		if (req->aborted)
			goto aborted;
		if (req->state == FUSE_REQ_FINISHED)
			return;

		req->interrupted = 1;
		if (req->state == FUSE_REQ_SENT)
			queue_interrupt(fc, req);
	}

	if (req->force) {
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	} else {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		wait_answer_interruptible(fc, req);
		restore_sigs(&oldset);
	}

	if (req->aborted)
		goto aborted;
	if (req->state == FUSE_REQ_FINISHED)
		return;

	req->out.h.error = -EINTR;
	req->aborted = 1;

 aborted:
	if (req->locked) {
		/* This is uninterruptible sleep, because data is
		   being copied to/from the buffers of req.  During
		   locked state, there mustn't be any filesystem
		   operation (e.g. page fault), since that could lead
		   to deadlock */
		spin_unlock(&fc->lock);
		wait_event(req->waitq, !req->locked);
		spin_lock(&fc->lock);
	}
	if (req->state == FUSE_REQ_PENDING) {
		list_del(&req->list);
		__fuse_put_request(req);
	} else if (req->state == FUSE_REQ_SENT) {
		spin_unlock(&fc->lock);
		wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
		spin_lock(&fc->lock);
	}
}
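/*
 * Summary of the request states referenced above (added here for
 * orientation; the state values themselves come from fuse_i.h):
 *
 *	FUSE_REQ_PENDING   on fc->pending, not yet read by the daemon
 *	FUSE_REQ_READING   on fc->io, being copied to userspace
 *	FUSE_REQ_SENT      on fc->processing, awaiting the reply
 *	FUSE_REQ_WRITING   on fc->io, reply being copied from userspace
 *	FUSE_REQ_FINISHED  answered, aborted or failed
 */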
static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}
static u64 fuse_get_unique(struct fuse_conn *fc)
{
	fc->reqctr++;
	/* zero is special */
	if (fc->reqctr == 0)
		fc->reqctr = 1;

	return fc->reqctr;
}
static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
{
	req->in.h.unique = fuse_get_unique(fc);
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fc->pending);
	req->state = FUSE_REQ_PENDING;
	if (!req->waiting) {
		req->waiting = 1;
		atomic_inc(&fc->num_waiting);
	}
	wake_up(&fc->waitq);
	kill_fasync(&fc->fasync, SIGIO, POLL_IN);
}
void request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	spin_lock(&fc->lock);
	if (!fc->connected)
		req->out.h.error = -ENOTCONN;
	else if (fc->conn_error)
		req->out.h.error = -ECONNREFUSED;
	else {
		queue_request(fc, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);

		request_wait_answer(fc, req);
	}
	spin_unlock(&fc->lock);
}
static void request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		req->background = 1;
		fc->num_background++;
		if (fc->num_background == FUSE_MAX_BACKGROUND)
			fc->blocked = 1;

		queue_request(fc, req);
		spin_unlock(&fc->lock);
	} else {
		req->out.h.error = -ENOTCONN;
		request_end(fc, req);
	}
}
void request_send_noreply(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 0;
	request_send_nowait(fc, req);
}
void request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	req->isreply = 1;
	request_send_nowait(fc, req);
}
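/*
 * Illustrative use of the asynchronous variant (a sketch, not from
 * the original source; my_end is a hypothetical callback).  The 'end'
 * callback runs from request_end() and is expected to drop the last
 * reference itself:
 *
 *	static void my_end(struct fuse_conn *fc, struct fuse_req *req)
 *	{
 *		... consume req->out here ...
 *		fuse_put_request(fc, req);
 *	}
 *
 *	req->end = my_end;
 *	request_send_background(fc, req);
 */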
/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&fc->lock);
		if (req->aborted)
			err = -ENOENT;
		else
			req->locked = 1;
		spin_unlock(&fc->lock);
	}
	return err;
}
/*
 * Unlock request.  If it was aborted during being locked, the
 * requester thread is currently waiting for it to be unlocked, so
 * wake it up.
 */
static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (req) {
		spin_lock(&fc->lock);
		req->locked = 0;
		if (req->aborted)
			wake_up(&req->waitq);
		spin_unlock(&fc->lock);
	}
}
struct fuse_copy_state {
	struct fuse_conn *fc;
	int write;
	struct fuse_req *req;
	const struct iovec *iov;
	unsigned long nr_segs;
	unsigned long seglen;
	unsigned long addr;
	struct page *pg;
	void *mapaddr;
	void *buf;
	unsigned len;
};
static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
			   int write, struct fuse_req *req,
			   const struct iovec *iov, unsigned long nr_segs)
{
	memset(cs, 0, sizeof(*cs));
	cs->fc = fc;
	cs->write = write;
	cs->req = req;
	cs->iov = iov;
	cs->nr_segs = nr_segs;
}
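/*
 * The copy helpers below treat the iovec as a cursor (sketch of the
 * calling sequence used by fuse_dev_readv() later in this file):
 *
 *	struct fuse_copy_state cs;
 *
 *	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);	// 1: write to user
 *	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
 *	if (!err)
 *		err = fuse_copy_args(&cs, in->numargs, in->argpages,
 *				     (struct fuse_arg *) in->args, 0);
 *	fuse_copy_finish(&cs);		// unmap and release the last page
 */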
/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->mapaddr) {
		kunmap_atomic(cs->mapaddr, KM_USER0);
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
		cs->mapaddr = NULL;
	}
}
/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	unsigned long offset;
	int err;

	unlock_request(cs->fc, cs->req);
	fuse_copy_finish(cs);
	if (!cs->seglen) {
		BUG_ON(!cs->nr_segs);
		cs->seglen = cs->iov[0].iov_len;
		cs->addr = (unsigned long) cs->iov[0].iov_base;
		cs->iov++;
		cs->nr_segs--;
	}
	down_read(&current->mm->mmap_sem);
	err = get_user_pages(current, current->mm, cs->addr, 1, cs->write, 0,
			     &cs->pg, NULL);
	up_read(&current->mm->mmap_sem);
	if (err < 0)
		return err;
	BUG_ON(err != 1);
	offset = cs->addr % PAGE_SIZE;
	cs->mapaddr = kmap_atomic(cs->pg, KM_USER0);
	cs->buf = cs->mapaddr + offset;
	cs->len = min(PAGE_SIZE - offset, cs->seglen);
	cs->seglen -= cs->len;
	cs->addr += cs->len;

	return lock_request(cs->fc, cs->req);
}
/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		if (cs->write)
			memcpy(cs->buf, *val, ncpy);
		else
			memcpy(*val, cs->buf, ncpy);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->buf += ncpy;
	return ncpy;
}
/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page *page,
			  unsigned offset, unsigned count, int zeroing)
{
	if (page && zeroing && count < PAGE_SIZE) {
		void *mapaddr = kmap_atomic(page, KM_USER1);
		memset(mapaddr, 0, PAGE_SIZE);
		kunmap_atomic(mapaddr, KM_USER1);
	}
	while (count) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		if (page) {
			void *mapaddr = kmap_atomic(page, KM_USER1);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr, KM_USER1);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}
/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;
	unsigned offset = req->page_offset;
	unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		struct page *page = req->pages[i];
		int err = fuse_copy_page(cs, page, offset, count, zeroing);
		if (err)
			return err;

		nbytes -= count;
		count = min(nbytes, (unsigned) PAGE_SIZE);
		offset = 0;
	}
	return 0;
}
/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		int err;
		if (!cs->len && (err = fuse_copy_fill(cs)))
			return err;
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}
/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}
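/*
 * Resulting wire layout (illustrative summary; the struct definitions
 * live in <linux/fuse.h>): a request read from the device is the input
 * header followed by the packed argument values, with in.h.len covering
 * the whole message:
 *
 *	| struct fuse_in_header | arg[0] | ... | arg[numargs - 1] |
 *
 * A LOOKUP request, for example, carries the name being looked up as a
 * single string argument after the header.
 */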
static int request_pending(struct fuse_conn *fc)
{
	return !list_empty(&fc->pending) || !list_empty(&fc->interrupts);
}
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fc->waitq, &wait);
	while (fc->connected && !request_pending(fc)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fc->waitq, &wait);
}
/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_req *req,
			       const struct iovec *iov, unsigned long nr_segs)
{
	struct fuse_copy_state cs;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fc);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fc->lock);
	if (iov_length(iov, nr_segs) < reqsize)
		return -EINVAL;

	fuse_copy_init(&cs, fc, 1, NULL, iov, nr_segs);
	err = fuse_copy_one(&cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(&cs, &arg, sizeof(arg));
	fuse_copy_finish(&cs);

	return err ? err : reqsize;
}
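/*
 * The message assembled above (sketch): a regular input header whose
 * unique field is the freshly allocated intr_unique, followed by one
 * argument identifying the request being interrupted:
 *
 *	ih.opcode  = FUSE_INTERRUPT;
 *	ih.unique  = req->intr_unique;	// id of this interrupt message
 *	arg.unique = req->in.h.unique;	// id of the interrupted request
 */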
/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_readv(struct file *file, const struct iovec *iov,
			      unsigned long nr_segs, loff_t *off)
{
	int err;
	struct fuse_req *req;
	struct fuse_in *in;
	struct fuse_copy_state cs;
	unsigned reqsize;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

 restart:
	spin_lock(&fc->lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fc->connected &&
	    !request_pending(fc))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fc->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fc))
		goto err_unlock;

	if (!list_empty(&fc->interrupts)) {
		req = list_entry(fc->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, req, iov, nr_segs);
	}

	req = list_entry(fc->pending.next, struct fuse_req, list);
	req->state = FUSE_REQ_READING;
	list_move(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (iov_length(iov, nr_segs) < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	spin_unlock(&fc->lock);
	fuse_copy_init(&cs, fc, 1, req, iov, nr_segs);
	err = fuse_copy_one(&cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(&cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(&cs);
	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err && req->aborted)
		err = -ENOENT;
	if (err) {
		if (!req->aborted)
			req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!req->isreply)
		request_end(fc, req);
	else {
		req->state = FUSE_REQ_SENT;
		list_move_tail(&req->list, &fc->processing);
		if (req->interrupted)
			queue_interrupt(fc, req);
		spin_unlock(&fc->lock);
	}
	return reqsize;

 err_unlock:
	spin_unlock(&fc->lock);
	return err;
}
static ssize_t fuse_dev_read(struct file *file, char __user *buf,
			     size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = buf;
	return fuse_dev_readv(file, &iov, 1, off);
}
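/*
 * Userspace side, for orientation (simplified sketch, not part of this
 * file; BUFSIZE is hypothetical and error handling is omitted):
 *
 *	char buf[BUFSIZE];
 *	for (;;) {
 *		ssize_t n = read(devfd, buf, sizeof(buf));
 *		struct fuse_in_header *in = (struct fuse_in_header *) buf;
 *		// dispatch on in->opcode, then write back a reply
 *		// carrying in->unique (see fuse_dev_writev() below)
 *	}
 */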
/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct list_head *entry;

	list_for_each(entry, &fc->processing) {
		struct fuse_req *req;
		req = list_entry(entry, struct fuse_req, list);
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}
static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}
/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_writev(struct file *file, const struct iovec *iov,
			       unsigned long nr_segs, loff_t *off)
{
	int err;
	unsigned nbytes = iov_length(iov, nr_segs);
	struct fuse_req *req;
	struct fuse_out_header oh;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	fuse_copy_init(&cs, fc, 0, NULL, iov, nr_segs);
	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(&cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;
	err = -EINVAL;
	if (!oh.unique || oh.error <= -1000 || oh.error > 0 ||
	    oh.len != nbytes)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	if (req->aborted) {
		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		spin_lock(&fc->lock);
		request_end(fc, req);
		return -ENOENT;
	}
	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(fc, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(&cs);
		return nbytes;
	}

	req->state = FUSE_REQ_WRITING;
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	req->locked = 1;
	cs.req = req;
	spin_unlock(&fc->lock);

	err = copy_out_args(&cs, &req->out, nbytes);
	fuse_copy_finish(&cs);

	spin_lock(&fc->lock);
	req->locked = 0;
	if (!err) {
		if (req->aborted)
			err = -ENOENT;
	} else if (!req->aborted)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(&cs);
	return err;
}
static ssize_t fuse_dev_write(struct file *file, const char __user *buf,
			      size_t nbytes, loff_t *off)
{
	struct iovec iov;
	iov.iov_len = nbytes;
	iov.iov_base = (char __user *) buf;
	return fuse_dev_writev(file, &iov, 1, off);
}
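/*
 * Userspace side of a reply (simplified sketch, not part of this file;
 * payload/payload_len are assumed).  The out header must carry the
 * request's unique id and oh.len must equal the total bytes written:
 *
 *	struct fuse_out_header oh = {
 *		.unique = in->unique,
 *		.error  = 0,
 *		.len    = sizeof(oh) + payload_len,
 *	};
 *	struct iovec iov[2] = {
 *		{ &oh, sizeof(oh) },
 *		{ payload, payload_len },
 *	};
 *	writev(devfd, iov, 2);
 */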
static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	poll_wait(file, &fc->waitq, wait);

	spin_lock(&fc->lock);
	if (!fc->connected)
		mask = POLLERR;
	else if (request_pending(fc))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fc->lock);

	return mask;
}
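/*
 * With fuse_dev_poll() a daemon can multiplex the device with other
 * descriptors (illustrative userspace sketch; handle_one_request is
 * hypothetical):
 *
 *	struct pollfd pfd = { .fd = devfd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		handle_one_request(devfd);
 */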
/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}
/*
 * Abort requests under I/O
 *
 * The requests are set to aborted and finished, and the request
 * waiter is woken up.  This will make request_wait_answer() wait
 * until the request is unlocked and then return.
 *
 * If the request is asynchronous, then the end function needs to be
 * called after waiting for the request to be unlocked (if it was
 * locked).
 */
static void end_io_requests(struct fuse_conn *fc)
{
	while (!list_empty(&fc->io)) {
		struct fuse_req *req =
			list_entry(fc->io.next, struct fuse_req, list);
		void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;

		req->aborted = 1;
		req->out.h.error = -ECONNABORTED;
		req->state = FUSE_REQ_FINISHED;
		list_del_init(&req->list);
		wake_up(&req->waitq);
		if (end) {
			req->end = NULL;
			/* The end function will consume this reference */
			__fuse_get_request(req);
			spin_unlock(&fc->lock);
			wait_event(req->waitq, !req->locked);
			end(fc, req);
			spin_lock(&fc->lock);
		}
	}
}
/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or
 * just a hung filesystem.
 *
 * The same effect is usually achievable through killing the
 * filesystem daemon and all users of the filesystem.  The exception
 * is the combination of an asynchronous request and the tricky
 * deadlock (see Documentation/filesystems/fuse.txt).
 *
 * During the aborting, progression of requests from the pending and
 * processing lists onto the io list, and progression of new requests
 * onto the pending list is prevented by fc->connected being false.
 *
 * Progression of requests under I/O to the processing list is
 * prevented by the req->aborted flag being true for these requests.
 * For this reason requests on the io list must be aborted first.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	spin_lock(&fc->lock);
	if (fc->connected) {
		fc->connected = 0;
		fc->blocked = 0;
		end_io_requests(fc);
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		wake_up_all(&fc->waitq);
		wake_up_all(&fc->blocked_waitq);
		kill_fasync(&fc->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fc->lock);
}
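/*
 * fuse_abort_conn() is called from outside this file; it is assumed
 * here to be reachable through the connection's sysfs/fusectl "abort"
 * attribute, so a stuck filesystem can be broken by hand, e.g.:
 *
 *	echo 1 > /sys/fs/fuse/connections/NNN/abort
 */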
static int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		spin_lock(&fc->lock);
		fc->connected = 0;
		end_requests(fc, &fc->pending);
		end_requests(fc, &fc->processing);
		spin_unlock(&fc->lock);
		fasync_helper(-1, file, 0, &fc->fasync);
		kobject_put(&fc->kobj);
	}

	return 0;
}
static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->fasync);
}
const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= fuse_dev_read,
	.readv		= fuse_dev_readv,
	.write		= fuse_dev_write,
	.writev		= fuse_dev_writev,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};
static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops = &fuse_dev_operations,
};
int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}
void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}