/*
  FUSE: Filesystem in Userspace
  Copyright (C) 2001-2008  Miklos Szeredi <miklos@szeredi.hu>

  This program can be distributed under the terms of the GNU GPL.
  See the file COPYING.
*/

#include "fuse_i.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/pipe_fs_i.h>
#include <linux/swap.h>
#include <linux/splice.h>

MODULE_ALIAS_MISCDEV(FUSE_MINOR);
MODULE_ALIAS("devname:fuse");
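
/*
 * Slab cache backing all fuse_req allocations; created in
 * fuse_dev_init() and destroyed in fuse_dev_cleanup().
 */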
static struct kmem_cache *fuse_req_cachep;

static struct fuse_conn *fuse_get_conn(struct file *file)
{
	/*
	 * Lockless access is OK, because file->private_data is set
	 * once during mount and is valid until the file is released.
	 */
	return file->private_data;
}

static void fuse_request_init(struct fuse_req *req, struct page **pages,
			      struct fuse_page_desc *page_descs,
			      unsigned npages)
{
	memset(req, 0, sizeof(*req));
	memset(pages, 0, sizeof(*pages) * npages);
	memset(page_descs, 0, sizeof(*page_descs) * npages);
	INIT_LIST_HEAD(&req->list);
	INIT_LIST_HEAD(&req->intr_entry);
	init_waitqueue_head(&req->waitq);
	atomic_set(&req->count, 1);
	req->pages = pages;
	req->page_descs = page_descs;
	req->max_pages = npages;
	__set_bit(FR_PENDING, &req->flags);
}

static struct fuse_req *__fuse_request_alloc(unsigned npages, gfp_t flags)
{
	struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags);
	if (req) {
		struct page **pages;
		struct fuse_page_desc *page_descs;

		if (npages <= FUSE_REQ_INLINE_PAGES) {
			pages = req->inline_pages;
			page_descs = req->inline_page_descs;
		} else {
			pages = kmalloc(sizeof(struct page *) * npages, flags);
			page_descs = kmalloc(sizeof(struct fuse_page_desc) *
					     npages, flags);
		}

		if (!pages || !page_descs) {
			kfree(pages);
			kfree(page_descs);
			kmem_cache_free(fuse_req_cachep, req);
			return NULL;
		}

		fuse_request_init(req, pages, page_descs, npages);
	}
	return req;
}

struct fuse_req *fuse_request_alloc(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(fuse_request_alloc);

struct fuse_req *fuse_request_alloc_nofs(unsigned npages)
{
	return __fuse_request_alloc(npages, GFP_NOFS);
}

void fuse_request_free(struct fuse_req *req)
{
	if (req->pages != req->inline_pages) {
		kfree(req->pages);
		kfree(req->page_descs);
	}
	kmem_cache_free(fuse_req_cachep, req);
}

static void block_sigs(sigset_t *oldset)
{
	sigset_t mask;

	siginitsetinv(&mask, sigmask(SIGKILL));
	sigprocmask(SIG_BLOCK, &mask, oldset);
}

static void restore_sigs(sigset_t *oldset)
{
	sigprocmask(SIG_SETMASK, oldset, NULL);
}

void __fuse_get_request(struct fuse_req *req)
{
	atomic_inc(&req->count);
}

/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
	BUG_ON(atomic_read(&req->count) < 2);
	atomic_dec(&req->count);
}
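
/*
 * How the refcount pairs up across this file (count after each call):
 *
 *	req = fuse_request_alloc(npages);	count == 1
 *	__fuse_get_request(req);		count == 2
 *	__fuse_put_request(req);		count == 1, never frees
 *	fuse_put_request(fc, req);		count == 0, frees the request
 *						(or parks the reserved one)
 */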

static void fuse_req_init_context(struct fuse_req *req)
{
	req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
	req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
	req->in.h.pid = current->pid;
}

void fuse_set_initialized(struct fuse_conn *fc)
{
	/* Make sure stores before this are seen on another CPU */
	smp_wmb();
	fc->initialized = 1;
}

static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
	return !fc->initialized || (for_background && fc->blocked);
}

static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
				       bool for_background)
{
	struct fuse_req *req;
	int err;
	atomic_inc(&fc->num_waiting);

	if (fuse_block_alloc(fc, for_background)) {
		sigset_t oldset;
		int intr;

		block_sigs(&oldset);
		intr = wait_event_interruptible_exclusive(fc->blocked_waitq,
				!fuse_block_alloc(fc, for_background));
		restore_sigs(&oldset);
		err = -EINTR;
		if (intr)
			goto out;
	}
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();

	err = -ENOTCONN;
	if (!fc->connected)
		goto out;

	req = fuse_request_alloc(npages);
	err = -ENOMEM;
	if (!req) {
		if (for_background)
			wake_up(&fc->blocked_waitq);
		goto out;
	}

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	if (for_background)
		__set_bit(FR_BACKGROUND, &req->flags);

	return req;

 out:
	atomic_dec(&fc->num_waiting);
	return ERR_PTR(err);
}

struct fuse_req *fuse_get_req(struct fuse_conn *fc, unsigned npages)
{
	return __fuse_get_req(fc, npages, false);
}
EXPORT_SYMBOL_GPL(fuse_get_req);

struct fuse_req *fuse_get_req_for_background(struct fuse_conn *fc,
					     unsigned npages)
{
	return __fuse_get_req(fc, npages, true);
}
EXPORT_SYMBOL_GPL(fuse_get_req_for_background);
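
/*
 * Typical foreground caller pattern (this is what fuse_simple_request()
 * below does):
 *
 *	req = fuse_get_req(fc, npages);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	...fill in req->in...
 *	fuse_request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */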

/*
 * Return request in fuse_file->reserved_req.  However that may
 * currently be in use.  If that is the case, wait for it to become
 * available.
 */
static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
					 struct file *file)
{
	struct fuse_req *req = NULL;
	struct fuse_file *ff = file->private_data;

	do {
		wait_event(fc->reserved_req_waitq, ff->reserved_req);
		spin_lock(&fc->lock);
		if (ff->reserved_req) {
			req = ff->reserved_req;
			ff->reserved_req = NULL;
			req->stolen_file = get_file(file);
		}
		spin_unlock(&fc->lock);
	} while (!req);

	return req;
}

/*
 * Put stolen request back into fuse_file->reserved_req
 */
static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
{
	struct file *file = req->stolen_file;
	struct fuse_file *ff = file->private_data;

	spin_lock(&fc->lock);
	fuse_request_init(req, req->pages, req->page_descs, req->max_pages);
	BUG_ON(ff->reserved_req);
	ff->reserved_req = req;
	wake_up_all(&fc->reserved_req_waitq);
	spin_unlock(&fc->lock);
	fput(file);
}

/*
 * Gets a request for a file operation, always succeeds
 *
 * This is used for sending the FLUSH request, which must get to
 * userspace, due to POSIX locks which may need to be unlocked.
 *
 * If allocation fails due to OOM, use the reserved request in
 * fuse_file.
 *
 * This is very unlikely to deadlock accidentally, since the
 * filesystem should not have its own file open.  If deadlock is
 * intentional, it can still be broken by "aborting" the filesystem.
 */
struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
					     struct file *file)
{
	struct fuse_req *req;

	atomic_inc(&fc->num_waiting);
	wait_event(fc->blocked_waitq, fc->initialized);
	/* Matches smp_wmb() in fuse_set_initialized() */
	smp_rmb();
	req = fuse_request_alloc(0);
	if (!req)
		req = get_reserved_req(fc, file);

	fuse_req_init_context(req);
	__set_bit(FR_WAITING, &req->flags);
	__clear_bit(FR_BACKGROUND, &req->flags);
	return req;
}

void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
	if (atomic_dec_and_test(&req->count)) {
		if (test_bit(FR_BACKGROUND, &req->flags)) {
			/*
			 * We get here in the unlikely case that a background
			 * request was allocated but not sent
			 */
			spin_lock(&fc->lock);
			if (!fc->blocked)
				wake_up(&fc->blocked_waitq);
			spin_unlock(&fc->lock);
		}

		if (test_bit(FR_WAITING, &req->flags)) {
			__clear_bit(FR_WAITING, &req->flags);
			atomic_dec(&fc->num_waiting);
		}

		if (req->stolen_file)
			put_reserved_req(fc, req);
		else
			fuse_request_free(req);
	}
}
EXPORT_SYMBOL_GPL(fuse_put_request);

static unsigned len_args(unsigned numargs, struct fuse_arg *args)
{
	unsigned nbytes = 0;
	unsigned i;

	for (i = 0; i < numargs; i++)
		nbytes += args[i].size;

	return nbytes;
}

static u64 fuse_get_unique(struct fuse_iqueue *fiq)
{
	return ++fiq->reqctr;
}
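
/*
 * fuse_get_unique() is a bare increment; its callers serialize on
 * fiq->waitq.lock, so reqctr needs no atomics of its own.
 */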

static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	req->in.h.len = sizeof(struct fuse_in_header) +
		len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
	list_add_tail(&req->list, &fiq->pending);
	wake_up_locked(&fiq->waitq);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
		       u64 nodeid, u64 nlookup)
{
	struct fuse_iqueue *fiq = &fc->iq;

	forget->forget_one.nodeid = nodeid;
	forget->forget_one.nlookup = nlookup;

	spin_lock(&fc->lock);
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		fiq->forget_list_tail->next = forget;
		fiq->forget_list_tail = forget;
		wake_up_locked(&fiq->waitq);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	} else {
		kfree(forget);
	}
	spin_unlock(&fiq->waitq.lock);
	spin_unlock(&fc->lock);
}

static void flush_bg_queue(struct fuse_conn *fc)
{
	while (fc->active_background < fc->max_background &&
	       !list_empty(&fc->bg_queue)) {
		struct fuse_req *req;
		struct fuse_iqueue *fiq = &fc->iq;

		req = list_entry(fc->bg_queue.next, struct fuse_req, list);
		list_del(&req->list);
		fc->active_background++;
		spin_lock(&fiq->waitq.lock);
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		spin_unlock(&fiq->waitq.lock);
	}
}
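
/*
 * Background requests are throttled: at most fc->max_background may be
 * active at once, the rest wait on fc->bg_queue.  request_end() drops
 * fc->active_background and calls flush_bg_queue() again, so the queue
 * drains as replies come in.
 */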

/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was aborted (and not yet sent) or some error
 * occurred during communication with userspace, or the device file
 * was closed.  The requester thread is woken up (if still waiting),
 * the 'end' callback is called if given, else the reference to the
 * request is released
 *
 * Called with fc->lock, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
__releases(fc->lock)
{
	struct fuse_iqueue *fiq = &fc->iq;
	void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
	req->end = NULL;
	list_del_init(&req->list);
	spin_lock(&fiq->waitq.lock);
	list_del_init(&req->intr_entry);
	spin_unlock(&fiq->waitq.lock);
	WARN_ON(test_bit(FR_PENDING, &req->flags));
	WARN_ON(test_bit(FR_SENT, &req->flags));
	smp_wmb();
	set_bit(FR_FINISHED, &req->flags);
	if (test_bit(FR_BACKGROUND, &req->flags)) {
		clear_bit(FR_BACKGROUND, &req->flags);
		if (fc->num_background == fc->max_background)
			fc->blocked = 0;

		/* Wake up next waiter, if any */
		if (!fc->blocked && waitqueue_active(&fc->blocked_waitq))
			wake_up(&fc->blocked_waitq);

		if (fc->num_background == fc->congestion_threshold &&
		    fc->connected && fc->bdi_initialized) {
			clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
			clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
		}
		fc->num_background--;
		fc->active_background--;
		flush_bg_queue(fc);
	}
	spin_unlock(&fc->lock);
	wake_up(&req->waitq);
	if (end)
		end(fc, req);
	fuse_put_request(fc, req);
}

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
	}
	spin_unlock(&fiq->waitq.lock);
	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
}

static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;
	int err;

	if (!fc->no_interrupt) {
		/* Any signal may interrupt this */
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		if (!err)
			return;

		spin_lock(&fc->lock);
		set_bit(FR_INTERRUPTED, &req->flags);
		/* matches barrier in fuse_dev_do_read() */
		smp_mb__after_atomic();
		if (test_bit(FR_SENT, &req->flags))
			queue_interrupt(fiq, req);
		spin_unlock(&fc->lock);
	}

	if (!test_bit(FR_FORCE, &req->flags)) {
		sigset_t oldset;

		/* Only fatal signals may interrupt this */
		block_sigs(&oldset);
		err = wait_event_interruptible(req->waitq,
					test_bit(FR_FINISHED, &req->flags));
		restore_sigs(&oldset);

		if (!err)
			return;

		spin_lock(&fc->lock);
		spin_lock(&fiq->waitq.lock);
		/* Request is not yet in userspace, bail out */
		if (test_bit(FR_PENDING, &req->flags)) {
			list_del(&req->list);
			spin_unlock(&fiq->waitq.lock);
			spin_unlock(&fc->lock);
			__fuse_put_request(req);
			req->out.h.error = -EINTR;
			return;
		}
		spin_unlock(&fiq->waitq.lock);
		spin_unlock(&fc->lock);
	}

	/*
	 * Either request is already in userspace, or it was forced.
	 * Wait it out.
	 */
	wait_event(req->waitq, test_bit(FR_FINISHED, &req->flags));
}

static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	struct fuse_iqueue *fiq = &fc->iq;

	BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
	spin_lock(&fc->lock);
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected) {
		spin_unlock(&fiq->waitq.lock);
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
	} else {
		req->in.h.unique = fuse_get_unique(fiq);
		queue_request(fiq, req);
		/* acquire extra reference, since request is still needed
		   after request_end() */
		__fuse_get_request(req);
		spin_unlock(&fiq->waitq.lock);
		spin_unlock(&fc->lock);

		request_wait_answer(fc, req);
		/* Pairs with smp_wmb() in request_end() */
		smp_rmb();
	}
}

void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
{
	__set_bit(FR_ISREPLY, &req->flags);
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__fuse_request_send(fc, req);
}
EXPORT_SYMBOL_GPL(fuse_request_send);

static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
{
	if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
		args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;

	if (fc->minor < 9) {
		switch (args->in.h.opcode) {
		case FUSE_LOOKUP:
		case FUSE_CREATE:
		case FUSE_MKNOD:
		case FUSE_MKDIR:
		case FUSE_SYMLINK:
		case FUSE_LINK:
			args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
			break;
		case FUSE_GETATTR:
		case FUSE_SETATTR:
			args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
			break;
		}
	}
	if (fc->minor < 12) {
		switch (args->in.h.opcode) {
		case FUSE_CREATE:
			args->in.args[0].size = sizeof(struct fuse_open_in);
			break;
		case FUSE_MKNOD:
			args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
			break;
		}
	}
}

ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
	struct fuse_req *req;
	ssize_t ret;

	req = fuse_get_req(fc, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* Needs to be done after fuse_get_req() so that fc->minor is valid */
	fuse_adjust_compat(fc, args);

	req->in.h.opcode = args->in.h.opcode;
	req->in.h.nodeid = args->in.h.nodeid;
	req->in.numargs = args->in.numargs;
	memcpy(req->in.args, args->in.args,
	       args->in.numargs * sizeof(struct fuse_in_arg));
	req->out.argvar = args->out.argvar;
	req->out.numargs = args->out.numargs;
	memcpy(req->out.args, args->out.args,
	       args->out.numargs * sizeof(struct fuse_arg));
	fuse_request_send(fc, req);
	ret = req->out.h.error;
	if (!ret && args->out.argvar) {
		BUG_ON(args->out.numargs != 1);
		ret = req->out.args[0].size;
	}
	fuse_put_request(fc, req);

	return ret;
}
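
/*
 * Sketch of a fuse_simple_request() call site (argument layout is
 * illustrative only; inarg/outarg stand for an opcode-specific pair):
 *
 *	FUSE_ARGS(args);
 *
 *	args.in.h.opcode = FUSE_GETATTR;
 *	args.in.h.nodeid = nodeid;
 *	args.in.numargs = 1;
 *	args.in.args[0].size = sizeof(inarg);
 *	args.in.args[0].value = &inarg;
 *	args.out.numargs = 1;
 *	args.out.args[0].size = sizeof(outarg);
 *	args.out.args[0].value = &outarg;
 *	err = fuse_simple_request(fc, &args);
 */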

/*
 * Called under fc->lock
 *
 * fc->connected must have been checked previously
 */
void fuse_request_send_background_locked(struct fuse_conn *fc,
					 struct fuse_req *req)
{
	BUG_ON(!test_bit(FR_BACKGROUND, &req->flags));
	if (!test_bit(FR_WAITING, &req->flags)) {
		__set_bit(FR_WAITING, &req->flags);
		atomic_inc(&fc->num_waiting);
	}
	__set_bit(FR_ISREPLY, &req->flags);
	fc->num_background++;
	if (fc->num_background == fc->max_background)
		fc->blocked = 1;
	if (fc->num_background == fc->congestion_threshold &&
	    fc->bdi_initialized) {
		set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
		set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
	}
	list_add_tail(&req->list, &fc->bg_queue);
	flush_bg_queue(fc);
}

void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
{
	BUG_ON(!req->end);
	spin_lock(&fc->lock);
	if (fc->connected) {
		fuse_request_send_background_locked(fc, req);
		spin_unlock(&fc->lock);
	} else {
		spin_unlock(&fc->lock);
		req->out.h.error = -ENOTCONN;
		req->end(fc, req);
		fuse_put_request(fc, req);
	}
}
EXPORT_SYMBOL_GPL(fuse_request_send_background);

static int fuse_request_send_notify_reply(struct fuse_conn *fc,
					  struct fuse_req *req, u64 unique)
{
	int err = -ENODEV;
	struct fuse_iqueue *fiq = &fc->iq;

	__clear_bit(FR_ISREPLY, &req->flags);
	req->in.h.unique = unique;
	spin_lock(&fc->lock);
	spin_lock(&fiq->waitq.lock);
	if (fiq->connected) {
		queue_request(fiq, req);
		err = 0;
	}
	spin_unlock(&fiq->waitq.lock);
	spin_unlock(&fc->lock);

	return err;
}

void fuse_force_forget(struct file *file, u64 nodeid)
{
	struct inode *inode = file_inode(file);
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_forget_in inarg;

	memset(&inarg, 0, sizeof(inarg));
	inarg.nlookup = 1;
	req = fuse_get_req_nofail_nopages(fc, file);
	req->in.h.opcode = FUSE_FORGET;
	req->in.h.nodeid = nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	__clear_bit(FR_ISREPLY, &req->flags);

	__fuse_request_send(fc, req);
	/* ignore errors */
	fuse_put_request(fc, req);
}

/*
 * Lock the request.  Up to the next unlock_request() there mustn't be
 * anything that could cause a page-fault.  If the request was already
 * aborted bail out.
 */
static int lock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			set_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}

/*
 * Unlock request.  If it was aborted while locked, caller is responsible
 * for unlocking and ending the request.
 */
static int unlock_request(struct fuse_req *req)
{
	int err = 0;
	if (req) {
		spin_lock(&req->waitq.lock);
		if (test_bit(FR_ABORTED, &req->flags))
			err = -ENOENT;
		else
			clear_bit(FR_LOCKED, &req->flags);
		spin_unlock(&req->waitq.lock);
	}
	return err;
}
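
/*
 * State for copying between the kernel request and userspace: the data
 * source/sink is either an iov_iter (plain read/write) or a run of pipe
 * buffers (splice).  At any point, cs->len bytes starting at cs->offset
 * within page cs->pg are ready for copying.
 */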
struct fuse_copy_state {
	int write;
	struct fuse_req *req;
	struct iov_iter *iter;
	struct pipe_buffer *pipebufs;
	struct pipe_buffer *currbuf;
	struct pipe_inode_info *pipe;
	unsigned long nr_segs;
	struct page *pg;
	unsigned len;
	unsigned offset;
	unsigned move_pages:1;
};

static void fuse_copy_init(struct fuse_copy_state *cs, int write,
			   struct iov_iter *iter)
{
	memset(cs, 0, sizeof(*cs));
	cs->write = write;
	cs->iter = iter;
}

/* Unmap and put previous page of userspace buffer */
static void fuse_copy_finish(struct fuse_copy_state *cs)
{
	if (cs->currbuf) {
		struct pipe_buffer *buf = cs->currbuf;

		if (cs->write)
			buf->len = PAGE_SIZE - cs->len;
		cs->currbuf = NULL;
	} else if (cs->pg) {
		if (cs->write) {
			flush_dcache_page(cs->pg);
			set_page_dirty_lock(cs->pg);
		}
		put_page(cs->pg);
	}
	cs->pg = NULL;
}

/*
 * Get another pagefull of userspace buffer, and map it to kernel
 * address space, and lock request
 */
static int fuse_copy_fill(struct fuse_copy_state *cs)
{
	struct page *page;
	int err;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);
	if (cs->pipebufs) {
		struct pipe_buffer *buf = cs->pipebufs;

		if (!cs->write) {
			err = buf->ops->confirm(cs->pipe, buf);
			if (err)
				return err;

			BUG_ON(!cs->nr_segs);
			cs->currbuf = buf;
			cs->pg = buf->page;
			cs->offset = buf->offset;
			cs->len = buf->len;
			cs->pipebufs++;
			cs->nr_segs--;
		} else {
			if (cs->nr_segs == cs->pipe->buffers)
				return -EIO;

			page = alloc_page(GFP_HIGHUSER);
			if (!page)
				return -ENOMEM;

			buf->page = page;
			buf->offset = 0;
			buf->len = 0;

			cs->currbuf = buf;
			cs->pg = page;
			cs->offset = 0;
			cs->len = PAGE_SIZE;
			cs->pipebufs++;
			cs->nr_segs++;
		}
	} else {
		size_t off;
		err = iov_iter_get_pages(cs->iter, &page, PAGE_SIZE, 1, &off);
		if (err < 0)
			return err;
		BUG_ON(!err);
		cs->len = err;
		cs->offset = off;
		cs->pg = page;
		iov_iter_advance(cs->iter, err);
	}

	return lock_request(cs->req);
}

/* Do as much copy to/from userspace buffer as we can */
static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
{
	unsigned ncpy = min(*size, cs->len);
	if (val) {
		void *pgaddr = kmap_atomic(cs->pg);
		void *buf = pgaddr + cs->offset;

		if (cs->write)
			memcpy(buf, *val, ncpy);
		else
			memcpy(*val, buf, ncpy);

		kunmap_atomic(pgaddr);
		*val += ncpy;
	}
	*size -= ncpy;
	cs->len -= ncpy;
	cs->offset += ncpy;
	return ncpy;
}

static int fuse_check_page(struct page *page)
{
	if (page_mapcount(page) ||
	    page->mapping != NULL ||
	    page_count(page) != 1 ||
	    (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
	     ~(1 << PG_locked |
	       1 << PG_referenced |
	       1 << PG_uptodate |
	       1 << PG_lru |
	       1 << PG_active |
	       1 << PG_reclaim))) {
		printk(KERN_WARNING "fuse: trying to steal weird page\n");
		printk(KERN_WARNING "  page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
		return 1;
	}
	return 0;
}

static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
{
	int err;
	struct page *oldpage = *pagep;
	struct page *newpage;
	struct pipe_buffer *buf = cs->pipebufs;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	err = buf->ops->confirm(cs->pipe, buf);
	if (err)
		return err;

	BUG_ON(!cs->nr_segs);
	cs->currbuf = buf;
	cs->len = buf->len;
	cs->pipebufs++;
	cs->nr_segs--;

	if (cs->len != PAGE_SIZE)
		goto out_fallback;

	if (buf->ops->steal(cs->pipe, buf) != 0)
		goto out_fallback;

	newpage = buf->page;

	if (!PageUptodate(newpage))
		SetPageUptodate(newpage);

	ClearPageMappedToDisk(newpage);

	if (fuse_check_page(newpage) != 0)
		goto out_fallback_unlock;

	/*
	 * This is a new and locked page, it shouldn't be mapped or
	 * have any special flags on it
	 */
	if (WARN_ON(page_mapped(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(page_has_private(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
		goto out_fallback_unlock;
	if (WARN_ON(PageMlocked(oldpage)))
		goto out_fallback_unlock;

	err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
	if (err) {
		unlock_page(newpage);
		return err;
	}

	page_cache_get(newpage);

	if (!(buf->flags & PIPE_BUF_FLAG_LRU))
		lru_cache_add_file(newpage);

	err = 0;
	spin_lock(&cs->req->waitq.lock);
	if (test_bit(FR_ABORTED, &cs->req->flags))
		err = -ENOENT;
	else
		*pagep = newpage;
	spin_unlock(&cs->req->waitq.lock);

	if (err) {
		unlock_page(newpage);
		page_cache_release(newpage);
		return err;
	}

	unlock_page(oldpage);
	page_cache_release(oldpage);
	cs->len = 0;

	return 0;

out_fallback_unlock:
	unlock_page(newpage);
out_fallback:
	cs->pg = buf->page;
	cs->offset = buf->offset;

	err = lock_request(cs->req);
	if (err)
		return err;

	return 1;
}

static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
			 unsigned offset, unsigned count)
{
	struct pipe_buffer *buf;
	int err;

	if (cs->nr_segs == cs->pipe->buffers)
		return -EIO;

	err = unlock_request(cs->req);
	if (err)
		return err;

	fuse_copy_finish(cs);

	buf = cs->pipebufs;
	page_cache_get(page);
	buf->page = page;
	buf->offset = offset;
	buf->len = count;

	cs->pipebufs++;
	cs->nr_segs++;
	cs->len = 0;

	return 0;
}

/*
 * Copy a page in the request to/from the userspace buffer.  Must be
 * done atomically
 */
static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
			  unsigned offset, unsigned count, int zeroing)
{
	int err;
	struct page *page = *pagep;

	if (page && zeroing && count < PAGE_SIZE)
		clear_highpage(page);

	while (count) {
		if (cs->write && cs->pipebufs && page) {
			return fuse_ref_page(cs, page, offset, count);
		} else if (!cs->len) {
			if (cs->move_pages && page &&
			    offset == 0 && count == PAGE_SIZE) {
				err = fuse_try_move_page(cs, pagep);
				if (err <= 0)
					return err;
			} else {
				err = fuse_copy_fill(cs);
				if (err)
					return err;
			}
		}
		if (page) {
			void *mapaddr = kmap_atomic(page);
			void *buf = mapaddr + offset;
			offset += fuse_copy_do(cs, &buf, &count);
			kunmap_atomic(mapaddr);
		} else
			offset += fuse_copy_do(cs, NULL, &count);
	}
	if (page && !cs->write)
		flush_dcache_page(page);
	return 0;
}

/* Copy pages in the request to/from userspace buffer */
static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
			   int zeroing)
{
	unsigned i;
	struct fuse_req *req = cs->req;

	for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
		int err;
		unsigned offset = req->page_descs[i].offset;
		unsigned count = min(nbytes, req->page_descs[i].length);

		err = fuse_copy_page(cs, &req->pages[i], offset, count,
				     zeroing);
		if (err)
			return err;

		nbytes -= count;
	}
	return 0;
}

/* Copy a single argument in the request to/from userspace buffer */
static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
{
	while (size) {
		if (!cs->len) {
			int err = fuse_copy_fill(cs);
			if (err)
				return err;
		}
		fuse_copy_do(cs, &val, &size);
	}
	return 0;
}

/* Copy request arguments to/from userspace buffer */
static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
			  unsigned argpages, struct fuse_arg *args,
			  int zeroing)
{
	int err = 0;
	unsigned i;

	for (i = 0; !err && i < numargs; i++) {
		struct fuse_arg *arg = &args[i];
		if (i == numargs - 1 && argpages)
			err = fuse_copy_pages(cs, arg->size, zeroing);
		else
			err = fuse_copy_one(cs, arg->value, arg->size);
	}
	return err;
}

static int forget_pending(struct fuse_iqueue *fiq)
{
	return fiq->forget_list_head.next != NULL;
}

static int request_pending(struct fuse_iqueue *fiq)
{
	return !list_empty(&fiq->pending) || !list_empty(&fiq->interrupts) ||
		forget_pending(fiq);
}
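
/*
 * Three kinds of input wait for the daemon, and fuse_dev_do_read()
 * drains them in priority order: interrupts first, then forgets, then
 * regular pending requests.  fiq->forget_batch round-robins between
 * forgets and requests so neither can starve the other.
 */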

/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
__releases(fc->iq.waitq.lock)
__releases(fc->lock)
__acquires(fc->lock)
__acquires(fc->iq.waitq.lock)
{
	struct fuse_iqueue *fiq = &fc->iq;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue_exclusive(&fiq->waitq, &wait);
	while (fiq->connected && !request_pending(fiq)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current))
			break;

		spin_unlock(&fiq->waitq.lock);
		spin_unlock(&fc->lock);
		schedule();
		spin_lock(&fc->lock);
		spin_lock(&fiq->waitq.lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&fiq->waitq, &wait);
}

/*
 * Transfer an interrupt request to userspace
 *
 * Unlike other requests this is assembled on demand, without a need
 * to allocate a separate fuse_req structure.
 *
 * Called with fc->lock held, releases it
 */
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
			       size_t nbytes, struct fuse_req *req)
__releases(fc->iq.waitq.lock)
__releases(fc->lock)
{
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_in_header ih;
	struct fuse_interrupt_in arg;
	unsigned reqsize = sizeof(ih) + sizeof(arg);
	int err;

	list_del_init(&req->intr_entry);
	req->intr_unique = fuse_get_unique(fiq);
	memset(&ih, 0, sizeof(ih));
	memset(&arg, 0, sizeof(arg));
	ih.len = reqsize;
	ih.opcode = FUSE_INTERRUPT;
	ih.unique = req->intr_unique;
	arg.unique = req->in.h.unique;

	spin_unlock(&fiq->waitq.lock);
	spin_unlock(&fc->lock);
	if (nbytes < reqsize)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	return err ? err : reqsize;
}

static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
					       unsigned max,
					       unsigned *countp)
{
	struct fuse_forget_link *head = fiq->forget_list_head.next;
	struct fuse_forget_link **newhead = &head;
	unsigned count;

	for (count = 0; *newhead != NULL && count < max; count++)
		newhead = &(*newhead)->next;

	fiq->forget_list_head.next = *newhead;
	*newhead = NULL;
	if (fiq->forget_list_head.next == NULL)
		fiq->forget_list_tail = &fiq->forget_list_head;

	if (countp != NULL)
		*countp = count;

	return head;
}

static int fuse_read_single_forget(struct fuse_conn *fc,
				   struct fuse_copy_state *cs,
				   size_t nbytes)
__releases(fc->iq.waitq.lock)
__releases(fc->lock)
{
	int err;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
	struct fuse_forget_in arg = {
		.nlookup = forget->forget_one.nlookup,
	};
	struct fuse_in_header ih = {
		.opcode = FUSE_FORGET,
		.nodeid = forget->forget_one.nodeid,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	spin_unlock(&fiq->waitq.lock);
	spin_unlock(&fc->lock);
	kfree(forget);
	if (nbytes < ih.len)
		return -EINVAL;

	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));
	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_batch_forget(struct fuse_conn *fc,
				  struct fuse_copy_state *cs, size_t nbytes)
__releases(fc->iq.waitq.lock)
__releases(fc->lock)
{
	int err;
	unsigned max_forgets;
	unsigned count;
	struct fuse_forget_link *head;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_batch_forget_in arg = { .count = 0 };
	struct fuse_in_header ih = {
		.opcode = FUSE_BATCH_FORGET,
		.unique = fuse_get_unique(fiq),
		.len = sizeof(ih) + sizeof(arg),
	};

	if (nbytes < ih.len) {
		spin_unlock(&fiq->waitq.lock);
		spin_unlock(&fc->lock);
		return -EINVAL;
	}

	max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
	head = dequeue_forget(fiq, max_forgets, &count);
	spin_unlock(&fiq->waitq.lock);
	spin_unlock(&fc->lock);

	arg.count = count;
	ih.len += count * sizeof(struct fuse_forget_one);
	err = fuse_copy_one(cs, &ih, sizeof(ih));
	if (!err)
		err = fuse_copy_one(cs, &arg, sizeof(arg));

	while (head) {
		struct fuse_forget_link *forget = head;

		if (!err) {
			err = fuse_copy_one(cs, &forget->forget_one,
					    sizeof(forget->forget_one));
		}
		head = forget->next;
		kfree(forget);
	}

	fuse_copy_finish(cs);

	if (err)
		return err;

	return ih.len;
}

static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
			    size_t nbytes)
__releases(fc->iq.waitq.lock)
__releases(fc->lock)
{
	struct fuse_iqueue *fiq = &fc->iq;

	if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
		return fuse_read_single_forget(fc, cs, nbytes);
	else
		return fuse_read_batch_forget(fc, cs, nbytes);
}

/*
 * Read a single request into the userspace filesystem's buffer.  This
 * function waits until a request is available, then removes it from
 * the pending list and copies request data to userspace buffer.  If
 * no reply is needed (FORGET) or request has been aborted or there
 * was an error during the copying then it's finished by calling
 * request_end().  Otherwise add it to the processing list, and set
 * the 'sent' flag.
 */
static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
				struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_iqueue *fiq = &fc->iq;
	struct fuse_req *req;
	struct fuse_in *in;
	unsigned reqsize;

 restart:
	spin_lock(&fc->lock);
	spin_lock(&fiq->waitq.lock);
	err = -EAGAIN;
	if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
	    !request_pending(fiq))
		goto err_unlock;

	request_wait(fc);
	err = -ENODEV;
	if (!fiq->connected)
		goto err_unlock;
	err = -ERESTARTSYS;
	if (!request_pending(fiq))
		goto err_unlock;

	if (!list_empty(&fiq->interrupts)) {
		req = list_entry(fiq->interrupts.next, struct fuse_req,
				 intr_entry);
		return fuse_read_interrupt(fc, cs, nbytes, req);
	}

	if (forget_pending(fiq)) {
		if (list_empty(&fiq->pending) || fiq->forget_batch-- > 0)
			return fuse_read_forget(fc, cs, nbytes);

		if (fiq->forget_batch <= -8)
			fiq->forget_batch = 16;
	}

	req = list_entry(fiq->pending.next, struct fuse_req, list);
	clear_bit(FR_PENDING, &req->flags);
	list_del_init(&req->list);
	spin_unlock(&fiq->waitq.lock);

	list_add(&req->list, &fc->io);

	in = &req->in;
	reqsize = in->h.len;
	/* If request is too large, reply with an error and restart the read */
	if (nbytes < reqsize) {
		req->out.h.error = -EIO;
		/* SETXATTR is special, since it may contain too large data */
		if (in->h.opcode == FUSE_SETXATTR)
			req->out.h.error = -E2BIG;
		request_end(fc, req);
		goto restart;
	}
	set_bit(FR_LOCKED, &req->flags);
	spin_unlock(&fc->lock);
	cs->req = req;
	err = fuse_copy_one(cs, &in->h, sizeof(in->h));
	if (!err)
		err = fuse_copy_args(cs, in->numargs, in->argpages,
				     (struct fuse_arg *) in->args, 0);
	fuse_copy_finish(cs);
	spin_lock(&fc->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fc->connected) {
		request_end(fc, req);
		return -ENODEV;
	}
	if (err) {
		req->out.h.error = -EIO;
		request_end(fc, req);
		return err;
	}
	if (!test_bit(FR_ISREPLY, &req->flags)) {
		request_end(fc, req);
		return reqsize;
	}
	list_move_tail(&req->list, &fc->processing);
	set_bit(FR_SENT, &req->flags);
	/* matches barrier in request_wait_answer() */
	smp_mb__after_atomic();
	if (test_bit(FR_INTERRUPTED, &req->flags))
		queue_interrupt(fiq, req);
	spin_unlock(&fc->lock);

	return reqsize;

 err_unlock:
	spin_unlock(&fiq->waitq.lock);
	spin_unlock(&fc->lock);
	return err;
}
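
/*
 * The daemon side mirrors this protocol: each read() on the device
 * returns exactly one request, a struct fuse_in_header followed by the
 * opcode-specific body.  A minimal sketch of the userspace loop
 * (illustrative only, error handling omitted):
 *
 *	char buf[FUSE_MIN_READ_BUFFER];
 *	ssize_t n = read(fuse_fd, buf, sizeof(buf));
 *	struct fuse_in_header *in = (struct fuse_in_header *) buf;
 *	...dispatch on in->opcode, reply quoting in->unique...
 */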

static int fuse_dev_open(struct inode *inode, struct file *file)
{
	/*
	 * The fuse device's file's private_data is used to hold
	 * the fuse_conn(ection) when it is mounted, and is used to
	 * keep track of whether the file has been mounted already.
	 */
	file->private_data = NULL;
	return 0;
}

static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
{
	struct fuse_copy_state cs;
	struct file *file = iocb->ki_filp;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	if (!iter_is_iovec(to))
		return -EINVAL;

	fuse_copy_init(&cs, 1, to);

	return fuse_dev_do_read(fc, file, &cs, iov_iter_count(to));
}

static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	int ret;
	int page_nr = 0;
	int do_wakeup = 0;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(in);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	fuse_copy_init(&cs, 1, NULL);
	cs.pipebufs = bufs;
	cs.pipe = pipe;
	ret = fuse_dev_do_read(fc, in, &cs, len);
	if (ret < 0)
		goto out;

	ret = 0;
	pipe_lock(pipe);

	if (!pipe->readers) {
		send_sig(SIGPIPE, current, 0);
		if (!ret)
			ret = -EPIPE;
		goto out_unlock;
	}

	if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
		ret = -EIO;
		goto out_unlock;
	}

	while (page_nr < cs.nr_segs) {
		int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
		struct pipe_buffer *buf = pipe->bufs + newbuf;

		buf->page = bufs[page_nr].page;
		buf->offset = bufs[page_nr].offset;
		buf->len = bufs[page_nr].len;
		/*
		 * Need to be careful about this.  Having buf->ops in module
		 * code can Oops if the buffer persists after module unload.
		 */
		buf->ops = &nosteal_pipe_buf_ops;

		pipe->nrbufs++;
		page_nr++;
		ret += buf->len;

		if (pipe->files)
			do_wakeup = 1;
	}

out_unlock:
	pipe_unlock(pipe);

	if (do_wakeup) {
		smp_mb();
		if (waitqueue_active(&pipe->wait))
			wake_up_interruptible(&pipe->wait);
		kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
	}

out:
	for (; page_nr < cs.nr_segs; page_nr++)
		page_cache_release(bufs[page_nr].page);

	kfree(bufs);
	return ret;
}

static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
			    struct fuse_copy_state *cs)
{
	struct fuse_notify_poll_wakeup_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	fuse_copy_finish(cs);
	return fuse_notify_poll_wakeup(fc, &outarg);

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_inode_out outarg;
	int err = -EINVAL;

	if (size != sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;
	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
					       outarg.off, outarg.len);
	}
	up_read(&fc->killsb);
	return err;

err:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
				   struct fuse_copy_state *cs)
{
	struct fuse_notify_inval_entry_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
			      struct fuse_copy_state *cs)
{
	struct fuse_notify_delete_out outarg;
	int err = -ENOMEM;
	char *buf;
	struct qstr name;

	buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
	if (!buf)
		goto err;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto err;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto err;

	err = -ENAMETOOLONG;
	if (outarg.namelen > FUSE_NAME_MAX)
		goto err;

	err = -EINVAL;
	if (size != sizeof(outarg) + outarg.namelen + 1)
		goto err;

	name.name = buf;
	name.len = outarg.namelen;
	err = fuse_copy_one(cs, buf, outarg.namelen + 1);
	if (err)
		goto err;
	fuse_copy_finish(cs);
	buf[outarg.namelen] = 0;
	name.hash = full_name_hash(name.name, name.len);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb)
		err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
					       outarg.child, &name);
	up_read(&fc->killsb);
	kfree(buf);
	return err;

err:
	kfree(buf);
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
			     struct fuse_copy_state *cs)
{
	struct fuse_notify_store_out outarg;
	struct inode *inode;
	struct address_space *mapping;
	u64 nodeid;
	int err;
	pgoff_t index;
	unsigned int offset;
	unsigned int num;
	loff_t file_size;
	loff_t end;

	err = -EINVAL;
	if (size < sizeof(outarg))
		goto out_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto out_finish;

	err = -EINVAL;
	if (size - sizeof(outarg) != outarg.size)
		goto out_finish;

	nodeid = outarg.nodeid;

	down_read(&fc->killsb);

	err = -ENOENT;
	if (!fc->sb)
		goto out_up_killsb;

	inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
	if (!inode)
		goto out_up_killsb;

	mapping = inode->i_mapping;
	index = outarg.offset >> PAGE_CACHE_SHIFT;
	offset = outarg.offset & ~PAGE_CACHE_MASK;
	file_size = i_size_read(inode);
	end = outarg.offset + outarg.size;
	if (end > file_size) {
		file_size = end;
		fuse_write_update_size(inode, file_size);
	}

	num = outarg.size;
	while (num) {
		struct page *page;
		unsigned int this_num;

		err = -ENOMEM;
		page = find_or_create_page(mapping, index,
					   mapping_gfp_mask(mapping));
		if (!page)
			goto out_iput;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		err = fuse_copy_page(cs, &page, offset, this_num, 0);
		if (!err && offset == 0 &&
		    (this_num == PAGE_CACHE_SIZE || file_size == end))
			SetPageUptodate(page);
		unlock_page(page);
		page_cache_release(page);

		if (err)
			goto out_iput;

		num -= this_num;
		offset = 0;
		index++;
	}

	err = 0;

out_iput:
	iput(inode);
out_up_killsb:
	up_read(&fc->killsb);
out_finish:
	fuse_copy_finish(cs);
	return err;
}

static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
{
	release_pages(req->pages, req->num_pages, false);
}

static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
			 struct fuse_notify_retrieve_out *outarg)
{
	int err;
	struct address_space *mapping = inode->i_mapping;
	struct fuse_req *req;
	pgoff_t index;
	loff_t file_size;
	unsigned int num;
	unsigned int offset;
	size_t total_len = 0;
	int num_pages;

	offset = outarg->offset & ~PAGE_CACHE_MASK;
	file_size = i_size_read(inode);

	num = outarg->size;
	if (outarg->offset > file_size)
		num = 0;
	else if (outarg->offset + num > file_size)
		num = file_size - outarg->offset;

	num_pages = (num + offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	num_pages = min(num_pages, FUSE_MAX_PAGES_PER_REQ);

	req = fuse_get_req(fc, num_pages);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.h.opcode = FUSE_NOTIFY_REPLY;
	req->in.h.nodeid = outarg->nodeid;
	req->in.numargs = 2;
	req->in.argpages = 1;
	req->page_descs[0].offset = offset;
	req->end = fuse_retrieve_end;

	index = outarg->offset >> PAGE_CACHE_SHIFT;

	while (num && req->num_pages < num_pages) {
		struct page *page;
		unsigned int this_num;

		page = find_get_page(mapping, index);
		if (!page)
			break;

		this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
		req->pages[req->num_pages] = page;
		req->page_descs[req->num_pages].length = this_num;
		req->num_pages++;

		offset = 0;
		num -= this_num;
		total_len += this_num;
		index++;
	}
	req->misc.retrieve_in.offset = outarg->offset;
	req->misc.retrieve_in.size = total_len;
	req->in.args[0].size = sizeof(req->misc.retrieve_in);
	req->in.args[0].value = &req->misc.retrieve_in;
	req->in.args[1].size = total_len;

	err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
	if (err)
		fuse_retrieve_end(fc, req);

	return err;
}

static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
				struct fuse_copy_state *cs)
{
	struct fuse_notify_retrieve_out outarg;
	struct inode *inode;
	int err;

	err = -EINVAL;
	if (size != sizeof(outarg))
		goto copy_finish;

	err = fuse_copy_one(cs, &outarg, sizeof(outarg));
	if (err)
		goto copy_finish;

	fuse_copy_finish(cs);

	down_read(&fc->killsb);
	err = -ENOENT;
	if (fc->sb) {
		u64 nodeid = outarg.nodeid;

		inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
		if (inode) {
			err = fuse_retrieve(fc, inode, &outarg);
			iput(inode);
		}
	}
	up_read(&fc->killsb);

	return err;

copy_finish:
	fuse_copy_finish(cs);
	return err;
}

static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
		       unsigned int size, struct fuse_copy_state *cs)
{
	/* Don't try to move pages (yet) */
	cs->move_pages = 0;

	switch (code) {
	case FUSE_NOTIFY_POLL:
		return fuse_notify_poll(fc, size, cs);

	case FUSE_NOTIFY_INVAL_INODE:
		return fuse_notify_inval_inode(fc, size, cs);

	case FUSE_NOTIFY_INVAL_ENTRY:
		return fuse_notify_inval_entry(fc, size, cs);

	case FUSE_NOTIFY_STORE:
		return fuse_notify_store(fc, size, cs);

	case FUSE_NOTIFY_RETRIEVE:
		return fuse_notify_retrieve(fc, size, cs);

	case FUSE_NOTIFY_DELETE:
		return fuse_notify_delete(fc, size, cs);

	default:
		fuse_copy_finish(cs);
		return -EINVAL;
	}
}

/* Look up request on processing list by unique ID */
static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
{
	struct fuse_req *req;

	list_for_each_entry(req, &fc->processing, list) {
		if (req->in.h.unique == unique || req->intr_unique == unique)
			return req;
	}
	return NULL;
}

static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
			 unsigned nbytes)
{
	unsigned reqsize = sizeof(struct fuse_out_header);

	if (out->h.error)
		return nbytes != reqsize ? -EINVAL : 0;

	reqsize += len_args(out->numargs, out->args);

	if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
		return -EINVAL;
	else if (reqsize > nbytes) {
		struct fuse_arg *lastarg = &out->args[out->numargs-1];
		unsigned diffsize = reqsize - nbytes;
		if (diffsize > lastarg->size)
			return -EINVAL;
		lastarg->size -= diffsize;
	}
	return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
			      out->page_zeroing);
}

/*
 * Write a single reply to a request.  First the header is copied from
 * the write buffer.  The request is then searched on the processing
 * list by the unique ID found in the header.  If found, then remove
 * it from the list and copy the rest of the buffer to the request.
 * The request is finished by calling request_end()
 */
static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
				 struct fuse_copy_state *cs, size_t nbytes)
{
	int err;
	struct fuse_req *req;
	struct fuse_out_header oh;

	if (nbytes < sizeof(struct fuse_out_header))
		return -EINVAL;

	err = fuse_copy_one(cs, &oh, sizeof(oh));
	if (err)
		goto err_finish;

	err = -EINVAL;
	if (oh.len != nbytes)
		goto err_finish;

	/*
	 * Zero oh.unique indicates unsolicited notification message
	 * and error contains notification code.
	 */
	if (!oh.unique) {
		err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
		return err ? err : nbytes;
	}

	err = -EINVAL;
	if (oh.error <= -1000 || oh.error > 0)
		goto err_finish;

	spin_lock(&fc->lock);
	err = -ENOENT;
	if (!fc->connected)
		goto err_unlock;

	req = request_find(fc, oh.unique);
	if (!req)
		goto err_unlock;

	/* Is it an interrupt reply? */
	if (req->intr_unique == oh.unique) {
		err = -EINVAL;
		if (nbytes != sizeof(struct fuse_out_header))
			goto err_unlock;

		if (oh.error == -ENOSYS)
			fc->no_interrupt = 1;
		else if (oh.error == -EAGAIN)
			queue_interrupt(&fc->iq, req);

		spin_unlock(&fc->lock);
		fuse_copy_finish(cs);
		return nbytes;
	}

	clear_bit(FR_SENT, &req->flags);
	list_move(&req->list, &fc->io);
	req->out.h = oh;
	set_bit(FR_LOCKED, &req->flags);
	cs->req = req;
	if (!req->out.page_replace)
		cs->move_pages = 0;
	spin_unlock(&fc->lock);

	err = copy_out_args(cs, &req->out, nbytes);
	fuse_copy_finish(cs);

	spin_lock(&fc->lock);
	clear_bit(FR_LOCKED, &req->flags);
	if (!fc->connected)
		err = -ENOENT;
	if (err)
		req->out.h.error = -EIO;
	request_end(fc, req);

	return err ? err : nbytes;

 err_unlock:
	spin_unlock(&fc->lock);
 err_finish:
	fuse_copy_finish(cs);
	return err;
}
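
/*
 * Correspondingly, a reply from the daemon is one write(): a struct
 * fuse_out_header whose unique field is copied from the request,
 * followed by the opcode-specific payload, with oh.len covering the
 * whole buffer.  A notification uses the same path with oh.unique == 0
 * and the notification code carried in oh.error.
 */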

static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
{
	struct fuse_copy_state cs;
	struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
	if (!fc)
		return -EPERM;

	if (!iter_is_iovec(from))
		return -EINVAL;

	fuse_copy_init(&cs, 0, from);

	return fuse_dev_do_write(fc, &cs, iov_iter_count(from));
}

static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
				     struct file *out, loff_t *ppos,
				     size_t len, unsigned int flags)
{
	unsigned nbuf;
	unsigned idx;
	struct pipe_buffer *bufs;
	struct fuse_copy_state cs;
	struct fuse_conn *fc;
	size_t rem;
	ssize_t ret;

	fc = fuse_get_conn(out);
	if (!fc)
		return -EPERM;

	bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	pipe_lock(pipe);
	nbuf = 0;
	rem = 0;
	for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
		rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;

	ret = -EINVAL;
	if (rem < len) {
		pipe_unlock(pipe);
		goto out;
	}

	rem = len;
	while (rem) {
		struct pipe_buffer *ibuf;
		struct pipe_buffer *obuf;

		BUG_ON(nbuf >= pipe->buffers);
		BUG_ON(!pipe->nrbufs);
		ibuf = &pipe->bufs[pipe->curbuf];
		obuf = &bufs[nbuf];

		if (rem >= ibuf->len) {
			*obuf = *ibuf;
			ibuf->ops = NULL;
			pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
			pipe->nrbufs--;
		} else {
			ibuf->ops->get(pipe, ibuf);
			*obuf = *ibuf;
			obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
			obuf->len = rem;
			ibuf->offset += obuf->len;
			ibuf->len -= obuf->len;
		}
		nbuf++;
		rem -= obuf->len;
	}
	pipe_unlock(pipe);

	fuse_copy_init(&cs, 0, NULL);
	cs.pipebufs = bufs;
	cs.nr_segs = nbuf;
	cs.pipe = pipe;

	if (flags & SPLICE_F_MOVE)
		cs.move_pages = 1;

	ret = fuse_dev_do_write(fc, &cs, len);

	for (idx = 0; idx < nbuf; idx++) {
		struct pipe_buffer *buf = &bufs[idx];
		buf->ops->release(pipe, buf);
	}
out:
	kfree(bufs);
	return ret;
}
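
/*
 * When the daemon splices with SPLICE_F_MOVE, cs.move_pages lets
 * fuse_try_move_page() steal whole pipe pages and install them directly
 * in the page cache instead of copying.
 */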

static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
{
	unsigned mask = POLLOUT | POLLWRNORM;
	struct fuse_iqueue *fiq;
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return POLLERR;

	fiq = &fc->iq;
	poll_wait(file, &fiq->waitq, wait);

	spin_lock(&fc->lock);
	spin_lock(&fiq->waitq.lock);
	if (!fiq->connected)
		mask = POLLERR;
	else if (request_pending(fiq))
		mask |= POLLIN | POLLRDNORM;
	spin_unlock(&fiq->waitq.lock);
	spin_unlock(&fc->lock);

	return mask;
}

/*
 * Abort all requests on the given list (pending or processing)
 *
 * This function releases and reacquires fc->lock
 */
static void end_requests(struct fuse_conn *fc, struct list_head *head)
__releases(fc->lock)
__acquires(fc->lock)
{
	while (!list_empty(head)) {
		struct fuse_req *req;
		req = list_entry(head->next, struct fuse_req, list);
		req->out.h.error = -ECONNABORTED;
		clear_bit(FR_PENDING, &req->flags);
		clear_bit(FR_SENT, &req->flags);
		request_end(fc, req);
		spin_lock(&fc->lock);
	}
}

static void end_polls(struct fuse_conn *fc)
{
	struct rb_node *p;

	p = rb_first(&fc->polled_files);

	while (p) {
		struct fuse_file *ff;
		ff = rb_entry(p, struct fuse_file, polled_node);
		wake_up_interruptible_all(&ff->poll_wait);

		p = rb_next(p);
	}
}

/*
 * Abort all requests.
 *
 * Emergency exit in case of a malicious or accidental deadlock, or just a hung
 * filesystem.
 *
 * The same effect is usually achievable through killing the filesystem daemon
 * and all users of the filesystem.  The exception is the combination of an
 * asynchronous request and the tricky deadlock (see
 * Documentation/filesystems/fuse.txt).
 *
 * Aborting requests under I/O goes as follows: 1: Separate out unlocked
 * requests, they should be finished off immediately.  Locked requests will be
 * finished after unlock; see unlock_request(). 2: Finish off the unlocked
 * requests.  It is possible that some request will finish before we can.  This
 * is OK, the request will in that case be removed from the list before we touch
 * it.
 */
void fuse_abort_conn(struct fuse_conn *fc)
{
	struct fuse_iqueue *fiq = &fc->iq;

	spin_lock(&fc->lock);
	if (fc->connected) {
		struct fuse_req *req, *next;
		LIST_HEAD(to_end1);
		LIST_HEAD(to_end2);

		fc->connected = 0;
		fc->blocked = 0;
		fuse_set_initialized(fc);
		list_for_each_entry_safe(req, next, &fc->io, list) {
			req->out.h.error = -ECONNABORTED;
			spin_lock(&req->waitq.lock);
			set_bit(FR_ABORTED, &req->flags);
			if (!test_bit(FR_LOCKED, &req->flags))
				list_move(&req->list, &to_end1);
			spin_unlock(&req->waitq.lock);
		}
		fc->max_background = UINT_MAX;
		flush_bg_queue(fc);

		spin_lock(&fiq->waitq.lock);
		fiq->connected = 0;
		list_splice_init(&fiq->pending, &to_end2);
		while (forget_pending(fiq))
			kfree(dequeue_forget(fiq, 1, NULL));
		wake_up_all_locked(&fiq->waitq);
		spin_unlock(&fiq->waitq.lock);
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);

		list_splice_init(&fc->processing, &to_end2);
		while (!list_empty(&to_end1)) {
			req = list_first_entry(&to_end1, struct fuse_req, list);
			__fuse_get_request(req);
			request_end(fc, req);
			spin_lock(&fc->lock);
		}
		end_requests(fc, &to_end2);
		end_polls(fc);
		wake_up_all(&fc->blocked_waitq);
	}
	spin_unlock(&fc->lock);
}
EXPORT_SYMBOL_GPL(fuse_abort_conn);

int fuse_dev_release(struct inode *inode, struct file *file)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (fc) {
		WARN_ON(!list_empty(&fc->io));
		WARN_ON(fc->iq.fasync != NULL);
		fuse_abort_conn(fc);
		fuse_conn_put(fc);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(fuse_dev_release);

static int fuse_dev_fasync(int fd, struct file *file, int on)
{
	struct fuse_conn *fc = fuse_get_conn(file);
	if (!fc)
		return -EPERM;

	/* No locking - fasync_helper does its own locking */
	return fasync_helper(fd, file, on, &fc->iq.fasync);
}

const struct file_operations fuse_dev_operations = {
	.owner		= THIS_MODULE,
	.open		= fuse_dev_open,
	.llseek		= no_llseek,
	.read_iter	= fuse_dev_read,
	.splice_read	= fuse_dev_splice_read,
	.write_iter	= fuse_dev_write,
	.splice_write	= fuse_dev_splice_write,
	.poll		= fuse_dev_poll,
	.release	= fuse_dev_release,
	.fasync		= fuse_dev_fasync,
};
EXPORT_SYMBOL_GPL(fuse_dev_operations);

static struct miscdevice fuse_miscdevice = {
	.minor = FUSE_MINOR,
	.name  = "fuse",
	.fops  = &fuse_dev_operations,
};

int __init fuse_dev_init(void)
{
	int err = -ENOMEM;
	fuse_req_cachep = kmem_cache_create("fuse_request",
					    sizeof(struct fuse_req),
					    0, 0, NULL);
	if (!fuse_req_cachep)
		goto out;

	err = misc_register(&fuse_miscdevice);
	if (err)
		goto out_cache_clean;

	return 0;

 out_cache_clean:
	kmem_cache_destroy(fuse_req_cachep);
 out:
	return err;
}

void fuse_dev_cleanup(void)
{
	misc_deregister(&fuse_miscdevice);
	kmem_cache_destroy(fuse_req_cachep);
}