fuse: implement NFS-like readdirplus support
[deliverable/linux.git] / fs / fuse / dev.c
334f485d
MS
1/*
2 FUSE: Filesystem in Userspace
1729a16c 3 Copyright (C) 2001-2008 Miklos Szeredi <miklos@szeredi.hu>
334f485d
MS
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7*/
8
9#include "fuse_i.h"
10
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/poll.h>
14#include <linux/uio.h>
15#include <linux/miscdevice.h>
16#include <linux/pagemap.h>
17#include <linux/file.h>
18#include <linux/slab.h>
dd3bb14f 19#include <linux/pipe_fs_i.h>
ce534fb0
MS
20#include <linux/swap.h>
21#include <linux/splice.h>
334f485d
MS
22
23MODULE_ALIAS_MISCDEV(FUSE_MINOR);
578454ff 24MODULE_ALIAS("devname:fuse");
334f485d 25
e18b890b 26static struct kmem_cache *fuse_req_cachep;
334f485d 27
8bfc016d 28static struct fuse_conn *fuse_get_conn(struct file *file)
334f485d 29{
0720b315
MS
30 /*
31 * Lockless access is OK, because file->private_data is set
32 * once during mount and is valid until the file is released.
33 */
34 return file->private_data;
334f485d
MS
35}
36
8bfc016d 37static void fuse_request_init(struct fuse_req *req)
334f485d
MS
38{
39 memset(req, 0, sizeof(*req));
40 INIT_LIST_HEAD(&req->list);
a4d27e75 41 INIT_LIST_HEAD(&req->intr_entry);
334f485d
MS
42 init_waitqueue_head(&req->waitq);
43 atomic_set(&req->count, 1);
44}
45
46struct fuse_req *fuse_request_alloc(void)
47{
e94b1766 48 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_KERNEL);
334f485d
MS
49 if (req)
50 fuse_request_init(req);
51 return req;
52}
08cbf542 53EXPORT_SYMBOL_GPL(fuse_request_alloc);
334f485d 54
3be5a52b
MS
55struct fuse_req *fuse_request_alloc_nofs(void)
56{
57 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, GFP_NOFS);
58 if (req)
59 fuse_request_init(req);
60 return req;
61}
62
334f485d
MS
63void fuse_request_free(struct fuse_req *req)
64{
65 kmem_cache_free(fuse_req_cachep, req);
66}
67
8bfc016d 68static void block_sigs(sigset_t *oldset)
334f485d
MS
69{
70 sigset_t mask;
71
72 siginitsetinv(&mask, sigmask(SIGKILL));
73 sigprocmask(SIG_BLOCK, &mask, oldset);
74}
75
8bfc016d 76static void restore_sigs(sigset_t *oldset)
334f485d
MS
77{
78 sigprocmask(SIG_SETMASK, oldset, NULL);
79}
80
334f485d
MS
81static void __fuse_get_request(struct fuse_req *req)
82{
83 atomic_inc(&req->count);
84}
85
86/* Must be called with > 1 refcount */
87static void __fuse_put_request(struct fuse_req *req)
88{
89 BUG_ON(atomic_read(&req->count) < 2);
90 atomic_dec(&req->count);
91}
92
33649c91
MS
93static void fuse_req_init_context(struct fuse_req *req)
94{
499dcf20
EB
95 req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid());
96 req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid());
33649c91
MS
97 req->in.h.pid = current->pid;
98}
99
ce1d5a49 100struct fuse_req *fuse_get_req(struct fuse_conn *fc)
334f485d 101{
08a53cdc
MS
102 struct fuse_req *req;
103 sigset_t oldset;
9bc5ddda 104 int intr;
08a53cdc
MS
105 int err;
106
9bc5ddda 107 atomic_inc(&fc->num_waiting);
08a53cdc 108 block_sigs(&oldset);
9bc5ddda 109 intr = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
08a53cdc 110 restore_sigs(&oldset);
9bc5ddda
MS
111 err = -EINTR;
112 if (intr)
113 goto out;
08a53cdc 114
51eb01e7
MS
115 err = -ENOTCONN;
116 if (!fc->connected)
117 goto out;
118
08a53cdc 119 req = fuse_request_alloc();
9bc5ddda 120 err = -ENOMEM;
ce1d5a49 121 if (!req)
9bc5ddda 122 goto out;
334f485d 123
33649c91 124 fuse_req_init_context(req);
9bc5ddda 125 req->waiting = 1;
334f485d 126 return req;
9bc5ddda
MS
127
128 out:
129 atomic_dec(&fc->num_waiting);
130 return ERR_PTR(err);
334f485d 131}
08cbf542 132EXPORT_SYMBOL_GPL(fuse_get_req);
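/*
 * Illustrative sketch only (not part of this file): roughly how callers
 * elsewhere in fs/fuse use the request API exported above.  The opcode,
 * 'nodeid', 'inarg' and 'outarg' below are placeholders for whatever
 * operation is being sent.
 *
 *	struct fuse_req *req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *
 *	req->in.h.opcode = FUSE_GETATTR;	// example opcode
 *	req->in.h.nodeid = nodeid;
 *	req->in.numargs = 1;
 *	req->in.args[0].size = sizeof(inarg);
 *	req->in.args[0].value = &inarg;
 *	req->out.numargs = 1;
 *	req->out.args[0].size = sizeof(outarg);
 *	req->out.args[0].value = &outarg;
 *
 *	fuse_request_send(fc, req);		// blocks until the reply arrives
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);		// drop the caller's reference
 */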
334f485d 133
33649c91
MS
134/*
135 * Return request in fuse_file->reserved_req. However, that may
136 * currently be in use. If that is the case, wait for it to become
137 * available.
138 */
139static struct fuse_req *get_reserved_req(struct fuse_conn *fc,
140 struct file *file)
141{
142 struct fuse_req *req = NULL;
143 struct fuse_file *ff = file->private_data;
144
145 do {
de5e3dec 146 wait_event(fc->reserved_req_waitq, ff->reserved_req);
33649c91
MS
147 spin_lock(&fc->lock);
148 if (ff->reserved_req) {
149 req = ff->reserved_req;
150 ff->reserved_req = NULL;
cb0942b8 151 req->stolen_file = get_file(file);
33649c91
MS
152 }
153 spin_unlock(&fc->lock);
154 } while (!req);
155
156 return req;
157}
158
159/*
160 * Put stolen request back into fuse_file->reserved_req
161 */
162static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req)
163{
164 struct file *file = req->stolen_file;
165 struct fuse_file *ff = file->private_data;
166
167 spin_lock(&fc->lock);
168 fuse_request_init(req);
169 BUG_ON(ff->reserved_req);
170 ff->reserved_req = req;
de5e3dec 171 wake_up_all(&fc->reserved_req_waitq);
33649c91
MS
172 spin_unlock(&fc->lock);
173 fput(file);
174}
175
176/*
177 * Gets a request for a file operation; always succeeds
178 *
179 * This is used for sending the FLUSH request, which must get to
180 * userspace, due to POSIX locks which may need to be unlocked.
181 *
182 * If allocation fails due to OOM, use the reserved request in
183 * fuse_file.
184 *
185 * This is very unlikely to deadlock accidentally, since the
186 * filesystem should not have its own file open. If deadlock is
187 * intentional, it can still be broken by "aborting" the filesystem.
188 */
189struct fuse_req *fuse_get_req_nofail(struct fuse_conn *fc, struct file *file)
190{
191 struct fuse_req *req;
192
193 atomic_inc(&fc->num_waiting);
194 wait_event(fc->blocked_waitq, !fc->blocked);
195 req = fuse_request_alloc();
196 if (!req)
197 req = get_reserved_req(fc, file);
198
199 fuse_req_init_context(req);
200 req->waiting = 1;
201 return req;
202}
203
334f485d 204void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
7128ec2a
MS
205{
206 if (atomic_dec_and_test(&req->count)) {
9bc5ddda
MS
207 if (req->waiting)
208 atomic_dec(&fc->num_waiting);
33649c91
MS
209
210 if (req->stolen_file)
211 put_reserved_req(fc, req);
212 else
213 fuse_request_free(req);
7128ec2a
MS
214 }
215}
08cbf542 216EXPORT_SYMBOL_GPL(fuse_put_request);
7128ec2a 217
d12def1b
MS
218static unsigned len_args(unsigned numargs, struct fuse_arg *args)
219{
220 unsigned nbytes = 0;
221 unsigned i;
222
223 for (i = 0; i < numargs; i++)
224 nbytes += args[i].size;
225
226 return nbytes;
227}
228
229static u64 fuse_get_unique(struct fuse_conn *fc)
230{
231 fc->reqctr++;
232 /* zero is special */
233 if (fc->reqctr == 0)
234 fc->reqctr = 1;
235
236 return fc->reqctr;
237}
238
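/*
 * Note (informal): the value 0 is never handed out above because the
 * write path treats a message with oh.unique == 0 as an unsolicited
 * notification from the daemon rather than an answer to a request
 * (see fuse_dev_do_write() and fuse_notify() below).
 */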
239static void queue_request(struct fuse_conn *fc, struct fuse_req *req)
240{
d12def1b
MS
241 req->in.h.len = sizeof(struct fuse_in_header) +
242 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
243 list_add_tail(&req->list, &fc->pending);
244 req->state = FUSE_REQ_PENDING;
245 if (!req->waiting) {
246 req->waiting = 1;
247 atomic_inc(&fc->num_waiting);
248 }
249 wake_up(&fc->waitq);
250 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
251}
252
07e77dca
MS
253void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
254 u64 nodeid, u64 nlookup)
255{
02c048b9
MS
256 forget->forget_one.nodeid = nodeid;
257 forget->forget_one.nlookup = nlookup;
07e77dca
MS
258
259 spin_lock(&fc->lock);
5dfcc87f
MS
260 if (fc->connected) {
261 fc->forget_list_tail->next = forget;
262 fc->forget_list_tail = forget;
263 wake_up(&fc->waitq);
264 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
265 } else {
266 kfree(forget);
267 }
07e77dca
MS
268 spin_unlock(&fc->lock);
269}
270
d12def1b
MS
271static void flush_bg_queue(struct fuse_conn *fc)
272{
7a6d3c8b 273 while (fc->active_background < fc->max_background &&
d12def1b
MS
274 !list_empty(&fc->bg_queue)) {
275 struct fuse_req *req;
276
277 req = list_entry(fc->bg_queue.next, struct fuse_req, list);
278 list_del(&req->list);
279 fc->active_background++;
2d45ba38 280 req->in.h.unique = fuse_get_unique(fc);
d12def1b
MS
281 queue_request(fc, req);
282 }
283}
284
334f485d
MS
285/*
286 * This function is called when a request is finished. Either a reply
f9a2842e 287 * has arrived or it was aborted (and not yet sent) or some error
f43b155a 288 * occurred during communication with userspace, or the device file
51eb01e7
MS
289 * was closed. The requester thread is woken up (if still waiting),
290 * the 'end' callback is called if given, and the reference to the
291 * request is released.
7128ec2a 292 *
d7133114 293 * Called with fc->lock, unlocks it
334f485d
MS
294 */
295static void request_end(struct fuse_conn *fc, struct fuse_req *req)
b9ca67b2 296__releases(fc->lock)
334f485d 297{
51eb01e7
MS
298 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
299 req->end = NULL;
d77a1d5b 300 list_del(&req->list);
a4d27e75 301 list_del(&req->intr_entry);
83cfd493 302 req->state = FUSE_REQ_FINISHED;
51eb01e7 303 if (req->background) {
7a6d3c8b 304 if (fc->num_background == fc->max_background) {
51eb01e7
MS
305 fc->blocked = 0;
306 wake_up_all(&fc->blocked_waitq);
307 }
7a6d3c8b 308 if (fc->num_background == fc->congestion_threshold &&
a325f9b9 309 fc->connected && fc->bdi_initialized) {
8aa7e847
JA
310 clear_bdi_congested(&fc->bdi, BLK_RW_SYNC);
311 clear_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
f92b99b9 312 }
51eb01e7 313 fc->num_background--;
d12def1b
MS
314 fc->active_background--;
315 flush_bg_queue(fc);
334f485d 316 }
51eb01e7 317 spin_unlock(&fc->lock);
51eb01e7
MS
318 wake_up(&req->waitq);
319 if (end)
320 end(fc, req);
e9bb09dd 321 fuse_put_request(fc, req);
334f485d
MS
322}
323
a4d27e75
MS
324static void wait_answer_interruptible(struct fuse_conn *fc,
325 struct fuse_req *req)
b9ca67b2
MS
326__releases(fc->lock)
327__acquires(fc->lock)
a4d27e75
MS
328{
329 if (signal_pending(current))
330 return;
331
332 spin_unlock(&fc->lock);
333 wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
334 spin_lock(&fc->lock);
335}
336
337static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
338{
339 list_add_tail(&req->intr_entry, &fc->interrupts);
340 wake_up(&fc->waitq);
341 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
342}
343
7c352bdf 344static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
b9ca67b2
MS
345__releases(fc->lock)
346__acquires(fc->lock)
334f485d 347{
a4d27e75
MS
348 if (!fc->no_interrupt) {
349 /* Any signal may interrupt this */
350 wait_answer_interruptible(fc, req);
334f485d 351
a4d27e75
MS
352 if (req->aborted)
353 goto aborted;
354 if (req->state == FUSE_REQ_FINISHED)
355 return;
356
357 req->interrupted = 1;
358 if (req->state == FUSE_REQ_SENT)
359 queue_interrupt(fc, req);
360 }
361
a131de0a 362 if (!req->force) {
a4d27e75
MS
363 sigset_t oldset;
364
365 /* Only fatal signals may interrupt this */
51eb01e7 366 block_sigs(&oldset);
a4d27e75 367 wait_answer_interruptible(fc, req);
51eb01e7 368 restore_sigs(&oldset);
a131de0a
MS
369
370 if (req->aborted)
371 goto aborted;
372 if (req->state == FUSE_REQ_FINISHED)
373 return;
374
375 /* Request is not yet in userspace, bail out */
376 if (req->state == FUSE_REQ_PENDING) {
377 list_del(&req->list);
378 __fuse_put_request(req);
379 req->out.h.error = -EINTR;
380 return;
381 }
51eb01e7 382 }
334f485d 383
a131de0a
MS
384 /*
385 * Either request is already in userspace, or it was forced.
386 * Wait it out.
387 */
388 spin_unlock(&fc->lock);
389 wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
390 spin_lock(&fc->lock);
a4d27e75 391
a131de0a
MS
392 if (!req->aborted)
393 return;
a4d27e75
MS
394
395 aborted:
a131de0a 396 BUG_ON(req->state != FUSE_REQ_FINISHED);
334f485d
MS
397 if (req->locked) {
398 /* This is uninterruptible sleep, because data is
399 being copied to/from the buffers of req. During
400 locked state, there mustn't be any filesystem
401 operation (e.g. page fault), since that could lead
402 to deadlock */
d7133114 403 spin_unlock(&fc->lock);
334f485d 404 wait_event(req->waitq, !req->locked);
d7133114 405 spin_lock(&fc->lock);
334f485d 406 }
334f485d
MS
407}
408
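/*
 * Informal summary of the request life cycle as implemented in this
 * file (added for orientation, not authoritative):
 *
 *   fuse_get_req()        allocate, refcount == 1
 *   queue_request()       on fc->pending, state FUSE_REQ_PENDING
 *   fuse_dev_do_read()    on fc->io while being copied to the daemon
 *                         (FUSE_REQ_READING), then on fc->processing
 *                         (FUSE_REQ_SENT)
 *   fuse_dev_do_write()   reply matched by unique ID, moved back to
 *                         fc->io (FUSE_REQ_WRITING) and finished by
 *                         request_end() (FUSE_REQ_FINISHED)
 */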
b93f858a 409void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
334f485d
MS
410{
411 req->isreply = 1;
d7133114 412 spin_lock(&fc->lock);
1e9a4ed9 413 if (!fc->connected)
334f485d
MS
414 req->out.h.error = -ENOTCONN;
415 else if (fc->conn_error)
416 req->out.h.error = -ECONNREFUSED;
417 else {
2d45ba38 418 req->in.h.unique = fuse_get_unique(fc);
334f485d
MS
419 queue_request(fc, req);
420 /* acquire extra reference, since request is still needed
421 after request_end() */
422 __fuse_get_request(req);
423
7c352bdf 424 request_wait_answer(fc, req);
334f485d 425 }
d7133114 426 spin_unlock(&fc->lock);
334f485d 427}
08cbf542 428EXPORT_SYMBOL_GPL(fuse_request_send);
334f485d 429
b93f858a
TH
430static void fuse_request_send_nowait_locked(struct fuse_conn *fc,
431 struct fuse_req *req)
d12def1b
MS
432{
433 req->background = 1;
434 fc->num_background++;
7a6d3c8b 435 if (fc->num_background == fc->max_background)
d12def1b 436 fc->blocked = 1;
7a6d3c8b 437 if (fc->num_background == fc->congestion_threshold &&
a325f9b9 438 fc->bdi_initialized) {
8aa7e847
JA
439 set_bdi_congested(&fc->bdi, BLK_RW_SYNC);
440 set_bdi_congested(&fc->bdi, BLK_RW_ASYNC);
d12def1b
MS
441 }
442 list_add_tail(&req->list, &fc->bg_queue);
443 flush_bg_queue(fc);
444}
445
b93f858a 446static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req)
334f485d 447{
d7133114 448 spin_lock(&fc->lock);
1e9a4ed9 449 if (fc->connected) {
b93f858a 450 fuse_request_send_nowait_locked(fc, req);
d7133114 451 spin_unlock(&fc->lock);
334f485d
MS
452 } else {
453 req->out.h.error = -ENOTCONN;
454 request_end(fc, req);
455 }
456}
457
b93f858a 458void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req)
334f485d
MS
459{
460 req->isreply = 1;
b93f858a 461 fuse_request_send_nowait(fc, req);
334f485d 462}
08cbf542 463EXPORT_SYMBOL_GPL(fuse_request_send_background);
334f485d 464
2d45ba38
MS
465static int fuse_request_send_notify_reply(struct fuse_conn *fc,
466 struct fuse_req *req, u64 unique)
467{
468 int err = -ENODEV;
469
470 req->isreply = 0;
471 req->in.h.unique = unique;
472 spin_lock(&fc->lock);
473 if (fc->connected) {
474 queue_request(fc, req);
475 err = 0;
476 }
477 spin_unlock(&fc->lock);
478
479 return err;
480}
481
3be5a52b
MS
482/*
483 * Called under fc->lock
484 *
485 * fc->connected must have been checked previously
486 */
b93f858a
TH
487void fuse_request_send_background_locked(struct fuse_conn *fc,
488 struct fuse_req *req)
3be5a52b
MS
489{
490 req->isreply = 1;
b93f858a 491 fuse_request_send_nowait_locked(fc, req);
3be5a52b
MS
492}
493
0b05b183
AA
494void fuse_force_forget(struct file *file, u64 nodeid)
495{
496 struct inode *inode = file->f_path.dentry->d_inode;
497 struct fuse_conn *fc = get_fuse_conn(inode);
498 struct fuse_req *req;
499 struct fuse_forget_in inarg;
500
501 memset(&inarg, 0, sizeof(inarg));
502 inarg.nlookup = 1;
503 req = fuse_get_req_nofail(fc, file);
504 req->in.h.opcode = FUSE_FORGET;
505 req->in.h.nodeid = nodeid;
506 req->in.numargs = 1;
507 req->in.args[0].size = sizeof(inarg);
508 req->in.args[0].value = &inarg;
509 req->isreply = 0;
510 fuse_request_send_nowait(fc, req);
511}
512
334f485d
MS
513/*
514 * Lock the request. Up to the next unlock_request() there mustn't be
515 * anything that could cause a page-fault. If the request was already
f9a2842e 516 * aborted, bail out.
334f485d 517 */
d7133114 518static int lock_request(struct fuse_conn *fc, struct fuse_req *req)
334f485d
MS
519{
520 int err = 0;
521 if (req) {
d7133114 522 spin_lock(&fc->lock);
f9a2842e 523 if (req->aborted)
334f485d
MS
524 err = -ENOENT;
525 else
526 req->locked = 1;
d7133114 527 spin_unlock(&fc->lock);
334f485d
MS
528 }
529 return err;
530}
531
532/*
f9a2842e 533 * Unlock request. If it was aborted while locked, the
334f485d
MS
534 * requester thread is currently waiting for it to be unlocked, so
535 * wake it up.
536 */
d7133114 537static void unlock_request(struct fuse_conn *fc, struct fuse_req *req)
334f485d
MS
538{
539 if (req) {
d7133114 540 spin_lock(&fc->lock);
334f485d 541 req->locked = 0;
f9a2842e 542 if (req->aborted)
334f485d 543 wake_up(&req->waitq);
d7133114 544 spin_unlock(&fc->lock);
334f485d
MS
545 }
546}
547
548struct fuse_copy_state {
d7133114 549 struct fuse_conn *fc;
334f485d
MS
550 int write;
551 struct fuse_req *req;
552 const struct iovec *iov;
dd3bb14f
MS
553 struct pipe_buffer *pipebufs;
554 struct pipe_buffer *currbuf;
555 struct pipe_inode_info *pipe;
334f485d
MS
556 unsigned long nr_segs;
557 unsigned long seglen;
558 unsigned long addr;
559 struct page *pg;
560 void *mapaddr;
561 void *buf;
562 unsigned len;
ce534fb0 563 unsigned move_pages:1;
334f485d
MS
564};
565
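/*
 * Note (informal): a fuse_copy_state describes one of three possible
 * data sources/sinks, depending on how the device was accessed:
 *
 *   - cs->iov          regular read()/write() on /dev/fuse
 *   - cs->pipebufs     splice() to or from a pipe
 *   - cs->move_pages   splice() with SPLICE_F_MOVE, where whole pages
 *                      may be stolen instead of copied
 *                      (see fuse_try_move_page() below)
 */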
d7133114 566static void fuse_copy_init(struct fuse_copy_state *cs, struct fuse_conn *fc,
c3021629 567 int write,
d7133114 568 const struct iovec *iov, unsigned long nr_segs)
334f485d
MS
569{
570 memset(cs, 0, sizeof(*cs));
d7133114 571 cs->fc = fc;
334f485d 572 cs->write = write;
334f485d
MS
573 cs->iov = iov;
574 cs->nr_segs = nr_segs;
575}
576
577/* Unmap and put previous page of userspace buffer */
8bfc016d 578static void fuse_copy_finish(struct fuse_copy_state *cs)
334f485d 579{
dd3bb14f
MS
580 if (cs->currbuf) {
581 struct pipe_buffer *buf = cs->currbuf;
582
c3021629
MS
583 if (!cs->write) {
584 buf->ops->unmap(cs->pipe, buf, cs->mapaddr);
585 } else {
7909b1c6 586 kunmap(buf->page);
c3021629
MS
587 buf->len = PAGE_SIZE - cs->len;
588 }
dd3bb14f
MS
589 cs->currbuf = NULL;
590 cs->mapaddr = NULL;
591 } else if (cs->mapaddr) {
7909b1c6 592 kunmap(cs->pg);
334f485d
MS
593 if (cs->write) {
594 flush_dcache_page(cs->pg);
595 set_page_dirty_lock(cs->pg);
596 }
597 put_page(cs->pg);
598 cs->mapaddr = NULL;
599 }
600}
601
602/*
603 * Get another pageful of the userspace buffer, map it into kernel
604 * address space, and lock the request
605 */
606static int fuse_copy_fill(struct fuse_copy_state *cs)
607{
608 unsigned long offset;
609 int err;
610
d7133114 611 unlock_request(cs->fc, cs->req);
334f485d 612 fuse_copy_finish(cs);
dd3bb14f
MS
613 if (cs->pipebufs) {
614 struct pipe_buffer *buf = cs->pipebufs;
615
c3021629
MS
616 if (!cs->write) {
617 err = buf->ops->confirm(cs->pipe, buf);
618 if (err)
619 return err;
620
621 BUG_ON(!cs->nr_segs);
622 cs->currbuf = buf;
7909b1c6 623 cs->mapaddr = buf->ops->map(cs->pipe, buf, 0);
c3021629
MS
624 cs->len = buf->len;
625 cs->buf = cs->mapaddr + buf->offset;
626 cs->pipebufs++;
627 cs->nr_segs--;
628 } else {
629 struct page *page;
dd3bb14f 630
c3021629
MS
631 if (cs->nr_segs == cs->pipe->buffers)
632 return -EIO;
633
634 page = alloc_page(GFP_HIGHUSER);
635 if (!page)
636 return -ENOMEM;
637
638 buf->page = page;
639 buf->offset = 0;
640 buf->len = 0;
641
642 cs->currbuf = buf;
7909b1c6 643 cs->mapaddr = kmap(page);
c3021629
MS
644 cs->buf = cs->mapaddr;
645 cs->len = PAGE_SIZE;
646 cs->pipebufs++;
647 cs->nr_segs++;
648 }
dd3bb14f
MS
649 } else {
650 if (!cs->seglen) {
651 BUG_ON(!cs->nr_segs);
652 cs->seglen = cs->iov[0].iov_len;
653 cs->addr = (unsigned long) cs->iov[0].iov_base;
654 cs->iov++;
655 cs->nr_segs--;
656 }
657 err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
658 if (err < 0)
659 return err;
660 BUG_ON(err != 1);
661 offset = cs->addr % PAGE_SIZE;
7909b1c6 662 cs->mapaddr = kmap(cs->pg);
dd3bb14f
MS
663 cs->buf = cs->mapaddr + offset;
664 cs->len = min(PAGE_SIZE - offset, cs->seglen);
665 cs->seglen -= cs->len;
666 cs->addr += cs->len;
334f485d 667 }
334f485d 668
d7133114 669 return lock_request(cs->fc, cs->req);
334f485d
MS
670}
671
672/* Do as much copy to/from userspace buffer as we can */
8bfc016d 673static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
334f485d
MS
674{
675 unsigned ncpy = min(*size, cs->len);
676 if (val) {
677 if (cs->write)
678 memcpy(cs->buf, *val, ncpy);
679 else
680 memcpy(*val, cs->buf, ncpy);
681 *val += ncpy;
682 }
683 *size -= ncpy;
684 cs->len -= ncpy;
685 cs->buf += ncpy;
686 return ncpy;
687}
688
ce534fb0
MS
689static int fuse_check_page(struct page *page)
690{
691 if (page_mapcount(page) ||
692 page->mapping != NULL ||
693 page_count(page) != 1 ||
694 (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
695 ~(1 << PG_locked |
696 1 << PG_referenced |
697 1 << PG_uptodate |
698 1 << PG_lru |
699 1 << PG_active |
700 1 << PG_reclaim))) {
701 printk(KERN_WARNING "fuse: trying to steal weird page\n");
702 printk(KERN_WARNING " page=%p index=%li flags=%08lx, count=%i, mapcount=%i, mapping=%p\n", page, page->index, page->flags, page_count(page), page_mapcount(page), page->mapping);
703 return 1;
704 }
705 return 0;
706}
707
708static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
709{
710 int err;
711 struct page *oldpage = *pagep;
712 struct page *newpage;
713 struct pipe_buffer *buf = cs->pipebufs;
ce534fb0
MS
714
715 unlock_request(cs->fc, cs->req);
716 fuse_copy_finish(cs);
717
718 err = buf->ops->confirm(cs->pipe, buf);
719 if (err)
720 return err;
721
722 BUG_ON(!cs->nr_segs);
723 cs->currbuf = buf;
724 cs->len = buf->len;
725 cs->pipebufs++;
726 cs->nr_segs--;
727
728 if (cs->len != PAGE_SIZE)
729 goto out_fallback;
730
731 if (buf->ops->steal(cs->pipe, buf) != 0)
732 goto out_fallback;
733
734 newpage = buf->page;
735
736 if (WARN_ON(!PageUptodate(newpage)))
737 return -EIO;
738
739 ClearPageMappedToDisk(newpage);
740
741 if (fuse_check_page(newpage) != 0)
742 goto out_fallback_unlock;
743
ce534fb0
MS
744 /*
745 * This is a new and locked page, it shouldn't be mapped or
746 * have any special flags on it
747 */
748 if (WARN_ON(page_mapped(oldpage)))
749 goto out_fallback_unlock;
750 if (WARN_ON(page_has_private(oldpage)))
751 goto out_fallback_unlock;
752 if (WARN_ON(PageDirty(oldpage) || PageWriteback(oldpage)))
753 goto out_fallback_unlock;
754 if (WARN_ON(PageMlocked(oldpage)))
755 goto out_fallback_unlock;
756
ef6a3c63 757 err = replace_page_cache_page(oldpage, newpage, GFP_KERNEL);
ce534fb0 758 if (err) {
ef6a3c63
MS
759 unlock_page(newpage);
760 return err;
ce534fb0 761 }
ef6a3c63 762
ce534fb0
MS
763 page_cache_get(newpage);
764
765 if (!(buf->flags & PIPE_BUF_FLAG_LRU))
766 lru_cache_add_file(newpage);
767
768 err = 0;
769 spin_lock(&cs->fc->lock);
770 if (cs->req->aborted)
771 err = -ENOENT;
772 else
773 *pagep = newpage;
774 spin_unlock(&cs->fc->lock);
775
776 if (err) {
777 unlock_page(newpage);
778 page_cache_release(newpage);
779 return err;
780 }
781
782 unlock_page(oldpage);
783 page_cache_release(oldpage);
784 cs->len = 0;
785
786 return 0;
787
788out_fallback_unlock:
789 unlock_page(newpage);
790out_fallback:
791 cs->mapaddr = buf->ops->map(cs->pipe, buf, 1);
792 cs->buf = cs->mapaddr + buf->offset;
793
794 err = lock_request(cs->fc, cs->req);
795 if (err)
796 return err;
797
798 return 1;
799}
800
c3021629
MS
801static int fuse_ref_page(struct fuse_copy_state *cs, struct page *page,
802 unsigned offset, unsigned count)
803{
804 struct pipe_buffer *buf;
805
806 if (cs->nr_segs == cs->pipe->buffers)
807 return -EIO;
808
809 unlock_request(cs->fc, cs->req);
810 fuse_copy_finish(cs);
811
812 buf = cs->pipebufs;
813 page_cache_get(page);
814 buf->page = page;
815 buf->offset = offset;
816 buf->len = count;
817
818 cs->pipebufs++;
819 cs->nr_segs++;
820 cs->len = 0;
821
822 return 0;
823}
824
334f485d
MS
825/*
826 * Copy a page in the request to/from the userspace buffer. Must be
827 * done atomically
828 */
ce534fb0 829static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
8bfc016d 830 unsigned offset, unsigned count, int zeroing)
334f485d 831{
ce534fb0
MS
832 int err;
833 struct page *page = *pagep;
834
b6777c40
MS
835 if (page && zeroing && count < PAGE_SIZE)
836 clear_highpage(page);
837
334f485d 838 while (count) {
c3021629
MS
839 if (cs->write && cs->pipebufs && page) {
840 return fuse_ref_page(cs, page, offset, count);
841 } else if (!cs->len) {
ce534fb0
MS
842 if (cs->move_pages && page &&
843 offset == 0 && count == PAGE_SIZE) {
844 err = fuse_try_move_page(cs, pagep);
845 if (err <= 0)
846 return err;
847 } else {
848 err = fuse_copy_fill(cs);
849 if (err)
850 return err;
851 }
1729a16c 852 }
334f485d 853 if (page) {
2408f6ef 854 void *mapaddr = kmap_atomic(page);
334f485d
MS
855 void *buf = mapaddr + offset;
856 offset += fuse_copy_do(cs, &buf, &count);
2408f6ef 857 kunmap_atomic(mapaddr);
334f485d
MS
858 } else
859 offset += fuse_copy_do(cs, NULL, &count);
860 }
861 if (page && !cs->write)
862 flush_dcache_page(page);
863 return 0;
864}
865
866/* Copy pages in the request to/from userspace buffer */
867static int fuse_copy_pages(struct fuse_copy_state *cs, unsigned nbytes,
868 int zeroing)
869{
870 unsigned i;
871 struct fuse_req *req = cs->req;
872 unsigned offset = req->page_offset;
873 unsigned count = min(nbytes, (unsigned) PAGE_SIZE - offset);
874
875 for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) {
ce534fb0
MS
876 int err;
877
878 err = fuse_copy_page(cs, &req->pages[i], offset, count,
879 zeroing);
334f485d
MS
880 if (err)
881 return err;
882
883 nbytes -= count;
884 count = min(nbytes, (unsigned) PAGE_SIZE);
885 offset = 0;
886 }
887 return 0;
888}
889
890/* Copy a single argument in the request to/from userspace buffer */
891static int fuse_copy_one(struct fuse_copy_state *cs, void *val, unsigned size)
892{
893 while (size) {
1729a16c
MS
894 if (!cs->len) {
895 int err = fuse_copy_fill(cs);
896 if (err)
897 return err;
898 }
334f485d
MS
899 fuse_copy_do(cs, &val, &size);
900 }
901 return 0;
902}
903
904/* Copy request arguments to/from userspace buffer */
905static int fuse_copy_args(struct fuse_copy_state *cs, unsigned numargs,
906 unsigned argpages, struct fuse_arg *args,
907 int zeroing)
908{
909 int err = 0;
910 unsigned i;
911
912 for (i = 0; !err && i < numargs; i++) {
913 struct fuse_arg *arg = &args[i];
914 if (i == numargs - 1 && argpages)
915 err = fuse_copy_pages(cs, arg->size, zeroing);
916 else
917 err = fuse_copy_one(cs, arg->value, arg->size);
918 }
919 return err;
920}
921
07e77dca
MS
922static int forget_pending(struct fuse_conn *fc)
923{
924 return fc->forget_list_head.next != NULL;
925}
926
a4d27e75
MS
927static int request_pending(struct fuse_conn *fc)
928{
07e77dca
MS
929 return !list_empty(&fc->pending) || !list_empty(&fc->interrupts) ||
930 forget_pending(fc);
a4d27e75
MS
931}
932
334f485d
MS
933/* Wait until a request is available on the pending list */
934static void request_wait(struct fuse_conn *fc)
b9ca67b2
MS
935__releases(fc->lock)
936__acquires(fc->lock)
334f485d
MS
937{
938 DECLARE_WAITQUEUE(wait, current);
939
940 add_wait_queue_exclusive(&fc->waitq, &wait);
a4d27e75 941 while (fc->connected && !request_pending(fc)) {
334f485d
MS
942 set_current_state(TASK_INTERRUPTIBLE);
943 if (signal_pending(current))
944 break;
945
d7133114 946 spin_unlock(&fc->lock);
334f485d 947 schedule();
d7133114 948 spin_lock(&fc->lock);
334f485d
MS
949 }
950 set_current_state(TASK_RUNNING);
951 remove_wait_queue(&fc->waitq, &wait);
952}
953
a4d27e75
MS
954/*
955 * Transfer an interrupt request to userspace
956 *
957 * Unlike other requests this is assembled on demand, without a need
958 * to allocate a separate fuse_req structure.
959 *
960 * Called with fc->lock held, releases it
961 */
c3021629
MS
962static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
963 size_t nbytes, struct fuse_req *req)
b9ca67b2 964__releases(fc->lock)
a4d27e75 965{
a4d27e75
MS
966 struct fuse_in_header ih;
967 struct fuse_interrupt_in arg;
968 unsigned reqsize = sizeof(ih) + sizeof(arg);
969 int err;
970
971 list_del_init(&req->intr_entry);
972 req->intr_unique = fuse_get_unique(fc);
973 memset(&ih, 0, sizeof(ih));
974 memset(&arg, 0, sizeof(arg));
975 ih.len = reqsize;
976 ih.opcode = FUSE_INTERRUPT;
977 ih.unique = req->intr_unique;
978 arg.unique = req->in.h.unique;
979
980 spin_unlock(&fc->lock);
c3021629 981 if (nbytes < reqsize)
a4d27e75
MS
982 return -EINVAL;
983
c3021629 984 err = fuse_copy_one(cs, &ih, sizeof(ih));
a4d27e75 985 if (!err)
c3021629
MS
986 err = fuse_copy_one(cs, &arg, sizeof(arg));
987 fuse_copy_finish(cs);
a4d27e75
MS
988
989 return err ? err : reqsize;
990}
991
02c048b9
MS
992static struct fuse_forget_link *dequeue_forget(struct fuse_conn *fc,
993 unsigned max,
994 unsigned *countp)
07e77dca 995{
02c048b9
MS
996 struct fuse_forget_link *head = fc->forget_list_head.next;
997 struct fuse_forget_link **newhead = &head;
998 unsigned count;
07e77dca 999
02c048b9
MS
1000 for (count = 0; *newhead != NULL && count < max; count++)
1001 newhead = &(*newhead)->next;
1002
1003 fc->forget_list_head.next = *newhead;
1004 *newhead = NULL;
07e77dca
MS
1005 if (fc->forget_list_head.next == NULL)
1006 fc->forget_list_tail = &fc->forget_list_head;
1007
02c048b9
MS
1008 if (countp != NULL)
1009 *countp = count;
1010
1011 return head;
07e77dca
MS
1012}
1013
1014static int fuse_read_single_forget(struct fuse_conn *fc,
1015 struct fuse_copy_state *cs,
1016 size_t nbytes)
1017__releases(fc->lock)
1018{
1019 int err;
02c048b9 1020 struct fuse_forget_link *forget = dequeue_forget(fc, 1, NULL);
07e77dca 1021 struct fuse_forget_in arg = {
02c048b9 1022 .nlookup = forget->forget_one.nlookup,
07e77dca
MS
1023 };
1024 struct fuse_in_header ih = {
1025 .opcode = FUSE_FORGET,
02c048b9 1026 .nodeid = forget->forget_one.nodeid,
07e77dca
MS
1027 .unique = fuse_get_unique(fc),
1028 .len = sizeof(ih) + sizeof(arg),
1029 };
1030
1031 spin_unlock(&fc->lock);
1032 kfree(forget);
1033 if (nbytes < ih.len)
1034 return -EINVAL;
1035
1036 err = fuse_copy_one(cs, &ih, sizeof(ih));
1037 if (!err)
1038 err = fuse_copy_one(cs, &arg, sizeof(arg));
1039 fuse_copy_finish(cs);
1040
1041 if (err)
1042 return err;
1043
1044 return ih.len;
1045}
1046
02c048b9
MS
1047static int fuse_read_batch_forget(struct fuse_conn *fc,
1048 struct fuse_copy_state *cs, size_t nbytes)
1049__releases(fc->lock)
1050{
1051 int err;
1052 unsigned max_forgets;
1053 unsigned count;
1054 struct fuse_forget_link *head;
1055 struct fuse_batch_forget_in arg = { .count = 0 };
1056 struct fuse_in_header ih = {
1057 .opcode = FUSE_BATCH_FORGET,
1058 .unique = fuse_get_unique(fc),
1059 .len = sizeof(ih) + sizeof(arg),
1060 };
1061
1062 if (nbytes < ih.len) {
1063 spin_unlock(&fc->lock);
1064 return -EINVAL;
1065 }
1066
1067 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
1068 head = dequeue_forget(fc, max_forgets, &count);
1069 spin_unlock(&fc->lock);
1070
1071 arg.count = count;
1072 ih.len += count * sizeof(struct fuse_forget_one);
1073 err = fuse_copy_one(cs, &ih, sizeof(ih));
1074 if (!err)
1075 err = fuse_copy_one(cs, &arg, sizeof(arg));
1076
1077 while (head) {
1078 struct fuse_forget_link *forget = head;
1079
1080 if (!err) {
1081 err = fuse_copy_one(cs, &forget->forget_one,
1082 sizeof(forget->forget_one));
1083 }
1084 head = forget->next;
1085 kfree(forget);
1086 }
1087
1088 fuse_copy_finish(cs);
1089
1090 if (err)
1091 return err;
1092
1093 return ih.len;
1094}
1095
1096static int fuse_read_forget(struct fuse_conn *fc, struct fuse_copy_state *cs,
1097 size_t nbytes)
1098__releases(fc->lock)
1099{
1100 if (fc->minor < 16 || fc->forget_list_head.next->next == NULL)
1101 return fuse_read_single_forget(fc, cs, nbytes);
1102 else
1103 return fuse_read_batch_forget(fc, cs, nbytes);
1104}
1105
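/*
 * Worked example (informal): for a batch of 'count' queued forgets the
 * message assembled above is laid out as
 *
 *   struct fuse_in_header          (ih.unique from fuse_get_unique())
 *   struct fuse_batch_forget_in    (arg.count == count)
 *   count * struct fuse_forget_one
 *
 * so ih.len == sizeof(ih) + sizeof(arg) + count * sizeof(struct
 * fuse_forget_one), with 'count' capped by the space the reader
 * provided (max_forgets above).  Connections older than protocol
 * minor 16 only ever see the single FUSE_FORGET form.
 */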
334f485d
MS
1106/*
1107 * Read a single request into the userspace filesystem's buffer. This
1108 * function waits until a request is available, then removes it from
1109 * the pending list and copies request data to userspace buffer. If
f9a2842e
MS
1110 * no reply is needed (FORGET) or request has been aborted or there
1111 * was an error during the copying then it's finished by calling
334f485d
MS
1112 * request_end(). Otherwise add it to the processing list, and set
1113 * the 'sent' flag.
1114 */
c3021629
MS
1115static ssize_t fuse_dev_do_read(struct fuse_conn *fc, struct file *file,
1116 struct fuse_copy_state *cs, size_t nbytes)
334f485d
MS
1117{
1118 int err;
334f485d
MS
1119 struct fuse_req *req;
1120 struct fuse_in *in;
334f485d
MS
1121 unsigned reqsize;
1122
1d3d752b 1123 restart:
d7133114 1124 spin_lock(&fc->lock);
e5ac1d1e
JD
1125 err = -EAGAIN;
1126 if ((file->f_flags & O_NONBLOCK) && fc->connected &&
a4d27e75 1127 !request_pending(fc))
e5ac1d1e
JD
1128 goto err_unlock;
1129
334f485d
MS
1130 request_wait(fc);
1131 err = -ENODEV;
9ba7cbba 1132 if (!fc->connected)
334f485d
MS
1133 goto err_unlock;
1134 err = -ERESTARTSYS;
a4d27e75 1135 if (!request_pending(fc))
334f485d
MS
1136 goto err_unlock;
1137
a4d27e75
MS
1138 if (!list_empty(&fc->interrupts)) {
1139 req = list_entry(fc->interrupts.next, struct fuse_req,
1140 intr_entry);
c3021629 1141 return fuse_read_interrupt(fc, cs, nbytes, req);
a4d27e75
MS
1142 }
1143
07e77dca
MS
1144 if (forget_pending(fc)) {
1145 if (list_empty(&fc->pending) || fc->forget_batch-- > 0)
02c048b9 1146 return fuse_read_forget(fc, cs, nbytes);
07e77dca
MS
1147
1148 if (fc->forget_batch <= -8)
1149 fc->forget_batch = 16;
1150 }
1151
334f485d 1152 req = list_entry(fc->pending.next, struct fuse_req, list);
83cfd493 1153 req->state = FUSE_REQ_READING;
d77a1d5b 1154 list_move(&req->list, &fc->io);
334f485d
MS
1155
1156 in = &req->in;
1d3d752b
MS
1157 reqsize = in->h.len;
1158 /* If request is too large, reply with an error and restart the read */
c3021629 1159 if (nbytes < reqsize) {
1d3d752b
MS
1160 req->out.h.error = -EIO;
1161 /* SETXATTR is special, since its data may be too large */
1162 if (in->h.opcode == FUSE_SETXATTR)
1163 req->out.h.error = -E2BIG;
1164 request_end(fc, req);
1165 goto restart;
334f485d 1166 }
d7133114 1167 spin_unlock(&fc->lock);
c3021629
MS
1168 cs->req = req;
1169 err = fuse_copy_one(cs, &in->h, sizeof(in->h));
1d3d752b 1170 if (!err)
c3021629 1171 err = fuse_copy_args(cs, in->numargs, in->argpages,
1d3d752b 1172 (struct fuse_arg *) in->args, 0);
c3021629 1173 fuse_copy_finish(cs);
d7133114 1174 spin_lock(&fc->lock);
334f485d 1175 req->locked = 0;
c9c9d7df
MS
1176 if (req->aborted) {
1177 request_end(fc, req);
1178 return -ENODEV;
1179 }
334f485d 1180 if (err) {
c9c9d7df 1181 req->out.h.error = -EIO;
334f485d
MS
1182 request_end(fc, req);
1183 return err;
1184 }
1185 if (!req->isreply)
1186 request_end(fc, req);
1187 else {
83cfd493 1188 req->state = FUSE_REQ_SENT;
d77a1d5b 1189 list_move_tail(&req->list, &fc->processing);
a4d27e75
MS
1190 if (req->interrupted)
1191 queue_interrupt(fc, req);
d7133114 1192 spin_unlock(&fc->lock);
334f485d
MS
1193 }
1194 return reqsize;
1195
1196 err_unlock:
d7133114 1197 spin_unlock(&fc->lock);
334f485d
MS
1198 return err;
1199}
1200
c3021629
MS
1201static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
1202 unsigned long nr_segs, loff_t pos)
1203{
1204 struct fuse_copy_state cs;
1205 struct file *file = iocb->ki_filp;
1206 struct fuse_conn *fc = fuse_get_conn(file);
1207 if (!fc)
1208 return -EPERM;
1209
1210 fuse_copy_init(&cs, fc, 1, iov, nr_segs);
1211
1212 return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs));
1213}
1214
1215static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe,
1216 struct pipe_buffer *buf)
1217{
1218 return 1;
1219}
1220
1221static const struct pipe_buf_operations fuse_dev_pipe_buf_ops = {
1222 .can_merge = 0,
1223 .map = generic_pipe_buf_map,
1224 .unmap = generic_pipe_buf_unmap,
1225 .confirm = generic_pipe_buf_confirm,
1226 .release = generic_pipe_buf_release,
1227 .steal = fuse_dev_pipe_buf_steal,
1228 .get = generic_pipe_buf_get,
1229};
1230
1231static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
1232 struct pipe_inode_info *pipe,
1233 size_t len, unsigned int flags)
1234{
1235 int ret;
1236 int page_nr = 0;
1237 int do_wakeup = 0;
1238 struct pipe_buffer *bufs;
1239 struct fuse_copy_state cs;
1240 struct fuse_conn *fc = fuse_get_conn(in);
1241 if (!fc)
1242 return -EPERM;
1243
07e77dca 1244 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
c3021629
MS
1245 if (!bufs)
1246 return -ENOMEM;
1247
1248 fuse_copy_init(&cs, fc, 1, NULL, 0);
1249 cs.pipebufs = bufs;
1250 cs.pipe = pipe;
1251 ret = fuse_dev_do_read(fc, in, &cs, len);
1252 if (ret < 0)
1253 goto out;
1254
1255 ret = 0;
1256 pipe_lock(pipe);
1257
1258 if (!pipe->readers) {
1259 send_sig(SIGPIPE, current, 0);
1260 if (!ret)
1261 ret = -EPIPE;
1262 goto out_unlock;
1263 }
1264
1265 if (pipe->nrbufs + cs.nr_segs > pipe->buffers) {
1266 ret = -EIO;
1267 goto out_unlock;
1268 }
1269
1270 while (page_nr < cs.nr_segs) {
1271 int newbuf = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
1272 struct pipe_buffer *buf = pipe->bufs + newbuf;
1273
1274 buf->page = bufs[page_nr].page;
1275 buf->offset = bufs[page_nr].offset;
1276 buf->len = bufs[page_nr].len;
1277 buf->ops = &fuse_dev_pipe_buf_ops;
1278
1279 pipe->nrbufs++;
1280 page_nr++;
1281 ret += buf->len;
1282
1283 if (pipe->inode)
1284 do_wakeup = 1;
1285 }
1286
1287out_unlock:
1288 pipe_unlock(pipe);
1289
1290 if (do_wakeup) {
1291 smp_mb();
1292 if (waitqueue_active(&pipe->wait))
1293 wake_up_interruptible(&pipe->wait);
1294 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
1295 }
1296
1297out:
1298 for (; page_nr < cs.nr_segs; page_nr++)
1299 page_cache_release(bufs[page_nr].page);
1300
1301 kfree(bufs);
1302 return ret;
1303}
1304
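/*
 * Illustrative sketch only (userspace side, not part of this file):
 * a daemon that wants the zero-copy path above splices a request from
 * the device into a pipe instead of read()ing it, roughly:
 *
 *	// assumption: fuse_fd is an open /dev/fuse fd, p[2] a pipe
 *	ssize_t n = splice(fuse_fd, NULL, p[1], NULL, bufsize, SPLICE_F_MOVE);
 *
 * and later answers by splicing from p[0] back into fuse_fd, so file
 * data never has to pass through a userspace buffer.  The
 * SPLICE_F_MOVE page-stealing on the write side is handled by
 * fuse_dev_splice_write()/fuse_try_move_page() below.
 */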
95668a69
TH
1305static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
1306 struct fuse_copy_state *cs)
1307{
1308 struct fuse_notify_poll_wakeup_out outarg;
f6d47a17 1309 int err = -EINVAL;
95668a69
TH
1310
1311 if (size != sizeof(outarg))
f6d47a17 1312 goto err;
95668a69
TH
1313
1314 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1315 if (err)
f6d47a17 1316 goto err;
95668a69 1317
f6d47a17 1318 fuse_copy_finish(cs);
95668a69 1319 return fuse_notify_poll_wakeup(fc, &outarg);
f6d47a17
MS
1320
1321err:
1322 fuse_copy_finish(cs);
1323 return err;
95668a69
TH
1324}
1325
3b463ae0
JM
1326static int fuse_notify_inval_inode(struct fuse_conn *fc, unsigned int size,
1327 struct fuse_copy_state *cs)
1328{
1329 struct fuse_notify_inval_inode_out outarg;
1330 int err = -EINVAL;
1331
1332 if (size != sizeof(outarg))
1333 goto err;
1334
1335 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1336 if (err)
1337 goto err;
1338 fuse_copy_finish(cs);
1339
1340 down_read(&fc->killsb);
1341 err = -ENOENT;
b21dda43
MS
1342 if (fc->sb) {
1343 err = fuse_reverse_inval_inode(fc->sb, outarg.ino,
1344 outarg.off, outarg.len);
1345 }
3b463ae0
JM
1346 up_read(&fc->killsb);
1347 return err;
1348
1349err:
1350 fuse_copy_finish(cs);
1351 return err;
1352}
1353
1354static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
1355 struct fuse_copy_state *cs)
1356{
1357 struct fuse_notify_inval_entry_out outarg;
b2d82ee3
FW
1358 int err = -ENOMEM;
1359 char *buf;
3b463ae0
JM
1360 struct qstr name;
1361
b2d82ee3
FW
1362 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1363 if (!buf)
1364 goto err;
1365
1366 err = -EINVAL;
3b463ae0
JM
1367 if (size < sizeof(outarg))
1368 goto err;
1369
1370 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1371 if (err)
1372 goto err;
1373
1374 err = -ENAMETOOLONG;
1375 if (outarg.namelen > FUSE_NAME_MAX)
1376 goto err;
1377
c2183d1e
MS
1378 err = -EINVAL;
1379 if (size != sizeof(outarg) + outarg.namelen + 1)
1380 goto err;
1381
3b463ae0
JM
1382 name.name = buf;
1383 name.len = outarg.namelen;
1384 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1385 if (err)
1386 goto err;
1387 fuse_copy_finish(cs);
1388 buf[outarg.namelen] = 0;
1389 name.hash = full_name_hash(name.name, name.len);
1390
1391 down_read(&fc->killsb);
1392 err = -ENOENT;
b21dda43 1393 if (fc->sb)
451d0f59
JM
1394 err = fuse_reverse_inval_entry(fc->sb, outarg.parent, 0, &name);
1395 up_read(&fc->killsb);
1396 kfree(buf);
1397 return err;
1398
1399err:
1400 kfree(buf);
1401 fuse_copy_finish(cs);
1402 return err;
1403}
1404
1405static int fuse_notify_delete(struct fuse_conn *fc, unsigned int size,
1406 struct fuse_copy_state *cs)
1407{
1408 struct fuse_notify_delete_out outarg;
1409 int err = -ENOMEM;
1410 char *buf;
1411 struct qstr name;
1412
1413 buf = kzalloc(FUSE_NAME_MAX + 1, GFP_KERNEL);
1414 if (!buf)
1415 goto err;
1416
1417 err = -EINVAL;
1418 if (size < sizeof(outarg))
1419 goto err;
1420
1421 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1422 if (err)
1423 goto err;
1424
1425 err = -ENAMETOOLONG;
1426 if (outarg.namelen > FUSE_NAME_MAX)
1427 goto err;
1428
1429 err = -EINVAL;
1430 if (size != sizeof(outarg) + outarg.namelen + 1)
1431 goto err;
1432
1433 name.name = buf;
1434 name.len = outarg.namelen;
1435 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
1436 if (err)
1437 goto err;
1438 fuse_copy_finish(cs);
1439 buf[outarg.namelen] = 0;
1440 name.hash = full_name_hash(name.name, name.len);
1441
1442 down_read(&fc->killsb);
1443 err = -ENOENT;
1444 if (fc->sb)
1445 err = fuse_reverse_inval_entry(fc->sb, outarg.parent,
1446 outarg.child, &name);
3b463ae0 1447 up_read(&fc->killsb);
b2d82ee3 1448 kfree(buf);
3b463ae0
JM
1449 return err;
1450
1451err:
b2d82ee3 1452 kfree(buf);
3b463ae0
JM
1453 fuse_copy_finish(cs);
1454 return err;
1455}
1456
a1d75f25
MS
1457static int fuse_notify_store(struct fuse_conn *fc, unsigned int size,
1458 struct fuse_copy_state *cs)
1459{
1460 struct fuse_notify_store_out outarg;
1461 struct inode *inode;
1462 struct address_space *mapping;
1463 u64 nodeid;
1464 int err;
1465 pgoff_t index;
1466 unsigned int offset;
1467 unsigned int num;
1468 loff_t file_size;
1469 loff_t end;
1470
1471 err = -EINVAL;
1472 if (size < sizeof(outarg))
1473 goto out_finish;
1474
1475 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1476 if (err)
1477 goto out_finish;
1478
1479 err = -EINVAL;
1480 if (size - sizeof(outarg) != outarg.size)
1481 goto out_finish;
1482
1483 nodeid = outarg.nodeid;
1484
1485 down_read(&fc->killsb);
1486
1487 err = -ENOENT;
1488 if (!fc->sb)
1489 goto out_up_killsb;
1490
1491 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1492 if (!inode)
1493 goto out_up_killsb;
1494
1495 mapping = inode->i_mapping;
1496 index = outarg.offset >> PAGE_CACHE_SHIFT;
1497 offset = outarg.offset & ~PAGE_CACHE_MASK;
1498 file_size = i_size_read(inode);
1499 end = outarg.offset + outarg.size;
1500 if (end > file_size) {
1501 file_size = end;
1502 fuse_write_update_size(inode, file_size);
1503 }
1504
1505 num = outarg.size;
1506 while (num) {
1507 struct page *page;
1508 unsigned int this_num;
1509
1510 err = -ENOMEM;
1511 page = find_or_create_page(mapping, index,
1512 mapping_gfp_mask(mapping));
1513 if (!page)
1514 goto out_iput;
1515
1516 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1517 err = fuse_copy_page(cs, &page, offset, this_num, 0);
1518 if (!err && offset == 0 && (num != 0 || file_size == end))
1519 SetPageUptodate(page);
1520 unlock_page(page);
1521 page_cache_release(page);
1522
1523 if (err)
1524 goto out_iput;
1525
1526 num -= this_num;
1527 offset = 0;
1528 index++;
1529 }
1530
1531 err = 0;
1532
1533out_iput:
1534 iput(inode);
1535out_up_killsb:
1536 up_read(&fc->killsb);
1537out_finish:
1538 fuse_copy_finish(cs);
1539 return err;
1540}
1541
2d45ba38
MS
1542static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req)
1543{
0be8557b 1544 release_pages(req->pages, req->num_pages, 0);
2d45ba38
MS
1545}
1546
1547static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
1548 struct fuse_notify_retrieve_out *outarg)
1549{
1550 int err;
1551 struct address_space *mapping = inode->i_mapping;
1552 struct fuse_req *req;
1553 pgoff_t index;
1554 loff_t file_size;
1555 unsigned int num;
1556 unsigned int offset;
0157443c 1557 size_t total_len = 0;
2d45ba38
MS
1558
1559 req = fuse_get_req(fc);
1560 if (IS_ERR(req))
1561 return PTR_ERR(req);
1562
1563 offset = outarg->offset & ~PAGE_CACHE_MASK;
1564
1565 req->in.h.opcode = FUSE_NOTIFY_REPLY;
1566 req->in.h.nodeid = outarg->nodeid;
1567 req->in.numargs = 2;
1568 req->in.argpages = 1;
1569 req->page_offset = offset;
1570 req->end = fuse_retrieve_end;
1571
1572 index = outarg->offset >> PAGE_CACHE_SHIFT;
1573 file_size = i_size_read(inode);
1574 num = outarg->size;
1575 if (outarg->offset > file_size)
1576 num = 0;
1577 else if (outarg->offset + num > file_size)
1578 num = file_size - outarg->offset;
1579
48706d0a 1580 while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
2d45ba38
MS
1581 struct page *page;
1582 unsigned int this_num;
1583
1584 page = find_get_page(mapping, index);
1585 if (!page)
1586 break;
1587
1588 this_num = min_t(unsigned, num, PAGE_CACHE_SIZE - offset);
1589 req->pages[req->num_pages] = page;
1590 req->num_pages++;
1591
c9e67d48 1592 offset = 0;
2d45ba38
MS
1593 num -= this_num;
1594 total_len += this_num;
48706d0a 1595 index++;
2d45ba38
MS
1596 }
1597 req->misc.retrieve_in.offset = outarg->offset;
1598 req->misc.retrieve_in.size = total_len;
1599 req->in.args[0].size = sizeof(req->misc.retrieve_in);
1600 req->in.args[0].value = &req->misc.retrieve_in;
1601 req->in.args[1].size = total_len;
1602
1603 err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique);
1604 if (err)
1605 fuse_retrieve_end(fc, req);
1606
1607 return err;
1608}
1609
1610static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
1611 struct fuse_copy_state *cs)
1612{
1613 struct fuse_notify_retrieve_out outarg;
1614 struct inode *inode;
1615 int err;
1616
1617 err = -EINVAL;
1618 if (size != sizeof(outarg))
1619 goto copy_finish;
1620
1621 err = fuse_copy_one(cs, &outarg, sizeof(outarg));
1622 if (err)
1623 goto copy_finish;
1624
1625 fuse_copy_finish(cs);
1626
1627 down_read(&fc->killsb);
1628 err = -ENOENT;
1629 if (fc->sb) {
1630 u64 nodeid = outarg.nodeid;
1631
1632 inode = ilookup5(fc->sb, nodeid, fuse_inode_eq, &nodeid);
1633 if (inode) {
1634 err = fuse_retrieve(fc, inode, &outarg);
1635 iput(inode);
1636 }
1637 }
1638 up_read(&fc->killsb);
1639
1640 return err;
1641
1642copy_finish:
1643 fuse_copy_finish(cs);
1644 return err;
1645}
1646
8599396b
TH
1647static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
1648 unsigned int size, struct fuse_copy_state *cs)
1649{
1650 switch (code) {
95668a69
TH
1651 case FUSE_NOTIFY_POLL:
1652 return fuse_notify_poll(fc, size, cs);
1653
3b463ae0
JM
1654 case FUSE_NOTIFY_INVAL_INODE:
1655 return fuse_notify_inval_inode(fc, size, cs);
1656
1657 case FUSE_NOTIFY_INVAL_ENTRY:
1658 return fuse_notify_inval_entry(fc, size, cs);
1659
a1d75f25
MS
1660 case FUSE_NOTIFY_STORE:
1661 return fuse_notify_store(fc, size, cs);
1662
2d45ba38
MS
1663 case FUSE_NOTIFY_RETRIEVE:
1664 return fuse_notify_retrieve(fc, size, cs);
1665
451d0f59
JM
1666 case FUSE_NOTIFY_DELETE:
1667 return fuse_notify_delete(fc, size, cs);
1668
8599396b 1669 default:
f6d47a17 1670 fuse_copy_finish(cs);
8599396b
TH
1671 return -EINVAL;
1672 }
1673}
1674
334f485d
MS
1675/* Look up request on processing list by unique ID */
1676static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique)
1677{
1678 struct list_head *entry;
1679
1680 list_for_each(entry, &fc->processing) {
1681 struct fuse_req *req;
1682 req = list_entry(entry, struct fuse_req, list);
a4d27e75 1683 if (req->in.h.unique == unique || req->intr_unique == unique)
334f485d
MS
1684 return req;
1685 }
1686 return NULL;
1687}
1688
1689static int copy_out_args(struct fuse_copy_state *cs, struct fuse_out *out,
1690 unsigned nbytes)
1691{
1692 unsigned reqsize = sizeof(struct fuse_out_header);
1693
1694 if (out->h.error)
1695 return nbytes != reqsize ? -EINVAL : 0;
1696
1697 reqsize += len_args(out->numargs, out->args);
1698
1699 if (reqsize < nbytes || (reqsize > nbytes && !out->argvar))
1700 return -EINVAL;
1701 else if (reqsize > nbytes) {
1702 struct fuse_arg *lastarg = &out->args[out->numargs-1];
1703 unsigned diffsize = reqsize - nbytes;
1704 if (diffsize > lastarg->size)
1705 return -EINVAL;
1706 lastarg->size -= diffsize;
1707 }
1708 return fuse_copy_args(cs, out->numargs, out->argpages, out->args,
1709 out->page_zeroing);
1710}
1711
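/*
 * Worked example (informal) of the size checks above, where the full
 * declared reply size is sizeof(struct fuse_out_header) plus the
 * declared out-args:
 *
 *   - an error reply (out->h.error != 0) must be exactly header-sized;
 *   - otherwise nbytes may not exceed the declared size, and it may
 *     only fall short if the last argument is variable length
 *     (out->argvar), in which case that argument is shrunk by the
 *     difference.
 */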
1712/*
1713 * Write a single reply to a request. First the header is copied from
1714 * the write buffer. The request is then searched on the processing
1715 * list by the unique ID found in the header. If found, then remove
1716 * it from the list and copy the rest of the buffer to the request.
1717 * The request is finished by calling request_end()
1718 */
dd3bb14f
MS
1719static ssize_t fuse_dev_do_write(struct fuse_conn *fc,
1720 struct fuse_copy_state *cs, size_t nbytes)
334f485d
MS
1721{
1722 int err;
334f485d
MS
1723 struct fuse_req *req;
1724 struct fuse_out_header oh;
334f485d 1725
334f485d
MS
1726 if (nbytes < sizeof(struct fuse_out_header))
1727 return -EINVAL;
1728
dd3bb14f 1729 err = fuse_copy_one(cs, &oh, sizeof(oh));
334f485d
MS
1730 if (err)
1731 goto err_finish;
8599396b
TH
1732
1733 err = -EINVAL;
1734 if (oh.len != nbytes)
1735 goto err_finish;
1736
1737 /*
1738 * Zero oh.unique indicates an unsolicited notification message,
1739 * and oh.error then contains the notification code.
1740 */
1741 if (!oh.unique) {
dd3bb14f 1742 err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), cs);
8599396b
TH
1743 return err ? err : nbytes;
1744 }
1745
334f485d 1746 err = -EINVAL;
8599396b 1747 if (oh.error <= -1000 || oh.error > 0)
334f485d
MS
1748 goto err_finish;
1749
d7133114 1750 spin_lock(&fc->lock);
69a53bf2
MS
1751 err = -ENOENT;
1752 if (!fc->connected)
1753 goto err_unlock;
1754
334f485d 1755 req = request_find(fc, oh.unique);
334f485d
MS
1756 if (!req)
1757 goto err_unlock;
1758
f9a2842e 1759 if (req->aborted) {
d7133114 1760 spin_unlock(&fc->lock);
dd3bb14f 1761 fuse_copy_finish(cs);
d7133114 1762 spin_lock(&fc->lock);
222f1d69 1763 request_end(fc, req);
334f485d
MS
1764 return -ENOENT;
1765 }
a4d27e75
MS
1766 /* Is it an interrupt reply? */
1767 if (req->intr_unique == oh.unique) {
1768 err = -EINVAL;
1769 if (nbytes != sizeof(struct fuse_out_header))
1770 goto err_unlock;
1771
1772 if (oh.error == -ENOSYS)
1773 fc->no_interrupt = 1;
1774 else if (oh.error == -EAGAIN)
1775 queue_interrupt(fc, req);
1776
1777 spin_unlock(&fc->lock);
dd3bb14f 1778 fuse_copy_finish(cs);
a4d27e75
MS
1779 return nbytes;
1780 }
1781
1782 req->state = FUSE_REQ_WRITING;
d77a1d5b 1783 list_move(&req->list, &fc->io);
334f485d
MS
1784 req->out.h = oh;
1785 req->locked = 1;
dd3bb14f 1786 cs->req = req;
ce534fb0
MS
1787 if (!req->out.page_replace)
1788 cs->move_pages = 0;
d7133114 1789 spin_unlock(&fc->lock);
334f485d 1790
dd3bb14f
MS
1791 err = copy_out_args(cs, &req->out, nbytes);
1792 fuse_copy_finish(cs);
334f485d 1793
d7133114 1794 spin_lock(&fc->lock);
334f485d
MS
1795 req->locked = 0;
1796 if (!err) {
f9a2842e 1797 if (req->aborted)
334f485d 1798 err = -ENOENT;
f9a2842e 1799 } else if (!req->aborted)
334f485d
MS
1800 req->out.h.error = -EIO;
1801 request_end(fc, req);
1802
1803 return err ? err : nbytes;
1804
1805 err_unlock:
d7133114 1806 spin_unlock(&fc->lock);
334f485d 1807 err_finish:
dd3bb14f 1808 fuse_copy_finish(cs);
334f485d
MS
1809 return err;
1810}
1811
dd3bb14f
MS
1812static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
1813 unsigned long nr_segs, loff_t pos)
1814{
1815 struct fuse_copy_state cs;
1816 struct fuse_conn *fc = fuse_get_conn(iocb->ki_filp);
1817 if (!fc)
1818 return -EPERM;
1819
c3021629 1820 fuse_copy_init(&cs, fc, 0, iov, nr_segs);
dd3bb14f
MS
1821
1822 return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs));
1823}
1824
1825static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe,
1826 struct file *out, loff_t *ppos,
1827 size_t len, unsigned int flags)
1828{
1829 unsigned nbuf;
1830 unsigned idx;
1831 struct pipe_buffer *bufs;
1832 struct fuse_copy_state cs;
1833 struct fuse_conn *fc;
1834 size_t rem;
1835 ssize_t ret;
1836
1837 fc = fuse_get_conn(out);
1838 if (!fc)
1839 return -EPERM;
1840
07e77dca 1841 bufs = kmalloc(pipe->buffers * sizeof(struct pipe_buffer), GFP_KERNEL);
dd3bb14f
MS
1842 if (!bufs)
1843 return -ENOMEM;
1844
1845 pipe_lock(pipe);
1846 nbuf = 0;
1847 rem = 0;
1848 for (idx = 0; idx < pipe->nrbufs && rem < len; idx++)
1849 rem += pipe->bufs[(pipe->curbuf + idx) & (pipe->buffers - 1)].len;
1850
1851 ret = -EINVAL;
1852 if (rem < len) {
1853 pipe_unlock(pipe);
1854 goto out;
1855 }
1856
1857 rem = len;
1858 while (rem) {
1859 struct pipe_buffer *ibuf;
1860 struct pipe_buffer *obuf;
1861
1862 BUG_ON(nbuf >= pipe->buffers);
1863 BUG_ON(!pipe->nrbufs);
1864 ibuf = &pipe->bufs[pipe->curbuf];
1865 obuf = &bufs[nbuf];
1866
1867 if (rem >= ibuf->len) {
1868 *obuf = *ibuf;
1869 ibuf->ops = NULL;
1870 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
1871 pipe->nrbufs--;
1872 } else {
1873 ibuf->ops->get(pipe, ibuf);
1874 *obuf = *ibuf;
1875 obuf->flags &= ~PIPE_BUF_FLAG_GIFT;
1876 obuf->len = rem;
1877 ibuf->offset += obuf->len;
1878 ibuf->len -= obuf->len;
1879 }
1880 nbuf++;
1881 rem -= obuf->len;
1882 }
1883 pipe_unlock(pipe);
1884
c3021629 1885 fuse_copy_init(&cs, fc, 0, NULL, nbuf);
dd3bb14f 1886 cs.pipebufs = bufs;
dd3bb14f
MS
1887 cs.pipe = pipe;
1888
ce534fb0
MS
1889 if (flags & SPLICE_F_MOVE)
1890 cs.move_pages = 1;
1891
dd3bb14f
MS
1892 ret = fuse_dev_do_write(fc, &cs, len);
1893
1894 for (idx = 0; idx < nbuf; idx++) {
1895 struct pipe_buffer *buf = &bufs[idx];
1896 buf->ops->release(pipe, buf);
1897 }
1898out:
1899 kfree(bufs);
1900 return ret;
1901}
1902
334f485d
MS
1903static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
1904{
334f485d 1905 unsigned mask = POLLOUT | POLLWRNORM;
7025d9ad 1906 struct fuse_conn *fc = fuse_get_conn(file);
334f485d 1907 if (!fc)
7025d9ad 1908 return POLLERR;
334f485d
MS
1909
1910 poll_wait(file, &fc->waitq, wait);
1911
d7133114 1912 spin_lock(&fc->lock);
7025d9ad
MS
1913 if (!fc->connected)
1914 mask = POLLERR;
a4d27e75 1915 else if (request_pending(fc))
7025d9ad 1916 mask |= POLLIN | POLLRDNORM;
d7133114 1917 spin_unlock(&fc->lock);
334f485d
MS
1918
1919 return mask;
1920}
1921
69a53bf2
MS
1922/*
1923 * Abort all requests on the given list (pending or processing)
1924 *
d7133114 1925 * This function releases and reacquires fc->lock
69a53bf2 1926 */
334f485d 1927static void end_requests(struct fuse_conn *fc, struct list_head *head)
b9ca67b2
MS
1928__releases(fc->lock)
1929__acquires(fc->lock)
334f485d
MS
1930{
1931 while (!list_empty(head)) {
1932 struct fuse_req *req;
1933 req = list_entry(head->next, struct fuse_req, list);
334f485d
MS
1934 req->out.h.error = -ECONNABORTED;
1935 request_end(fc, req);
d7133114 1936 spin_lock(&fc->lock);
334f485d
MS
1937 }
1938}
1939
69a53bf2
MS
1940/*
1941 * Abort requests under I/O
1942 *
f9a2842e 1943 * The requests are set to aborted and finished, and the request
69a53bf2
MS
1944 * waiter is woken up. This will make request_wait_answer() wait
1945 * until the request is unlocked and then return.
64c6d8ed
MS
1946 *
1947 * If the request is asynchronous, then the end function needs to be
1948 * called after waiting for the request to be unlocked (if it was
1949 * locked).
69a53bf2
MS
1950 */
1951static void end_io_requests(struct fuse_conn *fc)
b9ca67b2
MS
1952__releases(fc->lock)
1953__acquires(fc->lock)
69a53bf2
MS
1954{
1955 while (!list_empty(&fc->io)) {
64c6d8ed
MS
1956 struct fuse_req *req =
1957 list_entry(fc->io.next, struct fuse_req, list);
1958 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
1959
f9a2842e 1960 req->aborted = 1;
69a53bf2
MS
1961 req->out.h.error = -ECONNABORTED;
1962 req->state = FUSE_REQ_FINISHED;
1963 list_del_init(&req->list);
1964 wake_up(&req->waitq);
64c6d8ed
MS
1965 if (end) {
1966 req->end = NULL;
64c6d8ed 1967 __fuse_get_request(req);
d7133114 1968 spin_unlock(&fc->lock);
64c6d8ed
MS
1969 wait_event(req->waitq, !req->locked);
1970 end(fc, req);
e9bb09dd 1971 fuse_put_request(fc, req);
d7133114 1972 spin_lock(&fc->lock);
64c6d8ed 1973 }
69a53bf2
MS
1974 }
1975}
1976
595afaf9 1977static void end_queued_requests(struct fuse_conn *fc)
b9ca67b2
MS
1978__releases(fc->lock)
1979__acquires(fc->lock)
595afaf9
MS
1980{
1981 fc->max_background = UINT_MAX;
1982 flush_bg_queue(fc);
1983 end_requests(fc, &fc->pending);
1984 end_requests(fc, &fc->processing);
07e77dca 1985 while (forget_pending(fc))
02c048b9 1986 kfree(dequeue_forget(fc, 1, NULL));
595afaf9
MS
1987}
1988
357ccf2b
BG
1989static void end_polls(struct fuse_conn *fc)
1990{
1991 struct rb_node *p;
1992
1993 p = rb_first(&fc->polled_files);
1994
1995 while (p) {
1996 struct fuse_file *ff;
1997 ff = rb_entry(p, struct fuse_file, polled_node);
1998 wake_up_interruptible_all(&ff->poll_wait);
1999
2000 p = rb_next(p);
2001 }
2002}
2003
69a53bf2
MS
2004/*
2005 * Abort all requests.
2006 *
2007 * Emergency exit in case of a malicious or accidental deadlock, or
2008 * just a hung filesystem.
2009 *
2010 * The same effect is usually achievable through killing the
2011 * filesystem daemon and all users of the filesystem. The exception
2012 * is the combination of an asynchronous request and the tricky
2013 * deadlock (see Documentation/filesystems/fuse.txt).
2014 *
2015 * During the aborting, progression of requests from the pending and
2016 * processing lists onto the io list, and progression of new requests
2017 * onto the pending list is prevented by fc->connected being false.
2018 *
2019 * Progression of requests under I/O to the processing list is
f9a2842e
MS
2020 * prevented by the req->aborted flag being true for these requests.
2021 * For this reason requests on the io list must be aborted first.
69a53bf2
MS
2022 */
2023void fuse_abort_conn(struct fuse_conn *fc)
2024{
d7133114 2025 spin_lock(&fc->lock);
69a53bf2
MS
2026 if (fc->connected) {
2027 fc->connected = 0;
51eb01e7 2028 fc->blocked = 0;
69a53bf2 2029 end_io_requests(fc);
595afaf9 2030 end_queued_requests(fc);
357ccf2b 2031 end_polls(fc);
69a53bf2 2032 wake_up_all(&fc->waitq);
51eb01e7 2033 wake_up_all(&fc->blocked_waitq);
385a17bf 2034 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
69a53bf2 2035 }
d7133114 2036 spin_unlock(&fc->lock);
69a53bf2 2037}
08cbf542 2038EXPORT_SYMBOL_GPL(fuse_abort_conn);
69a53bf2 2039
08cbf542 2040int fuse_dev_release(struct inode *inode, struct file *file)
334f485d 2041{
0720b315 2042 struct fuse_conn *fc = fuse_get_conn(file);
334f485d 2043 if (fc) {
d7133114 2044 spin_lock(&fc->lock);
1e9a4ed9 2045 fc->connected = 0;
595afaf9
MS
2046 fc->blocked = 0;
2047 end_queued_requests(fc);
357ccf2b 2048 end_polls(fc);
595afaf9 2049 wake_up_all(&fc->blocked_waitq);
d7133114 2050 spin_unlock(&fc->lock);
bafa9654 2051 fuse_conn_put(fc);
385a17bf 2052 }
f543f253 2053
334f485d
MS
2054 return 0;
2055}
08cbf542 2056EXPORT_SYMBOL_GPL(fuse_dev_release);
334f485d 2057
385a17bf
JD
2058static int fuse_dev_fasync(int fd, struct file *file, int on)
2059{
2060 struct fuse_conn *fc = fuse_get_conn(file);
2061 if (!fc)
a87046d8 2062 return -EPERM;
385a17bf
JD
2063
2064 /* No locking - fasync_helper does its own locking */
2065 return fasync_helper(fd, file, on, &fc->fasync);
2066}
2067
4b6f5d20 2068const struct file_operations fuse_dev_operations = {
334f485d
MS
2069 .owner = THIS_MODULE,
2070 .llseek = no_llseek,
ee0b3e67
BP
2071 .read = do_sync_read,
2072 .aio_read = fuse_dev_read,
c3021629 2073 .splice_read = fuse_dev_splice_read,
ee0b3e67
BP
2074 .write = do_sync_write,
2075 .aio_write = fuse_dev_write,
dd3bb14f 2076 .splice_write = fuse_dev_splice_write,
334f485d
MS
2077 .poll = fuse_dev_poll,
2078 .release = fuse_dev_release,
385a17bf 2079 .fasync = fuse_dev_fasync,
334f485d 2080};
08cbf542 2081EXPORT_SYMBOL_GPL(fuse_dev_operations);
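/*
 * Illustrative sketch only (userspace side, not part of this file):
 * with the file operations above, a minimal FUSE daemon loop is
 * conceptually
 *
 *	// assumption: fuse_fd was obtained by opening /dev/fuse and
 *	// passed to mount(2) via the "fd=" mount option
 *	char buf[FUSE_MIN_READ_BUFFER];
 *	for (;;) {
 *		ssize_t n = read(fuse_fd, buf, sizeof(buf));   // one request
 *		struct fuse_in_header *in = (struct fuse_in_header *) buf;
 *		// handle in->opcode, then write() back a single reply
 *		// starting with a struct fuse_out_header that carries
 *		// the same in->unique
 *	}
 *
 * In practice this is what libfuse does on the application's behalf.
 */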
334f485d
MS
2082
2083static struct miscdevice fuse_miscdevice = {
2084 .minor = FUSE_MINOR,
2085 .name = "fuse",
2086 .fops = &fuse_dev_operations,
2087};
2088
2089int __init fuse_dev_init(void)
2090{
2091 int err = -ENOMEM;
2092 fuse_req_cachep = kmem_cache_create("fuse_request",
2093 sizeof(struct fuse_req),
20c2df83 2094 0, 0, NULL);
334f485d
MS
2095 if (!fuse_req_cachep)
2096 goto out;
2097
2098 err = misc_register(&fuse_miscdevice);
2099 if (err)
2100 goto out_cache_clean;
2101
2102 return 0;
2103
2104 out_cache_clean:
2105 kmem_cache_destroy(fuse_req_cachep);
2106 out:
2107 return err;
2108}
2109
2110void fuse_dev_cleanup(void)
2111{
2112 misc_deregister(&fuse_miscdevice);
2113 kmem_cache_destroy(fuse_req_cachep);
2114}