[PATCH] get rid of indirect users of namei.h
fs/fuse/inode.c
1 /*
2 FUSE: Filesystem in Userspace
3 Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
4
5 This program can be distributed under the terms of the GNU GPL.
6 See the file COPYING.
7 */
8
9 #include "fuse_i.h"
10
11 #include <linux/pagemap.h>
12 #include <linux/slab.h>
13 #include <linux/file.h>
14 #include <linux/seq_file.h>
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/parser.h>
18 #include <linux/statfs.h>
19 #include <linux/random.h>
20 #include <linux/sched.h>
21 #include <linux/exportfs.h>
22
23 MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
24 MODULE_DESCRIPTION("Filesystem in Userspace");
25 MODULE_LICENSE("GPL");
26
27 static struct kmem_cache *fuse_inode_cachep;
28 struct list_head fuse_conn_list;
29 DEFINE_MUTEX(fuse_mutex);
30
31 #define FUSE_SUPER_MAGIC 0x65735546
32
33 #define FUSE_DEFAULT_BLKSIZE 512
34
35 struct fuse_mount_data {
36 int fd;
37 unsigned rootmode;
38 unsigned user_id;
39 unsigned group_id;
40 unsigned fd_present : 1;
41 unsigned rootmode_present : 1;
42 unsigned user_id_present : 1;
43 unsigned group_id_present : 1;
44 unsigned flags;
45 unsigned max_read;
46 unsigned blksize;
47 };
48
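/*
 * Inode allocation: a FORGET request is pre-allocated here (fi->forget_req)
 * so that fuse_clear_inode() can always send FUSE_FORGET at eviction time
 * without having to allocate memory in a context where failure could not
 * be handled.
 */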
49 static struct inode *fuse_alloc_inode(struct super_block *sb)
50 {
51 struct inode *inode;
52 struct fuse_inode *fi;
53
54 inode = kmem_cache_alloc(fuse_inode_cachep, GFP_KERNEL);
55 if (!inode)
56 return NULL;
57
58 fi = get_fuse_inode(inode);
59 fi->i_time = 0;
60 fi->nodeid = 0;
61 fi->nlookup = 0;
62 fi->attr_version = 0;
63 fi->writectr = 0;
64 INIT_LIST_HEAD(&fi->write_files);
65 INIT_LIST_HEAD(&fi->queued_writes);
66 INIT_LIST_HEAD(&fi->writepages);
67 init_waitqueue_head(&fi->page_waitq);
68 fi->forget_req = fuse_request_alloc();
69 if (!fi->forget_req) {
70 kmem_cache_free(fuse_inode_cachep, inode);
71 return NULL;
72 }
73
74 return inode;
75 }
76
77 static void fuse_destroy_inode(struct inode *inode)
78 {
79 struct fuse_inode *fi = get_fuse_inode(inode);
80 BUG_ON(!list_empty(&fi->write_files));
81 BUG_ON(!list_empty(&fi->queued_writes));
82 if (fi->forget_req)
83 fuse_request_free(fi->forget_req);
84 kmem_cache_free(fuse_inode_cachep, inode);
85 }
86
87 void fuse_send_forget(struct fuse_conn *fc, struct fuse_req *req,
88 u64 nodeid, u64 nlookup)
89 {
90 struct fuse_forget_in *inarg = &req->misc.forget_in;
91 inarg->nlookup = nlookup;
92 req->in.h.opcode = FUSE_FORGET;
93 req->in.h.nodeid = nodeid;
94 req->in.numargs = 1;
95 req->in.args[0].size = sizeof(struct fuse_forget_in);
96 req->in.args[0].value = inarg;
97 request_send_noreply(fc, req);
98 }
99
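/*
 * FUSE_FORGET is only sent while the superblock is still marked active;
 * once the filesystem is being torn down (or the connection aborted) the
 * userspace server no longer needs the per-inode lookup counts.
 */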
100 static void fuse_clear_inode(struct inode *inode)
101 {
102 if (inode->i_sb->s_flags & MS_ACTIVE) {
103 struct fuse_conn *fc = get_fuse_conn(inode);
104 struct fuse_inode *fi = get_fuse_inode(inode);
105 fuse_send_forget(fc, fi->forget_req, fi->nodeid, fi->nlookup);
106 fi->forget_req = NULL;
107 }
108 }
109
110 static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
111 {
112 if (*flags & MS_MANDLOCK)
113 return -EINVAL;
114
115 return 0;
116 }
117
118 void fuse_truncate(struct address_space *mapping, loff_t offset)
119 {
120 /* See vmtruncate() */
121 unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
122 truncate_inode_pages(mapping, offset);
123 unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
124 }
125
126 void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
127 u64 attr_valid)
128 {
129 struct fuse_conn *fc = get_fuse_conn(inode);
130 struct fuse_inode *fi = get_fuse_inode(inode);
131
132 fi->attr_version = ++fc->attr_version;
133 fi->i_time = attr_valid;
134
135 inode->i_ino = attr->ino;
136 inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
137 inode->i_nlink = attr->nlink;
138 inode->i_uid = attr->uid;
139 inode->i_gid = attr->gid;
140 inode->i_blocks = attr->blocks;
141 inode->i_atime.tv_sec = attr->atime;
142 inode->i_atime.tv_nsec = attr->atimensec;
143 inode->i_mtime.tv_sec = attr->mtime;
144 inode->i_mtime.tv_nsec = attr->mtimensec;
145 inode->i_ctime.tv_sec = attr->ctime;
146 inode->i_ctime.tv_nsec = attr->ctimensec;
147
148 if (attr->blksize != 0)
149 inode->i_blkbits = ilog2(attr->blksize);
150 else
151 inode->i_blkbits = inode->i_sb->s_blocksize_bits;
152
153 /*
154 * Don't set the sticky bit in i_mode, unless we want the VFS
155 * to check permissions. This prevents failures due to the
156 * check in may_delete().
157 */
158 fi->orig_i_mode = inode->i_mode;
159 if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
160 inode->i_mode &= ~S_ISVTX;
161 }
162
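/*
 * The attr_version check below discards replies carrying attributes older
 * than what the kernel has already recorded (for example when a local
 * write updated the size after the userspace reply was generated), so
 * out-of-order replies cannot clobber newer state.
 */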
163 void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
164 u64 attr_valid, u64 attr_version)
165 {
166 struct fuse_conn *fc = get_fuse_conn(inode);
167 struct fuse_inode *fi = get_fuse_inode(inode);
168 loff_t oldsize;
169
170 spin_lock(&fc->lock);
171 if (attr_version != 0 && fi->attr_version > attr_version) {
172 spin_unlock(&fc->lock);
173 return;
174 }
175
176 fuse_change_attributes_common(inode, attr, attr_valid);
177
178 oldsize = inode->i_size;
179 i_size_write(inode, attr->size);
180 spin_unlock(&fc->lock);
181
182 if (S_ISREG(inode->i_mode) && oldsize != attr->size) {
183 if (attr->size < oldsize)
184 fuse_truncate(inode->i_mapping, attr->size);
185 invalidate_inode_pages2(inode->i_mapping);
186 }
187 }
188
189 static void fuse_init_inode(struct inode *inode, struct fuse_attr *attr)
190 {
191 inode->i_mode = attr->mode & S_IFMT;
192 inode->i_size = attr->size;
193 if (S_ISREG(inode->i_mode)) {
194 fuse_init_common(inode);
195 fuse_init_file_inode(inode);
196 } else if (S_ISDIR(inode->i_mode))
197 fuse_init_dir(inode);
198 else if (S_ISLNK(inode->i_mode))
199 fuse_init_symlink(inode);
200 else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
201 S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
202 fuse_init_common(inode);
203 init_special_inode(inode, inode->i_mode,
204 new_decode_dev(attr->rdev));
205 } else
206 BUG();
207 }
208
209 static int fuse_inode_eq(struct inode *inode, void *_nodeidp)
210 {
211 u64 nodeid = *(u64 *) _nodeidp;
212 if (get_node_id(inode) == nodeid)
213 return 1;
214 else
215 return 0;
216 }
217
218 static int fuse_inode_set(struct inode *inode, void *_nodeidp)
219 {
220 u64 nodeid = *(u64 *) _nodeidp;
221 get_fuse_inode(inode)->nodeid = nodeid;
222 return 0;
223 }
224
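/*
 * Inodes are keyed by the FUSE nodeid via iget5_locked().  If an existing
 * inode turns out to have a different file type than the fresh attributes,
 * it is marked bad and the lookup is retried so a new inode gets created.
 * Each successful fuse_iget() bumps nlookup, which must later be balanced
 * by a FORGET.
 */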
225 struct inode *fuse_iget(struct super_block *sb, u64 nodeid,
226 int generation, struct fuse_attr *attr,
227 u64 attr_valid, u64 attr_version)
228 {
229 struct inode *inode;
230 struct fuse_inode *fi;
231 struct fuse_conn *fc = get_fuse_conn_super(sb);
232
233 retry:
234 inode = iget5_locked(sb, nodeid, fuse_inode_eq, fuse_inode_set, &nodeid);
235 if (!inode)
236 return NULL;
237
238 if ((inode->i_state & I_NEW)) {
239 inode->i_flags |= S_NOATIME|S_NOCMTIME;
240 inode->i_generation = generation;
241 inode->i_data.backing_dev_info = &fc->bdi;
242 fuse_init_inode(inode, attr);
243 unlock_new_inode(inode);
244 } else if ((inode->i_mode ^ attr->mode) & S_IFMT) {
245 /* Inode has changed type, any I/O on the old should fail */
246 make_bad_inode(inode);
247 iput(inode);
248 goto retry;
249 }
250
251 fi = get_fuse_inode(inode);
252 spin_lock(&fc->lock);
253 	fi->nlookup++;
254 spin_unlock(&fc->lock);
255 fuse_change_attributes(inode, attr, attr_valid, attr_version);
256
257 return inode;
258 }
259
260 static void fuse_umount_begin(struct super_block *sb)
261 {
262 fuse_abort_conn(get_fuse_conn_super(sb));
263 }
264
265 static void fuse_send_destroy(struct fuse_conn *fc)
266 {
267 struct fuse_req *req = fc->destroy_req;
268 if (req && fc->conn_init) {
269 fc->destroy_req = NULL;
270 req->in.h.opcode = FUSE_DESTROY;
271 req->force = 1;
272 request_send(fc, req);
273 fuse_put_request(fc, req);
274 }
275 }
276
277 static void fuse_put_super(struct super_block *sb)
278 {
279 struct fuse_conn *fc = get_fuse_conn_super(sb);
280
281 fuse_send_destroy(fc);
282 spin_lock(&fc->lock);
283 fc->connected = 0;
284 fc->blocked = 0;
285 spin_unlock(&fc->lock);
286 /* Flush all readers on this fs */
287 kill_fasync(&fc->fasync, SIGIO, POLL_IN);
288 wake_up_all(&fc->waitq);
289 wake_up_all(&fc->blocked_waitq);
290 wake_up_all(&fc->reserved_req_waitq);
291 mutex_lock(&fuse_mutex);
292 list_del(&fc->entry);
293 fuse_ctl_remove_conn(fc);
294 mutex_unlock(&fuse_mutex);
295 fuse_conn_put(fc);
296 }
297
298 static void convert_fuse_statfs(struct kstatfs *stbuf, struct fuse_kstatfs *attr)
299 {
300 stbuf->f_type = FUSE_SUPER_MAGIC;
301 stbuf->f_bsize = attr->bsize;
302 stbuf->f_frsize = attr->frsize;
303 stbuf->f_blocks = attr->blocks;
304 stbuf->f_bfree = attr->bfree;
305 stbuf->f_bavail = attr->bavail;
306 stbuf->f_files = attr->files;
307 stbuf->f_ffree = attr->ffree;
308 stbuf->f_namelen = attr->namelen;
309 /* fsid is left zero */
310 }
311
312 static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
313 {
314 struct super_block *sb = dentry->d_sb;
315 struct fuse_conn *fc = get_fuse_conn_super(sb);
316 struct fuse_req *req;
317 struct fuse_statfs_out outarg;
318 int err;
319
320 if (!fuse_allow_task(fc, current)) {
321 buf->f_type = FUSE_SUPER_MAGIC;
322 return 0;
323 }
324
325 req = fuse_get_req(fc);
326 if (IS_ERR(req))
327 return PTR_ERR(req);
328
329 memset(&outarg, 0, sizeof(outarg));
330 req->in.numargs = 0;
331 req->in.h.opcode = FUSE_STATFS;
332 req->in.h.nodeid = get_node_id(dentry->d_inode);
333 req->out.numargs = 1;
334 req->out.args[0].size =
335 fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
336 req->out.args[0].value = &outarg;
337 request_send(fc, req);
338 err = req->out.h.error;
339 if (!err)
340 convert_fuse_statfs(buf, &outarg.st);
341 fuse_put_request(fc, req);
342 return err;
343 }
344
345 enum {
346 OPT_FD,
347 OPT_ROOTMODE,
348 OPT_USER_ID,
349 OPT_GROUP_ID,
350 OPT_DEFAULT_PERMISSIONS,
351 OPT_ALLOW_OTHER,
352 OPT_MAX_READ,
353 OPT_BLKSIZE,
354 OPT_ERR
355 };
356
357 static match_table_t tokens = {
358 {OPT_FD, "fd=%u"},
359 {OPT_ROOTMODE, "rootmode=%o"},
360 {OPT_USER_ID, "user_id=%u"},
361 {OPT_GROUP_ID, "group_id=%u"},
362 {OPT_DEFAULT_PERMISSIONS, "default_permissions"},
363 {OPT_ALLOW_OTHER, "allow_other"},
364 {OPT_MAX_READ, "max_read=%u"},
365 {OPT_BLKSIZE, "blksize=%u"},
366 {OPT_ERR, NULL}
367 };
368
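/*
 * parse_fuse_opt() returns 1 on success and 0 on any parse error.  The fd,
 * rootmode, user_id and group_id options are mandatory; blksize is only
 * accepted for the block-device based fuseblk variant (is_bdev).
 */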
369 static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
370 {
371 char *p;
372 memset(d, 0, sizeof(struct fuse_mount_data));
373 d->max_read = ~0;
374 d->blksize = FUSE_DEFAULT_BLKSIZE;
375
376 while ((p = strsep(&opt, ",")) != NULL) {
377 int token;
378 int value;
379 substring_t args[MAX_OPT_ARGS];
380 if (!*p)
381 continue;
382
383 token = match_token(p, tokens, args);
384 switch (token) {
385 case OPT_FD:
386 if (match_int(&args[0], &value))
387 return 0;
388 d->fd = value;
389 d->fd_present = 1;
390 break;
391
392 case OPT_ROOTMODE:
393 if (match_octal(&args[0], &value))
394 return 0;
395 if (!fuse_valid_type(value))
396 return 0;
397 d->rootmode = value;
398 d->rootmode_present = 1;
399 break;
400
401 case OPT_USER_ID:
402 if (match_int(&args[0], &value))
403 return 0;
404 d->user_id = value;
405 d->user_id_present = 1;
406 break;
407
408 case OPT_GROUP_ID:
409 if (match_int(&args[0], &value))
410 return 0;
411 d->group_id = value;
412 d->group_id_present = 1;
413 break;
414
415 case OPT_DEFAULT_PERMISSIONS:
416 d->flags |= FUSE_DEFAULT_PERMISSIONS;
417 break;
418
419 case OPT_ALLOW_OTHER:
420 d->flags |= FUSE_ALLOW_OTHER;
421 break;
422
423 case OPT_MAX_READ:
424 if (match_int(&args[0], &value))
425 return 0;
426 d->max_read = value;
427 break;
428
429 case OPT_BLKSIZE:
430 if (!is_bdev || match_int(&args[0], &value))
431 return 0;
432 d->blksize = value;
433 break;
434
435 default:
436 return 0;
437 }
438 }
439
440 if (!d->fd_present || !d->rootmode_present ||
441 !d->user_id_present || !d->group_id_present)
442 return 0;
443
444 return 1;
445 }
446
447 static int fuse_show_options(struct seq_file *m, struct vfsmount *mnt)
448 {
449 struct fuse_conn *fc = get_fuse_conn_super(mnt->mnt_sb);
450
451 seq_printf(m, ",user_id=%u", fc->user_id);
452 seq_printf(m, ",group_id=%u", fc->group_id);
453 if (fc->flags & FUSE_DEFAULT_PERMISSIONS)
454 seq_puts(m, ",default_permissions");
455 if (fc->flags & FUSE_ALLOW_OTHER)
456 seq_puts(m, ",allow_other");
457 if (fc->max_read != ~0)
458 seq_printf(m, ",max_read=%u", fc->max_read);
459 if (mnt->mnt_sb->s_bdev &&
460 mnt->mnt_sb->s_blocksize != FUSE_DEFAULT_BLKSIZE)
461 seq_printf(m, ",blksize=%lu", mnt->mnt_sb->s_blocksize);
462 return 0;
463 }
464
465 static struct fuse_conn *new_conn(struct super_block *sb)
466 {
467 struct fuse_conn *fc;
468 int err;
469
470 fc = kzalloc(sizeof(*fc), GFP_KERNEL);
471 if (fc) {
472 spin_lock_init(&fc->lock);
473 mutex_init(&fc->inst_mutex);
474 atomic_set(&fc->count, 1);
475 init_waitqueue_head(&fc->waitq);
476 init_waitqueue_head(&fc->blocked_waitq);
477 init_waitqueue_head(&fc->reserved_req_waitq);
478 INIT_LIST_HEAD(&fc->pending);
479 INIT_LIST_HEAD(&fc->processing);
480 INIT_LIST_HEAD(&fc->io);
481 INIT_LIST_HEAD(&fc->interrupts);
482 INIT_LIST_HEAD(&fc->bg_queue);
483 atomic_set(&fc->num_waiting, 0);
484 fc->bdi.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
485 fc->bdi.unplug_io_fn = default_unplug_io_fn;
486 		/* fuse does its own writeback accounting */
487 fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB;
488 fc->dev = sb->s_dev;
489 err = bdi_init(&fc->bdi);
490 if (err)
491 goto error_kfree;
492 if (sb->s_bdev) {
493 err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk",
494 MAJOR(fc->dev), MINOR(fc->dev));
495 } else {
496 err = bdi_register_dev(&fc->bdi, fc->dev);
497 }
498 if (err)
499 goto error_bdi_destroy;
500 /*
501 * For a single fuse filesystem use max 1% of dirty +
502 * writeback threshold.
503 *
504 * This gives about 1M of write buffer for memory maps on a
505 * machine with 1G and 10% dirty_ratio, which should be more
506 * than enough.
507 *
508 * Privileged users can raise it by writing to
509 *
510 * /sys/class/bdi/<bdi>/max_ratio
511 */
512 bdi_set_max_ratio(&fc->bdi, 1);
513 fc->reqctr = 0;
514 fc->blocked = 1;
515 fc->attr_version = 1;
516 get_random_bytes(&fc->scramble_key, sizeof(fc->scramble_key));
517 }
518 return fc;
519
520 error_bdi_destroy:
521 bdi_destroy(&fc->bdi);
522 error_kfree:
523 mutex_destroy(&fc->inst_mutex);
524 kfree(fc);
525 return NULL;
526 }
527
528 void fuse_conn_put(struct fuse_conn *fc)
529 {
530 if (atomic_dec_and_test(&fc->count)) {
531 if (fc->destroy_req)
532 fuse_request_free(fc->destroy_req);
533 mutex_destroy(&fc->inst_mutex);
534 bdi_destroy(&fc->bdi);
535 kfree(fc);
536 }
537 }
538
539 struct fuse_conn *fuse_conn_get(struct fuse_conn *fc)
540 {
541 atomic_inc(&fc->count);
542 return fc;
543 }
544
545 static struct inode *get_root_inode(struct super_block *sb, unsigned mode)
546 {
547 struct fuse_attr attr;
548 memset(&attr, 0, sizeof(attr));
549
550 attr.mode = mode;
551 attr.ino = FUSE_ROOT_ID;
552 attr.nlink = 1;
553 return fuse_iget(sb, 1, 0, &attr, 0, 0);
554 }
555
556 struct fuse_inode_handle
557 {
558 u64 nodeid;
559 u32 generation;
560 };
561
562 static struct dentry *fuse_get_dentry(struct super_block *sb,
563 struct fuse_inode_handle *handle)
564 {
565 struct fuse_conn *fc = get_fuse_conn_super(sb);
566 struct inode *inode;
567 struct dentry *entry;
568 int err = -ESTALE;
569
570 if (handle->nodeid == 0)
571 goto out_err;
572
573 inode = ilookup5(sb, handle->nodeid, fuse_inode_eq, &handle->nodeid);
574 if (!inode) {
575 struct fuse_entry_out outarg;
576 struct qstr name;
577
578 if (!fc->export_support)
579 goto out_err;
580
581 name.len = 1;
582 name.name = ".";
583 err = fuse_lookup_name(sb, handle->nodeid, &name, &outarg,
584 &inode);
585 if (err && err != -ENOENT)
586 goto out_err;
587 if (err || !inode) {
588 err = -ESTALE;
589 goto out_err;
590 }
591 err = -EIO;
592 if (get_node_id(inode) != handle->nodeid)
593 goto out_iput;
594 }
595 err = -ESTALE;
596 if (inode->i_generation != handle->generation)
597 goto out_iput;
598
599 entry = d_alloc_anon(inode);
600 err = -ENOMEM;
601 if (!entry)
602 goto out_iput;
603
604 if (get_node_id(inode) != FUSE_ROOT_ID) {
605 entry->d_op = &fuse_dentry_operations;
606 fuse_invalidate_entry_cache(entry);
607 }
608
609 return entry;
610
611 out_iput:
612 iput(inode);
613 out_err:
614 return ERR_PTR(err);
615 }
616
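/*
 * The exported file handle is three 32-bit words: the nodeid split into
 * high/low halves plus the inode generation.  For a connectable handle of
 * a non-directory the parent's triple is appended, giving six words; the
 * return value (0x81 or 0x82) is the fh_type checked by fuse_fh_to_dentry()
 * and fuse_fh_to_parent() below.
 */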
617 static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
618 int connectable)
619 {
620 struct inode *inode = dentry->d_inode;
621 bool encode_parent = connectable && !S_ISDIR(inode->i_mode);
622 int len = encode_parent ? 6 : 3;
623 u64 nodeid;
624 u32 generation;
625
626 if (*max_len < len)
627 return 255;
628
629 nodeid = get_fuse_inode(inode)->nodeid;
630 generation = inode->i_generation;
631
632 fh[0] = (u32)(nodeid >> 32);
633 fh[1] = (u32)(nodeid & 0xffffffff);
634 fh[2] = generation;
635
636 if (encode_parent) {
637 struct inode *parent;
638
639 spin_lock(&dentry->d_lock);
640 parent = dentry->d_parent->d_inode;
641 nodeid = get_fuse_inode(parent)->nodeid;
642 generation = parent->i_generation;
643 spin_unlock(&dentry->d_lock);
644
645 fh[3] = (u32)(nodeid >> 32);
646 fh[4] = (u32)(nodeid & 0xffffffff);
647 fh[5] = generation;
648 }
649
650 *max_len = len;
651 return encode_parent ? 0x82 : 0x81;
652 }
653
654 static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
655 struct fid *fid, int fh_len, int fh_type)
656 {
657 struct fuse_inode_handle handle;
658
659 if ((fh_type != 0x81 && fh_type != 0x82) || fh_len < 3)
660 return NULL;
661
662 handle.nodeid = (u64) fid->raw[0] << 32;
663 handle.nodeid |= (u64) fid->raw[1];
664 handle.generation = fid->raw[2];
665 return fuse_get_dentry(sb, &handle);
666 }
667
668 static struct dentry *fuse_fh_to_parent(struct super_block *sb,
669 struct fid *fid, int fh_len, int fh_type)
670 {
671 struct fuse_inode_handle parent;
672
673 if (fh_type != 0x82 || fh_len < 6)
674 return NULL;
675
676 parent.nodeid = (u64) fid->raw[3] << 32;
677 parent.nodeid |= (u64) fid->raw[4];
678 parent.generation = fid->raw[5];
679 return fuse_get_dentry(sb, &parent);
680 }
681
682 static struct dentry *fuse_get_parent(struct dentry *child)
683 {
684 struct inode *child_inode = child->d_inode;
685 struct fuse_conn *fc = get_fuse_conn(child_inode);
686 struct inode *inode;
687 struct dentry *parent;
688 struct fuse_entry_out outarg;
689 struct qstr name;
690 int err;
691
692 if (!fc->export_support)
693 return ERR_PTR(-ESTALE);
694
695 name.len = 2;
696 name.name = "..";
697 err = fuse_lookup_name(child_inode->i_sb, get_node_id(child_inode),
698 &name, &outarg, &inode);
699 if (err && err != -ENOENT)
700 return ERR_PTR(err);
701 if (err || !inode)
702 return ERR_PTR(-ESTALE);
703
704 parent = d_alloc_anon(inode);
705 if (!parent) {
706 iput(inode);
707 return ERR_PTR(-ENOMEM);
708 }
709 if (get_node_id(inode) != FUSE_ROOT_ID) {
710 parent->d_op = &fuse_dentry_operations;
711 fuse_invalidate_entry_cache(parent);
712 }
713
714 return parent;
715 }
716
717 static const struct export_operations fuse_export_operations = {
718 .fh_to_dentry = fuse_fh_to_dentry,
719 .fh_to_parent = fuse_fh_to_parent,
720 .encode_fh = fuse_encode_fh,
721 .get_parent = fuse_get_parent,
722 };
723
724 static const struct super_operations fuse_super_operations = {
725 .alloc_inode = fuse_alloc_inode,
726 .destroy_inode = fuse_destroy_inode,
727 .clear_inode = fuse_clear_inode,
728 .drop_inode = generic_delete_inode,
729 .remount_fs = fuse_remount_fs,
730 .put_super = fuse_put_super,
731 .umount_begin = fuse_umount_begin,
732 .statfs = fuse_statfs,
733 .show_options = fuse_show_options,
734 };
735
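/*
 * process_init_reply() runs as the ->end callback of the INIT request.  It
 * records the server's protocol minor version and feature flags (async
 * reads, POSIX locks, atomic O_TRUNC, export support, big writes), falls
 * back to conservative defaults for servers older than 7.6, and finally
 * clears fc->blocked so queued requests may proceed.
 */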
736 static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
737 {
738 struct fuse_init_out *arg = &req->misc.init_out;
739
740 if (req->out.h.error || arg->major != FUSE_KERNEL_VERSION)
741 fc->conn_error = 1;
742 else {
743 unsigned long ra_pages;
744
745 if (arg->minor >= 6) {
746 ra_pages = arg->max_readahead / PAGE_CACHE_SIZE;
747 if (arg->flags & FUSE_ASYNC_READ)
748 fc->async_read = 1;
749 if (!(arg->flags & FUSE_POSIX_LOCKS))
750 fc->no_lock = 1;
751 if (arg->flags & FUSE_ATOMIC_O_TRUNC)
752 fc->atomic_o_trunc = 1;
753 if (arg->minor >= 9) {
754 /* LOOKUP has dependency on proto version */
755 if (arg->flags & FUSE_EXPORT_SUPPORT)
756 fc->export_support = 1;
757 }
758 if (arg->flags & FUSE_BIG_WRITES)
759 fc->big_writes = 1;
760 } else {
761 ra_pages = fc->max_read / PAGE_CACHE_SIZE;
762 fc->no_lock = 1;
763 }
764
765 fc->bdi.ra_pages = min(fc->bdi.ra_pages, ra_pages);
766 fc->minor = arg->minor;
767 fc->max_write = arg->minor < 5 ? 4096 : arg->max_write;
768 fc->max_write = max_t(unsigned, 4096, fc->max_write);
769 fc->conn_init = 1;
770 }
771 fuse_put_request(fc, req);
772 fc->blocked = 0;
773 wake_up_all(&fc->blocked_waitq);
774 }
775
776 static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
777 {
778 struct fuse_init_in *arg = &req->misc.init_in;
779
780 arg->major = FUSE_KERNEL_VERSION;
781 arg->minor = FUSE_KERNEL_MINOR_VERSION;
782 arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE;
783 arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC |
784 FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES;
785 req->in.h.opcode = FUSE_INIT;
786 req->in.numargs = 1;
787 req->in.args[0].size = sizeof(*arg);
788 req->in.args[0].value = arg;
789 req->out.numargs = 1;
790 	/* Variable length argument used for backward compatibility
791 with interface version < 7.5. Rest of init_out is zeroed
792 by do_get_request(), so a short reply is not a problem */
793 req->out.argvar = 1;
794 req->out.args[0].size = sizeof(struct fuse_init_out);
795 req->out.args[0].value = &req->misc.init_out;
796 req->end = process_init_reply;
797 request_send_background(fc, req);
798 }
799
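/*
 * Mount-time setup: parse the options, look up the already-open /dev/fuse
 * file passed in via fd=, create the connection, instantiate the root
 * inode/dentry, register the connection on fuse_conn_list and in the
 * control filesystem, and finally queue the INIT request.  The device
 * file's private_data is what ties reads and writes on /dev/fuse to this
 * connection.
 */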
800 static int fuse_fill_super(struct super_block *sb, void *data, int silent)
801 {
802 struct fuse_conn *fc;
803 struct inode *root;
804 struct fuse_mount_data d;
805 struct file *file;
806 struct dentry *root_dentry;
807 struct fuse_req *init_req;
808 int err;
809 int is_bdev = sb->s_bdev != NULL;
810
811 if (sb->s_flags & MS_MANDLOCK)
812 return -EINVAL;
813
814 if (!parse_fuse_opt((char *) data, &d, is_bdev))
815 return -EINVAL;
816
817 if (is_bdev) {
818 #ifdef CONFIG_BLOCK
819 if (!sb_set_blocksize(sb, d.blksize))
820 return -EINVAL;
821 #endif
822 } else {
823 sb->s_blocksize = PAGE_CACHE_SIZE;
824 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
825 }
826 sb->s_magic = FUSE_SUPER_MAGIC;
827 sb->s_op = &fuse_super_operations;
828 sb->s_maxbytes = MAX_LFS_FILESIZE;
829 sb->s_export_op = &fuse_export_operations;
830
831 file = fget(d.fd);
832 if (!file)
833 return -EINVAL;
834
835 if (file->f_op != &fuse_dev_operations)
836 return -EINVAL;
837
838 fc = new_conn(sb);
839 if (!fc)
840 return -ENOMEM;
841
842 fc->flags = d.flags;
843 fc->user_id = d.user_id;
844 fc->group_id = d.group_id;
845 fc->max_read = max_t(unsigned, 4096, d.max_read);
846
847 /* Used by get_root_inode() */
848 sb->s_fs_info = fc;
849
850 err = -ENOMEM;
851 root = get_root_inode(sb, d.rootmode);
852 if (!root)
853 goto err;
854
855 root_dentry = d_alloc_root(root);
856 if (!root_dentry) {
857 iput(root);
858 goto err;
859 }
860
861 init_req = fuse_request_alloc();
862 if (!init_req)
863 goto err_put_root;
864
865 if (is_bdev) {
866 fc->destroy_req = fuse_request_alloc();
867 if (!fc->destroy_req)
868 goto err_put_root;
869 }
870
871 mutex_lock(&fuse_mutex);
872 err = -EINVAL;
873 if (file->private_data)
874 goto err_unlock;
875
876 err = fuse_ctl_add_conn(fc);
877 if (err)
878 goto err_unlock;
879
880 list_add_tail(&fc->entry, &fuse_conn_list);
881 sb->s_root = root_dentry;
882 fc->connected = 1;
883 file->private_data = fuse_conn_get(fc);
884 mutex_unlock(&fuse_mutex);
885 /*
886 * atomic_dec_and_test() in fput() provides the necessary
887 * memory barrier for file->private_data to be visible on all
888 * CPUs after this
889 */
890 fput(file);
891
892 fuse_send_init(fc, init_req);
893
894 return 0;
895
896 err_unlock:
897 mutex_unlock(&fuse_mutex);
898 fuse_request_free(init_req);
899 err_put_root:
900 dput(root_dentry);
901 err:
902 fput(file);
903 fuse_conn_put(fc);
904 return err;
905 }
906
907 static int fuse_get_sb(struct file_system_type *fs_type,
908 int flags, const char *dev_name,
909 void *raw_data, struct vfsmount *mnt)
910 {
911 return get_sb_nodev(fs_type, flags, raw_data, fuse_fill_super, mnt);
912 }
913
914 static struct file_system_type fuse_fs_type = {
915 .owner = THIS_MODULE,
916 .name = "fuse",
917 .fs_flags = FS_HAS_SUBTYPE,
918 .get_sb = fuse_get_sb,
919 .kill_sb = kill_anon_super,
920 };
921
922 #ifdef CONFIG_BLOCK
923 static int fuse_get_sb_blk(struct file_system_type *fs_type,
924 int flags, const char *dev_name,
925 void *raw_data, struct vfsmount *mnt)
926 {
927 return get_sb_bdev(fs_type, flags, dev_name, raw_data, fuse_fill_super,
928 mnt);
929 }
930
931 static struct file_system_type fuseblk_fs_type = {
932 .owner = THIS_MODULE,
933 .name = "fuseblk",
934 .get_sb = fuse_get_sb_blk,
935 .kill_sb = kill_block_super,
936 .fs_flags = FS_REQUIRES_DEV | FS_HAS_SUBTYPE,
937 };
938
939 static inline int register_fuseblk(void)
940 {
941 return register_filesystem(&fuseblk_fs_type);
942 }
943
944 static inline void unregister_fuseblk(void)
945 {
946 unregister_filesystem(&fuseblk_fs_type);
947 }
948 #else
949 static inline int register_fuseblk(void)
950 {
951 return 0;
952 }
953
954 static inline void unregister_fuseblk(void)
955 {
956 }
957 #endif
958
959 static void fuse_inode_init_once(void *foo)
960 {
961 	struct inode *inode = foo;
962
963 inode_init_once(inode);
964 }
965
966 static int __init fuse_fs_init(void)
967 {
968 int err;
969
970 err = register_filesystem(&fuse_fs_type);
971 if (err)
972 goto out;
973
974 err = register_fuseblk();
975 if (err)
976 goto out_unreg;
977
978 fuse_inode_cachep = kmem_cache_create("fuse_inode",
979 sizeof(struct fuse_inode),
980 0, SLAB_HWCACHE_ALIGN,
981 fuse_inode_init_once);
982 err = -ENOMEM;
983 if (!fuse_inode_cachep)
984 goto out_unreg2;
985
986 return 0;
987
988 out_unreg2:
989 unregister_fuseblk();
990 out_unreg:
991 unregister_filesystem(&fuse_fs_type);
992 out:
993 return err;
994 }
995
996 static void fuse_fs_cleanup(void)
997 {
998 unregister_filesystem(&fuse_fs_type);
999 unregister_fuseblk();
1000 kmem_cache_destroy(fuse_inode_cachep);
1001 }
1002
1003 static struct kobject *fuse_kobj;
1004 static struct kobject *connections_kobj;
1005
1006 static int fuse_sysfs_init(void)
1007 {
1008 int err;
1009
1010 fuse_kobj = kobject_create_and_add("fuse", fs_kobj);
1011 if (!fuse_kobj) {
1012 err = -ENOMEM;
1013 goto out_err;
1014 }
1015
1016 connections_kobj = kobject_create_and_add("connections", fuse_kobj);
1017 if (!connections_kobj) {
1018 err = -ENOMEM;
1019 goto out_fuse_unregister;
1020 }
1021
1022 return 0;
1023
1024 out_fuse_unregister:
1025 kobject_put(fuse_kobj);
1026 out_err:
1027 return err;
1028 }
1029
1030 static void fuse_sysfs_cleanup(void)
1031 {
1032 kobject_put(connections_kobj);
1033 kobject_put(fuse_kobj);
1034 }
1035
1036 static int __init fuse_init(void)
1037 {
1038 int res;
1039
1040 printk("fuse init (API version %i.%i)\n",
1041 FUSE_KERNEL_VERSION, FUSE_KERNEL_MINOR_VERSION);
1042
1043 INIT_LIST_HEAD(&fuse_conn_list);
1044 res = fuse_fs_init();
1045 if (res)
1046 goto err;
1047
1048 res = fuse_dev_init();
1049 if (res)
1050 goto err_fs_cleanup;
1051
1052 res = fuse_sysfs_init();
1053 if (res)
1054 goto err_dev_cleanup;
1055
1056 res = fuse_ctl_init();
1057 if (res)
1058 goto err_sysfs_cleanup;
1059
1060 return 0;
1061
1062 err_sysfs_cleanup:
1063 fuse_sysfs_cleanup();
1064 err_dev_cleanup:
1065 fuse_dev_cleanup();
1066 err_fs_cleanup:
1067 fuse_fs_cleanup();
1068 err:
1069 return res;
1070 }
1071
1072 static void __exit fuse_exit(void)
1073 {
1074 printk(KERN_DEBUG "fuse exit\n");
1075
1076 fuse_ctl_cleanup();
1077 fuse_sysfs_cleanup();
1078 fuse_fs_cleanup();
1079 fuse_dev_cleanup();
1080 }
1081
1082 module_init(fuse_init);
1083 module_exit(fuse_exit);