ceph: fix xattr rbtree search
[deliverable/linux.git] fs/ceph/super.c

#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/parser.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * Ceph superblock operations
 *
 * Handle the basics of mounting, unmounting.
 */
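/*
 * Roughly in order below: the statfs/sync_fs/put_super super ops, mount
 * option parsing and comparison, ceph_fs_client setup and teardown, the
 * inode/cap/dentry/file slab caches, and finally the mount/kill_sb entry
 * points registered through ceph_fs_type.
 */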

/*
 * super ops
 */
static void ceph_put_super(struct super_block *s)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(s);

        dout("put_super\n");
        ceph_mdsc_close_sessions(fsc->mdsc);

        /*
         * ensure we release the bdi before put_anon_super releases
         * the device name.
         */
        if (s->s_bdi == &fsc->backing_dev_info) {
                bdi_unregister(&fsc->backing_dev_info);
                s->s_bdi = NULL;
        }

        return;
}

static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode);
        struct ceph_monmap *monmap = fsc->client->monc.monmap;
        struct ceph_statfs st;
        u64 fsid;
        int err;

        dout("statfs\n");
        err = ceph_monc_do_statfs(&fsc->client->monc, &st);
        if (err < 0)
                return err;

        /* fill in kstatfs */
        buf->f_type = CEPH_SUPER_MAGIC; /* ?? */

        /*
         * express utilization in terms of large blocks to avoid
         * overflow on 32-bit machines.
         */
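        /*
         * st.kb and st.kb_used arrive in kilobytes, so shifting right by
         * (CEPH_BLOCK_SHIFT - 10) re-expresses them in CEPH_BLOCK-sized
         * units; e.g. a hypothetical 20-bit block shift would report
         * 1 MiB blocks and divide the KB counts by 1024.
         */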
        buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
        buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
        buf->f_bfree = (le64_to_cpu(st.kb) - le64_to_cpu(st.kb_used)) >>
                (CEPH_BLOCK_SHIFT-10);
        buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);

        buf->f_files = le64_to_cpu(st.num_objects);
        buf->f_ffree = -1;
        buf->f_namelen = NAME_MAX;
        buf->f_frsize = PAGE_CACHE_SIZE;

        /* leave fsid little-endian, regardless of host endianness */
        fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
        buf->f_fsid.val[0] = fsid & 0xffffffff;
        buf->f_fsid.val[1] = fsid >> 32;

        return 0;
}

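/*
 * sync_fs: when @wait is zero, just kick writeback of any dirty caps and
 * return; when nonzero, block until both the OSDs and the MDS have
 * committed everything we have sent them.
 */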
static int ceph_sync_fs(struct super_block *sb, int wait)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

        if (!wait) {
                dout("sync_fs (non-blocking)\n");
                ceph_flush_dirty_caps(fsc->mdsc);
                dout("sync_fs (non-blocking) done\n");
                return 0;
        }

        dout("sync_fs (blocking)\n");
        ceph_osdc_sync(&fsc->client->osdc);
        ceph_mdsc_sync(fsc->mdsc);
        dout("sync_fs (blocking) done\n");
        return 0;
}

/*
 * mount options
 */
enum {
        Opt_wsize,
        Opt_rsize,
        Opt_caps_wanted_delay_min,
        Opt_caps_wanted_delay_max,
        Opt_cap_release_safety,
        Opt_readdir_max_entries,
        Opt_readdir_max_bytes,
        Opt_congestion_kb,
        Opt_last_int,
        /* int args above */
        Opt_snapdirname,
        Opt_last_string,
        /* string args above */
        Opt_dirstat,
        Opt_nodirstat,
        Opt_rbytes,
        Opt_norbytes,
        Opt_noasyncreaddir,
};

static match_table_t fsopt_tokens = {
        {Opt_wsize, "wsize=%d"},
        {Opt_rsize, "rsize=%d"},
        {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
        {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
        {Opt_cap_release_safety, "cap_release_safety=%d"},
        {Opt_readdir_max_entries, "readdir_max_entries=%d"},
        {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
        {Opt_congestion_kb, "write_congestion_kb=%d"},
        /* int args above */
        {Opt_snapdirname, "snapdirname=%s"},
        /* string args above */
        {Opt_dirstat, "dirstat"},
        {Opt_nodirstat, "nodirstat"},
        {Opt_rbytes, "rbytes"},
        {Opt_norbytes, "norbytes"},
        {Opt_noasyncreaddir, "noasyncreaddir"},
        {-1, NULL}
};
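
/*
 * The Opt_last_int and Opt_last_string sentinels split the tokens into
 * int-argument, string-argument and bare-flag groups; parse_fsopt_token()
 * relies on that ordering.  An illustrative (not exhaustive) option
 * string: "rsize=524288,noasyncreaddir,snapdirname=.snapshot".
 */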

static int parse_fsopt_token(char *c, void *private)
{
        struct ceph_mount_options *fsopt = private;
        substring_t argstr[MAX_OPT_ARGS];
        int token, intval, ret;

        token = match_token((char *)c, fsopt_tokens, argstr);
        if (token < 0)
                return -EINVAL;

        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
                        pr_err("bad mount option arg (not int) "
                               "at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
                dout("got string token %d val %s\n", token,
                     argstr[0].from);
        } else {
                dout("got token %d\n", token);
        }

        switch (token) {
        case Opt_snapdirname:
                kfree(fsopt->snapdir_name);
                fsopt->snapdir_name = kstrndup(argstr[0].from,
                                               argstr[0].to-argstr[0].from,
                                               GFP_KERNEL);
                if (!fsopt->snapdir_name)
                        return -ENOMEM;
                break;

                /* misc */
        case Opt_wsize:
                fsopt->wsize = intval;
                break;
        case Opt_rsize:
                fsopt->rsize = intval;
                break;
        case Opt_caps_wanted_delay_min:
                fsopt->caps_wanted_delay_min = intval;
                break;
        case Opt_caps_wanted_delay_max:
                fsopt->caps_wanted_delay_max = intval;
                break;
        case Opt_readdir_max_entries:
                fsopt->max_readdir = intval;
                break;
        case Opt_readdir_max_bytes:
                fsopt->max_readdir_bytes = intval;
                break;
        case Opt_congestion_kb:
                fsopt->congestion_kb = intval;
                break;
        case Opt_dirstat:
                fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
                break;
        case Opt_nodirstat:
                fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
                break;
        case Opt_rbytes:
                fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
                break;
        case Opt_norbytes:
                fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
                break;
        case Opt_noasyncreaddir:
                fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
                break;
        default:
                BUG_ON(token);
        }
        return 0;
}

static void destroy_mount_options(struct ceph_mount_options *args)
{
        dout("destroy_mount_options %p\n", args);
        kfree(args->snapdir_name);
        kfree(args);
}

static int strcmp_null(const char *s1, const char *s2)
{
        if (!s1 && !s2)
                return 0;
        if (s1 && !s2)
                return -1;
        if (!s1 && s2)
                return 1;
        return strcmp(s1, s2);
}

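/*
 * Compare everything up to the first pointer member (snapdir_name) with
 * memcmp(), then compare the snapdir string and the libceph-level options
 * separately.  ceph_compare_super() uses this to decide whether an
 * existing superblock can be shared.
 */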
static int compare_mount_options(struct ceph_mount_options *new_fsopt,
                                 struct ceph_options *new_opt,
                                 struct ceph_fs_client *fsc)
{
        struct ceph_mount_options *fsopt1 = new_fsopt;
        struct ceph_mount_options *fsopt2 = fsc->mount_options;
        int ofs = offsetof(struct ceph_mount_options, snapdir_name);
        int ret;

        ret = memcmp(fsopt1, fsopt2, ofs);
        if (ret)
                return ret;

        ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
        if (ret)
                return ret;

        return ceph_compare_options(new_opt, fsc->client);
}

static int parse_mount_options(struct ceph_mount_options **pfsopt,
                               struct ceph_options **popt,
                               int flags, char *options,
                               const char *dev_name,
                               const char **path)
{
        struct ceph_mount_options *fsopt;
        const char *dev_name_end;
        int err = -ENOMEM;

        fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
        if (!fsopt)
                return -ENOMEM;

        dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);

        fsopt->sb_flags = flags;
        fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;

        fsopt->rsize = CEPH_MOUNT_RSIZE_DEFAULT;
        fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
        fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
        fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
        fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
        fsopt->congestion_kb = default_congestion_kb();

        /* ip1[:port1][,ip2[:port2]...]:/subdir/in/fs */
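        /*
         * e.g. (addresses purely illustrative)
         * "192.168.0.1:6789,192.168.0.2:6789:/some/dir" -- everything
         * before the ":/" is handed to ceph_parse_options() as the
         * monitor list; the remainder becomes the server-side path.
         */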
        err = -EINVAL;
        if (!dev_name)
                goto out;
        *path = strstr(dev_name, ":/");
        if (*path == NULL) {
                pr_err("device name is missing path (no :/ in %s)\n",
                       dev_name);
                goto out;
        }
        dev_name_end = *path;
        dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);

        /* path on server */
        *path += 2;
        dout("server path '%s'\n", *path);

        err = ceph_parse_options(popt, options, dev_name, dev_name_end,
                                 parse_fsopt_token, (void *)fsopt);
        if (err)
                goto out;

        /* success */
        *pfsopt = fsopt;
        return 0;

out:
        destroy_mount_options(fsopt);
        return err;
}

/**
 * ceph_show_options - Show mount options in /proc/mounts
 * @m: seq_file to write to
 * @mnt: mount descriptor
 */
static int ceph_show_options(struct seq_file *m, struct vfsmount *mnt)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(mnt->mnt_sb);
        struct ceph_mount_options *fsopt = fsc->mount_options;
        struct ceph_options *opt = fsc->client->options;

        if (opt->flags & CEPH_OPT_FSID)
                seq_printf(m, ",fsid=%pU", &opt->fsid);
        if (opt->flags & CEPH_OPT_NOSHARE)
                seq_puts(m, ",noshare");
        if (opt->flags & CEPH_OPT_NOCRC)
                seq_puts(m, ",nocrc");

        if (opt->name)
                seq_printf(m, ",name=%s", opt->name);
        if (opt->secret)
                seq_puts(m, ",secret=<hidden>");

        if (opt->mount_timeout != CEPH_MOUNT_TIMEOUT_DEFAULT)
                seq_printf(m, ",mount_timeout=%d", opt->mount_timeout);
        if (opt->osd_idle_ttl != CEPH_OSD_IDLE_TTL_DEFAULT)
                seq_printf(m, ",osd_idle_ttl=%d", opt->osd_idle_ttl);
        if (opt->osd_timeout != CEPH_OSD_TIMEOUT_DEFAULT)
                seq_printf(m, ",osdtimeout=%d", opt->osd_timeout);
        if (opt->osd_keepalive_timeout != CEPH_OSD_KEEPALIVE_DEFAULT)
                seq_printf(m, ",osdkeepalivetimeout=%d",
                           opt->osd_keepalive_timeout);

        if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
                seq_puts(m, ",dirstat");
        if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0)
                seq_puts(m, ",norbytes");
        if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
                seq_puts(m, ",noasyncreaddir");

        if (fsopt->wsize)
                seq_printf(m, ",wsize=%d", fsopt->wsize);
        if (fsopt->rsize != CEPH_MOUNT_RSIZE_DEFAULT)
                seq_printf(m, ",rsize=%d", fsopt->rsize);
        if (fsopt->congestion_kb != default_congestion_kb())
                seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
        if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
                seq_printf(m, ",caps_wanted_delay_min=%d",
                           fsopt->caps_wanted_delay_min);
        if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
                seq_printf(m, ",caps_wanted_delay_max=%d",
                           fsopt->caps_wanted_delay_max);
        if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
                seq_printf(m, ",cap_release_safety=%d",
                           fsopt->cap_release_safety);
        if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
                seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
        if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
                seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
        if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
                seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);
        return 0;
}

/*
 * handle any mon messages the standard library doesn't understand.
 * return error if we don't either.
 */
static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
{
        struct ceph_fs_client *fsc = client->private;
        int type = le16_to_cpu(msg->hdr.type);

        switch (type) {
        case CEPH_MSG_MDS_MAP:
                ceph_mdsc_handle_map(fsc->mdsc, msg);
                return 0;

        default:
                return -1;
        }
}

/*
 * create a new fs client
 */
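/*
 * A ceph_fs_client owns the libceph client plus the per-mount pieces:
 * the backing_dev_info, the writeback/page-invalidation/truncation
 * workqueues and the writeback page-vector mempool.  destroy_fs_client()
 * releases the same set.
 */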
struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
                                        struct ceph_options *opt)
{
        struct ceph_fs_client *fsc;
        int err = -ENOMEM;

        fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
        if (!fsc)
                return ERR_PTR(-ENOMEM);

        fsc->client = ceph_create_client(opt, fsc);
        if (IS_ERR(fsc->client)) {
                err = PTR_ERR(fsc->client);
                goto fail;
        }
        fsc->client->extra_mon_dispatch = extra_mon_dispatch;
        fsc->client->supported_features |= CEPH_FEATURE_FLOCK |
                CEPH_FEATURE_DIRLAYOUTHASH;
        fsc->client->monc.want_mdsmap = 1;

        fsc->mount_options = fsopt;

        fsc->sb = NULL;
        fsc->mount_state = CEPH_MOUNT_MOUNTING;

        atomic_long_set(&fsc->writeback_count, 0);

        err = bdi_init(&fsc->backing_dev_info);
        if (err < 0)
                goto fail_client;

        err = -ENOMEM;
        /*
         * The number of concurrent works can be high but they don't need
         * to be processed in parallel, limit concurrency.
         */
        fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
        if (fsc->wb_wq == NULL)
                goto fail_bdi;
        fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
        if (fsc->pg_inv_wq == NULL)
                goto fail_wb_wq;
        fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
        if (fsc->trunc_wq == NULL)
                goto fail_pg_inv_wq;

        /* set up mempools */
        err = -ENOMEM;
        fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10,
                fsc->mount_options->wsize >> PAGE_CACHE_SHIFT);
        if (!fsc->wb_pagevec_pool)
                goto fail_trunc_wq;

        /* caps */
        fsc->min_caps = fsopt->max_readdir;

        return fsc;

fail_trunc_wq:
        destroy_workqueue(fsc->trunc_wq);
fail_pg_inv_wq:
        destroy_workqueue(fsc->pg_inv_wq);
fail_wb_wq:
        destroy_workqueue(fsc->wb_wq);
fail_bdi:
        bdi_destroy(&fsc->backing_dev_info);
fail_client:
        ceph_destroy_client(fsc->client);
fail:
        kfree(fsc);
        return ERR_PTR(err);
}

void destroy_fs_client(struct ceph_fs_client *fsc)
{
        dout("destroy_fs_client %p\n", fsc);

        destroy_workqueue(fsc->wb_wq);
        destroy_workqueue(fsc->pg_inv_wq);
        destroy_workqueue(fsc->trunc_wq);

        bdi_destroy(&fsc->backing_dev_info);

        mempool_destroy(fsc->wb_pagevec_pool);

        destroy_mount_options(fsc->mount_options);

        ceph_fs_debugfs_cleanup(fsc);

        ceph_destroy_client(fsc->client);

        kfree(fsc);
        dout("destroy_fs_client %p done\n", fsc);
}

/*
 * caches
 */
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;

static void ceph_inode_init_once(void *foo)
{
        struct ceph_inode_info *ci = foo;
        inode_init_once(&ci->vfs_inode);
}

static int __init init_caches(void)
{
        ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
                                sizeof(struct ceph_inode_info),
                                __alignof__(struct ceph_inode_info),
                                (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
                                ceph_inode_init_once);
        if (ceph_inode_cachep == NULL)
                return -ENOMEM;

        ceph_cap_cachep = KMEM_CACHE(ceph_cap,
                                     SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
        if (ceph_cap_cachep == NULL)
                goto bad_cap;

        ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
                                        SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
        if (ceph_dentry_cachep == NULL)
                goto bad_dentry;

        ceph_file_cachep = KMEM_CACHE(ceph_file_info,
                                      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
        if (ceph_file_cachep == NULL)
                goto bad_file;

        return 0;

bad_file:
        kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
        kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
        kmem_cache_destroy(ceph_inode_cachep);
        return -ENOMEM;
}

static void destroy_caches(void)
{
        kmem_cache_destroy(ceph_inode_cachep);
        kmem_cache_destroy(ceph_cap_cachep);
        kmem_cache_destroy(ceph_dentry_cachep);
        kmem_cache_destroy(ceph_file_cachep);
}

/*
 * ceph_umount_begin - initiate forced umount.  Tear down the mount,
 * skipping steps that may hang while waiting for server(s).
 */
static void ceph_umount_begin(struct super_block *sb)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

        dout("ceph_umount_begin - starting forced umount\n");
        if (!fsc)
                return;
        fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
        return;
}

static const struct super_operations ceph_super_ops = {
        .alloc_inode    = ceph_alloc_inode,
        .destroy_inode  = ceph_destroy_inode,
        .write_inode    = ceph_write_inode,
        .sync_fs        = ceph_sync_fs,
        .put_super      = ceph_put_super,
        .show_options   = ceph_show_options,
        .statfs         = ceph_statfs,
        .umount_begin   = ceph_umount_begin,
};

/*
 * Bootstrap mount by opening the root directory.  Note the mount
 * @started time from caller, and time out if this takes too long.
 */
static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
                                       const char *path,
                                       unsigned long started)
{
        struct ceph_mds_client *mdsc = fsc->mdsc;
        struct ceph_mds_request *req = NULL;
        int err;
        struct dentry *root;

        /* open dir */
        dout("open_root_inode opening '%s'\n", path);
        req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
        if (IS_ERR(req))
                return ERR_CAST(req);
        req->r_path1 = kstrdup(path, GFP_NOFS);
        req->r_ino1.ino = CEPH_INO_ROOT;
        req->r_ino1.snap = CEPH_NOSNAP;
        req->r_started = started;
        req->r_timeout = fsc->client->options->mount_timeout * HZ;
        req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
        req->r_num_caps = 2;
        err = ceph_mdsc_do_request(mdsc, NULL, req);
        if (err == 0) {
                dout("open_root_inode success\n");
                if (ceph_ino(req->r_target_inode) == CEPH_INO_ROOT &&
                    fsc->sb->s_root == NULL)
                        root = d_alloc_root(req->r_target_inode);
                else
                        root = d_obtain_alias(req->r_target_inode);
                req->r_target_inode = NULL;
                dout("open_root_inode success, root dentry is %p\n", root);
        } else {
                root = ERR_PTR(err);
        }
        ceph_mdsc_put_request(req);
        return root;
}

/*
 * mount: join the ceph cluster, and open root directory.
 */
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
                                      const char *path)
{
        int err;
        unsigned long started = jiffies;  /* note the start time */
        struct dentry *root;
        int first = 0;   /* first vfsmount for this super_block */

        dout("mount start\n");
        mutex_lock(&fsc->client->mount_mutex);

        err = __ceph_open_session(fsc->client, started);
        if (err < 0)
                goto out;

        dout("mount opening root\n");
        root = open_root_dentry(fsc, "", started);
        if (IS_ERR(root)) {
                err = PTR_ERR(root);
                goto out;
        }
        if (fsc->sb->s_root) {
                dput(root);
        } else {
                fsc->sb->s_root = root;
                first = 1;

                err = ceph_fs_debugfs_init(fsc);
                if (err < 0)
                        goto fail;
        }

        if (path[0] == 0) {
                dget(root);
        } else {
                dout("mount opening base mountpoint\n");
                root = open_root_dentry(fsc, path, started);
                if (IS_ERR(root)) {
                        err = PTR_ERR(root);
                        goto fail;
                }
        }

        fsc->mount_state = CEPH_MOUNT_MOUNTED;
        dout("mount success\n");
        mutex_unlock(&fsc->client->mount_mutex);
        return root;

out:
        mutex_unlock(&fsc->client->mount_mutex);
        return ERR_PTR(err);

fail:
        if (first) {
                dput(fsc->sb->s_root);
                fsc->sb->s_root = NULL;
        }
        goto out;
}

static int ceph_set_super(struct super_block *s, void *data)
{
        struct ceph_fs_client *fsc = data;
        int ret;

        dout("set_super %p data %p\n", s, data);

        s->s_flags = fsc->mount_options->sb_flags;
        s->s_maxbytes = 1ULL << 40;  /* temp value until we get mdsmap */

        s->s_fs_info = fsc;
        fsc->sb = s;

        s->s_op = &ceph_super_ops;
        s->s_export_op = &ceph_export_ops;

        s->s_time_gran = 1000;  /* 1000 ns == 1 us */

        ret = set_anon_super(s, NULL);  /* what is that second arg for? */
        if (ret != 0)
                goto fail;

        return ret;

fail:
        s->s_fs_info = NULL;
        fsc->sb = NULL;
        return ret;
}

/*
 * share superblock if same fs AND options
 */
static int ceph_compare_super(struct super_block *sb, void *data)
{
        struct ceph_fs_client *new = data;
        struct ceph_mount_options *fsopt = new->mount_options;
        struct ceph_options *opt = new->client->options;
        struct ceph_fs_client *other = ceph_sb_to_client(sb);

        dout("ceph_compare_super %p\n", sb);

        if (compare_mount_options(fsopt, opt, other)) {
                dout("monitor(s)/mount options don't match\n");
                return 0;
        }
        if ((opt->flags & CEPH_OPT_FSID) &&
            ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
                dout("fsid doesn't match\n");
                return 0;
        }
        if (fsopt->sb_flags != other->mount_options->sb_flags) {
                dout("flags differ\n");
                return 0;
        }
        return 1;
}

/*
 * construct our own bdi so we can control readahead, etc.
 */
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

static int ceph_register_bdi(struct super_block *sb,
                             struct ceph_fs_client *fsc)
{
        int err;

        /* set ra_pages based on rsize mount option? */
        if (fsc->mount_options->rsize >= PAGE_CACHE_SIZE)
                fsc->backing_dev_info.ra_pages =
                        (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1)
                        >> PAGE_SHIFT;
        err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%d",
                           atomic_long_inc_return(&bdi_seq));
        if (!err)
                sb->s_bdi = &fsc->backing_dev_info;
        return err;
}

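/*
 * Mount entry point.  Roughly: parse the device string and options,
 * create a ceph_fs_client and its MDS client, then use sget() to either
 * reuse a matching existing superblock or set up a new one (registering
 * a fresh bdi only in the new-sb case) before opening the root in
 * ceph_real_mount().
 */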
static struct dentry *ceph_mount(struct file_system_type *fs_type,
                                 int flags, const char *dev_name, void *data)
{
        struct super_block *sb;
        struct ceph_fs_client *fsc;
        struct dentry *res;
        int err;
        int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
        const char *path = NULL;
        struct ceph_mount_options *fsopt = NULL;
        struct ceph_options *opt = NULL;

        dout("ceph_mount\n");
        err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
        if (err < 0) {
                res = ERR_PTR(err);
                goto out_final;
        }

        /* create client (which we may/may not use) */
        fsc = create_fs_client(fsopt, opt);
        if (IS_ERR(fsc)) {
                res = ERR_CAST(fsc);
                kfree(fsopt);
                kfree(opt);
                goto out_final;
        }

        err = ceph_mdsc_init(fsc);
        if (err < 0) {
                res = ERR_PTR(err);
                goto out;
        }

        if (ceph_test_opt(fsc->client, NOSHARE))
                compare_super = NULL;
        sb = sget(fs_type, compare_super, ceph_set_super, fsc);
        if (IS_ERR(sb)) {
                res = ERR_CAST(sb);
                goto out;
        }

        if (ceph_sb_to_client(sb) != fsc) {
                ceph_mdsc_destroy(fsc);
                destroy_fs_client(fsc);
                fsc = ceph_sb_to_client(sb);
                dout("get_sb got existing client %p\n", fsc);
        } else {
                dout("get_sb using new client %p\n", fsc);
                err = ceph_register_bdi(sb, fsc);
                if (err < 0) {
                        res = ERR_PTR(err);
                        goto out_splat;
                }
        }

        res = ceph_real_mount(fsc, path);
        if (IS_ERR(res))
                goto out_splat;
        dout("root %p inode %p ino %llx.%llx\n", res,
             res->d_inode, ceph_vinop(res->d_inode));
        return res;

out_splat:
        ceph_mdsc_close_sessions(fsc->mdsc);
        deactivate_locked_super(sb);
        goto out_final;

out:
        ceph_mdsc_destroy(fsc);
        destroy_fs_client(fsc);
out_final:
        dout("ceph_mount fail %ld\n", PTR_ERR(res));
        return res;
}

static void ceph_kill_sb(struct super_block *s)
{
        struct ceph_fs_client *fsc = ceph_sb_to_client(s);
        dout("kill_sb %p\n", s);
        ceph_mdsc_pre_umount(fsc->mdsc);
        kill_anon_super(s);   /* will call put_super after sb is r/o */
        ceph_mdsc_destroy(fsc);
        destroy_fs_client(fsc);
}

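/*
 * FS_RENAME_DOES_D_MOVE tells the VFS that this filesystem moves the
 * renamed dentry itself (via the MDS reply handling), so the generic
 * rename path skips its own d_move().
 */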
static struct file_system_type ceph_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "ceph",
        .mount          = ceph_mount,
        .kill_sb        = ceph_kill_sb,
        .fs_flags       = FS_RENAME_DOES_D_MOVE,
};

#define _STRINGIFY(x) #x
#define STRINGIFY(x) _STRINGIFY(x)

static int __init init_ceph(void)
{
        int ret = init_caches();
        if (ret)
                goto out;

        ret = register_filesystem(&ceph_fs_type);
        if (ret)
                goto out_icache;

        pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);

        return 0;

out_icache:
        destroy_caches();
out:
        return ret;
}

static void __exit exit_ceph(void)
{
        dout("exit_ceph\n");
        unregister_filesystem(&ceph_fs_type);
        destroy_caches();
}

module_init(init_ceph);
module_exit(exit_ceph);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");