
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/parser.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * Ceph superblock operations
 *
 * Handle the basics of mounting, unmounting.
 */

/*
 * super ops
 */
static void ceph_put_super(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);

	dout("put_super\n");
	ceph_mdsc_close_sessions(fsc->mdsc);
}

static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
	struct ceph_monmap *monmap = fsc->client->monc.monmap;
	struct ceph_statfs st;
	u64 fsid;
	int err;

	dout("statfs\n");
	err = ceph_monc_do_statfs(&fsc->client->monc, &st);
	if (err < 0)
		return err;

	/* fill in kstatfs */
	buf->f_type = CEPH_SUPER_MAGIC;  /* ?? */

	/*
	 * express utilization in terms of large blocks to avoid
	 * overflow on 32-bit machines.
	 *
	 * NOTE: for the time being, we make bsize == frsize to humor
	 * not-yet-ancient versions of glibc that are broken.
	 * Someday, we will probably want to report a real block
	 * size...  whatever that may mean for a network file system!
	 */
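	/*
	 * Worked example (assuming CEPH_BLOCK_SHIFT is 22, i.e. 4 MiB
	 * blocks, as defined in super.h): st.kb is reported in KiB, so
	 * shifting right by CEPH_BLOCK_SHIFT - 10 = 12 converts KiB into
	 * block counts, e.g. 8388608 KiB (8 GiB) >> 12 == 2048 blocks.
	 */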
	buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
	buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
	buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
	buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
	buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);

	buf->f_files = le64_to_cpu(st.num_objects);
	buf->f_ffree = -1;
	buf->f_namelen = NAME_MAX;

	/* leave fsid little-endian, regardless of host endianness */
	fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
	buf->f_fsid.val[0] = fsid & 0xffffffff;
	buf->f_fsid.val[1] = fsid >> 32;

	return 0;
}

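/*
 * ->sync_fs is invoked for sync(2) and at unmount.  When @wait is zero we
 * only start writeback of dirty caps; when it is non-zero we also block
 * until the osd data and mds metadata have been flushed.
 */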
static int ceph_sync_fs(struct super_block *sb, int wait)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

	if (!wait) {
		dout("sync_fs (non-blocking)\n");
		ceph_flush_dirty_caps(fsc->mdsc);
		dout("sync_fs (non-blocking) done\n");
		return 0;
	}

	dout("sync_fs (blocking)\n");
	ceph_osdc_sync(&fsc->client->osdc);
	ceph_mdsc_sync(fsc->mdsc);
	dout("sync_fs (blocking) done\n");
	return 0;
}

/*
 * mount options
 */
enum {
	Opt_mds_namespace,
	Opt_wsize,
	Opt_rsize,
	Opt_rasize,
	Opt_caps_wanted_delay_min,
	Opt_caps_wanted_delay_max,
	Opt_cap_release_safety,
	Opt_readdir_max_entries,
	Opt_readdir_max_bytes,
	Opt_congestion_kb,
	Opt_last_int,
	/* int args above */
	Opt_snapdirname,
	Opt_last_string,
	/* string args above */
	Opt_dirstat,
	Opt_nodirstat,
	Opt_rbytes,
	Opt_norbytes,
	Opt_asyncreaddir,
	Opt_noasyncreaddir,
	Opt_dcache,
	Opt_nodcache,
	Opt_ino32,
	Opt_noino32,
	Opt_fscache,
	Opt_nofscache,
	Opt_poolperm,
	Opt_nopoolperm,
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	Opt_acl,
#endif
	Opt_noacl,
};

static match_table_t fsopt_tokens = {
	{Opt_mds_namespace, "mds_namespace=%d"},
	{Opt_wsize, "wsize=%d"},
	{Opt_rsize, "rsize=%d"},
	{Opt_rasize, "rasize=%d"},
	{Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
	{Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
	{Opt_cap_release_safety, "cap_release_safety=%d"},
	{Opt_readdir_max_entries, "readdir_max_entries=%d"},
	{Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
	{Opt_congestion_kb, "write_congestion_kb=%d"},
	/* int args above */
	{Opt_snapdirname, "snapdirname=%s"},
	/* string args above */
	{Opt_dirstat, "dirstat"},
	{Opt_nodirstat, "nodirstat"},
	{Opt_rbytes, "rbytes"},
	{Opt_norbytes, "norbytes"},
	{Opt_asyncreaddir, "asyncreaddir"},
	{Opt_noasyncreaddir, "noasyncreaddir"},
	{Opt_dcache, "dcache"},
	{Opt_nodcache, "nodcache"},
	{Opt_ino32, "ino32"},
	{Opt_noino32, "noino32"},
	{Opt_fscache, "fsc"},
	{Opt_nofscache, "nofsc"},
	{Opt_poolperm, "poolperm"},
	{Opt_nopoolperm, "nopoolperm"},
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	{Opt_acl, "acl"},
#endif
	{Opt_noacl, "noacl"},
	{-1, NULL}
};
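
/*
 * A (hypothetical) mount option string matched against the table above
 * might look like "rsize=131072,noasyncreaddir,fsc,snapdirname=.snapshot";
 * each token not consumed by the generic libceph option parser is handed
 * to parse_fsopt_token() below.
 */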

static int parse_fsopt_token(char *c, void *private)
{
	struct ceph_mount_options *fsopt = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token((char *)c, fsopt_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_snapdirname:
		kfree(fsopt->snapdir_name);
		fsopt->snapdir_name = kstrndup(argstr[0].from,
					       argstr[0].to-argstr[0].from,
					       GFP_KERNEL);
		if (!fsopt->snapdir_name)
			return -ENOMEM;
		break;

		/* misc */
	case Opt_mds_namespace:
		fsopt->mds_namespace = intval;
		break;
	case Opt_wsize:
		fsopt->wsize = intval;
		break;
	case Opt_rsize:
		fsopt->rsize = intval;
		break;
	case Opt_rasize:
		fsopt->rasize = intval;
		break;
	case Opt_caps_wanted_delay_min:
		fsopt->caps_wanted_delay_min = intval;
		break;
	case Opt_caps_wanted_delay_max:
		fsopt->caps_wanted_delay_max = intval;
		break;
	case Opt_cap_release_safety:
		fsopt->cap_release_safety = intval;
		break;
	case Opt_readdir_max_entries:
		fsopt->max_readdir = intval;
		break;
	case Opt_readdir_max_bytes:
		fsopt->max_readdir_bytes = intval;
		break;
	case Opt_congestion_kb:
		fsopt->congestion_kb = intval;
		break;
	case Opt_dirstat:
		fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
		break;
	case Opt_nodirstat:
		fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
		break;
	case Opt_rbytes:
		fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
		break;
	case Opt_norbytes:
		fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
		break;
	case Opt_asyncreaddir:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
		break;
	case Opt_noasyncreaddir:
		fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
		break;
	case Opt_dcache:
		fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
		break;
	case Opt_nodcache:
		fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
		break;
	case Opt_ino32:
		fsopt->flags |= CEPH_MOUNT_OPT_INO32;
		break;
	case Opt_noino32:
		fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
		break;
	case Opt_fscache:
		fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
		break;
	case Opt_nofscache:
		fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
		break;
	case Opt_poolperm:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
		break;
	case Opt_nopoolperm:
		fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
		break;
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	case Opt_acl:
		fsopt->sb_flags |= MS_POSIXACL;
		break;
#endif
	case Opt_noacl:
		fsopt->sb_flags &= ~MS_POSIXACL;
		break;
	default:
		BUG_ON(token);
	}
	return 0;
}

static void destroy_mount_options(struct ceph_mount_options *args)
{
	dout("destroy_mount_options %p\n", args);
	kfree(args->snapdir_name);
	kfree(args);
}

static int strcmp_null(const char *s1, const char *s2)
{
	if (!s1 && !s2)
		return 0;
	if (s1 && !s2)
		return -1;
	if (!s1 && s2)
		return 1;
	return strcmp(s1, s2);
}

static int compare_mount_options(struct ceph_mount_options *new_fsopt,
				 struct ceph_options *new_opt,
				 struct ceph_fs_client *fsc)
{
	struct ceph_mount_options *fsopt1 = new_fsopt;
	struct ceph_mount_options *fsopt2 = fsc->mount_options;
	int ofs = offsetof(struct ceph_mount_options, snapdir_name);
	int ret;

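	/*
	 * All of the scalar options are laid out in front of snapdir_name
	 * in struct ceph_mount_options (see super.h), so one memcmp of the
	 * leading bytes compares them in a single shot; the string options
	 * are then compared individually.
	 */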
	ret = memcmp(fsopt1, fsopt2, ofs);
	if (ret)
		return ret;

	ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
	if (ret)
		return ret;

	return ceph_compare_options(new_opt, fsc->client);
}

static int parse_mount_options(struct ceph_mount_options **pfsopt,
			       struct ceph_options **popt,
			       int flags, char *options,
			       const char *dev_name,
			       const char **path)
{
	struct ceph_mount_options *fsopt;
	const char *dev_name_end;
	int err;

	if (!dev_name || !*dev_name)
		return -EINVAL;

	fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
	if (!fsopt)
		return -ENOMEM;

	dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);

	fsopt->sb_flags = flags;
	fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;

	fsopt->rsize = CEPH_RSIZE_DEFAULT;
	fsopt->rasize = CEPH_RASIZE_DEFAULT;
	fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
	if (!fsopt->snapdir_name) {
		err = -ENOMEM;
		goto out;
	}

	fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
	fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
	fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
	fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
	fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
	fsopt->congestion_kb = default_congestion_kb();
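	/*
	 * CEPH_FS_CLUSTER_ID_NONE means no mds_namespace= option was given;
	 * when one is, the id is handed to the monitor client as
	 * fs_cluster_id in create_fs_client() so that the monitors can
	 * return the MDS map for that particular file system.
	 */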
	fsopt->mds_namespace = CEPH_FS_CLUSTER_ID_NONE;

	/*
	 * Distinguish the server list from the path in "dev_name".
	 * Internally we do not include the leading '/' in the path.
	 *
	 * "dev_name" will look like:
	 *     <server_spec>[,<server_spec>...]:[<path>]
	 * where
	 *     <server_spec> is <ip>[:<port>]
	 *     <path> is optional, but if present must begin with '/'
	 */
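	/*
	 * For example, a (hypothetical) dev_name of
	 * "192.168.0.1:6789,192.168.0.2:6789:/export" names two monitor
	 * addresses and mounts the subtree "/export", while
	 * "192.168.0.1:6789:" mounts the root of the file system.
	 */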
	dev_name_end = strchr(dev_name, '/');
	if (dev_name_end) {
		/* skip over leading '/' for path */
		*path = dev_name_end + 1;
	} else {
		/* path is empty */
		dev_name_end = dev_name + strlen(dev_name);
		*path = dev_name_end;
	}
	err = -EINVAL;
	dev_name_end--;		/* back up to ':' separator */
	if (dev_name_end < dev_name || *dev_name_end != ':') {
		pr_err("device name is missing path (no : separator in %s)\n",
		       dev_name);
		goto out;
	}
	dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
	dout("server path '%s'\n", *path);

	*popt = ceph_parse_options(options, dev_name, dev_name_end,
				   parse_fsopt_token, (void *)fsopt);
	if (IS_ERR(*popt)) {
		err = PTR_ERR(*popt);
		goto out;
	}

	/* success */
	*pfsopt = fsopt;
	return 0;

out:
	destroy_mount_options(fsopt);
	return err;
}

/**
 * ceph_show_options - Show mount options in /proc/mounts
 * @m: seq_file to write to
 * @root: root of that (sub)tree
 */
static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
	struct ceph_mount_options *fsopt = fsc->mount_options;
	size_t pos;
	int ret;

	/* a comma between MNT/MS and client options */
	seq_putc(m, ',');
	pos = m->count;

	ret = ceph_print_client_options(m, fsc->client);
	if (ret)
		return ret;

	/* retract our comma if no client options */
	if (m->count == pos)
		m->count--;

	if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
		seq_puts(m, ",dirstat");
	if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES))
		seq_puts(m, ",rbytes");
	if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
		seq_puts(m, ",noasyncreaddir");
	if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
		seq_puts(m, ",nodcache");
	if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE)
		seq_puts(m, ",fsc");
	if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
		seq_puts(m, ",nopoolperm");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	if (fsopt->sb_flags & MS_POSIXACL)
		seq_puts(m, ",acl");
	else
		seq_puts(m, ",noacl");
#endif

	if (fsopt->mds_namespace != CEPH_FS_CLUSTER_ID_NONE)
		seq_printf(m, ",mds_namespace=%d", fsopt->mds_namespace);
	if (fsopt->wsize)
		seq_printf(m, ",wsize=%d", fsopt->wsize);
	if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
		seq_printf(m, ",rsize=%d", fsopt->rsize);
	if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
		seq_printf(m, ",rasize=%d", fsopt->rasize);
	if (fsopt->congestion_kb != default_congestion_kb())
		seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
	if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_min=%d",
			   fsopt->caps_wanted_delay_min);
	if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_max=%d",
			   fsopt->caps_wanted_delay_max);
	if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
		seq_printf(m, ",cap_release_safety=%d",
			   fsopt->cap_release_safety);
	if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
		seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
	if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
		seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
	if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
		seq_show_option(m, "snapdirname", fsopt->snapdir_name);

	return 0;
}

/*
 * handle any mon messages the standard library doesn't understand.
 * return error if we don't either.
 */
static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
{
	struct ceph_fs_client *fsc = client->private;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(fsc->mdsc, msg);
		return 0;

	default:
		return -1;
	}
}

/*
 * create a new fs client
 */
static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
					       struct ceph_options *opt)
{
	struct ceph_fs_client *fsc;
	const u64 supported_features =
		CEPH_FEATURE_FLOCK |
		CEPH_FEATURE_DIRLAYOUTHASH |
		CEPH_FEATURE_MDS_INLINE_DATA;
	const u64 required_features = 0;
	int page_count;
	size_t size;
	int err = -ENOMEM;

	fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
	if (!fsc)
		return ERR_PTR(-ENOMEM);

	fsc->client = ceph_create_client(opt, fsc, supported_features,
					 required_features);
	if (IS_ERR(fsc->client)) {
		err = PTR_ERR(fsc->client);
		goto fail;
	}
	fsc->client->extra_mon_dispatch = extra_mon_dispatch;
	fsc->client->monc.fs_cluster_id = fsopt->mds_namespace;
	ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, 0, true);

	fsc->mount_options = fsopt;

	fsc->sb = NULL;
	fsc->mount_state = CEPH_MOUNT_MOUNTING;

	atomic_long_set(&fsc->writeback_count, 0);

	err = bdi_init(&fsc->backing_dev_info);
	if (err < 0)
		goto fail_client;

	err = -ENOMEM;
	/*
	 * The number of concurrent works can be high but they don't need
	 * to be processed in parallel, limit concurrency.
	 */
	fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
	if (fsc->wb_wq == NULL)
		goto fail_bdi;
	fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
	if (fsc->pg_inv_wq == NULL)
		goto fail_wb_wq;
	fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
	if (fsc->trunc_wq == NULL)
		goto fail_pg_inv_wq;

	/* set up mempools */
	err = -ENOMEM;
	page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
	size = sizeof (struct page *) * (page_count ? page_count : 1);
	fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
	if (!fsc->wb_pagevec_pool)
		goto fail_trunc_wq;

	/* setup fscache */
	if ((fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) &&
	    (ceph_fscache_register_fs(fsc) != 0))
		goto fail_fscache;

	/* caps */
	fsc->min_caps = fsopt->max_readdir;

	return fsc;

fail_fscache:
	ceph_fscache_unregister_fs(fsc);
fail_trunc_wq:
	destroy_workqueue(fsc->trunc_wq);
fail_pg_inv_wq:
	destroy_workqueue(fsc->pg_inv_wq);
fail_wb_wq:
	destroy_workqueue(fsc->wb_wq);
fail_bdi:
	bdi_destroy(&fsc->backing_dev_info);
fail_client:
	ceph_destroy_client(fsc->client);
fail:
	kfree(fsc);
	return ERR_PTR(err);
}

static void destroy_fs_client(struct ceph_fs_client *fsc)
{
	dout("destroy_fs_client %p\n", fsc);

	ceph_fscache_unregister_fs(fsc);

	destroy_workqueue(fsc->wb_wq);
	destroy_workqueue(fsc->pg_inv_wq);
	destroy_workqueue(fsc->trunc_wq);

	bdi_destroy(&fsc->backing_dev_info);

	mempool_destroy(fsc->wb_pagevec_pool);

	destroy_mount_options(fsc->mount_options);

	ceph_fs_debugfs_cleanup(fsc);

	ceph_destroy_client(fsc->client);

	kfree(fsc);
	dout("destroy_fs_client %p done\n", fsc);
}

/*
 * caches
 */
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_cap_flush_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;

static void ceph_inode_init_once(void *foo)
{
	struct ceph_inode_info *ci = foo;
	inode_init_once(&ci->vfs_inode);
}

static int __init init_caches(void)
{
	int error = -ENOMEM;

	ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
				      sizeof(struct ceph_inode_info),
				      __alignof__(struct ceph_inode_info),
				      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
				      SLAB_ACCOUNT, ceph_inode_init_once);
	if (ceph_inode_cachep == NULL)
		return -ENOMEM;

	ceph_cap_cachep = KMEM_CACHE(ceph_cap,
				     SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (ceph_cap_cachep == NULL)
		goto bad_cap;
	ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
					   SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (ceph_cap_flush_cachep == NULL)
		goto bad_cap_flush;

	ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
					SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (ceph_dentry_cachep == NULL)
		goto bad_dentry;

	ceph_file_cachep = KMEM_CACHE(ceph_file_info,
				      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (ceph_file_cachep == NULL)
		goto bad_file;

	error = ceph_fscache_register();
	if (error)
		goto bad_fscache;

	return 0;

bad_fscache:
	kmem_cache_destroy(ceph_file_cachep);
bad_file:
	kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
	kmem_cache_destroy(ceph_cap_flush_cachep);
bad_cap_flush:
	kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
	kmem_cache_destroy(ceph_inode_cachep);
	return error;
}

static void destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();

	kmem_cache_destroy(ceph_inode_cachep);
	kmem_cache_destroy(ceph_cap_cachep);
	kmem_cache_destroy(ceph_cap_flush_cachep);
	kmem_cache_destroy(ceph_dentry_cachep);
	kmem_cache_destroy(ceph_file_cachep);

	ceph_fscache_unregister();
}

/*
 * ceph_umount_begin - initiate forced umount.  Tear down the
 * mount, skipping steps that may hang while waiting for server(s).
 */
static void ceph_umount_begin(struct super_block *sb)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

	dout("ceph_umount_begin - starting forced umount\n");
	if (!fsc)
		return;
	fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
	ceph_mdsc_force_umount(fsc->mdsc);
	return;
}

static const struct super_operations ceph_super_ops = {
	.alloc_inode	= ceph_alloc_inode,
	.destroy_inode	= ceph_destroy_inode,
	.write_inode	= ceph_write_inode,
	.drop_inode	= ceph_drop_inode,
	.sync_fs	= ceph_sync_fs,
	.put_super	= ceph_put_super,
	.show_options	= ceph_show_options,
	.statfs		= ceph_statfs,
	.umount_begin	= ceph_umount_begin,
};

/*
 * Bootstrap mount by opening the root directory.  Note the mount
 * @started time from caller, and time out if this takes too long.
 */
static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
				       const char *path,
				       unsigned long started)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req = NULL;
	int err;
	struct dentry *root;

	/* open dir */
	dout("open_root_inode opening '%s'\n", path);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_path1 = kstrdup(path, GFP_NOFS);
	if (!req->r_path1) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}

	req->r_ino1.ino = CEPH_INO_ROOT;
	req->r_ino1.snap = CEPH_NOSNAP;
	req->r_started = started;
	req->r_timeout = fsc->client->options->mount_timeout;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_num_caps = 2;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err == 0) {
		struct inode *inode = req->r_target_inode;
		req->r_target_inode = NULL;
		dout("open_root_inode success\n");
		if (ceph_ino(inode) == CEPH_INO_ROOT &&
		    fsc->sb->s_root == NULL) {
			root = d_make_root(inode);
			if (!root) {
				root = ERR_PTR(-ENOMEM);
				goto out;
			}
		} else {
			root = d_obtain_root(inode);
		}
		ceph_init_dentry(root);
		dout("open_root_inode success, root dentry is %p\n", root);
	} else {
		root = ERR_PTR(err);
	}
out:
	ceph_mdsc_put_request(req);
	return root;
}


/*
 * mount: join the ceph cluster, and open root directory.
 */
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
				      const char *path)
{
	int err;
	unsigned long started = jiffies;  /* note the start time */
	struct dentry *root;
	int first = 0;   /* first vfsmount for this super_block */

	dout("mount start %p\n", fsc);
	mutex_lock(&fsc->client->mount_mutex);

	if (!fsc->sb->s_root) {
		err = __ceph_open_session(fsc->client, started);
		if (err < 0)
			goto out;

		dout("mount opening root\n");
		root = open_root_dentry(fsc, "", started);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}
		fsc->sb->s_root = root;
		first = 1;

		err = ceph_fs_debugfs_init(fsc);
		if (err < 0)
			goto fail;
	}

	if (path[0] == 0) {
		root = fsc->sb->s_root;
		dget(root);
	} else {
		dout("mount opening base mountpoint\n");
		root = open_root_dentry(fsc, path, started);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto fail;
		}
	}

	fsc->mount_state = CEPH_MOUNT_MOUNTED;
	dout("mount success\n");
	mutex_unlock(&fsc->client->mount_mutex);
	return root;

fail:
	if (first) {
		dput(fsc->sb->s_root);
		fsc->sb->s_root = NULL;
	}
out:
	mutex_unlock(&fsc->client->mount_mutex);
	return ERR_PTR(err);
}

static int ceph_set_super(struct super_block *s, void *data)
{
	struct ceph_fs_client *fsc = data;
	int ret;

	dout("set_super %p data %p\n", s, data);

	s->s_flags = fsc->mount_options->sb_flags;
	s->s_maxbytes = 1ULL << 40;  /* temp value until we get mdsmap */

	s->s_xattr = ceph_xattr_handlers;
	s->s_fs_info = fsc;
	fsc->sb = s;

	s->s_op = &ceph_super_ops;
	s->s_export_op = &ceph_export_ops;

	s->s_time_gran = 1000;  /* 1000 ns == 1 us */

	ret = set_anon_super(s, NULL);  /* what is that second arg for? */
	if (ret != 0)
		goto fail;

	return ret;

fail:
	s->s_fs_info = NULL;
	fsc->sb = NULL;
	return ret;
}

/*
 * share superblock if same fs AND options
 */
static int ceph_compare_super(struct super_block *sb, void *data)
{
	struct ceph_fs_client *new = data;
	struct ceph_mount_options *fsopt = new->mount_options;
	struct ceph_options *opt = new->client->options;
	struct ceph_fs_client *other = ceph_sb_to_client(sb);

	dout("ceph_compare_super %p\n", sb);

	if (compare_mount_options(fsopt, opt, other)) {
		dout("monitor(s)/mount options don't match\n");
		return 0;
	}
	if ((opt->flags & CEPH_OPT_FSID) &&
	    ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
		dout("fsid doesn't match\n");
		return 0;
	}
	if (fsopt->sb_flags != other->mount_options->sb_flags) {
		dout("flags differ\n");
		return 0;
	}
	return 1;
}

/*
 * construct our own bdi so we can control readahead, etc.
 */
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

static int ceph_register_bdi(struct super_block *sb,
			     struct ceph_fs_client *fsc)
{
	int err;

	/* set ra_pages based on rasize mount option? */
	if (fsc->mount_options->rasize >= PAGE_SIZE)
		fsc->backing_dev_info.ra_pages =
			(fsc->mount_options->rasize + PAGE_SIZE - 1)
			>> PAGE_SHIFT;
	else
		fsc->backing_dev_info.ra_pages =
			VM_MAX_READAHEAD * 1024 / PAGE_SIZE;

	err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
			   atomic_long_inc_return(&bdi_seq));
	if (!err)
		sb->s_bdi = &fsc->backing_dev_info;
	return err;
}

static struct dentry *ceph_mount(struct file_system_type *fs_type,
				 int flags, const char *dev_name, void *data)
{
	struct super_block *sb;
	struct ceph_fs_client *fsc;
	struct dentry *res;
	int err;
	int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
	const char *path = NULL;
	struct ceph_mount_options *fsopt = NULL;
	struct ceph_options *opt = NULL;

	dout("ceph_mount\n");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	flags |= MS_POSIXACL;
#endif
	err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out_final;
	}

	/* create client (which we may/may not use) */
	fsc = create_fs_client(fsopt, opt);
	if (IS_ERR(fsc)) {
		res = ERR_CAST(fsc);
		destroy_mount_options(fsopt);
		ceph_destroy_options(opt);
		goto out_final;
	}

	err = ceph_mdsc_init(fsc);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out;
	}

	if (ceph_test_opt(fsc->client, NOSHARE))
		compare_super = NULL;
	sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc);
	if (IS_ERR(sb)) {
		res = ERR_CAST(sb);
		goto out;
	}

	if (ceph_sb_to_client(sb) != fsc) {
		ceph_mdsc_destroy(fsc);
		destroy_fs_client(fsc);
		fsc = ceph_sb_to_client(sb);
		dout("get_sb got existing client %p\n", fsc);
	} else {
		dout("get_sb using new client %p\n", fsc);
		err = ceph_register_bdi(sb, fsc);
		if (err < 0) {
			res = ERR_PTR(err);
			goto out_splat;
		}
	}

	res = ceph_real_mount(fsc, path);
	if (IS_ERR(res))
		goto out_splat;
	dout("root %p inode %p ino %llx.%llx\n", res,
	     d_inode(res), ceph_vinop(d_inode(res)));
	return res;

out_splat:
	ceph_mdsc_close_sessions(fsc->mdsc);
	deactivate_locked_super(sb);
	goto out_final;

out:
	ceph_mdsc_destroy(fsc);
	destroy_fs_client(fsc);
out_final:
	dout("ceph_mount fail %ld\n", PTR_ERR(res));
	return res;
}

static void ceph_kill_sb(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);
	dev_t dev = s->s_dev;

	dout("kill_sb %p\n", s);

	ceph_mdsc_pre_umount(fsc->mdsc);
	generic_shutdown_super(s);
	ceph_mdsc_destroy(fsc);

	destroy_fs_client(fsc);
	free_anon_bdev(dev);
}

static struct file_system_type ceph_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ceph",
	.mount		= ceph_mount,
	.kill_sb	= ceph_kill_sb,
	.fs_flags	= FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("ceph");

static int __init init_ceph(void)
{
	int ret = init_caches();
	if (ret)
		goto out;

	ceph_flock_init();
	ceph_xattr_init();
	ret = register_filesystem(&ceph_fs_type);
	if (ret)
		goto out_xattr;

	pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);

	return 0;

out_xattr:
	ceph_xattr_exit();
	destroy_caches();
out:
	return ret;
}

static void __exit exit_ceph(void)
{
	dout("exit_ceph\n");
	unregister_filesystem(&ceph_fs_type);
	ceph_xattr_exit();
	destroy_caches();
}

module_init(init_ceph);
module_exit(exit_ceph);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");