fs/ceph/super.c
1
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/backing-dev.h>
5 #include <linux/ctype.h>
6 #include <linux/fs.h>
7 #include <linux/inet.h>
8 #include <linux/in6.h>
9 #include <linux/module.h>
10 #include <linux/mount.h>
11 #include <linux/parser.h>
12 #include <linux/sched.h>
13 #include <linux/seq_file.h>
14 #include <linux/slab.h>
15 #include <linux/statfs.h>
16 #include <linux/string.h>
17
18 #include "super.h"
19 #include "mds_client.h"
20 #include "cache.h"
21
22 #include <linux/ceph/ceph_features.h>
23 #include <linux/ceph/decode.h>
24 #include <linux/ceph/mon_client.h>
25 #include <linux/ceph/auth.h>
26 #include <linux/ceph/debugfs.h>
27
28 /*
29 * Ceph superblock operations
30 *
31 * Handle the basics of mounting, unmounting.
32 */
33
34 /*
35 * super ops
36 */
37 static void ceph_put_super(struct super_block *s)
38 {
39 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
40
41 dout("put_super\n");
42 ceph_mdsc_close_sessions(fsc->mdsc);
43 }
44
45 static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
46 {
47 struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
48 struct ceph_monmap *monmap = fsc->client->monc.monmap;
49 struct ceph_statfs st;
50 u64 fsid;
51 int err;
52
53 dout("statfs\n");
54 err = ceph_monc_do_statfs(&fsc->client->monc, &st);
55 if (err < 0)
56 return err;
57
58 /* fill in kstatfs */
59 buf->f_type = CEPH_SUPER_MAGIC; /* ?? */
60
61 /*
62 * express utilization in terms of large blocks to avoid
63 * overflow on 32-bit machines.
64 *
65 * NOTE: for the time being, we make bsize == frsize to humor
66 * not-yet-ancient versions of glibc that are broken.
67 * Someday, we will probably want to report a real block
68 * size... whatever that may mean for a network file system!
69 */
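/*
 * Worked example (a sketch, assuming CEPH_BLOCK_SHIFT is 22, i.e. 4 MB
 * "blocks"): the monitors report usage in KB, so shifting right by
 * CEPH_BLOCK_SHIFT - 10 = 12 converts KB into 4 MB units; st.kb = 8388608
 * (8 GB) would be reported as f_blocks = 2048.
 */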
70 buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
71 buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
72 buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
73 buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
74 buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
75
76 buf->f_files = le64_to_cpu(st.num_objects);
77 buf->f_ffree = -1;
78 buf->f_namelen = NAME_MAX;
79
80 /* leave fsid little-endian, regardless of host endianness */
81 fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
82 buf->f_fsid.val[0] = fsid & 0xffffffff;
83 buf->f_fsid.val[1] = fsid >> 32;
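/*
 * Note (descriptive only): the 128-bit cluster fsid is folded to 64 bits
 * by XORing its two halves above, then split across the two 32-bit
 * f_fsid words, so statfs() reports a stable per-cluster identifier.
 */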
84
85 return 0;
86 }
87
88
89 static int ceph_sync_fs(struct super_block *sb, int wait)
90 {
91 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
92
93 if (!wait) {
94 dout("sync_fs (non-blocking)\n");
95 ceph_flush_dirty_caps(fsc->mdsc);
96 dout("sync_fs (non-blocking) done\n");
97 return 0;
98 }
99
100 dout("sync_fs (blocking)\n");
101 ceph_osdc_sync(&fsc->client->osdc);
102 ceph_mdsc_sync(fsc->mdsc);
103 dout("sync_fs (blocking) done\n");
104 return 0;
105 }
106
107 /*
108 * mount options
109 */
110 enum {
111 Opt_mds_namespace,
112 Opt_wsize,
113 Opt_rsize,
114 Opt_rasize,
115 Opt_caps_wanted_delay_min,
116 Opt_caps_wanted_delay_max,
117 Opt_cap_release_safety,
118 Opt_readdir_max_entries,
119 Opt_readdir_max_bytes,
120 Opt_congestion_kb,
121 Opt_last_int,
122 /* int args above */
123 Opt_snapdirname,
124 Opt_last_string,
125 /* string args above */
126 Opt_dirstat,
127 Opt_nodirstat,
128 Opt_rbytes,
129 Opt_norbytes,
130 Opt_asyncreaddir,
131 Opt_noasyncreaddir,
132 Opt_dcache,
133 Opt_nodcache,
134 Opt_ino32,
135 Opt_noino32,
136 Opt_fscache,
137 Opt_nofscache,
138 Opt_poolperm,
139 Opt_nopoolperm,
140 #ifdef CONFIG_CEPH_FS_POSIX_ACL
141 Opt_acl,
142 #endif
143 Opt_noacl,
144 };
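/*
 * Note: Opt_last_int and Opt_last_string are sentinels, not real options;
 * parse_fsopt_token() treats any token below Opt_last_int as taking an
 * integer argument and any token between the two sentinels as taking a
 * string, so new options must be added to the matching region above.
 */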
145
146 static match_table_t fsopt_tokens = {
147 {Opt_mds_namespace, "mds_namespace=%d"},
148 {Opt_wsize, "wsize=%d"},
149 {Opt_rsize, "rsize=%d"},
150 {Opt_rasize, "rasize=%d"},
151 {Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
152 {Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
153 {Opt_cap_release_safety, "cap_release_safety=%d"},
154 {Opt_readdir_max_entries, "readdir_max_entries=%d"},
155 {Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
156 {Opt_congestion_kb, "write_congestion_kb=%d"},
157 /* int args above */
158 {Opt_snapdirname, "snapdirname=%s"},
159 /* string args above */
160 {Opt_dirstat, "dirstat"},
161 {Opt_nodirstat, "nodirstat"},
162 {Opt_rbytes, "rbytes"},
163 {Opt_norbytes, "norbytes"},
164 {Opt_asyncreaddir, "asyncreaddir"},
165 {Opt_noasyncreaddir, "noasyncreaddir"},
166 {Opt_dcache, "dcache"},
167 {Opt_nodcache, "nodcache"},
168 {Opt_ino32, "ino32"},
169 {Opt_noino32, "noino32"},
170 {Opt_fscache, "fsc"},
171 {Opt_nofscache, "nofsc"},
172 {Opt_poolperm, "poolperm"},
173 {Opt_nopoolperm, "nopoolperm"},
174 #ifdef CONFIG_CEPH_FS_POSIX_ACL
175 {Opt_acl, "acl"},
176 #endif
177 {Opt_noacl, "noacl"},
178 {-1, NULL}
179 };
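/*
 * Example (illustrative addresses and values only) of how these tokens
 * appear on the command line:
 *
 *   mount -t ceph 192.168.0.1:6789:/ /mnt/ceph \
 *         -o rsize=65536,noasyncreaddir,fsc,snapdirname=.snapshot
 *
 * Options not listed in this table (e.g. name= or secret=) are consumed
 * by libceph in ceph_parse_options() rather than here.
 */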
180
181 static int parse_fsopt_token(char *c, void *private)
182 {
183 struct ceph_mount_options *fsopt = private;
184 substring_t argstr[MAX_OPT_ARGS];
185 int token, intval, ret;
186
187 token = match_token((char *)c, fsopt_tokens, argstr);
188 if (token < 0)
189 return -EINVAL;
190
191 if (token < Opt_last_int) {
192 ret = match_int(&argstr[0], &intval);
193 if (ret < 0) {
194 pr_err("bad mount option arg (not int) at '%s'\n", c);
196 return ret;
197 }
198 dout("got int token %d val %d\n", token, intval);
199 } else if (token > Opt_last_int && token < Opt_last_string) {
200 dout("got string token %d val %s\n", token,
201 argstr[0].from);
202 } else {
203 dout("got token %d\n", token);
204 }
205
206 switch (token) {
207 case Opt_snapdirname:
208 kfree(fsopt->snapdir_name);
209 fsopt->snapdir_name = kstrndup(argstr[0].from,
210 argstr[0].to-argstr[0].from,
211 GFP_KERNEL);
212 if (!fsopt->snapdir_name)
213 return -ENOMEM;
214 break;
215
216 /* misc */
217 case Opt_mds_namespace:
218 fsopt->mds_namespace = intval;
219 break;
220 case Opt_wsize:
221 fsopt->wsize = intval;
222 break;
223 case Opt_rsize:
224 fsopt->rsize = intval;
225 break;
226 case Opt_rasize:
227 fsopt->rasize = intval;
228 break;
229 case Opt_caps_wanted_delay_min:
230 fsopt->caps_wanted_delay_min = intval;
231 break;
232 case Opt_caps_wanted_delay_max:
233 fsopt->caps_wanted_delay_max = intval;
234 break;
235 case Opt_readdir_max_entries:
236 fsopt->max_readdir = intval;
237 break;
238 case Opt_readdir_max_bytes:
239 fsopt->max_readdir_bytes = intval;
240 break;
241 case Opt_congestion_kb:
242 fsopt->congestion_kb = intval;
243 break;
244 case Opt_dirstat:
245 fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
246 break;
247 case Opt_nodirstat:
248 fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
249 break;
250 case Opt_rbytes:
251 fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
252 break;
253 case Opt_norbytes:
254 fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
255 break;
256 case Opt_asyncreaddir:
257 fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
258 break;
259 case Opt_noasyncreaddir:
260 fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
261 break;
262 case Opt_dcache:
263 fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
264 break;
265 case Opt_nodcache:
266 fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
267 break;
268 case Opt_ino32:
269 fsopt->flags |= CEPH_MOUNT_OPT_INO32;
270 break;
271 case Opt_noino32:
272 fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
273 break;
274 case Opt_fscache:
275 fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
276 break;
277 case Opt_nofscache:
278 fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
279 break;
280 case Opt_poolperm:
281 fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
283 break;
284 case Opt_nopoolperm:
285 fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
286 break;
287 #ifdef CONFIG_CEPH_FS_POSIX_ACL
288 case Opt_acl:
289 fsopt->sb_flags |= MS_POSIXACL;
290 break;
291 #endif
292 case Opt_noacl:
293 fsopt->sb_flags &= ~MS_POSIXACL;
294 break;
295 default:
296 BUG_ON(token);
297 }
298 return 0;
299 }
300
301 static void destroy_mount_options(struct ceph_mount_options *args)
302 {
303 dout("destroy_mount_options %p\n", args);
304 kfree(args->snapdir_name);
305 kfree(args->server_path);
306 kfree(args);
307 }
308
309 static int strcmp_null(const char *s1, const char *s2)
310 {
311 if (!s1 && !s2)
312 return 0;
313 if (s1 && !s2)
314 return -1;
315 if (!s1 && s2)
316 return 1;
317 return strcmp(s1, s2);
318 }
319
320 static int compare_mount_options(struct ceph_mount_options *new_fsopt,
321 struct ceph_options *new_opt,
322 struct ceph_fs_client *fsc)
323 {
324 struct ceph_mount_options *fsopt1 = new_fsopt;
325 struct ceph_mount_options *fsopt2 = fsc->mount_options;
326 int ofs = offsetof(struct ceph_mount_options, snapdir_name);
327 int ret;
328
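/*
 * Assumption documented here for clarity: every memcmp-able scalar
 * option in struct ceph_mount_options is expected to precede
 * snapdir_name, so comparing the first 'ofs' bytes covers all of the
 * integer/flag options; the string options are compared individually
 * below.
 */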
329 ret = memcmp(fsopt1, fsopt2, ofs);
330 if (ret)
331 return ret;
332
333 ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
334 if (ret)
335 return ret;
336
337 ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
338 if (ret)
339 return ret;
340
341 return ceph_compare_options(new_opt, fsc->client);
342 }
343
344 static int parse_mount_options(struct ceph_mount_options **pfsopt,
345 struct ceph_options **popt,
346 int flags, char *options,
347 const char *dev_name)
348 {
349 struct ceph_mount_options *fsopt;
350 const char *dev_name_end;
351 int err;
352
353 if (!dev_name || !*dev_name)
354 return -EINVAL;
355
356 fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
357 if (!fsopt)
358 return -ENOMEM;
359
360 dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);
361
362 fsopt->sb_flags = flags;
363 fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
364
365 fsopt->rsize = CEPH_RSIZE_DEFAULT;
366 fsopt->rasize = CEPH_RASIZE_DEFAULT;
367 fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
368 if (!fsopt->snapdir_name) {
369 err = -ENOMEM;
370 goto out;
371 }
372
373 fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
374 fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
375 fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
376 fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
377 fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
378 fsopt->congestion_kb = default_congestion_kb();
379 fsopt->mds_namespace = CEPH_FS_CLUSTER_ID_NONE;
380
381 /*
382 * Distinguish the server list from the path in "dev_name".
383 * Internally we do not include the leading '/' in the path.
384 *
385 * "dev_name" will look like:
386 * <server_spec>[,<server_spec>...]:[<path>]
387 * where
388 * <server_spec> is <ip>[:<port>]
389 * <path> is optional, but if present must begin with '/'
390 */
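/*
 * For example (illustrative addresses only), a dev_name of
 *   "192.168.0.1:6789,192.168.0.2:6789:/export/dir"
 * yields a two-monitor server list and server_path "/export/dir".
 */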
391 dev_name_end = strchr(dev_name, '/');
392 if (dev_name_end) {
393 fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
394 if (!fsopt->server_path) {
395 err = -ENOMEM;
396 goto out;
397 }
398 } else {
399 dev_name_end = dev_name + strlen(dev_name);
400 }
401 err = -EINVAL;
402 dev_name_end--; /* back up to ':' separator */
403 if (dev_name_end < dev_name || *dev_name_end != ':') {
404 pr_err("device name is missing path (no : separator in %s)\n",
405 dev_name);
406 goto out;
407 }
408 dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
409 if (fsopt->server_path)
410 dout("server path '%s'\n", fsopt->server_path);
411
412 *popt = ceph_parse_options(options, dev_name, dev_name_end,
413 parse_fsopt_token, (void *)fsopt);
414 if (IS_ERR(*popt)) {
415 err = PTR_ERR(*popt);
416 goto out;
417 }
418
419 /* success */
420 *pfsopt = fsopt;
421 return 0;
422
423 out:
424 destroy_mount_options(fsopt);
425 return err;
426 }
427
428 /**
429 * ceph_show_options - Show mount options in /proc/mounts
430 * @m: seq_file to write to
431 * @root: root of that (sub)tree
432 */
433 static int ceph_show_options(struct seq_file *m, struct dentry *root)
434 {
435 struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
436 struct ceph_mount_options *fsopt = fsc->mount_options;
437 size_t pos;
438 int ret;
439
440 /* a comma between MNT/MS and client options */
441 seq_putc(m, ',');
442 pos = m->count;
443
444 ret = ceph_print_client_options(m, fsc->client);
445 if (ret)
446 return ret;
447
448 /* retract our comma if no client options */
449 if (m->count == pos)
450 m->count--;
451
452 if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
453 seq_puts(m, ",dirstat");
454 if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES))
455 seq_puts(m, ",rbytes");
456 if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
457 seq_puts(m, ",noasyncreaddir");
458 if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
459 seq_puts(m, ",nodcache");
460 if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE)
461 seq_puts(m, ",fsc");
462 if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
463 seq_puts(m, ",nopoolperm");
464
465 #ifdef CONFIG_CEPH_FS_POSIX_ACL
466 if (fsopt->sb_flags & MS_POSIXACL)
467 seq_puts(m, ",acl");
468 else
469 seq_puts(m, ",noacl");
470 #endif
471
472 if (fsopt->mds_namespace != CEPH_FS_CLUSTER_ID_NONE)
473 seq_printf(m, ",mds_namespace=%d", fsopt->mds_namespace);
474 if (fsopt->wsize)
475 seq_printf(m, ",wsize=%d", fsopt->wsize);
476 if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
477 seq_printf(m, ",rsize=%d", fsopt->rsize);
478 if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
479 seq_printf(m, ",rasize=%d", fsopt->rasize);
480 if (fsopt->congestion_kb != default_congestion_kb())
481 seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
482 if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
483 seq_printf(m, ",caps_wanted_delay_min=%d",
484 fsopt->caps_wanted_delay_min);
485 if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
486 seq_printf(m, ",caps_wanted_delay_max=%d",
487 fsopt->caps_wanted_delay_max);
488 if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
489 seq_printf(m, ",cap_release_safety=%d",
490 fsopt->cap_release_safety);
491 if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
492 seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
493 if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
494 seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
495 if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
496 seq_show_option(m, "snapdirname", fsopt->snapdir_name);
497
498 return 0;
499 }
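/*
 * A resulting /proc/mounts entry might look like (illustrative values):
 *
 *   192.168.0.1:6789:/ /mnt/ceph ceph rw,relatime,name=admin,acl,wsize=65536 0 0
 *
 * where only the non-default ceph options are appended by the function
 * above.
 */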
500
501 /*
502 * handle any mon messages the standard library doesn't understand.
503 * return error if we don't either.
504 */
505 static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
506 {
507 struct ceph_fs_client *fsc = client->private;
508 int type = le16_to_cpu(msg->hdr.type);
509
510 switch (type) {
511 case CEPH_MSG_MDS_MAP:
512 ceph_mdsc_handle_map(fsc->mdsc, msg);
513 return 0;
514
515 default:
516 return -1;
517 }
518 }
519
520 /*
521 * create a new fs client
522 */
523 static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
524 struct ceph_options *opt)
525 {
526 struct ceph_fs_client *fsc;
527 const u64 supported_features =
528 CEPH_FEATURE_FLOCK | CEPH_FEATURE_DIRLAYOUTHASH |
529 CEPH_FEATURE_MDSENC | CEPH_FEATURE_MDS_INLINE_DATA;
530 const u64 required_features = 0;
531 int page_count;
532 size_t size;
533 int err = -ENOMEM;
534
535 fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
536 if (!fsc)
537 return ERR_PTR(-ENOMEM);
538
539 fsc->client = ceph_create_client(opt, fsc, supported_features,
540 required_features);
541 if (IS_ERR(fsc->client)) {
542 err = PTR_ERR(fsc->client);
543 goto fail;
544 }
545 fsc->client->extra_mon_dispatch = extra_mon_dispatch;
546 fsc->client->monc.fs_cluster_id = fsopt->mds_namespace;
547 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP, 0, true);
548
549 fsc->mount_options = fsopt;
550
551 fsc->sb = NULL;
552 fsc->mount_state = CEPH_MOUNT_MOUNTING;
553
554 atomic_long_set(&fsc->writeback_count, 0);
555
556 err = bdi_init(&fsc->backing_dev_info);
557 if (err < 0)
558 goto fail_client;
559
560 err = -ENOMEM;
561 /*
562 * The number of concurrent works can be high but they don't need
563 * to be processed in parallel, limit concurrency.
564 */
565 fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
566 if (fsc->wb_wq == NULL)
567 goto fail_bdi;
568 fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
569 if (fsc->pg_inv_wq == NULL)
570 goto fail_wb_wq;
571 fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
572 if (fsc->trunc_wq == NULL)
573 goto fail_pg_inv_wq;
574
575 /* set up mempools */
576 err = -ENOMEM;
577 page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
578 size = sizeof (struct page *) * (page_count ? page_count : 1);
579 fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
580 if (!fsc->wb_pagevec_pool)
581 goto fail_trunc_wq;
582
583 /* setup fscache */
584 if ((fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) &&
585 (ceph_fscache_register_fs(fsc) != 0))
586 goto fail_fscache;
587
588 /* caps */
589 fsc->min_caps = fsopt->max_readdir;
590
591 return fsc;
592
593 fail_fscache:
594 ceph_fscache_unregister_fs(fsc);
595 fail_trunc_wq:
596 destroy_workqueue(fsc->trunc_wq);
597 fail_pg_inv_wq:
598 destroy_workqueue(fsc->pg_inv_wq);
599 fail_wb_wq:
600 destroy_workqueue(fsc->wb_wq);
601 fail_bdi:
602 bdi_destroy(&fsc->backing_dev_info);
603 fail_client:
604 ceph_destroy_client(fsc->client);
605 fail:
606 kfree(fsc);
607 return ERR_PTR(err);
608 }
609
610 static void destroy_fs_client(struct ceph_fs_client *fsc)
611 {
612 dout("destroy_fs_client %p\n", fsc);
613
614 ceph_fscache_unregister_fs(fsc);
615
616 destroy_workqueue(fsc->wb_wq);
617 destroy_workqueue(fsc->pg_inv_wq);
618 destroy_workqueue(fsc->trunc_wq);
619
620 bdi_destroy(&fsc->backing_dev_info);
621
622 mempool_destroy(fsc->wb_pagevec_pool);
623
624 destroy_mount_options(fsc->mount_options);
625
626 ceph_fs_debugfs_cleanup(fsc);
627
628 ceph_destroy_client(fsc->client);
629
630 kfree(fsc);
631 dout("destroy_fs_client %p done\n", fsc);
632 }
633
634 /*
635 * caches
636 */
637 struct kmem_cache *ceph_inode_cachep;
638 struct kmem_cache *ceph_cap_cachep;
639 struct kmem_cache *ceph_cap_flush_cachep;
640 struct kmem_cache *ceph_dentry_cachep;
641 struct kmem_cache *ceph_file_cachep;
642
643 static void ceph_inode_init_once(void *foo)
644 {
645 struct ceph_inode_info *ci = foo;
646 inode_init_once(&ci->vfs_inode);
647 }
648
649 static int __init init_caches(void)
650 {
651 int error = -ENOMEM;
652
653 ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
654 sizeof(struct ceph_inode_info),
655 __alignof__(struct ceph_inode_info),
656 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
657 SLAB_ACCOUNT, ceph_inode_init_once);
658 if (ceph_inode_cachep == NULL)
659 return -ENOMEM;
660
661 ceph_cap_cachep = KMEM_CACHE(ceph_cap,
662 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
663 if (ceph_cap_cachep == NULL)
664 goto bad_cap;
665 ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
666 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
667 if (ceph_cap_flush_cachep == NULL)
668 goto bad_cap_flush;
669
670 ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
671 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
672 if (ceph_dentry_cachep == NULL)
673 goto bad_dentry;
674
675 ceph_file_cachep = KMEM_CACHE(ceph_file_info,
676 SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
677 if (ceph_file_cachep == NULL)
678 goto bad_file;
679
680 if ((error = ceph_fscache_register()))
681 goto bad_fscache;
682
683 return 0;
bad_fscache:
kmem_cache_destroy(ceph_file_cachep);
684 bad_file:
685 kmem_cache_destroy(ceph_dentry_cachep);
686 bad_dentry:
687 kmem_cache_destroy(ceph_cap_flush_cachep);
688 bad_cap_flush:
689 kmem_cache_destroy(ceph_cap_cachep);
690 bad_cap:
691 kmem_cache_destroy(ceph_inode_cachep);
692 return error;
693 }
694
695 static void destroy_caches(void)
696 {
697 /*
698 * Make sure all delayed rcu free inodes are flushed before we
699 * destroy cache.
700 */
701 rcu_barrier();
702
703 kmem_cache_destroy(ceph_inode_cachep);
704 kmem_cache_destroy(ceph_cap_cachep);
705 kmem_cache_destroy(ceph_cap_flush_cachep);
706 kmem_cache_destroy(ceph_dentry_cachep);
707 kmem_cache_destroy(ceph_file_cachep);
708
709 ceph_fscache_unregister();
710 }
711
712
713 /*
714 * ceph_umount_begin - initiate forced umount. Tear down the
715 * mount, skipping steps that may hang while waiting for server(s).
716 */
717 static void ceph_umount_begin(struct super_block *sb)
718 {
719 struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
720
721 dout("ceph_umount_begin - starting forced umount\n");
722 if (!fsc)
723 return;
724 fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
725 ceph_mdsc_force_umount(fsc->mdsc);
726 return;
727 }
728
729 static const struct super_operations ceph_super_ops = {
730 .alloc_inode = ceph_alloc_inode,
731 .destroy_inode = ceph_destroy_inode,
732 .write_inode = ceph_write_inode,
733 .drop_inode = ceph_drop_inode,
734 .sync_fs = ceph_sync_fs,
735 .put_super = ceph_put_super,
736 .show_options = ceph_show_options,
737 .statfs = ceph_statfs,
738 .umount_begin = ceph_umount_begin,
739 };
740
741 /*
742 * Bootstrap mount by opening the root directory. Note the mount
743 * @started time from caller, and time out if this takes too long.
744 */
745 static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
746 const char *path,
747 unsigned long started)
748 {
749 struct ceph_mds_client *mdsc = fsc->mdsc;
750 struct ceph_mds_request *req = NULL;
751 int err;
752 struct dentry *root;
753
754 /* open dir */
755 dout("open_root_inode opening '%s'\n", path);
756 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
757 if (IS_ERR(req))
758 return ERR_CAST(req);
759 req->r_path1 = kstrdup(path, GFP_NOFS);
760 if (!req->r_path1) {
761 root = ERR_PTR(-ENOMEM);
762 goto out;
763 }
764
765 req->r_ino1.ino = CEPH_INO_ROOT;
766 req->r_ino1.snap = CEPH_NOSNAP;
767 req->r_started = started;
768 req->r_timeout = fsc->client->options->mount_timeout;
769 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
770 req->r_num_caps = 2;
771 err = ceph_mdsc_do_request(mdsc, NULL, req);
772 if (err == 0) {
773 struct inode *inode = req->r_target_inode;
774 req->r_target_inode = NULL;
775 dout("open_root_inode success\n");
776 if (ceph_ino(inode) == CEPH_INO_ROOT &&
777 fsc->sb->s_root == NULL) {
778 root = d_make_root(inode);
779 if (!root) {
780 root = ERR_PTR(-ENOMEM);
781 goto out;
782 }
783 } else {
784 root = d_obtain_root(inode);
785 }
786 ceph_init_dentry(root);
787 dout("open_root_inode success, root dentry is %p\n", root);
788 } else {
789 root = ERR_PTR(err);
790 }
791 out:
792 ceph_mdsc_put_request(req);
793 return root;
794 }
795
796
797
798
799 /*
800 * mount: join the ceph cluster, and open root directory.
801 */
802 static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
803 {
804 int err;
805 unsigned long started = jiffies; /* note the start time */
806 struct dentry *root;
807 int first = 0; /* first vfsmount for this super_block */
808
809 dout("mount start %p\n", fsc);
810 mutex_lock(&fsc->client->mount_mutex);
811
812 if (!fsc->sb->s_root) {
813 err = __ceph_open_session(fsc->client, started);
814 if (err < 0)
815 goto out;
816
817 dout("mount opening root\n");
818 root = open_root_dentry(fsc, "", started);
819 if (IS_ERR(root)) {
820 err = PTR_ERR(root);
821 goto out;
822 }
823 fsc->sb->s_root = root;
824 first = 1;
825
826 err = ceph_fs_debugfs_init(fsc);
827 if (err < 0)
828 goto fail;
829 }
830
831 if (!fsc->mount_options->server_path) {
832 root = fsc->sb->s_root;
833 dget(root);
834 } else {
835 const char *path = fsc->mount_options->server_path + 1;
836 dout("mount opening path %s\n", path);
837 root = open_root_dentry(fsc, path, started);
838 if (IS_ERR(root)) {
839 err = PTR_ERR(root);
840 goto fail;
841 }
842 }
843
844 fsc->mount_state = CEPH_MOUNT_MOUNTED;
845 dout("mount success\n");
846 mutex_unlock(&fsc->client->mount_mutex);
847 return root;
848
849 fail:
850 if (first) {
851 dput(fsc->sb->s_root);
852 fsc->sb->s_root = NULL;
853 }
854 out:
855 mutex_unlock(&fsc->client->mount_mutex);
856 return ERR_PTR(err);
857 }
858
859 static int ceph_set_super(struct super_block *s, void *data)
860 {
861 struct ceph_fs_client *fsc = data;
862 int ret;
863
864 dout("set_super %p data %p\n", s, data);
865
866 s->s_flags = fsc->mount_options->sb_flags;
867 s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
868
869 s->s_xattr = ceph_xattr_handlers;
870 s->s_fs_info = fsc;
871 fsc->sb = s;
872
873 s->s_op = &ceph_super_ops;
874 s->s_export_op = &ceph_export_ops;
875
876 s->s_time_gran = 1000; /* 1000 ns == 1 us */
877
878 ret = set_anon_super(s, NULL); /* what is that second arg for? */
879 if (ret != 0)
880 goto fail;
881
882 return ret;
883
884 fail:
885 s->s_fs_info = NULL;
886 fsc->sb = NULL;
887 return ret;
888 }
889
890 /*
891 * share superblock if same fs AND options
892 */
893 static int ceph_compare_super(struct super_block *sb, void *data)
894 {
895 struct ceph_fs_client *new = data;
896 struct ceph_mount_options *fsopt = new->mount_options;
897 struct ceph_options *opt = new->client->options;
898 struct ceph_fs_client *other = ceph_sb_to_client(sb);
899
900 dout("ceph_compare_super %p\n", sb);
901
902 if (compare_mount_options(fsopt, opt, other)) {
903 dout("monitor(s)/mount options don't match\n");
904 return 0;
905 }
906 if ((opt->flags & CEPH_OPT_FSID) &&
907 ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
908 dout("fsid doesn't match\n");
909 return 0;
910 }
911 if (fsopt->sb_flags != other->mount_options->sb_flags) {
912 dout("flags differ\n");
913 return 0;
914 }
915 return 1;
916 }
917
918 /*
919 * construct our own bdi so we can control readahead, etc.
920 */
921 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
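/*
 * bdi_seq hands out a unique suffix per mount, so the backing devices
 * register as "ceph-1", "ceph-2", ... (names shown for illustration;
 * see bdi_register() below).
 */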
922
923 static int ceph_register_bdi(struct super_block *sb,
924 struct ceph_fs_client *fsc)
925 {
926 int err;
927
928 /* set ra_pages based on rasize mount option? */
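/*
 * Example (assuming 4 KB pages): rasize=8388608 maps to ra_pages = 2048,
 * i.e. an 8 MB readahead window; an rasize smaller than one page falls
 * back to the VM_MAX_READAHEAD default of 128 KB.
 */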
929 if (fsc->mount_options->rasize >= PAGE_SIZE)
930 fsc->backing_dev_info.ra_pages =
931 (fsc->mount_options->rasize + PAGE_SIZE - 1)
932 >> PAGE_SHIFT;
933 else
934 fsc->backing_dev_info.ra_pages =
935 VM_MAX_READAHEAD * 1024 / PAGE_SIZE;
936
937 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
938 atomic_long_inc_return(&bdi_seq));
939 if (!err)
940 sb->s_bdi = &fsc->backing_dev_info;
941 return err;
942 }
943
944 static struct dentry *ceph_mount(struct file_system_type *fs_type,
945 int flags, const char *dev_name, void *data)
946 {
947 struct super_block *sb;
948 struct ceph_fs_client *fsc;
949 struct dentry *res;
950 int err;
951 int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
952 struct ceph_mount_options *fsopt = NULL;
953 struct ceph_options *opt = NULL;
954
955 dout("ceph_mount\n");
956
957 #ifdef CONFIG_CEPH_FS_POSIX_ACL
958 flags |= MS_POSIXACL;
959 #endif
960 err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
961 if (err < 0) {
962 res = ERR_PTR(err);
963 goto out_final;
964 }
965
966 /* create client (which we may/may not use) */
967 fsc = create_fs_client(fsopt, opt);
968 if (IS_ERR(fsc)) {
969 res = ERR_CAST(fsc);
970 destroy_mount_options(fsopt);
971 ceph_destroy_options(opt);
972 goto out_final;
973 }
974
975 err = ceph_mdsc_init(fsc);
976 if (err < 0) {
977 res = ERR_PTR(err);
978 goto out;
979 }
980
981 if (ceph_test_opt(fsc->client, NOSHARE))
982 compare_super = NULL;
983 sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc);
984 if (IS_ERR(sb)) {
985 res = ERR_CAST(sb);
986 goto out;
987 }
988
989 if (ceph_sb_to_client(sb) != fsc) {
990 ceph_mdsc_destroy(fsc);
991 destroy_fs_client(fsc);
992 fsc = ceph_sb_to_client(sb);
993 dout("get_sb got existing client %p\n", fsc);
994 } else {
995 dout("get_sb using new client %p\n", fsc);
996 err = ceph_register_bdi(sb, fsc);
997 if (err < 0) {
998 res = ERR_PTR(err);
999 goto out_splat;
1000 }
1001 }
1002
1003 res = ceph_real_mount(fsc);
1004 if (IS_ERR(res))
1005 goto out_splat;
1006 dout("root %p inode %p ino %llx.%llx\n", res,
1007 d_inode(res), ceph_vinop(d_inode(res)));
1008 return res;
1009
1010 out_splat:
1011 ceph_mdsc_close_sessions(fsc->mdsc);
1012 deactivate_locked_super(sb);
1013 goto out_final;
1014
1015 out:
1016 ceph_mdsc_destroy(fsc);
1017 destroy_fs_client(fsc);
1018 out_final:
1019 dout("ceph_mount fail %ld\n", PTR_ERR(res));
1020 return res;
1021 }
1022
1023 static void ceph_kill_sb(struct super_block *s)
1024 {
1025 struct ceph_fs_client *fsc = ceph_sb_to_client(s);
1026 dev_t dev = s->s_dev;
1027
1028 dout("kill_sb %p\n", s);
1029
1030 ceph_mdsc_pre_umount(fsc->mdsc);
1031 generic_shutdown_super(s);
1032 ceph_mdsc_destroy(fsc);
1033
1034 destroy_fs_client(fsc);
1035 free_anon_bdev(dev);
1036 }
1037
1038 static struct file_system_type ceph_fs_type = {
1039 .owner = THIS_MODULE,
1040 .name = "ceph",
1041 .mount = ceph_mount,
1042 .kill_sb = ceph_kill_sb,
1043 .fs_flags = FS_RENAME_DOES_D_MOVE,
1044 };
1045 MODULE_ALIAS_FS("ceph");
1046
1047 static int __init init_ceph(void)
1048 {
1049 int ret = init_caches();
1050 if (ret)
1051 goto out;
1052
1053 ceph_flock_init();
1054 ceph_xattr_init();
1055 ret = register_filesystem(&ceph_fs_type);
1056 if (ret)
1057 goto out_xattr;
1058
1059 pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
1060
1061 return 0;
1062
1063 out_xattr:
1064 ceph_xattr_exit();
1065 destroy_caches();
1066 out:
1067 return ret;
1068 }
1069
1070 static void __exit exit_ceph(void)
1071 {
1072 dout("exit_ceph\n");
1073 unregister_filesystem(&ceph_fs_type);
1074 ceph_xattr_exit();
1075 destroy_caches();
1076 }
1077
1078 module_init(init_ceph);
1079 module_exit(exit_ceph);
1080
1081 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
1082 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
1083 MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
1084 MODULE_DESCRIPTION("Ceph filesystem for Linux");
1085 MODULE_LICENSE("GPL");