libceph: expose client options through debugfs
fs/ceph/super.c
#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/parser.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * Ceph superblock operations
 *
 * Handle the basics of mounting, unmounting.
 */

/*
 * super ops
 */
static void ceph_put_super(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);

	dout("put_super\n");
	ceph_mdsc_close_sessions(fsc->mdsc);
}

static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(dentry->d_inode);
	struct ceph_monmap *monmap = fsc->client->monc.monmap;
	struct ceph_statfs st;
	u64 fsid;
	int err;

	dout("statfs\n");
	err = ceph_monc_do_statfs(&fsc->client->monc, &st);
	if (err < 0)
		return err;

	/* fill in kstatfs */
	buf->f_type = CEPH_SUPER_MAGIC;  /* ?? */

	/*
	 * express utilization in terms of large blocks to avoid
	 * overflow on 32-bit machines.
	 *
	 * NOTE: for the time being, we make bsize == frsize to humor
	 * not-yet-ancient versions of glibc that are broken.
	 * Someday, we will probably want to report a real block
	 * size...  whatever that may mean for a network file system!
	 */
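	/*
	 * st.kb and st.kb_avail come back from the monitors as KiB counts;
	 * shifting right by (CEPH_BLOCK_SHIFT - 10) converts KiB into
	 * CEPH_BLOCK-sized units (4 MB blocks when CEPH_BLOCK_SHIFT is 22,
	 * as defined in super.h).
	 */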
	buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
	buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
	buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
	buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
	buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);

	buf->f_files = le64_to_cpu(st.num_objects);
	buf->f_ffree = -1;
	buf->f_namelen = NAME_MAX;

	/* leave fsid little-endian, regardless of host endianness */
	fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1);
	buf->f_fsid.val[0] = fsid & 0xffffffff;
	buf->f_fsid.val[1] = fsid >> 32;

	return 0;
}

static int ceph_sync_fs(struct super_block *sb, int wait)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

	if (!wait) {
		dout("sync_fs (non-blocking)\n");
		ceph_flush_dirty_caps(fsc->mdsc);
		dout("sync_fs (non-blocking) done\n");
		return 0;
	}

	dout("sync_fs (blocking)\n");
	ceph_osdc_sync(&fsc->client->osdc);
	ceph_mdsc_sync(fsc->mdsc);
	dout("sync_fs (blocking) done\n");
	return 0;
}

/*
 * mount options
 */
enum {
	Opt_wsize,
	Opt_rsize,
	Opt_rasize,
	Opt_caps_wanted_delay_min,
	Opt_caps_wanted_delay_max,
	Opt_cap_release_safety,
	Opt_readdir_max_entries,
	Opt_readdir_max_bytes,
	Opt_congestion_kb,
	Opt_last_int,
	/* int args above */
	Opt_snapdirname,
	Opt_last_string,
	/* string args above */
	Opt_dirstat,
	Opt_nodirstat,
	Opt_rbytes,
	Opt_norbytes,
	Opt_asyncreaddir,
	Opt_noasyncreaddir,
	Opt_dcache,
	Opt_nodcache,
	Opt_ino32,
	Opt_noino32,
	Opt_fscache,
	Opt_nofscache,
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	Opt_acl,
#endif
	Opt_noacl
};
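
/*
 * Opt_last_int and Opt_last_string are sentinels, not real options:
 * parse_fsopt_token() treats every token before Opt_last_int as taking an
 * integer argument and every token between the two sentinels as taking a
 * string argument; the remaining tokens are boolean flags.
 */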

static match_table_t fsopt_tokens = {
	{Opt_wsize, "wsize=%d"},
	{Opt_rsize, "rsize=%d"},
	{Opt_rasize, "rasize=%d"},
	{Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
	{Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
	{Opt_cap_release_safety, "cap_release_safety=%d"},
	{Opt_readdir_max_entries, "readdir_max_entries=%d"},
	{Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
	{Opt_congestion_kb, "write_congestion_kb=%d"},
	/* int args above */
	{Opt_snapdirname, "snapdirname=%s"},
	/* string args above */
	{Opt_dirstat, "dirstat"},
	{Opt_nodirstat, "nodirstat"},
	{Opt_rbytes, "rbytes"},
	{Opt_norbytes, "norbytes"},
	{Opt_asyncreaddir, "asyncreaddir"},
	{Opt_noasyncreaddir, "noasyncreaddir"},
	{Opt_dcache, "dcache"},
	{Opt_nodcache, "nodcache"},
	{Opt_ino32, "ino32"},
	{Opt_noino32, "noino32"},
	{Opt_fscache, "fsc"},
	{Opt_nofscache, "nofsc"},
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	{Opt_acl, "acl"},
#endif
	{Opt_noacl, "noacl"},
	{-1, NULL}
};
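
/*
 * Illustrative example (addresses and values are made up): a mount like
 *
 *   mount -t ceph 192.168.0.1:6789:/ /mnt/ceph \
 *         -o rsize=524288,noasyncreaddir,snapdirname=.snapshot
 *
 * has its option string split by ceph_parse_options(); the tokens that
 * libceph does not recognize are handed back to parse_fsopt_token()
 * below and matched against fsopt_tokens.
 */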

static int parse_fsopt_token(char *c, void *private)
{
	struct ceph_mount_options *fsopt = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token((char *)c, fsopt_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_snapdirname:
		kfree(fsopt->snapdir_name);
		fsopt->snapdir_name = kstrndup(argstr[0].from,
					       argstr[0].to-argstr[0].from,
					       GFP_KERNEL);
		if (!fsopt->snapdir_name)
			return -ENOMEM;
		break;

		/* misc */
	case Opt_wsize:
		fsopt->wsize = intval;
		break;
	case Opt_rsize:
		fsopt->rsize = intval;
		break;
	case Opt_rasize:
		fsopt->rasize = intval;
		break;
	case Opt_caps_wanted_delay_min:
		fsopt->caps_wanted_delay_min = intval;
		break;
	case Opt_caps_wanted_delay_max:
		fsopt->caps_wanted_delay_max = intval;
		break;
	case Opt_readdir_max_entries:
		fsopt->max_readdir = intval;
		break;
	case Opt_readdir_max_bytes:
		fsopt->max_readdir_bytes = intval;
		break;
	case Opt_congestion_kb:
		fsopt->congestion_kb = intval;
		break;
	case Opt_dirstat:
		fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
		break;
	case Opt_nodirstat:
		fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
		break;
	case Opt_rbytes:
		fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
		break;
	case Opt_norbytes:
		fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
		break;
	case Opt_asyncreaddir:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
		break;
	case Opt_noasyncreaddir:
		fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
		break;
	case Opt_dcache:
		fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
		break;
	case Opt_nodcache:
		fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
		break;
	case Opt_ino32:
		fsopt->flags |= CEPH_MOUNT_OPT_INO32;
		break;
	case Opt_noino32:
		fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
		break;
	case Opt_fscache:
		fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
		break;
	case Opt_nofscache:
		fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
		break;
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	case Opt_acl:
		fsopt->sb_flags |= MS_POSIXACL;
		break;
#endif
	case Opt_noacl:
		fsopt->sb_flags &= ~MS_POSIXACL;
		break;
	default:
		BUG_ON(token);
	}
	return 0;
}

static void destroy_mount_options(struct ceph_mount_options *args)
{
	dout("destroy_mount_options %p\n", args);
	kfree(args->snapdir_name);
	kfree(args);
}

static int strcmp_null(const char *s1, const char *s2)
{
	if (!s1 && !s2)
		return 0;
	if (s1 && !s2)
		return -1;
	if (!s1 && s2)
		return 1;
	return strcmp(s1, s2);
}
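
/*
 * The memcmp() below assumes that all of the fixed-size option fields in
 * struct ceph_mount_options are laid out before snapdir_name, so comparing
 * the bytes up to offsetof(..., snapdir_name) covers every integer and flag
 * field at once; the string option and the libceph-level options are then
 * compared separately.
 */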

static int compare_mount_options(struct ceph_mount_options *new_fsopt,
				 struct ceph_options *new_opt,
				 struct ceph_fs_client *fsc)
{
	struct ceph_mount_options *fsopt1 = new_fsopt;
	struct ceph_mount_options *fsopt2 = fsc->mount_options;
	int ofs = offsetof(struct ceph_mount_options, snapdir_name);
	int ret;

	ret = memcmp(fsopt1, fsopt2, ofs);
	if (ret)
		return ret;

	ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
	if (ret)
		return ret;

	return ceph_compare_options(new_opt, fsc->client);
}

static int parse_mount_options(struct ceph_mount_options **pfsopt,
			       struct ceph_options **popt,
			       int flags, char *options,
			       const char *dev_name,
			       const char **path)
{
	struct ceph_mount_options *fsopt;
	const char *dev_name_end;
	int err;

	if (!dev_name || !*dev_name)
		return -EINVAL;

	fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
	if (!fsopt)
		return -ENOMEM;

	dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);

	fsopt->sb_flags = flags;
	fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;

	fsopt->rsize = CEPH_RSIZE_DEFAULT;
	fsopt->rasize = CEPH_RASIZE_DEFAULT;
	fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
	if (!fsopt->snapdir_name) {
		err = -ENOMEM;
		goto out;
	}

	fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
	fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
	fsopt->cap_release_safety = CEPH_CAP_RELEASE_SAFETY_DEFAULT;
	fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
	fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
	fsopt->congestion_kb = default_congestion_kb();

	/*
	 * Distinguish the server list from the path in "dev_name".
	 * Internally we do not include the leading '/' in the path.
	 *
	 * "dev_name" will look like:
	 *     <server_spec>[,<server_spec>...]:[<path>]
	 * where
	 *     <server_spec> is <ip>[:<port>]
	 *     <path> is optional, but if present must begin with '/'
	 */
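	/*
	 * For example (illustrative addresses), "1.2.3.4:6789,1.2.3.5:/dir"
	 * names two monitors and the cluster path "/dir", so *path ends up
	 * pointing at "dir".
	 */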
	dev_name_end = strchr(dev_name, '/');
	if (dev_name_end) {
		/* skip over leading '/' for path */
		*path = dev_name_end + 1;
	} else {
		/* path is empty */
		dev_name_end = dev_name + strlen(dev_name);
		*path = dev_name_end;
	}
	err = -EINVAL;
	dev_name_end--;		/* back up to ':' separator */
	if (dev_name_end < dev_name || *dev_name_end != ':') {
		pr_err("device name is missing path (no : separator in %s)\n",
		       dev_name);
		goto out;
	}
	dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
	dout("server path '%s'\n", *path);

	*popt = ceph_parse_options(options, dev_name, dev_name_end,
				   parse_fsopt_token, (void *)fsopt);
	if (IS_ERR(*popt)) {
		err = PTR_ERR(*popt);
		goto out;
	}

	/* success */
	*pfsopt = fsopt;
	return 0;

out:
	destroy_mount_options(fsopt);
	return err;
}

/**
 * ceph_show_options - Show mount options in /proc/mounts
 * @m: seq_file to write to
 * @root: root of that (sub)tree
 */
static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
	struct ceph_mount_options *fsopt = fsc->mount_options;
	size_t pos;
	int ret;

	/* a comma between MNT/MS and client options */
	seq_putc(m, ',');
	pos = m->count;

	ret = ceph_print_client_options(m, fsc->client);
	if (ret)
		return ret;

	/* retract our comma if no client options */
	if (m->count == pos)
		m->count--;

	if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
		seq_puts(m, ",dirstat");
	if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES) == 0)
		seq_puts(m, ",norbytes");
	if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
		seq_puts(m, ",noasyncreaddir");
	if (fsopt->flags & CEPH_MOUNT_OPT_DCACHE)
		seq_puts(m, ",dcache");
	else
		seq_puts(m, ",nodcache");
	if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE)
		seq_puts(m, ",fsc");
	else
		seq_puts(m, ",nofsc");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	if (fsopt->sb_flags & MS_POSIXACL)
		seq_puts(m, ",acl");
	else
		seq_puts(m, ",noacl");
#endif

	if (fsopt->wsize)
		seq_printf(m, ",wsize=%d", fsopt->wsize);
	if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
		seq_printf(m, ",rsize=%d", fsopt->rsize);
	if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
		seq_printf(m, ",rasize=%d", fsopt->rasize);
	if (fsopt->congestion_kb != default_congestion_kb())
		seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
	if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_min=%d",
			   fsopt->caps_wanted_delay_min);
	if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_max=%d",
			   fsopt->caps_wanted_delay_max);
	if (fsopt->cap_release_safety != CEPH_CAP_RELEASE_SAFETY_DEFAULT)
		seq_printf(m, ",cap_release_safety=%d",
			   fsopt->cap_release_safety);
	if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
		seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
	if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
		seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
	if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
		seq_printf(m, ",snapdirname=%s", fsopt->snapdir_name);

	return 0;
}
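
/*
 * With the options above, a /proc/mounts entry might look roughly like
 * (illustrative values only):
 *
 *   1.2.3.4:6789:/ /mnt/ceph ceph rw,relatime,name=admin,acl,wsize=16777216 0 0
 *
 * where the flags before the ceph-specific options come from the VFS and
 * the client-level options are emitted by ceph_print_client_options().
 */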

/*
 * handle any mon messages the standard library doesn't understand.
 * return error if we don't either.
 */
static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
{
	struct ceph_fs_client *fsc = client->private;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(fsc->mdsc, msg);
		return 0;

	default:
		return -1;
	}
}

/*
 * create a new fs client
 */
static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
					       struct ceph_options *opt)
{
	struct ceph_fs_client *fsc;
	const u64 supported_features =
		CEPH_FEATURE_FLOCK |
		CEPH_FEATURE_DIRLAYOUTHASH |
		CEPH_FEATURE_MDS_INLINE_DATA;
	const u64 required_features = 0;
	int page_count;
	size_t size;
	int err = -ENOMEM;

	fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
	if (!fsc)
		return ERR_PTR(-ENOMEM);

	fsc->client = ceph_create_client(opt, fsc, supported_features,
					 required_features);
	if (IS_ERR(fsc->client)) {
		err = PTR_ERR(fsc->client);
		goto fail;
	}
	fsc->client->extra_mon_dispatch = extra_mon_dispatch;
	fsc->client->monc.want_mdsmap = 1;

	fsc->mount_options = fsopt;

	fsc->sb = NULL;
	fsc->mount_state = CEPH_MOUNT_MOUNTING;

	atomic_long_set(&fsc->writeback_count, 0);

	err = bdi_init(&fsc->backing_dev_info);
	if (err < 0)
		goto fail_client;

	err = -ENOMEM;
	/*
	 * The number of concurrent works can be high but they don't need
	 * to be processed in parallel, limit concurrency.
	 */
	fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
	if (fsc->wb_wq == NULL)
		goto fail_bdi;
	fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
	if (fsc->pg_inv_wq == NULL)
		goto fail_wb_wq;
	fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
	if (fsc->trunc_wq == NULL)
		goto fail_pg_inv_wq;

	/* set up mempools */
	err = -ENOMEM;
	page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT;
	size = sizeof (struct page *) * (page_count ? page_count : 1);
	fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
	if (!fsc->wb_pagevec_pool)
		goto fail_trunc_wq;

	/* setup fscache */
	if ((fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) &&
	    (ceph_fscache_register_fs(fsc) != 0))
		goto fail_fscache;

	/* caps */
	fsc->min_caps = fsopt->max_readdir;

	return fsc;

fail_fscache:
	ceph_fscache_unregister_fs(fsc);
fail_trunc_wq:
	destroy_workqueue(fsc->trunc_wq);
fail_pg_inv_wq:
	destroy_workqueue(fsc->pg_inv_wq);
fail_wb_wq:
	destroy_workqueue(fsc->wb_wq);
fail_bdi:
	bdi_destroy(&fsc->backing_dev_info);
fail_client:
	ceph_destroy_client(fsc->client);
fail:
	kfree(fsc);
	return ERR_PTR(err);
}

static void destroy_fs_client(struct ceph_fs_client *fsc)
{
	dout("destroy_fs_client %p\n", fsc);

	ceph_fscache_unregister_fs(fsc);

	destroy_workqueue(fsc->wb_wq);
	destroy_workqueue(fsc->pg_inv_wq);
	destroy_workqueue(fsc->trunc_wq);

	bdi_destroy(&fsc->backing_dev_info);

	mempool_destroy(fsc->wb_pagevec_pool);

	destroy_mount_options(fsc->mount_options);

	ceph_fs_debugfs_cleanup(fsc);

	ceph_destroy_client(fsc->client);

	kfree(fsc);
	dout("destroy_fs_client %p done\n", fsc);
}

/*
 * caches
 */
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;

static void ceph_inode_init_once(void *foo)
{
	struct ceph_inode_info *ci = foo;
	inode_init_once(&ci->vfs_inode);
}

static int __init init_caches(void)
{
	int error = -ENOMEM;

	ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
				      sizeof(struct ceph_inode_info),
				      __alignof__(struct ceph_inode_info),
				      (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
				      ceph_inode_init_once);
	if (ceph_inode_cachep == NULL)
		return -ENOMEM;

	ceph_cap_cachep = KMEM_CACHE(ceph_cap,
				     SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (ceph_cap_cachep == NULL)
		goto bad_cap;

	ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
					SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (ceph_dentry_cachep == NULL)
		goto bad_dentry;

	ceph_file_cachep = KMEM_CACHE(ceph_file_info,
				      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (ceph_file_cachep == NULL)
		goto bad_file;

	if ((error = ceph_fscache_register()))
		goto bad_file;

	return 0;
bad_file:
	kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
	kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
	kmem_cache_destroy(ceph_inode_cachep);
	return error;
}

static void destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();

	kmem_cache_destroy(ceph_inode_cachep);
	kmem_cache_destroy(ceph_cap_cachep);
	kmem_cache_destroy(ceph_dentry_cachep);
	kmem_cache_destroy(ceph_file_cachep);

	ceph_fscache_unregister();
}


/*
 * ceph_umount_begin - initiate forced umount.  Tear down the
 * mount, skipping steps that may hang while waiting for server(s).
 */
static void ceph_umount_begin(struct super_block *sb)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

	dout("ceph_umount_begin - starting forced umount\n");
	if (!fsc)
		return;
	fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
	return;
}

static const struct super_operations ceph_super_ops = {
	.alloc_inode	= ceph_alloc_inode,
	.destroy_inode	= ceph_destroy_inode,
	.write_inode	= ceph_write_inode,
	.drop_inode	= ceph_drop_inode,
	.sync_fs	= ceph_sync_fs,
	.put_super	= ceph_put_super,
	.show_options	= ceph_show_options,
	.statfs		= ceph_statfs,
	.umount_begin	= ceph_umount_begin,
};

/*
 * Bootstrap mount by opening the root directory.  Note the mount
 * @started time from caller, and time out if this takes too long.
 */
static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
				       const char *path,
				       unsigned long started)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req = NULL;
	int err;
	struct dentry *root;

	/* open dir */
	dout("open_root_inode opening '%s'\n", path);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_path1 = kstrdup(path, GFP_NOFS);
	if (!req->r_path1) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}

	req->r_ino1.ino = CEPH_INO_ROOT;
	req->r_ino1.snap = CEPH_NOSNAP;
	req->r_started = started;
	req->r_timeout = fsc->client->options->mount_timeout * HZ;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_num_caps = 2;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err == 0) {
		struct inode *inode = req->r_target_inode;
		req->r_target_inode = NULL;
		dout("open_root_inode success\n");
		if (ceph_ino(inode) == CEPH_INO_ROOT &&
		    fsc->sb->s_root == NULL) {
			root = d_make_root(inode);
			if (!root) {
				root = ERR_PTR(-ENOMEM);
				goto out;
			}
		} else {
			root = d_obtain_root(inode);
		}
		ceph_init_dentry(root);
		dout("open_root_inode success, root dentry is %p\n", root);
	} else {
		root = ERR_PTR(err);
	}
out:
	ceph_mdsc_put_request(req);
	return root;
}


/*
 * mount: join the ceph cluster, and open root directory.
 */
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
				      const char *path)
{
	int err;
	unsigned long started = jiffies;  /* note the start time */
	struct dentry *root;
	int first = 0;   /* first vfsmount for this super_block */

	dout("mount start\n");
	mutex_lock(&fsc->client->mount_mutex);

	err = __ceph_open_session(fsc->client, started);
	if (err < 0)
		goto out;

	dout("mount opening root\n");
	root = open_root_dentry(fsc, "", started);
	if (IS_ERR(root)) {
		err = PTR_ERR(root);
		goto out;
	}
	if (fsc->sb->s_root) {
		dput(root);
	} else {
		fsc->sb->s_root = root;
		first = 1;

		err = ceph_fs_debugfs_init(fsc);
		if (err < 0)
			goto fail;
	}

	if (path[0] == 0) {
		dget(root);
	} else {
		dout("mount opening base mountpoint\n");
		root = open_root_dentry(fsc, path, started);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto fail;
		}
	}

	fsc->mount_state = CEPH_MOUNT_MOUNTED;
	dout("mount success\n");
	mutex_unlock(&fsc->client->mount_mutex);
	return root;

out:
	mutex_unlock(&fsc->client->mount_mutex);
	return ERR_PTR(err);

fail:
	if (first) {
		dput(fsc->sb->s_root);
		fsc->sb->s_root = NULL;
	}
	goto out;
}

static int ceph_set_super(struct super_block *s, void *data)
{
	struct ceph_fs_client *fsc = data;
	int ret;

	dout("set_super %p data %p\n", s, data);

	s->s_flags = fsc->mount_options->sb_flags;
	s->s_maxbytes = 1ULL << 40;  /* temp value until we get mdsmap */

	s->s_xattr = ceph_xattr_handlers;
	s->s_fs_info = fsc;
	fsc->sb = s;

	s->s_op = &ceph_super_ops;
	s->s_export_op = &ceph_export_ops;

	s->s_time_gran = 1000;  /* 1000 ns == 1 us */

	ret = set_anon_super(s, NULL);  /* what is that second arg for? */
	if (ret != 0)
		goto fail;

	return ret;

fail:
	s->s_fs_info = NULL;
	fsc->sb = NULL;
	return ret;
}

/*
 * share superblock if same fs AND options
 */
static int ceph_compare_super(struct super_block *sb, void *data)
{
	struct ceph_fs_client *new = data;
	struct ceph_mount_options *fsopt = new->mount_options;
	struct ceph_options *opt = new->client->options;
	struct ceph_fs_client *other = ceph_sb_to_client(sb);

	dout("ceph_compare_super %p\n", sb);

	if (compare_mount_options(fsopt, opt, other)) {
		dout("monitor(s)/mount options don't match\n");
		return 0;
	}
	if ((opt->flags & CEPH_OPT_FSID) &&
	    ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
		dout("fsid doesn't match\n");
		return 0;
	}
	if (fsopt->sb_flags != other->mount_options->sb_flags) {
		dout("flags differ\n");
		return 0;
	}
	return 1;
}
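
/*
 * ceph_compare_super() is the "test" callback handed to sget() in
 * ceph_mount() below: returning 1 means the new mount can share an
 * existing ceph superblock instead of setting up a second client for
 * the same cluster and options.
 */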

/*
 * construct our own bdi so we can control readahead, etc.
 */
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

static int ceph_register_bdi(struct super_block *sb,
			     struct ceph_fs_client *fsc)
{
	int err;

	/* set ra_pages based on rasize mount option? */
	if (fsc->mount_options->rasize >= PAGE_CACHE_SIZE)
		fsc->backing_dev_info.ra_pages =
			(fsc->mount_options->rasize + PAGE_CACHE_SIZE - 1)
			>> PAGE_SHIFT;
	else
		fsc->backing_dev_info.ra_pages =
			VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE;

	err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
			   atomic_long_inc_return(&bdi_seq));
	if (!err)
		sb->s_bdi = &fsc->backing_dev_info;
	return err;
}

static struct dentry *ceph_mount(struct file_system_type *fs_type,
				 int flags, const char *dev_name, void *data)
{
	struct super_block *sb;
	struct ceph_fs_client *fsc;
	struct dentry *res;
	int err;
	int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
	const char *path = NULL;
	struct ceph_mount_options *fsopt = NULL;
	struct ceph_options *opt = NULL;

	dout("ceph_mount\n");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	flags |= MS_POSIXACL;
#endif
	err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out_final;
	}

	/* create client (which we may/may not use) */
	fsc = create_fs_client(fsopt, opt);
	if (IS_ERR(fsc)) {
		res = ERR_CAST(fsc);
		destroy_mount_options(fsopt);
		ceph_destroy_options(opt);
		goto out_final;
	}

	err = ceph_mdsc_init(fsc);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out;
	}

	if (ceph_test_opt(fsc->client, NOSHARE))
		compare_super = NULL;
	sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc);
	if (IS_ERR(sb)) {
		res = ERR_CAST(sb);
		goto out;
	}

	if (ceph_sb_to_client(sb) != fsc) {
		ceph_mdsc_destroy(fsc);
		destroy_fs_client(fsc);
		fsc = ceph_sb_to_client(sb);
		dout("get_sb got existing client %p\n", fsc);
	} else {
		dout("get_sb using new client %p\n", fsc);
		err = ceph_register_bdi(sb, fsc);
		if (err < 0) {
			res = ERR_PTR(err);
			goto out_splat;
		}
	}

	res = ceph_real_mount(fsc, path);
	if (IS_ERR(res))
		goto out_splat;
	dout("root %p inode %p ino %llx.%llx\n", res,
	     res->d_inode, ceph_vinop(res->d_inode));
	return res;

out_splat:
	ceph_mdsc_close_sessions(fsc->mdsc);
	deactivate_locked_super(sb);
	goto out_final;

out:
	ceph_mdsc_destroy(fsc);
	destroy_fs_client(fsc);
out_final:
	dout("ceph_mount fail %ld\n", PTR_ERR(res));
	return res;
}

static void ceph_kill_sb(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);
	dev_t dev = s->s_dev;

	dout("kill_sb %p\n", s);

	ceph_mdsc_pre_umount(fsc->mdsc);
	generic_shutdown_super(s);
	ceph_mdsc_destroy(fsc);

	destroy_fs_client(fsc);
	free_anon_bdev(dev);
}

static struct file_system_type ceph_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ceph",
	.mount		= ceph_mount,
	.kill_sb	= ceph_kill_sb,
	.fs_flags	= FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("ceph");

static int __init init_ceph(void)
{
	int ret = init_caches();
	if (ret)
		goto out;

	ceph_flock_init();
	ceph_xattr_init();
	ret = ceph_snap_init();
	if (ret)
		goto out_xattr;
	ret = register_filesystem(&ceph_fs_type);
	if (ret)
		goto out_snap;

	pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);

	return 0;

out_snap:
	ceph_snap_exit();
out_xattr:
	ceph_xattr_exit();
	destroy_caches();
out:
	return ret;
}

static void __exit exit_ceph(void)
{
	dout("exit_ceph\n");
	unregister_filesystem(&ceph_fs_type);
	ceph_snap_exit();
	ceph_xattr_exit();
	destroy_caches();
}

module_init(init_ceph);
module_exit(exit_ceph);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");