fs/ceph/dir.c
1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/spinlock.h>
4 #include <linux/fs_struct.h>
5 #include <linux/namei.h>
6 #include <linux/slab.h>
7 #include <linux/sched.h>
8
9 #include "super.h"
10 #include "mds_client.h"
11
12 /*
13 * Directory operations: readdir, lookup, create, link, unlink,
14 * rename, etc.
15 */
16
17 /*
18 * Ceph MDS operations are specified in terms of a base ino and
19 * relative path. Thus, the client can specify an operation on a
20 * specific inode (e.g., a getattr due to fstat(2)), or as a path
21 * relative to, say, the root directory.
22 *
23 * Normally, we limit ourselves to strict inode ops (no path component)
24 * or dentry operations (a single path component relative to an ino). The
25 * exception to this is open_root_dentry(), which will open the mount
26 * point by name.
27 */
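/*
 * For example, a lookup of the name "foo" in some directory is sent as
 * that directory's ino plus the single path component "foo", not as a
 * full path from the filesystem root.
 */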
28
29 const struct inode_operations ceph_dir_iops;
30 const struct file_operations ceph_dir_fops;
31 const struct dentry_operations ceph_dentry_ops;
32
33 /*
34 * Initialize ceph dentry state.
35 */
36 int ceph_init_dentry(struct dentry *dentry)
37 {
38 struct ceph_dentry_info *di;
39
40 if (dentry->d_fsdata)
41 return 0;
42
43 if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
44 dentry->d_op = &ceph_dentry_ops;
45 else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
46 dentry->d_op = &ceph_snapdir_dentry_ops;
47 else
48 dentry->d_op = &ceph_snap_dentry_ops;
49
50 di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
51 if (!di)
52 return -ENOMEM; /* oh well */
53
54 spin_lock(&dentry->d_lock);
55 if (dentry->d_fsdata) {
56 /* lost a race */
57 kmem_cache_free(ceph_dentry_cachep, di);
58 goto out_unlock;
59 }
60 di->dentry = dentry;
61 di->lease_session = NULL;
62 dentry->d_fsdata = di;
63 dentry->d_time = jiffies;
64 ceph_dentry_lru_add(dentry);
65 out_unlock:
66 spin_unlock(&dentry->d_lock);
67 return 0;
68 }
69
70
71
72 /*
73 * for readdir, we encode the directory frag and offset within that
74 * frag into f_pos.
75 */
76 static unsigned fpos_frag(loff_t p)
77 {
78 return p >> 32;
79 }
80 static unsigned fpos_off(loff_t p)
81 {
82 return p & 0xffffffff;
83 }
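/*
 * So a directory f_pos is a 64-bit value: the frag in the high 32 bits
 * and the offset within that frag in the low 32 bits.  ceph_make_fpos()
 * builds such a value; fpos_frag()/fpos_off() take it apart.
 */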
84
85 /*
86 * When possible, we try to satisfy a readdir by peeking at the
87 * dcache. We make this work by carefully ordering dentries on
88 * d_u.d_child when we initially get results back from the MDS, and
89 * falling back to a "normal" sync readdir if any dentries in the dir
90 * are dropped.
91 *
92 * I_COMPLETE indicates we have all dentries in the dir. It is
93 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
94 * the MDS if/when the directory is modified).
95 */
96 static int __dcache_readdir(struct file *filp,
97 void *dirent, filldir_t filldir)
98 {
99 struct ceph_file_info *fi = filp->private_data;
100 struct dentry *parent = filp->f_dentry;
101 struct inode *dir = parent->d_inode;
102 struct list_head *p;
103 struct dentry *dentry, *last;
104 struct ceph_dentry_info *di;
105 int err = 0;
106
107 /* claim ref on last dentry we returned */
108 last = fi->dentry;
109 fi->dentry = NULL;
110
111 dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
112 last);
113
114 spin_lock(&dcache_lock);
115
116 /* start at beginning? */
117 if (filp->f_pos == 2 || (last &&
118 filp->f_pos < ceph_dentry(last)->offset)) {
119 if (list_empty(&parent->d_subdirs))
120 goto out_unlock;
121 p = parent->d_subdirs.prev;
122 dout(" initial p %p/%p\n", p->prev, p->next);
123 } else {
124 p = last->d_u.d_child.prev;
125 }
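/*
 * Walk the child list from the tail (->prev): when the MDS reply was
 * processed, each dentry was moved to the head of the parent's
 * d_subdirs in readdir order (see the ordering note above), so
 * traversing backwards visits entries in increasing di->offset order.
 */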
126
127 more:
128 dentry = list_entry(p, struct dentry, d_u.d_child);
129 di = ceph_dentry(dentry);
130 while (1) {
131 dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
132 d_unhashed(dentry) ? "!hashed" : "hashed",
133 parent->d_subdirs.prev, parent->d_subdirs.next);
134 if (p == &parent->d_subdirs) {
135 fi->at_end = 1;
136 goto out_unlock;
137 }
138 if (!d_unhashed(dentry) && dentry->d_inode &&
139 ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
140 ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
141 filp->f_pos <= di->offset)
142 break;
143 dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
144 dentry->d_name.len, dentry->d_name.name, di->offset,
145 filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
146 !dentry->d_inode ? " null" : "");
147 p = p->prev;
148 dentry = list_entry(p, struct dentry, d_u.d_child);
149 di = ceph_dentry(dentry);
150 }
151
152 atomic_inc(&dentry->d_count);
153 spin_unlock(&dcache_lock);
154
155 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
156 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
157 filp->f_pos = di->offset;
158 err = filldir(dirent, dentry->d_name.name,
159 dentry->d_name.len, di->offset,
160 dentry->d_inode->i_ino,
161 dentry->d_inode->i_mode >> 12);
162
163 if (last) {
164 if (err < 0) {
165 /* remember our position */
166 fi->dentry = last;
167 fi->next_offset = di->offset;
168 } else {
169 dput(last);
170 }
171 }
172 last = dentry;
173
174 if (err < 0)
175 goto out;
176
177 filp->f_pos++;
178
179 /* make sure a dentry wasn't dropped while we didn't have dcache_lock */
180 if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
181 dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
182 err = -EAGAIN;
183 goto out;
184 }
185
186 spin_lock(&dcache_lock);
187 p = p->prev; /* advance to next dentry */
188 goto more;
189
190 out_unlock:
191 spin_unlock(&dcache_lock);
192 out:
193 if (last)
194 dput(last);
195 return err;
196 }
197
198 /*
199 * make note of the last dentry we read, so we can
200 * continue at the same lexicographical point,
201 * regardless of what dir changes take place on the
202 * server.
203 */
204 static int note_last_dentry(struct ceph_file_info *fi, const char *name,
205 int len)
206 {
207 kfree(fi->last_name);
208 fi->last_name = kmalloc(len+1, GFP_NOFS);
209 if (!fi->last_name)
210 return -ENOMEM;
211 memcpy(fi->last_name, name, len);
212 fi->last_name[len] = 0;
213 dout("note_last_dentry '%s'\n", fi->last_name);
214 return 0;
215 }
216
217 static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
218 {
219 struct ceph_file_info *fi = filp->private_data;
220 struct inode *inode = filp->f_dentry->d_inode;
221 struct ceph_inode_info *ci = ceph_inode(inode);
222 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
223 struct ceph_mds_client *mdsc = fsc->mdsc;
224 unsigned frag = fpos_frag(filp->f_pos);
225 int off = fpos_off(filp->f_pos);
226 int err;
227 u32 ftype;
228 struct ceph_mds_reply_info_parsed *rinfo;
229 const int max_entries = fsc->mount_options->max_readdir;
230 const int max_bytes = fsc->mount_options->max_readdir_bytes;
231
232 dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
233 if (fi->at_end)
234 return 0;
235
236 /* always start with . and .. */
237 if (filp->f_pos == 0) {
238 /* note dir version at start of readdir so we can tell
239 * if any dentries get dropped */
240 fi->dir_release_count = ci->i_release_count;
241
242 dout("readdir off 0 -> '.'\n");
243 if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
244 inode->i_ino, inode->i_mode >> 12) < 0)
245 return 0;
246 filp->f_pos = 1;
247 off = 1;
248 }
249 if (filp->f_pos == 1) {
250 dout("readdir off 1 -> '..'\n");
251 if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
252 filp->f_dentry->d_parent->d_inode->i_ino,
253 inode->i_mode >> 12) < 0)
254 return 0;
255 filp->f_pos = 2;
256 off = 2;
257 }
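/*
 * Offsets 0 and 1 are reserved for "." and ".."; entries from the MDS
 * (or the dcache) start at f_pos 2.
 */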
258
259 /* can we use the dcache? */
260 spin_lock(&inode->i_lock);
261 if ((filp->f_pos == 2 || fi->dentry) &&
262 !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
263 ceph_snap(inode) != CEPH_SNAPDIR &&
264 (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
265 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
266 spin_unlock(&inode->i_lock);
267 err = __dcache_readdir(filp, dirent, filldir);
268 if (err != -EAGAIN)
269 return err;
270 } else {
271 spin_unlock(&inode->i_lock);
272 }
273 if (fi->dentry) {
274 err = note_last_dentry(fi, fi->dentry->d_name.name,
275 fi->dentry->d_name.len);
276 if (err)
277 return err;
278 dput(fi->dentry);
279 fi->dentry = NULL;
280 }
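/*
 * At this point fi->last_name records where the dcache pass (or a
 * previous chunk) left off; the MDS readdir below passes it in
 * r_path2 so the listing resumes after that entry.
 */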
281
282 /* proceed with a normal readdir */
283
284 more:
285 /* do we have the correct frag content buffered? */
286 if (fi->frag != frag || fi->last_readdir == NULL) {
287 struct ceph_mds_request *req;
288 int op = ceph_snap(inode) == CEPH_SNAPDIR ?
289 CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
290
291 /* discard old result, if any */
292 if (fi->last_readdir) {
293 ceph_mdsc_put_request(fi->last_readdir);
294 fi->last_readdir = NULL;
295 }
296
297 /* requery frag tree, as the frag topology may have changed */
298 frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);
299
300 dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
301 ceph_vinop(inode), frag, fi->last_name);
302 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
303 if (IS_ERR(req))
304 return PTR_ERR(req);
305 req->r_inode = igrab(inode);
306 req->r_dentry = dget(filp->f_dentry);
307 /* hints to request -> mds selection code */
308 req->r_direct_mode = USE_AUTH_MDS;
309 req->r_direct_hash = ceph_frag_value(frag);
310 req->r_direct_is_hash = true;
311 req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
312 req->r_readdir_offset = fi->next_offset;
313 req->r_args.readdir.frag = cpu_to_le32(frag);
314 req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
315 req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
316 req->r_num_caps = max_entries + 1;
317 err = ceph_mdsc_do_request(mdsc, NULL, req);
318 if (err < 0) {
319 ceph_mdsc_put_request(req);
320 return err;
321 }
322 dout("readdir got and parsed readdir result=%d"
323 " on frag %x, end=%d, complete=%d\n", err, frag,
324 (int)req->r_reply_info.dir_end,
325 (int)req->r_reply_info.dir_complete);
326
327 if (!req->r_did_prepopulate) {
328 dout("readdir !did_prepopulate");
329 fi->dir_release_count--; /* preclude I_COMPLETE */
330 }
331
332 /* note next offset and last dentry name */
333 fi->offset = fi->next_offset;
334 fi->last_readdir = req;
335
336 if (req->r_reply_info.dir_end) {
337 kfree(fi->last_name);
338 fi->last_name = NULL;
339 if (ceph_frag_is_rightmost(frag))
340 fi->next_offset = 2;
341 else
342 fi->next_offset = 0;
343 } else {
344 rinfo = &req->r_reply_info;
345 err = note_last_dentry(fi,
346 rinfo->dir_dname[rinfo->dir_nr-1],
347 rinfo->dir_dname_len[rinfo->dir_nr-1]);
348 if (err)
349 return err;
350 fi->next_offset += rinfo->dir_nr;
351 }
352 }
353
354 rinfo = &fi->last_readdir->r_reply_info;
355 dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
356 rinfo->dir_nr, off, fi->offset);
357 while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
358 u64 pos = ceph_make_fpos(frag, off);
359 struct ceph_mds_reply_inode *in =
360 rinfo->dir_in[off - fi->offset].in;
361 dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
362 off, off - fi->offset, rinfo->dir_nr, pos,
363 rinfo->dir_dname_len[off - fi->offset],
364 rinfo->dir_dname[off - fi->offset], in);
365 BUG_ON(!in);
366 ftype = le32_to_cpu(in->mode) >> 12;
367 if (filldir(dirent,
368 rinfo->dir_dname[off - fi->offset],
369 rinfo->dir_dname_len[off - fi->offset],
370 pos,
371 le64_to_cpu(in->ino),
372 ftype) < 0) {
373 dout("filldir stopping us...\n");
374 return 0;
375 }
376 off++;
377 filp->f_pos = pos + 1;
378 }
379
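/*
 * If last_name is still set, the last reply did not mark the end of
 * this frag; drop the buffered chunk and loop back to fetch the next
 * one, starting after last_name.
 */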
380 if (fi->last_name) {
381 ceph_mdsc_put_request(fi->last_readdir);
382 fi->last_readdir = NULL;
383 goto more;
384 }
385
386 /* more frags? */
387 if (!ceph_frag_is_rightmost(frag)) {
388 frag = ceph_frag_next(frag);
389 off = 0;
390 filp->f_pos = ceph_make_fpos(frag, off);
391 dout("readdir next frag is %x\n", frag);
392 goto more;
393 }
394 fi->at_end = 1;
395
396 /*
397 * if dir_release_count still matches the dir, no dentries
398 * were released during the whole readdir, and we should have
399 * the complete dir contents in our cache.
400 */
401 spin_lock(&inode->i_lock);
402 if (ci->i_release_count == fi->dir_release_count) {
403 dout(" marking %p complete\n", inode);
404 ci->i_ceph_flags |= CEPH_I_COMPLETE;
405 ci->i_max_offset = filp->f_pos;
406 }
407 spin_unlock(&inode->i_lock);
408
409 dout("readdir %p filp %p done.\n", inode, filp);
410 return 0;
411 }
412
413 static void reset_readdir(struct ceph_file_info *fi)
414 {
415 if (fi->last_readdir) {
416 ceph_mdsc_put_request(fi->last_readdir);
417 fi->last_readdir = NULL;
418 }
419 kfree(fi->last_name);
420 fi->last_name = NULL;
421 fi->next_offset = 2; /* compensate for . and .. */
422 if (fi->dentry) {
423 dput(fi->dentry);
424 fi->dentry = NULL;
425 }
426 fi->at_end = 0;
427 }
428
429 static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
430 {
431 struct ceph_file_info *fi = file->private_data;
432 struct inode *inode = file->f_mapping->host;
433 loff_t old_offset = offset;
434 loff_t retval;
435
436 mutex_lock(&inode->i_mutex);
437 switch (origin) {
438 case SEEK_END:
439 offset += inode->i_size + 2; /* FIXME */
440 break;
441 case SEEK_CUR:
442 offset += file->f_pos;
443 }
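/* SEEK_SET (and any other origin) uses the offset as given */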
444 retval = -EINVAL;
445 if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
446 if (offset != file->f_pos) {
447 file->f_pos = offset;
448 file->f_version = 0;
449 fi->at_end = 0;
450 }
451 retval = offset;
452
453 /*
454 * discard buffered readdir content on seekdir(0), or
455 * seek to new frag, or seek prior to current chunk.
456 */
457 if (offset == 0 ||
458 fpos_frag(offset) != fpos_frag(old_offset) ||
459 fpos_off(offset) < fi->offset) {
460 dout("dir_llseek dropping %p content\n", file);
461 reset_readdir(fi);
462 }
463
464 /* forward seek: invalidate our dir_release_count copy so I_COMPLETE won't be set */
465 if (offset > old_offset)
466 fi->dir_release_count--;
467 }
468 mutex_unlock(&inode->i_mutex);
469 return retval;
470 }
471
472 /*
473 * Process result of a lookup/open request.
474 *
475 * Mainly, make sure we return the final req->r_dentry (if it already
476 * existed) in place of the original VFS-provided dentry when they
477 * differ.
478 *
479 * Gracefully handle the case where the MDS replies with -ENOENT and
480 * no trace (which it may do, at its discretion, e.g., if it doesn't
481 * care to issue a lease on the negative dentry).
482 */
483 struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
484 struct dentry *dentry, int err)
485 {
486 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
487 struct inode *parent = dentry->d_parent->d_inode;
488
489 /* .snap dir? */
490 if (err == -ENOENT &&
491 strcmp(dentry->d_name.name,
492 fsc->mount_options->snapdir_name) == 0) {
493 struct inode *inode = ceph_get_snapdir(parent);
494 dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
495 dentry, dentry->d_name.len, dentry->d_name.name, inode);
496 BUG_ON(!d_unhashed(dentry));
497 d_add(dentry, inode);
498 err = 0;
499 }
500
501 if (err == -ENOENT) {
502 /* no trace? */
503 err = 0;
504 if (!req->r_reply_info.head->is_dentry) {
505 dout("ENOENT and no trace, dentry %p inode %p\n",
506 dentry, dentry->d_inode);
507 if (dentry->d_inode) {
508 d_drop(dentry);
509 err = -ENOENT;
510 } else {
511 d_add(dentry, NULL);
512 }
513 }
514 }
515 if (err)
516 dentry = ERR_PTR(err);
517 else if (dentry != req->r_dentry)
518 dentry = dget(req->r_dentry); /* we got spliced */
519 else
520 dentry = NULL;
521 return dentry;
522 }
523
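/*
 * ".ceph" directly under the root is special: ceph_lookup() must not
 * conclude ENOENT for it locally (and __dcache_readdir() skips entries
 * with ino CEPH_INO_CEPH).
 */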
524 static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
525 {
526 return ceph_ino(inode) == CEPH_INO_ROOT &&
527 strncmp(dentry->d_name.name, ".ceph", 5) == 0;
528 }
529
530 /*
531 * Look up a single dir entry. If there is a lookup intent, inform
532 * the MDS so that it gets our 'caps wanted' value in a single op.
533 */
534 static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
535 struct nameidata *nd)
536 {
537 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
538 struct ceph_mds_client *mdsc = fsc->mdsc;
539 struct ceph_mds_request *req;
540 int op;
541 int err;
542
543 dout("lookup %p dentry %p '%.*s'\n",
544 dir, dentry, dentry->d_name.len, dentry->d_name.name);
545
546 if (dentry->d_name.len > NAME_MAX)
547 return ERR_PTR(-ENAMETOOLONG);
548
549 err = ceph_init_dentry(dentry);
550 if (err < 0)
551 return ERR_PTR(err);
552
553 /* open (but not create!) intent? */
554 if (nd &&
555 (nd->flags & LOOKUP_OPEN) &&
556 (nd->flags & LOOKUP_CONTINUE) == 0 && /* only open last component */
557 !(nd->intent.open.flags & O_CREAT)) {
558 int mode = nd->intent.open.create_mode & ~current->fs->umask;
559 return ceph_lookup_open(dir, dentry, nd, mode, 1);
560 }
561
562 /* can we conclude ENOENT locally? */
563 if (dentry->d_inode == NULL) {
564 struct ceph_inode_info *ci = ceph_inode(dir);
565 struct ceph_dentry_info *di = ceph_dentry(dentry);
566
567 spin_lock(&dir->i_lock);
568 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
569 if (strncmp(dentry->d_name.name,
570 fsc->mount_options->snapdir_name,
571 dentry->d_name.len) &&
572 !is_root_ceph_dentry(dir, dentry) &&
573 (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
574 (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
575 spin_unlock(&dir->i_lock);
576 dout(" dir %p complete, -ENOENT\n", dir);
577 d_add(dentry, NULL);
578 di->lease_shared_gen = ci->i_shared_gen;
579 return NULL;
580 }
581 spin_unlock(&dir->i_lock);
582 }
583
584 op = ceph_snap(dir) == CEPH_SNAPDIR ?
585 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
586 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
587 if (IS_ERR(req))
588 return ERR_CAST(req);
589 req->r_dentry = dget(dentry);
590 req->r_num_caps = 2;
591 /* we only need inode linkage */
592 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
593 req->r_locked_dir = dir;
594 err = ceph_mdsc_do_request(mdsc, NULL, req);
595 dentry = ceph_finish_lookup(req, dentry, err);
596 ceph_mdsc_put_request(req); /* will dput(dentry) */
597 dout("lookup result=%p\n", dentry);
598 return dentry;
599 }
600
601 /*
602 * If we do a create but get no trace back from the MDS, follow up with
603 * a lookup (the VFS expects us to link up the provided dentry).
604 */
605 int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
606 {
607 struct dentry *result = ceph_lookup(dir, dentry, NULL);
608
609 if (result && !IS_ERR(result)) {
610 /*
611 * We created the item, then did a lookup, and found
612 * it was already linked to another inode we already
613 * had in our cache (and thus got spliced). Link our
614 * dentry to that inode, but don't hash it, just in
615 * case the VFS wants to dereference it.
616 */
617 BUG_ON(!result->d_inode);
618 d_instantiate(dentry, result->d_inode);
619 return 0;
620 }
621 return PTR_ERR(result);
622 }
623
624 static int ceph_mknod(struct inode *dir, struct dentry *dentry,
625 int mode, dev_t rdev)
626 {
627 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
628 struct ceph_mds_client *mdsc = fsc->mdsc;
629 struct ceph_mds_request *req;
630 int err;
631
632 if (ceph_snap(dir) != CEPH_NOSNAP)
633 return -EROFS;
634
635 dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
636 dir, dentry, mode, rdev);
637 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
638 if (IS_ERR(req)) {
639 d_drop(dentry);
640 return PTR_ERR(req);
641 }
642 req->r_dentry = dget(dentry);
643 req->r_num_caps = 2;
644 req->r_locked_dir = dir;
645 req->r_args.mknod.mode = cpu_to_le32(mode);
646 req->r_args.mknod.rdev = cpu_to_le32(rdev);
647 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
648 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
649 err = ceph_mdsc_do_request(mdsc, dir, req);
650 if (!err && !req->r_reply_info.head->is_dentry)
651 err = ceph_handle_notrace_create(dir, dentry);
652 ceph_mdsc_put_request(req);
653 if (err)
654 d_drop(dentry);
655 return err;
656 }
657
658 static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
659 struct nameidata *nd)
660 {
661 dout("create in dir %p dentry %p name '%.*s'\n",
662 dir, dentry, dentry->d_name.len, dentry->d_name.name);
663
664 if (ceph_snap(dir) != CEPH_NOSNAP)
665 return -EROFS;
666
667 if (nd) {
668 BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
669 dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
670 /* hrm, what should i do here if we get aliased? */
671 if (IS_ERR(dentry))
672 return PTR_ERR(dentry);
673 return 0;
674 }
675
676 /* fall back to mknod */
677 return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
678 }
679
680 static int ceph_symlink(struct inode *dir, struct dentry *dentry,
681 const char *dest)
682 {
683 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
684 struct ceph_mds_client *mdsc = fsc->mdsc;
685 struct ceph_mds_request *req;
686 int err;
687
688 if (ceph_snap(dir) != CEPH_NOSNAP)
689 return -EROFS;
690
691 dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
692 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
693 if (IS_ERR(req)) {
694 d_drop(dentry);
695 return PTR_ERR(req);
696 }
697 req->r_dentry = dget(dentry);
698 req->r_num_caps = 2;
699 req->r_path2 = kstrdup(dest, GFP_NOFS);
700 req->r_locked_dir = dir;
701 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
702 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
703 err = ceph_mdsc_do_request(mdsc, dir, req);
704 if (!err && !req->r_reply_info.head->is_dentry)
705 err = ceph_handle_notrace_create(dir, dentry);
706 ceph_mdsc_put_request(req);
707 if (err)
708 d_drop(dentry);
709 return err;
710 }
711
712 static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
713 {
714 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
715 struct ceph_mds_client *mdsc = fsc->mdsc;
716 struct ceph_mds_request *req;
717 int err = -EROFS;
718 int op;
719
720 if (ceph_snap(dir) == CEPH_SNAPDIR) {
721 /* mkdir .snap/foo is a MKSNAP */
722 op = CEPH_MDS_OP_MKSNAP;
723 dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
724 dentry->d_name.len, dentry->d_name.name, dentry);
725 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
726 dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
727 op = CEPH_MDS_OP_MKDIR;
728 } else {
729 goto out;
730 }
731 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
732 if (IS_ERR(req)) {
733 err = PTR_ERR(req);
734 goto out;
735 }
736
737 req->r_dentry = dget(dentry);
738 req->r_num_caps = 2;
739 req->r_locked_dir = dir;
740 req->r_args.mkdir.mode = cpu_to_le32(mode);
741 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
742 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
743 err = ceph_mdsc_do_request(mdsc, dir, req);
744 if (!err && !req->r_reply_info.head->is_dentry)
745 err = ceph_handle_notrace_create(dir, dentry);
746 ceph_mdsc_put_request(req);
747 out:
748 if (err < 0)
749 d_drop(dentry);
750 return err;
751 }
752
753 static int ceph_link(struct dentry *old_dentry, struct inode *dir,
754 struct dentry *dentry)
755 {
756 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
757 struct ceph_mds_client *mdsc = fsc->mdsc;
758 struct ceph_mds_request *req;
759 int err;
760
761 if (ceph_snap(dir) != CEPH_NOSNAP)
762 return -EROFS;
763
764 dout("link in dir %p old_dentry %p dentry %p\n", dir,
765 old_dentry, dentry);
766 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
767 if (IS_ERR(req)) {
768 d_drop(dentry);
769 return PTR_ERR(req);
770 }
771 req->r_dentry = dget(dentry);
772 req->r_num_caps = 2;
773 req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
774 req->r_locked_dir = dir;
775 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
776 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
777 err = ceph_mdsc_do_request(mdsc, dir, req);
778 if (err)
779 d_drop(dentry);
780 else if (!req->r_reply_info.head->is_dentry)
781 d_instantiate(dentry, igrab(old_dentry->d_inode));
782 ceph_mdsc_put_request(req);
783 return err;
784 }
785
786 /*
787 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps. If it
788 * looks like the link count will hit 0, drop any other caps (other
789 * than PIN) we don't specifically want (due to the file still being
790 * open).
791 */
792 static int drop_caps_for_unlink(struct inode *inode)
793 {
794 struct ceph_inode_info *ci = ceph_inode(inode);
795 int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
796
797 spin_lock(&inode->i_lock);
798 if (inode->i_nlink == 1) {
799 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
800 ci->i_ceph_flags |= CEPH_I_NODELAY;
801 }
802 spin_unlock(&inode->i_lock);
803 return drop;
804 }
805
806 /*
807 * rmdir and unlink differ only by the metadata op code
808 */
809 static int ceph_unlink(struct inode *dir, struct dentry *dentry)
810 {
811 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
812 struct ceph_mds_client *mdsc = fsc->mdsc;
813 struct inode *inode = dentry->d_inode;
814 struct ceph_mds_request *req;
815 int err = -EROFS;
816 int op;
817
818 if (ceph_snap(dir) == CEPH_SNAPDIR) {
819 /* rmdir .snap/foo is RMSNAP */
820 dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
821 dentry->d_name.name, dentry);
822 op = CEPH_MDS_OP_RMSNAP;
823 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
824 dout("unlink/rmdir dir %p dn %p inode %p\n",
825 dir, dentry, inode);
826 op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
827 CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
828 } else
829 goto out;
830 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
831 if (IS_ERR(req)) {
832 err = PTR_ERR(req);
833 goto out;
834 }
835 req->r_dentry = dget(dentry);
836 req->r_num_caps = 2;
837 req->r_locked_dir = dir;
838 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
839 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
840 req->r_inode_drop = drop_caps_for_unlink(inode);
841 err = ceph_mdsc_do_request(mdsc, dir, req);
842 if (!err && !req->r_reply_info.head->is_dentry)
843 d_delete(dentry);
844 ceph_mdsc_put_request(req);
845 out:
846 return err;
847 }
848
849 static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
850 struct inode *new_dir, struct dentry *new_dentry)
851 {
852 struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
853 struct ceph_mds_client *mdsc = fsc->mdsc;
854 struct ceph_mds_request *req;
855 int err;
856
857 if (ceph_snap(old_dir) != ceph_snap(new_dir))
858 return -EXDEV;
859 if (ceph_snap(old_dir) != CEPH_NOSNAP ||
860 ceph_snap(new_dir) != CEPH_NOSNAP)
861 return -EROFS;
862 dout("rename dir %p dentry %p to dir %p dentry %p\n",
863 old_dir, old_dentry, new_dir, new_dentry);
864 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
865 if (IS_ERR(req))
866 return PTR_ERR(req);
867 req->r_dentry = dget(new_dentry);
868 req->r_num_caps = 2;
869 req->r_old_dentry = dget(old_dentry);
870 req->r_locked_dir = new_dir;
871 req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
872 req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
873 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
874 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
875 /* release LINK_RDCACHE on source inode (mds will lock it) */
876 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
877 if (new_dentry->d_inode)
878 req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
879 err = ceph_mdsc_do_request(mdsc, old_dir, req);
880 if (!err && !req->r_reply_info.head->is_dentry) {
881 /*
882 * Normally d_move() is done by fill_trace (called by
883 * do_request, above). If there is no trace, we need
884 * to do it here.
885 */
886
887 /* d_move screws up d_subdirs order */
888 ceph_i_clear(new_dir, CEPH_I_COMPLETE);
889
890 d_move(old_dentry, new_dentry);
891
892 /* ensure target dentry is invalidated, despite
893 rehashing bug in vfs_rename_dir */
894 ceph_invalidate_dentry_lease(new_dentry);
895 }
896 ceph_mdsc_put_request(req);
897 return err;
898 }
899
900 /*
901 * Ensure a dentry lease will no longer revalidate.
902 */
903 void ceph_invalidate_dentry_lease(struct dentry *dentry)
904 {
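/*
 * Resetting d_time to "now" makes the time_before(jiffies, d_time)
 * check in dentry_lease_is_valid() fail, and zeroing lease_shared_gen
 * breaks the generation match in dir_lease_is_valid().
 */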
905 spin_lock(&dentry->d_lock);
906 dentry->d_time = jiffies;
907 ceph_dentry(dentry)->lease_shared_gen = 0;
908 spin_unlock(&dentry->d_lock);
909 }
910
911 /*
912 * Check if dentry lease is valid. If not, delete the lease. Try to
913 * renew if the lease is more than half up.
914 */
915 static int dentry_lease_is_valid(struct dentry *dentry)
916 {
917 struct ceph_dentry_info *di;
918 struct ceph_mds_session *s;
919 int valid = 0;
920 u32 gen;
921 unsigned long ttl;
922 struct ceph_mds_session *session = NULL;
923 struct inode *dir = NULL;
924 u32 seq = 0;
925
926 spin_lock(&dentry->d_lock);
927 di = ceph_dentry(dentry);
928 if (di && di->lease_session) {
929 s = di->lease_session;
930 spin_lock(&s->s_cap_lock);
931 gen = s->s_cap_gen;
932 ttl = s->s_cap_ttl;
933 spin_unlock(&s->s_cap_lock);
934
935 if (di->lease_gen == gen &&
936 time_before(jiffies, dentry->d_time) &&
937 time_before(jiffies, ttl)) {
938 valid = 1;
939 if (di->lease_renew_after &&
940 time_after(jiffies, di->lease_renew_after)) {
941 /* we should renew */
942 dir = dentry->d_parent->d_inode;
943 session = ceph_get_mds_session(s);
944 seq = di->lease_seq;
945 di->lease_renew_after = 0;
946 di->lease_renew_from = jiffies;
947 }
948 }
949 }
950 spin_unlock(&dentry->d_lock);
951
952 if (session) {
953 ceph_mdsc_lease_send_msg(session, dir, dentry,
954 CEPH_MDS_LEASE_RENEW, seq);
955 ceph_put_mds_session(session);
956 }
957 dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
958 return valid;
959 }
960
961 /*
962 * Check if directory-wide content lease/cap is valid.
963 */
964 static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
965 {
966 struct ceph_inode_info *ci = ceph_inode(dir);
967 struct ceph_dentry_info *di = ceph_dentry(dentry);
968 int valid = 0;
969
970 spin_lock(&dir->i_lock);
971 if (ci->i_shared_gen == di->lease_shared_gen)
972 valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
973 spin_unlock(&dir->i_lock);
974 dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
975 dir, (unsigned)ci->i_shared_gen, dentry,
976 (unsigned)di->lease_shared_gen, valid);
977 return valid;
978 }
979
980 /*
981 * Check if cached dentry can be trusted.
982 */
983 static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
984 {
985 struct inode *dir = dentry->d_parent->d_inode;
986
987 dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
988 dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
989 ceph_dentry(dentry)->offset);
990
991 /* always trust cached snapped dentries, snapdir dentry */
992 if (ceph_snap(dir) != CEPH_NOSNAP) {
993 dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
994 dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
995 goto out_touch;
996 }
997 if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
998 goto out_touch;
999
1000 if (dentry_lease_is_valid(dentry) ||
1001 dir_lease_is_valid(dir, dentry))
1002 goto out_touch;
1003
1004 dout("d_revalidate %p invalid\n", dentry);
1005 d_drop(dentry);
1006 return 0;
1007 out_touch:
1008 ceph_dentry_lru_touch(dentry);
1009 return 1;
1010 }
1011
1012 /*
1013 * When a dentry is released, clear the dir I_COMPLETE if it was part
1014 * of the current dir gen or if this is in the snapshot namespace.
1015 */
1016 static void ceph_dentry_release(struct dentry *dentry)
1017 {
1018 struct ceph_dentry_info *di = ceph_dentry(dentry);
1019 struct inode *parent_inode = NULL;
1020 u64 snapid = CEPH_NOSNAP;
1021
1022 if (!IS_ROOT(dentry)) {
1023 parent_inode = dentry->d_parent->d_inode;
1024 if (parent_inode)
1025 snapid = ceph_snap(parent_inode);
1026 }
1027 dout("dentry_release %p parent %p\n", dentry, parent_inode);
1028 if (parent_inode && snapid != CEPH_SNAPDIR) {
1029 struct ceph_inode_info *ci = ceph_inode(parent_inode);
1030
1031 spin_lock(&parent_inode->i_lock);
1032 if (ci->i_shared_gen == di->lease_shared_gen ||
1033 snapid <= CEPH_MAXSNAP) {
1034 dout(" clearing %p complete (d_release)\n",
1035 parent_inode);
1036 ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
1037 ci->i_release_count++;
1038 }
1039 spin_unlock(&parent_inode->i_lock);
1040 }
1041 if (di) {
1042 ceph_dentry_lru_del(dentry);
1043 if (di->lease_session)
1044 ceph_put_mds_session(di->lease_session);
1045 kmem_cache_free(ceph_dentry_cachep, di);
1046 dentry->d_fsdata = NULL;
1047 }
1048 }
1049
1050 static int ceph_snapdir_d_revalidate(struct dentry *dentry,
1051 struct nameidata *nd)
1052 {
1053 /*
1054 * Eventually, we'll want to revalidate snapped metadata
1055 * too... probably...
1056 */
1057 return 1;
1058 }
1059
1060
1061
1062 /*
1063 * read() on a dir. This weird interface hack only works if mounted
1064 * with '-o dirstat'.
1065 */
1066 static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
1067 loff_t *ppos)
1068 {
1069 struct ceph_file_info *cf = file->private_data;
1070 struct inode *inode = file->f_dentry->d_inode;
1071 struct ceph_inode_info *ci = ceph_inode(inode);
1072 int left;
1073
1074 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
1075 return -EISDIR;
1076
1077 if (!cf->dir_info) {
1078 cf->dir_info = kmalloc(1024, GFP_NOFS);
1079 if (!cf->dir_info)
1080 return -ENOMEM;
1081 cf->dir_info_len =
1082 sprintf(cf->dir_info,
1083 "entries: %20lld\n"
1084 " files: %20lld\n"
1085 " subdirs: %20lld\n"
1086 "rentries: %20lld\n"
1087 " rfiles: %20lld\n"
1088 " rsubdirs: %20lld\n"
1089 "rbytes: %20lld\n"
1090 "rctime: %10ld.%09ld\n",
1091 ci->i_files + ci->i_subdirs,
1092 ci->i_files,
1093 ci->i_subdirs,
1094 ci->i_rfiles + ci->i_rsubdirs,
1095 ci->i_rfiles,
1096 ci->i_rsubdirs,
1097 ci->i_rbytes,
1098 (long)ci->i_rctime.tv_sec,
1099 (long)ci->i_rctime.tv_nsec);
1100 }
1101
1102 if (*ppos >= cf->dir_info_len)
1103 return 0;
1104 size = min_t(unsigned, size, cf->dir_info_len-*ppos);
1105 left = copy_to_user(buf, cf->dir_info + *ppos, size);
1106 if (left == size)
1107 return -EFAULT;
1108 *ppos += (size - left);
1109 return size - left;
1110 }
1111
1112 /*
1113 * an fsync() on a dir will wait for any uncommitted directory
1114 * operations to commit.
1115 */
1116 static int ceph_dir_fsync(struct file *file, int datasync)
1117 {
1118 struct inode *inode = file->f_path.dentry->d_inode;
1119 struct ceph_inode_info *ci = ceph_inode(inode);
1120 struct list_head *head = &ci->i_unsafe_dirops;
1121 struct ceph_mds_request *req;
1122 u64 last_tid;
1123 int ret = 0;
1124
1125 dout("dir_fsync %p\n", inode);
1126 spin_lock(&ci->i_unsafe_lock);
1127 if (list_empty(head))
1128 goto out;
1129
1130 req = list_entry(head->prev,
1131 struct ceph_mds_request, r_unsafe_dir_item);
1132 last_tid = req->r_tid;
1133
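/*
 * Wait on each unsafe request up to last_tid; r_safe_completion fires
 * once the MDS has committed the operation.
 */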
1134 do {
1135 ceph_mdsc_get_request(req);
1136 spin_unlock(&ci->i_unsafe_lock);
1137 dout("dir_fsync %p wait on tid %llu (until %llu)\n",
1138 inode, req->r_tid, last_tid);
1139 if (req->r_timeout) {
1140 ret = wait_for_completion_timeout(
1141 &req->r_safe_completion, req->r_timeout);
1142 if (ret > 0)
1143 ret = 0;
1144 else if (ret == 0)
1145 ret = -EIO; /* timed out */
1146 } else {
1147 wait_for_completion(&req->r_safe_completion);
1148 }
1149 spin_lock(&ci->i_unsafe_lock);
1150 ceph_mdsc_put_request(req);
1151
1152 if (ret || list_empty(head))
1153 break;
1154 req = list_entry(head->next,
1155 struct ceph_mds_request, r_unsafe_dir_item);
1156 } while (req->r_tid < last_tid);
1157 out:
1158 spin_unlock(&ci->i_unsafe_lock);
1159 return ret;
1160 }
1161
1162 /*
1163 * We maintain a private dentry LRU.
1164 *
1165 * FIXME: this needs to be changed to a per-mds lru to be useful.
1166 */
1167 void ceph_dentry_lru_add(struct dentry *dn)
1168 {
1169 struct ceph_dentry_info *di = ceph_dentry(dn);
1170 struct ceph_mds_client *mdsc;
1171
1172 dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
1173 dn->d_name.len, dn->d_name.name);
1174 if (di) {
1175 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1176 spin_lock(&mdsc->dentry_lru_lock);
1177 list_add_tail(&di->lru, &mdsc->dentry_lru);
1178 mdsc->num_dentry++;
1179 spin_unlock(&mdsc->dentry_lru_lock);
1180 }
1181 }
1182
1183 void ceph_dentry_lru_touch(struct dentry *dn)
1184 {
1185 struct ceph_dentry_info *di = ceph_dentry(dn);
1186 struct ceph_mds_client *mdsc;
1187
1188 dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
1189 dn->d_name.len, dn->d_name.name, di->offset);
1190 if (di) {
1191 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1192 spin_lock(&mdsc->dentry_lru_lock);
1193 list_move_tail(&di->lru, &mdsc->dentry_lru);
1194 spin_unlock(&mdsc->dentry_lru_lock);
1195 }
1196 }
1197
1198 void ceph_dentry_lru_del(struct dentry *dn)
1199 {
1200 struct ceph_dentry_info *di = ceph_dentry(dn);
1201 struct ceph_mds_client *mdsc;
1202
1203 dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
1204 dn->d_name.len, dn->d_name.name);
1205 if (di) {
1206 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1207 spin_lock(&mdsc->dentry_lru_lock);
1208 list_del_init(&di->lru);
1209 mdsc->num_dentry--;
1210 spin_unlock(&mdsc->dentry_lru_lock);
1211 }
1212 }
1213
1214 const struct file_operations ceph_dir_fops = {
1215 .read = ceph_read_dir,
1216 .readdir = ceph_readdir,
1217 .llseek = ceph_dir_llseek,
1218 .open = ceph_open,
1219 .release = ceph_release,
1220 .unlocked_ioctl = ceph_ioctl,
1221 .fsync = ceph_dir_fsync,
1222 };
1223
1224 const struct inode_operations ceph_dir_iops = {
1225 .lookup = ceph_lookup,
1226 .permission = ceph_permission,
1227 .getattr = ceph_getattr,
1228 .setattr = ceph_setattr,
1229 .setxattr = ceph_setxattr,
1230 .getxattr = ceph_getxattr,
1231 .listxattr = ceph_listxattr,
1232 .removexattr = ceph_removexattr,
1233 .mknod = ceph_mknod,
1234 .symlink = ceph_symlink,
1235 .mkdir = ceph_mkdir,
1236 .link = ceph_link,
1237 .unlink = ceph_unlink,
1238 .rmdir = ceph_unlink,
1239 .rename = ceph_rename,
1240 .create = ceph_create,
1241 };
1242
1243 const struct dentry_operations ceph_dentry_ops = {
1244 .d_revalidate = ceph_d_revalidate,
1245 .d_release = ceph_dentry_release,
1246 };
1247
1248 const struct dentry_operations ceph_snapdir_dentry_ops = {
1249 .d_revalidate = ceph_snapdir_d_revalidate,
1250 .d_release = ceph_dentry_release,
1251 };
1252
1253 const struct dentry_operations ceph_snap_dentry_ops = {
1254 .d_release = ceph_dentry_release,
1255 };