fs/ceph/dir.c
1 #include <linux/ceph/ceph_debug.h>
2
3 #include <linux/spinlock.h>
4 #include <linux/fs_struct.h>
5 #include <linux/namei.h>
6 #include <linux/slab.h>
7 #include <linux/sched.h>
8
9 #include "super.h"
10 #include "mds_client.h"
11
12 /*
13 * Directory operations: readdir, lookup, create, link, unlink,
14 * rename, etc.
15 */
16
17 /*
18 * Ceph MDS operations are specified in terms of a base ino and
19 * relative path. Thus, the client can specify an operation on a
20 * specific inode (e.g., a getattr due to fstat(2)), or as a path
21 * relative to, say, the root directory.
22 *
23 * Normally, we limit ourselves to strict inode ops (no path component)
24 * or dentry operations (a single path component relative to an ino). The
25 * exception to this is open_root_dentry(), which will open the mount
26 * point by name.
27 */
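/*
 * For illustration only (a rough sketch, not a complete request): looking
 * up a single name under directory inode 'dir' is expressed roughly as
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP, USE_ANY_MDS);
 *	req->r_dentry = dget(dentry);	(base ino + one path component)
 *	req->r_locked_dir = dir;
 *	err = ceph_mdsc_do_request(mdsc, NULL, req);
 *
 * which is the pattern ceph_lookup() below follows.
 */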
28
29 const struct inode_operations ceph_dir_iops;
30 const struct file_operations ceph_dir_fops;
31 const struct dentry_operations ceph_dentry_ops;
32
33 /*
34 * Initialize ceph dentry state.
35 */
36 int ceph_init_dentry(struct dentry *dentry)
37 {
38 struct ceph_dentry_info *di;
39
40 if (dentry->d_fsdata)
41 return 0;
42
43 di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
44 if (!di)
45 return -ENOMEM; /* oh well */
46
47 spin_lock(&dentry->d_lock);
48 if (dentry->d_fsdata) {
49 /* lost a race */
50 kmem_cache_free(ceph_dentry_cachep, di);
51 goto out_unlock;
52 }
53
54 if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
55 d_set_d_op(dentry, &ceph_dentry_ops);
56 else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
57 d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
58 else
59 d_set_d_op(dentry, &ceph_snap_dentry_ops);
60
61 di->dentry = dentry;
62 di->lease_session = NULL;
63 dentry->d_time = jiffies;
64 /* avoid reordering d_fsdata setup so that the check above is safe */
65 smp_mb();
66 dentry->d_fsdata = di;
67 ceph_dentry_lru_add(dentry);
68 out_unlock:
69 spin_unlock(&dentry->d_lock);
70 return 0;
71 }
72
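/*
 * Return the parent inode of a dentry with an extra reference held (via
 * ihold), or NULL for a NULL or root dentry.  The caller must iput() it.
 */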
73 struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
74 {
75 struct inode *inode = NULL;
76
77 if (!dentry)
78 return NULL;
79
80 spin_lock(&dentry->d_lock);
81 if (!IS_ROOT(dentry)) {
82 inode = dentry->d_parent->d_inode;
83 ihold(inode);
84 }
85 spin_unlock(&dentry->d_lock);
86 return inode;
87 }
88
89
90 /*
91 * for readdir, we encode the directory frag and offset within that
92 * frag into f_pos.
93 */
94 static unsigned fpos_frag(loff_t p)
95 {
96 return p >> 32;
97 }
98 static unsigned fpos_off(loff_t p)
99 {
100 return p & 0xffffffff;
101 }
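/*
 * Sketch of the encoding, assuming ceph_make_fpos() (defined in super.h
 * and used by ceph_readdir() below) builds the composite position:
 *
 *	fpos = ((loff_t)frag << 32) | off;
 *
 * fpos_frag() and fpos_off() simply split that value back apart.  Offsets
 * 0 and 1 are reserved for "." and "..", so real entries start at 2 (see
 * the fi->next_offset handling below).
 */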
102
103 static int fpos_cmp(loff_t l, loff_t r)
104 {
105 int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
106 if (v)
107 return v;
108 return (int)(fpos_off(l) - fpos_off(r));
109 }
110
111 /*
112 * When possible, we try to satisfy a readdir by peeking at the
113 * dcache. We make this work by carefully ordering dentries on
114 * d_u.d_child when we initially get results back from the MDS, and
115 * falling back to a "normal" sync readdir if any dentries in the dir
116 * are dropped.
117 *
118 * Complete dir indicates that we have all dentries in the dir. It is
119 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
120 * the MDS if/when the directory is modified).
121 */
122 static int __dcache_readdir(struct file *file, struct dir_context *ctx)
123 {
124 struct ceph_file_info *fi = file->private_data;
125 struct dentry *parent = file->f_dentry;
126 struct inode *dir = parent->d_inode;
127 struct list_head *p;
128 struct dentry *dentry, *last;
129 struct ceph_dentry_info *di;
130 int err = 0;
131
132 /* claim ref on last dentry we returned */
133 last = fi->dentry;
134 fi->dentry = NULL;
135
136 dout("__dcache_readdir %p at %llu (last %p)\n", dir, ctx->pos,
137 last);
138
139 spin_lock(&parent->d_lock);
140
141 /* start at beginning? */
142 if (ctx->pos == 2 || last == NULL ||
143 ctx->pos < ceph_dentry(last)->offset) {
144 if (list_empty(&parent->d_subdirs))
145 goto out_unlock;
146 p = parent->d_subdirs.prev;
147 dout(" initial p %p/%p\n", p->prev, p->next);
148 } else {
149 p = last->d_u.d_child.prev;
150 }
151
152 more:
153 dentry = list_entry(p, struct dentry, d_u.d_child);
154 di = ceph_dentry(dentry);
155 while (1) {
156 dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
157 d_unhashed(dentry) ? "!hashed" : "hashed",
158 parent->d_subdirs.prev, parent->d_subdirs.next);
159 if (p == &parent->d_subdirs) {
160 fi->flags |= CEPH_F_ATEND;
161 goto out_unlock;
162 }
163 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
164 if (!d_unhashed(dentry) && dentry->d_inode &&
165 ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
166 ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
167 fpos_cmp(ctx->pos, di->offset) <= 0)
168 break;
169 dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
170 dentry->d_name.len, dentry->d_name.name, di->offset,
171 ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
172 !dentry->d_inode ? " null" : "");
173 spin_unlock(&dentry->d_lock);
174 p = p->prev;
175 dentry = list_entry(p, struct dentry, d_u.d_child);
176 di = ceph_dentry(dentry);
177 }
178
179 dget_dlock(dentry);
180 spin_unlock(&dentry->d_lock);
181 spin_unlock(&parent->d_lock);
182
183 dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
184 dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
185 ctx->pos = di->offset;
186 if (!dir_emit(ctx, dentry->d_name.name,
187 dentry->d_name.len,
188 ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
189 dentry->d_inode->i_mode >> 12)) {
190 if (last) {
191 /* remember our position */
192 fi->dentry = last;
193 fi->next_offset = di->offset;
194 }
195 dput(dentry);
196 return 0;
197 }
198
199 if (last)
200 dput(last);
201 last = dentry;
202
203 ctx->pos++;
204
205 /* make sure a dentry wasn't dropped while we didn't have parent lock */
206 if (!ceph_dir_is_complete(dir)) {
207 dout(" lost dir complete on %p; falling back to mds\n", dir);
208 err = -EAGAIN;
209 goto out;
210 }
211
212 spin_lock(&parent->d_lock);
213 p = p->prev; /* advance to next dentry */
214 goto more;
215
216 out_unlock:
217 spin_unlock(&parent->d_lock);
218 out:
219 if (last)
220 dput(last);
221 return err;
222 }
223
224 /*
225 * make note of the last dentry we read, so we can
226 * continue at the same lexicographical point,
227 * regardless of what dir changes take place on the
228 * server.
229 */
230 static int note_last_dentry(struct ceph_file_info *fi, const char *name,
231 int len)
232 {
233 kfree(fi->last_name);
234 fi->last_name = kmalloc(len+1, GFP_NOFS);
235 if (!fi->last_name)
236 return -ENOMEM;
237 memcpy(fi->last_name, name, len);
238 fi->last_name[len] = 0;
239 dout("note_last_dentry '%s'\n", fi->last_name);
240 return 0;
241 }
242
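/*
 * Emit directory entries for readdir: "." and ".." first, then either
 * straight from the dcache (when the directory is known complete and we
 * hold the shared cap) or by fetching one frag at a time from the MDS.
 */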
243 static int ceph_readdir(struct file *file, struct dir_context *ctx)
244 {
245 struct ceph_file_info *fi = file->private_data;
246 struct inode *inode = file_inode(file);
247 struct ceph_inode_info *ci = ceph_inode(inode);
248 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
249 struct ceph_mds_client *mdsc = fsc->mdsc;
250 unsigned frag = fpos_frag(ctx->pos);
251 int off = fpos_off(ctx->pos);
252 int err;
253 u32 ftype;
254 struct ceph_mds_reply_info_parsed *rinfo;
255 const int max_entries = fsc->mount_options->max_readdir;
256 const int max_bytes = fsc->mount_options->max_readdir_bytes;
257
258 dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
259 if (fi->flags & CEPH_F_ATEND)
260 return 0;
261
262 /* always start with . and .. */
263 if (ctx->pos == 0) {
264 /* note dir version at start of readdir so we can tell
265 * if any dentries get dropped */
266 fi->dir_release_count = atomic_read(&ci->i_release_count);
267
268 dout("readdir off 0 -> '.'\n");
269 if (!dir_emit(ctx, ".", 1,
270 ceph_translate_ino(inode->i_sb, inode->i_ino),
271 inode->i_mode >> 12))
272 return 0;
273 ctx->pos = 1;
274 off = 1;
275 }
276 if (ctx->pos == 1) {
277 ino_t ino = parent_ino(file->f_dentry);
278 dout("readdir off 1 -> '..'\n");
279 if (!dir_emit(ctx, "..", 2,
280 ceph_translate_ino(inode->i_sb, ino),
281 inode->i_mode >> 12))
282 return 0;
283 ctx->pos = 2;
284 off = 2;
285 }
286
287 /* can we use the dcache? */
288 spin_lock(&ci->i_ceph_lock);
289 if ((ctx->pos == 2 || fi->dentry) &&
290 !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
291 ceph_snap(inode) != CEPH_SNAPDIR &&
292 __ceph_dir_is_complete(ci) &&
293 __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
294 spin_unlock(&ci->i_ceph_lock);
295 err = __dcache_readdir(file, ctx);
296 if (err != -EAGAIN)
297 return err;
298 } else {
299 spin_unlock(&ci->i_ceph_lock);
300 }
301 if (fi->dentry) {
302 err = note_last_dentry(fi, fi->dentry->d_name.name,
303 fi->dentry->d_name.len);
304 if (err)
305 return err;
306 dput(fi->dentry);
307 fi->dentry = NULL;
308 }
309
310 /* proceed with a normal readdir */
311
312 more:
313 /* do we have the correct frag content buffered? */
314 if (fi->frag != frag || fi->last_readdir == NULL) {
315 struct ceph_mds_request *req;
316 int op = ceph_snap(inode) == CEPH_SNAPDIR ?
317 CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
318
319 /* discard old result, if any */
320 if (fi->last_readdir) {
321 ceph_mdsc_put_request(fi->last_readdir);
322 fi->last_readdir = NULL;
323 }
324
325 /* requery frag tree, as the frag topology may have changed */
326 frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);
327
328 dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
329 ceph_vinop(inode), frag, fi->last_name);
330 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
331 if (IS_ERR(req))
332 return PTR_ERR(req);
333 req->r_inode = inode;
334 ihold(inode);
335 req->r_dentry = dget(file->f_dentry);
336 /* hints to request -> mds selection code */
337 req->r_direct_mode = USE_AUTH_MDS;
338 req->r_direct_hash = ceph_frag_value(frag);
339 req->r_direct_is_hash = true;
340 req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
341 req->r_readdir_offset = fi->next_offset;
342 req->r_args.readdir.frag = cpu_to_le32(frag);
343 req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
344 req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
345 req->r_num_caps = max_entries + 1;
346 err = ceph_mdsc_do_request(mdsc, NULL, req);
347 if (err < 0) {
348 ceph_mdsc_put_request(req);
349 return err;
350 }
351 dout("readdir got and parsed readdir result=%d"
352 " on frag %x, end=%d, complete=%d\n", err, frag,
353 (int)req->r_reply_info.dir_end,
354 (int)req->r_reply_info.dir_complete);
355
356 if (!req->r_did_prepopulate) {
357 dout("readdir !did_prepopulate");
358 /* preclude from marking dir complete */
359 fi->dir_release_count--;
360 }
361
362 /* note next offset and last dentry name */
363 rinfo = &req->r_reply_info;
364 if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
365 frag = le32_to_cpu(rinfo->dir_dir->frag);
366 if (ceph_frag_is_leftmost(frag))
367 fi->next_offset = 2;
368 else
369 fi->next_offset = 0;
370 off = fi->next_offset;
371 }
372 fi->offset = fi->next_offset;
373 fi->last_readdir = req;
374 fi->frag = frag;
375
376 if (req->r_reply_info.dir_end) {
377 kfree(fi->last_name);
378 fi->last_name = NULL;
379 if (ceph_frag_is_rightmost(frag))
380 fi->next_offset = 2;
381 else
382 fi->next_offset = 0;
383 } else {
384 err = note_last_dentry(fi,
385 rinfo->dir_dname[rinfo->dir_nr-1],
386 rinfo->dir_dname_len[rinfo->dir_nr-1]);
387 if (err)
388 return err;
389 fi->next_offset += rinfo->dir_nr;
390 }
391 }
392
393 rinfo = &fi->last_readdir->r_reply_info;
394 dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
395 rinfo->dir_nr, off, fi->offset);
396
397 ctx->pos = ceph_make_fpos(frag, off);
398 while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
399 struct ceph_mds_reply_inode *in =
400 rinfo->dir_in[off - fi->offset].in;
401 struct ceph_vino vino;
402 ino_t ino;
403
404 dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
405 off, off - fi->offset, rinfo->dir_nr, ctx->pos,
406 rinfo->dir_dname_len[off - fi->offset],
407 rinfo->dir_dname[off - fi->offset], in);
408 BUG_ON(!in);
409 ftype = le32_to_cpu(in->mode) >> 12;
410 vino.ino = le64_to_cpu(in->ino);
411 vino.snap = le64_to_cpu(in->snapid);
412 ino = ceph_vino_to_ino(vino);
413 if (!dir_emit(ctx,
414 rinfo->dir_dname[off - fi->offset],
415 rinfo->dir_dname_len[off - fi->offset],
416 ceph_translate_ino(inode->i_sb, ino), ftype)) {
417 dout("filldir stopping us...\n");
418 return 0;
419 }
420 off++;
421 ctx->pos++;
422 }
423
424 if (fi->last_name) {
425 ceph_mdsc_put_request(fi->last_readdir);
426 fi->last_readdir = NULL;
427 goto more;
428 }
429
430 /* more frags? */
431 if (!ceph_frag_is_rightmost(frag)) {
432 frag = ceph_frag_next(frag);
433 off = 0;
434 ctx->pos = ceph_make_fpos(frag, off);
435 dout("readdir next frag is %x\n", frag);
436 goto more;
437 }
438 fi->flags |= CEPH_F_ATEND;
439
440 /*
441 * if dir_release_count still matches the dir, no dentries
442 * were released during the whole readdir, and we should have
443 * the complete dir contents in our cache.
444 */
445 spin_lock(&ci->i_ceph_lock);
446 if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
447 dout(" marking %p complete\n", inode);
448 __ceph_dir_set_complete(ci, fi->dir_release_count);
449 ci->i_max_offset = ctx->pos;
450 }
451 spin_unlock(&ci->i_ceph_lock);
452
453 dout("readdir %p file %p done.\n", inode, file);
454 return 0;
455 }
456
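/*
 * Drop any buffered readdir state (cached MDS reply, last name, position)
 * so the next ceph_readdir() starts over from just past "." and "..".
 */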
457 static void reset_readdir(struct ceph_file_info *fi)
458 {
459 if (fi->last_readdir) {
460 ceph_mdsc_put_request(fi->last_readdir);
461 fi->last_readdir = NULL;
462 }
463 kfree(fi->last_name);
464 fi->last_name = NULL;
465 fi->next_offset = 2; /* compensate for . and .. */
466 if (fi->dentry) {
467 dput(fi->dentry);
468 fi->dentry = NULL;
469 }
470 fi->flags &= ~CEPH_F_ATEND;
471 }
472
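/*
 * llseek on a directory.  Offsets are frag/offset composites (see
 * fpos_frag/fpos_off above), not byte positions, so seeking also decides
 * whether the buffered readdir state must be thrown away.
 */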
473 static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
474 {
475 struct ceph_file_info *fi = file->private_data;
476 struct inode *inode = file->f_mapping->host;
477 loff_t old_offset = offset;
478 loff_t retval;
479
480 mutex_lock(&inode->i_mutex);
481 retval = -EINVAL;
482 switch (whence) {
483 case SEEK_END:
484 offset += inode->i_size + 2; /* FIXME */
485 break;
486 case SEEK_CUR:
487 offset += file->f_pos;	/* fall through to SEEK_SET */
488 case SEEK_SET:
489 break;
490 default:
491 goto out;
492 }
493
494 if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
495 if (offset != file->f_pos) {
496 file->f_pos = offset;
497 file->f_version = 0;
498 fi->flags &= ~CEPH_F_ATEND;
499 }
500 retval = offset;
501
502 /*
503 * discard buffered readdir content on seekdir(0), or
504 * seek to new frag, or seek prior to current chunk.
505 */
506 if (offset == 0 ||
507 fpos_frag(offset) != fpos_frag(old_offset) ||
508 fpos_off(offset) < fi->offset) {
509 dout("dir_llseek dropping %p content\n", file);
510 reset_readdir(fi);
511 }
512
513 /* a forward seek may skip entries; prevent marking the dir complete */
514 if (offset > old_offset)
515 fi->dir_release_count--;
516 }
517 out:
518 mutex_unlock(&inode->i_mutex);
519 return retval;
520 }
521
522 /*
523 * Handle lookups for the hidden .snap directory.
524 */
525 int ceph_handle_snapdir(struct ceph_mds_request *req,
526 struct dentry *dentry, int err)
527 {
528 struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
529 struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */
530
531 /* .snap dir? */
532 if (err == -ENOENT &&
533 ceph_snap(parent) == CEPH_NOSNAP &&
534 strcmp(dentry->d_name.name,
535 fsc->mount_options->snapdir_name) == 0) {
536 struct inode *inode = ceph_get_snapdir(parent);
537 dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
538 dentry, dentry->d_name.len, dentry->d_name.name, inode);
539 BUG_ON(!d_unhashed(dentry));
540 d_add(dentry, inode);
541 err = 0;
542 }
543 return err;
544 }
545
546 /*
547 * Figure out final result of a lookup/open request.
548 *
549 * Mainly, make sure we return the final req->r_dentry (if it already
550 * existed) in place of the original VFS-provided dentry when they
551 * differ.
552 *
553 * Gracefully handle the case where the MDS replies with -ENOENT and
554 * no trace (which it may do, at its discretion, e.g., if it doesn't
555 * care to issue a lease on the negative dentry).
556 */
557 struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
558 struct dentry *dentry, int err)
559 {
560 if (err == -ENOENT) {
561 /* no trace? */
562 err = 0;
563 if (!req->r_reply_info.head->is_dentry) {
564 dout("ENOENT and no trace, dentry %p inode %p\n",
565 dentry, dentry->d_inode);
566 if (dentry->d_inode) {
567 d_drop(dentry);
568 err = -ENOENT;
569 } else {
570 d_add(dentry, NULL);
571 }
572 }
573 }
574 if (err)
575 dentry = ERR_PTR(err);
576 else if (dentry != req->r_dentry)
577 dentry = dget(req->r_dentry); /* we got spliced */
578 else
579 dentry = NULL;
580 return dentry;
581 }
582
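/* is this the hidden ".ceph" entry in the root directory? */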
583 static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
584 {
585 return ceph_ino(inode) == CEPH_INO_ROOT &&
586 strncmp(dentry->d_name.name, ".ceph", 5) == 0;
587 }
588
589 /*
590 * Look up a single dir entry. If there is a lookup intent, inform
591 * the MDS so that it gets our 'caps wanted' value in a single op.
592 */
593 static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
594 unsigned int flags)
595 {
596 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
597 struct ceph_mds_client *mdsc = fsc->mdsc;
598 struct ceph_mds_request *req;
599 int op;
600 int err;
601
602 dout("lookup %p dentry %p '%.*s'\n",
603 dir, dentry, dentry->d_name.len, dentry->d_name.name);
604
605 if (dentry->d_name.len > NAME_MAX)
606 return ERR_PTR(-ENAMETOOLONG);
607
608 err = ceph_init_dentry(dentry);
609 if (err < 0)
610 return ERR_PTR(err);
611
612 /* can we conclude ENOENT locally? */
613 if (dentry->d_inode == NULL) {
614 struct ceph_inode_info *ci = ceph_inode(dir);
615 struct ceph_dentry_info *di = ceph_dentry(dentry);
616
617 spin_lock(&ci->i_ceph_lock);
618 dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
619 if (strncmp(dentry->d_name.name,
620 fsc->mount_options->snapdir_name,
621 dentry->d_name.len) &&
622 !is_root_ceph_dentry(dir, dentry) &&
623 __ceph_dir_is_complete(ci) &&
624 (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
625 spin_unlock(&ci->i_ceph_lock);
626 dout(" dir %p complete, -ENOENT\n", dir);
627 d_add(dentry, NULL);
628 di->lease_shared_gen = ci->i_shared_gen;
629 return NULL;
630 }
631 spin_unlock(&ci->i_ceph_lock);
632 }
633
634 op = ceph_snap(dir) == CEPH_SNAPDIR ?
635 CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
636 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
637 if (IS_ERR(req))
638 return ERR_CAST(req);
639 req->r_dentry = dget(dentry);
640 req->r_num_caps = 2;
641 /* we only need inode linkage */
642 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
643 req->r_locked_dir = dir;
644 err = ceph_mdsc_do_request(mdsc, NULL, req);
645 err = ceph_handle_snapdir(req, dentry, err);
646 dentry = ceph_finish_lookup(req, dentry, err);
647 ceph_mdsc_put_request(req); /* will dput(dentry) */
648 dout("lookup result=%p\n", dentry);
649 return dentry;
650 }
651
652 /*
653 * If we do a create but get no trace back from the MDS, follow up with
654 * a lookup (the VFS expects us to link up the provided dentry).
655 */
656 int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
657 {
658 struct dentry *result = ceph_lookup(dir, dentry, 0);
659
660 if (result && !IS_ERR(result)) {
661 /*
662 * We created the item, then did a lookup, and found
663 * it was already linked to another inode we already
664 * had in our cache (and thus got spliced). Link our
665 * dentry to that inode, but don't hash it, just in
666 * case the VFS wants to dereference it.
667 */
668 BUG_ON(!result->d_inode);
669 d_instantiate(dentry, result->d_inode);
670 return 0;
671 }
672 return PTR_ERR(result);
673 }
674
675 static int ceph_mknod(struct inode *dir, struct dentry *dentry,
676 umode_t mode, dev_t rdev)
677 {
678 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
679 struct ceph_mds_client *mdsc = fsc->mdsc;
680 struct ceph_mds_request *req;
681 int err;
682
683 if (ceph_snap(dir) != CEPH_NOSNAP)
684 return -EROFS;
685
686 dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
687 dir, dentry, mode, rdev);
688 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
689 if (IS_ERR(req)) {
690 d_drop(dentry);
691 return PTR_ERR(req);
692 }
693 req->r_dentry = dget(dentry);
694 req->r_num_caps = 2;
695 req->r_locked_dir = dir;
696 req->r_args.mknod.mode = cpu_to_le32(mode);
697 req->r_args.mknod.rdev = cpu_to_le32(rdev);
698 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
699 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
700 err = ceph_mdsc_do_request(mdsc, dir, req);
701 if (!err && !req->r_reply_info.head->is_dentry)
702 err = ceph_handle_notrace_create(dir, dentry);
703 ceph_mdsc_put_request(req);
704
705 if (!err)
706 ceph_init_acl(dentry, dentry->d_inode, dir);
707 else
708 d_drop(dentry);
709 return err;
710 }
711
712 static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
713 bool excl)
714 {
715 return ceph_mknod(dir, dentry, mode, 0);
716 }
717
718 static int ceph_symlink(struct inode *dir, struct dentry *dentry,
719 const char *dest)
720 {
721 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
722 struct ceph_mds_client *mdsc = fsc->mdsc;
723 struct ceph_mds_request *req;
724 int err;
725
726 if (ceph_snap(dir) != CEPH_NOSNAP)
727 return -EROFS;
728
729 dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
730 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
731 if (IS_ERR(req)) {
732 d_drop(dentry);
733 return PTR_ERR(req);
734 }
735 req->r_dentry = dget(dentry);
736 req->r_num_caps = 2;
737 req->r_path2 = kstrdup(dest, GFP_NOFS);
738 req->r_locked_dir = dir;
739 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
740 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
741 err = ceph_mdsc_do_request(mdsc, dir, req);
742 if (!err && !req->r_reply_info.head->is_dentry)
743 err = ceph_handle_notrace_create(dir, dentry);
744 ceph_mdsc_put_request(req);
745 if (!err)
746 ceph_init_acl(dentry, dentry->d_inode, dir);
747 else
748 d_drop(dentry);
749 return err;
750 }
751
752 static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
753 {
754 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
755 struct ceph_mds_client *mdsc = fsc->mdsc;
756 struct ceph_mds_request *req;
757 int err = -EROFS;
758 int op;
759
760 if (ceph_snap(dir) == CEPH_SNAPDIR) {
761 /* mkdir .snap/foo is a MKSNAP */
762 op = CEPH_MDS_OP_MKSNAP;
763 dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
764 dentry->d_name.len, dentry->d_name.name, dentry);
765 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
766 dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
767 op = CEPH_MDS_OP_MKDIR;
768 } else {
769 goto out;
770 }
771 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
772 if (IS_ERR(req)) {
773 err = PTR_ERR(req);
774 goto out;
775 }
776
777 req->r_dentry = dget(dentry);
778 req->r_num_caps = 2;
779 req->r_locked_dir = dir;
780 req->r_args.mkdir.mode = cpu_to_le32(mode);
781 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
782 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
783 err = ceph_mdsc_do_request(mdsc, dir, req);
784 if (!err && !req->r_reply_info.head->is_dentry)
785 err = ceph_handle_notrace_create(dir, dentry);
786 ceph_mdsc_put_request(req);
787 out:
788 if (!err)
789 ceph_init_acl(dentry, dentry->d_inode, dir);
790 else
791 d_drop(dentry);
792 return err;
793 }
794
795 static int ceph_link(struct dentry *old_dentry, struct inode *dir,
796 struct dentry *dentry)
797 {
798 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
799 struct ceph_mds_client *mdsc = fsc->mdsc;
800 struct ceph_mds_request *req;
801 int err;
802
803 if (ceph_snap(dir) != CEPH_NOSNAP)
804 return -EROFS;
805
806 dout("link in dir %p old_dentry %p dentry %p\n", dir,
807 old_dentry, dentry);
808 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
809 if (IS_ERR(req)) {
810 d_drop(dentry);
811 return PTR_ERR(req);
812 }
813 req->r_dentry = dget(dentry);
814 req->r_num_caps = 2;
815 req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
816 req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
817 req->r_locked_dir = dir;
818 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
819 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
820 /* release LINK_SHARED on source inode (mds will lock it) */
821 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
822 err = ceph_mdsc_do_request(mdsc, dir, req);
823 if (err) {
824 d_drop(dentry);
825 } else if (!req->r_reply_info.head->is_dentry) {
826 ihold(old_dentry->d_inode);
827 d_instantiate(dentry, old_dentry->d_inode);
828 }
829 ceph_mdsc_put_request(req);
830 return err;
831 }
832
833 /*
834 * For a soon-to-be unlinked file, drop the LINK_SHARED and LINK_EXCL caps. If it
835 * looks like the link count will hit 0, drop any other caps (other
836 * than PIN) we don't specifically want (due to the file still being
837 * open).
838 */
839 static int drop_caps_for_unlink(struct inode *inode)
840 {
841 struct ceph_inode_info *ci = ceph_inode(inode);
842 int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
843
844 spin_lock(&ci->i_ceph_lock);
845 if (inode->i_nlink == 1) {
846 drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
847 ci->i_ceph_flags |= CEPH_I_NODELAY;
848 }
849 spin_unlock(&ci->i_ceph_lock);
850 return drop;
851 }
852
853 /*
854 * rmdir and unlink differ only by the metadata op code
855 */
856 static int ceph_unlink(struct inode *dir, struct dentry *dentry)
857 {
858 struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
859 struct ceph_mds_client *mdsc = fsc->mdsc;
860 struct inode *inode = dentry->d_inode;
861 struct ceph_mds_request *req;
862 int err = -EROFS;
863 int op;
864
865 if (ceph_snap(dir) == CEPH_SNAPDIR) {
866 /* rmdir .snap/foo is RMSNAP */
867 dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
868 dentry->d_name.name, dentry);
869 op = CEPH_MDS_OP_RMSNAP;
870 } else if (ceph_snap(dir) == CEPH_NOSNAP) {
871 dout("unlink/rmdir dir %p dn %p inode %p\n",
872 dir, dentry, inode);
873 op = S_ISDIR(dentry->d_inode->i_mode) ?
874 CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
875 } else
876 goto out;
877 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
878 if (IS_ERR(req)) {
879 err = PTR_ERR(req);
880 goto out;
881 }
882 req->r_dentry = dget(dentry);
883 req->r_num_caps = 2;
884 req->r_locked_dir = dir;
885 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
886 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
887 req->r_inode_drop = drop_caps_for_unlink(inode);
888 err = ceph_mdsc_do_request(mdsc, dir, req);
889 if (!err && !req->r_reply_info.head->is_dentry)
890 d_delete(dentry);
891 ceph_mdsc_put_request(req);
892 out:
893 return err;
894 }
895
896 static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
897 struct inode *new_dir, struct dentry *new_dentry)
898 {
899 struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
900 struct ceph_mds_client *mdsc = fsc->mdsc;
901 struct ceph_mds_request *req;
902 int err;
903
904 if (ceph_snap(old_dir) != ceph_snap(new_dir))
905 return -EXDEV;
906 if (ceph_snap(old_dir) != CEPH_NOSNAP ||
907 ceph_snap(new_dir) != CEPH_NOSNAP)
908 return -EROFS;
909 dout("rename dir %p dentry %p to dir %p dentry %p\n",
910 old_dir, old_dentry, new_dir, new_dentry);
911 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
912 if (IS_ERR(req))
913 return PTR_ERR(req);
914 req->r_dentry = dget(new_dentry);
915 req->r_num_caps = 2;
916 req->r_old_dentry = dget(old_dentry);
917 req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
918 req->r_locked_dir = new_dir;
919 req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
920 req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
921 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
922 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
923 /* release LINK_SHARED on source inode (mds will lock it) */
924 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
925 if (new_dentry->d_inode)
926 req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
927 err = ceph_mdsc_do_request(mdsc, old_dir, req);
928 if (!err && !req->r_reply_info.head->is_dentry) {
929 /*
930 * Normally d_move() is done by fill_trace (called by
931 * do_request, above). If there is no trace, we need
932 * to do it here.
933 */
934
935 /* d_move screws up d_subdirs order */
936 ceph_dir_clear_complete(new_dir);
937
938 d_move(old_dentry, new_dentry);
939
940 /* ensure target dentry is invalidated, despite
941 rehashing bug in vfs_rename_dir */
942 ceph_invalidate_dentry_lease(new_dentry);
943 }
944 ceph_mdsc_put_request(req);
945 return err;
946 }
947
948 /*
949 * Ensure a dentry lease will no longer revalidate.
950 */
951 void ceph_invalidate_dentry_lease(struct dentry *dentry)
952 {
953 spin_lock(&dentry->d_lock);
954 dentry->d_time = jiffies;
955 ceph_dentry(dentry)->lease_shared_gen = 0;
956 spin_unlock(&dentry->d_lock);
957 }
958
959 /*
960 * Check if dentry lease is valid. If not, delete the lease. Try to
961 * renew if the lease is more than half up.
962 */
963 static int dentry_lease_is_valid(struct dentry *dentry)
964 {
965 struct ceph_dentry_info *di;
966 struct ceph_mds_session *s;
967 int valid = 0;
968 u32 gen;
969 unsigned long ttl;
970 struct ceph_mds_session *session = NULL;
971 struct inode *dir = NULL;
972 u32 seq = 0;
973
974 spin_lock(&dentry->d_lock);
975 di = ceph_dentry(dentry);
976 if (di->lease_session) {
977 s = di->lease_session;
978 spin_lock(&s->s_gen_ttl_lock);
979 gen = s->s_cap_gen;
980 ttl = s->s_cap_ttl;
981 spin_unlock(&s->s_gen_ttl_lock);
982
983 if (di->lease_gen == gen &&
984 time_before(jiffies, dentry->d_time) &&
985 time_before(jiffies, ttl)) {
986 valid = 1;
987 if (di->lease_renew_after &&
988 time_after(jiffies, di->lease_renew_after)) {
989 /* we should renew */
990 dir = dentry->d_parent->d_inode;
991 session = ceph_get_mds_session(s);
992 seq = di->lease_seq;
993 di->lease_renew_after = 0;
994 di->lease_renew_from = jiffies;
995 }
996 }
997 }
998 spin_unlock(&dentry->d_lock);
999
1000 if (session) {
1001 ceph_mdsc_lease_send_msg(session, dir, dentry,
1002 CEPH_MDS_LEASE_RENEW, seq);
1003 ceph_put_mds_session(session);
1004 }
1005 dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
1006 return valid;
1007 }
1008
1009 /*
1010 * Check if directory-wide content lease/cap is valid.
1011 */
1012 static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
1013 {
1014 struct ceph_inode_info *ci = ceph_inode(dir);
1015 struct ceph_dentry_info *di = ceph_dentry(dentry);
1016 int valid = 0;
1017
1018 spin_lock(&ci->i_ceph_lock);
1019 if (ci->i_shared_gen == di->lease_shared_gen)
1020 valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
1021 spin_unlock(&ci->i_ceph_lock);
1022 dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
1023 dir, (unsigned)ci->i_shared_gen, dentry,
1024 (unsigned)di->lease_shared_gen, valid);
1025 return valid;
1026 }
1027
1028 /*
1029 * Check if cached dentry can be trusted.
1030 */
1031 static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
1032 {
1033 int valid = 0;
1034 struct inode *dir;
1035
1036 if (flags & LOOKUP_RCU)
1037 return -ECHILD;
1038
1039 dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
1040 dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
1041 ceph_dentry(dentry)->offset);
1042
1043 dir = ceph_get_dentry_parent_inode(dentry);
1044
1045 /* always trust cached snapped dentries, snapdir dentry */
1046 if (ceph_snap(dir) != CEPH_NOSNAP) {
1047 dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
1048 dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
1049 valid = 1;
1050 } else if (dentry->d_inode &&
1051 ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
1052 valid = 1;
1053 } else if (dentry_lease_is_valid(dentry) ||
1054 dir_lease_is_valid(dir, dentry)) {
1055 if (dentry->d_inode)
1056 valid = ceph_is_any_caps(dentry->d_inode);
1057 else
1058 valid = 1;
1059 }
1060
1061 dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
1062 if (valid) {
1063 ceph_dentry_lru_touch(dentry);
1064 } else {
1065 ceph_dir_clear_complete(dir);
1066 d_drop(dentry);
1067 }
1068 iput(dir);
1069 return valid;
1070 }
1071
1072 /*
1073 * Release our ceph_dentry_info.
1074 */
1075 static void ceph_d_release(struct dentry *dentry)
1076 {
1077 struct ceph_dentry_info *di = ceph_dentry(dentry);
1078
1079 dout("d_release %p\n", dentry);
1080 ceph_dentry_lru_del(dentry);
1081 if (di->lease_session)
1082 ceph_put_mds_session(di->lease_session);
1083 kmem_cache_free(ceph_dentry_cachep, di);
1084 dentry->d_fsdata = NULL;
1085 }
1086
1087 static int ceph_snapdir_d_revalidate(struct dentry *dentry,
1088 unsigned int flags)
1089 {
1090 /*
1091 * Eventually, we'll want to revalidate snapped metadata
1092 * too... probably...
1093 */
1094 return 1;
1095 }
1096
1097 /*
1098 * When the VFS prunes a dentry from the cache, we need to clear the
1099 * complete flag on the parent directory.
1100 *
1101 * Called under dentry->d_lock.
1102 */
1103 static void ceph_d_prune(struct dentry *dentry)
1104 {
1105 dout("ceph_d_prune %p\n", dentry);
1106
1107 /* do we have a valid parent? */
1108 if (IS_ROOT(dentry))
1109 return;
1110
1111 /* if we are not hashed, we don't affect dir's completeness */
1112 if (d_unhashed(dentry))
1113 return;
1114
1115 /*
1116 * we hold d_lock, so d_parent is stable, and d_fsdata is never
1117 * cleared until d_release
1118 */
1119 ceph_dir_clear_complete(dentry->d_parent->d_inode);
1120 }
1121
1122 /*
1123 * read() on a dir. This weird interface hack only works if mounted
1124 * with '-o dirstat'.
1125 */
1126 static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
1127 loff_t *ppos)
1128 {
1129 struct ceph_file_info *cf = file->private_data;
1130 struct inode *inode = file_inode(file);
1131 struct ceph_inode_info *ci = ceph_inode(inode);
1132 int left;
1133 const int bufsize = 1024;
1134
1135 if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
1136 return -EISDIR;
1137
1138 if (!cf->dir_info) {
1139 cf->dir_info = kmalloc(bufsize, GFP_NOFS);
1140 if (!cf->dir_info)
1141 return -ENOMEM;
1142 cf->dir_info_len =
1143 snprintf(cf->dir_info, bufsize,
1144 "entries: %20lld\n"
1145 " files: %20lld\n"
1146 " subdirs: %20lld\n"
1147 "rentries: %20lld\n"
1148 " rfiles: %20lld\n"
1149 " rsubdirs: %20lld\n"
1150 "rbytes: %20lld\n"
1151 "rctime: %10ld.%09ld\n",
1152 ci->i_files + ci->i_subdirs,
1153 ci->i_files,
1154 ci->i_subdirs,
1155 ci->i_rfiles + ci->i_rsubdirs,
1156 ci->i_rfiles,
1157 ci->i_rsubdirs,
1158 ci->i_rbytes,
1159 (long)ci->i_rctime.tv_sec,
1160 (long)ci->i_rctime.tv_nsec);
1161 }
1162
1163 if (*ppos >= cf->dir_info_len)
1164 return 0;
1165 size = min_t(unsigned, size, cf->dir_info_len-*ppos);
1166 left = copy_to_user(buf, cf->dir_info + *ppos, size);
1167 if (left == size)
1168 return -EFAULT;
1169 *ppos += (size - left);
1170 return size - left;
1171 }
1172
1173 /*
1174 * an fsync() on a dir will wait for any uncommitted directory
1175 * operations to commit.
1176 */
1177 static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
1178 int datasync)
1179 {
1180 struct inode *inode = file_inode(file);
1181 struct ceph_inode_info *ci = ceph_inode(inode);
1182 struct list_head *head = &ci->i_unsafe_dirops;
1183 struct ceph_mds_request *req;
1184 u64 last_tid;
1185 int ret = 0;
1186
1187 dout("dir_fsync %p\n", inode);
1188 ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
1189 if (ret)
1190 return ret;
1191 mutex_lock(&inode->i_mutex);
1192
1193 spin_lock(&ci->i_unsafe_lock);
1194 if (list_empty(head))
1195 goto out;
1196
1197 req = list_entry(head->prev,
1198 struct ceph_mds_request, r_unsafe_dir_item);
1199 last_tid = req->r_tid;
1200
1201 do {
1202 ceph_mdsc_get_request(req);
1203 spin_unlock(&ci->i_unsafe_lock);
1204
1205 dout("dir_fsync %p wait on tid %llu (until %llu)\n",
1206 inode, req->r_tid, last_tid);
1207 if (req->r_timeout) {
1208 ret = wait_for_completion_timeout(
1209 &req->r_safe_completion, req->r_timeout);
1210 if (ret > 0)
1211 ret = 0;
1212 else if (ret == 0)
1213 ret = -EIO; /* timed out */
1214 } else {
1215 wait_for_completion(&req->r_safe_completion);
1216 }
1217 ceph_mdsc_put_request(req);
1218
1219 spin_lock(&ci->i_unsafe_lock);
1220 if (ret || list_empty(head))
1221 break;
1222 req = list_entry(head->next,
1223 struct ceph_mds_request, r_unsafe_dir_item);
1224 } while (req->r_tid < last_tid);
1225 out:
1226 spin_unlock(&ci->i_unsafe_lock);
1227 mutex_unlock(&inode->i_mutex);
1228
1229 return ret;
1230 }
1231
1232 /*
1233 * We maintain a private dentry LRU.
1234 *
1235 * FIXME: this needs to be changed to a per-mds lru to be useful.
1236 */
1237 void ceph_dentry_lru_add(struct dentry *dn)
1238 {
1239 struct ceph_dentry_info *di = ceph_dentry(dn);
1240 struct ceph_mds_client *mdsc;
1241
1242 dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
1243 dn->d_name.len, dn->d_name.name);
1244 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1245 spin_lock(&mdsc->dentry_lru_lock);
1246 list_add_tail(&di->lru, &mdsc->dentry_lru);
1247 mdsc->num_dentry++;
1248 spin_unlock(&mdsc->dentry_lru_lock);
1249 }
1250
1251 void ceph_dentry_lru_touch(struct dentry *dn)
1252 {
1253 struct ceph_dentry_info *di = ceph_dentry(dn);
1254 struct ceph_mds_client *mdsc;
1255
1256 dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
1257 dn->d_name.len, dn->d_name.name, di->offset);
1258 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1259 spin_lock(&mdsc->dentry_lru_lock);
1260 list_move_tail(&di->lru, &mdsc->dentry_lru);
1261 spin_unlock(&mdsc->dentry_lru_lock);
1262 }
1263
1264 void ceph_dentry_lru_del(struct dentry *dn)
1265 {
1266 struct ceph_dentry_info *di = ceph_dentry(dn);
1267 struct ceph_mds_client *mdsc;
1268
1269 dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
1270 dn->d_name.len, dn->d_name.name);
1271 mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
1272 spin_lock(&mdsc->dentry_lru_lock);
1273 list_del_init(&di->lru);
1274 mdsc->num_dentry--;
1275 spin_unlock(&mdsc->dentry_lru_lock);
1276 }
1277
1278 /*
1279 * Return name hash for a given dentry. This is dependent on
1280 * the parent directory's hash function.
1281 */
1282 unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
1283 {
1284 struct ceph_inode_info *dci = ceph_inode(dir);
1285
1286 switch (dci->i_dir_layout.dl_dir_hash) {
1287 case 0: /* for backward compat */
1288 case CEPH_STR_HASH_LINUX:
1289 return dn->d_name.hash;
1290
1291 default:
1292 return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
1293 dn->d_name.name, dn->d_name.len);
1294 }
1295 }
1296
1297 const struct file_operations ceph_dir_fops = {
1298 .read = ceph_read_dir,
1299 .iterate = ceph_readdir,
1300 .llseek = ceph_dir_llseek,
1301 .open = ceph_open,
1302 .release = ceph_release,
1303 .unlocked_ioctl = ceph_ioctl,
1304 .fsync = ceph_dir_fsync,
1305 };
1306
1307 const struct inode_operations ceph_dir_iops = {
1308 .lookup = ceph_lookup,
1309 .permission = ceph_permission,
1310 .getattr = ceph_getattr,
1311 .setattr = ceph_setattr,
1312 .setxattr = ceph_setxattr,
1313 .getxattr = ceph_getxattr,
1314 .listxattr = ceph_listxattr,
1315 .removexattr = ceph_removexattr,
1316 .get_acl = ceph_get_acl,
1317 .set_acl = ceph_set_acl,
1318 .mknod = ceph_mknod,
1319 .symlink = ceph_symlink,
1320 .mkdir = ceph_mkdir,
1321 .link = ceph_link,
1322 .unlink = ceph_unlink,
1323 .rmdir = ceph_unlink,
1324 .rename = ceph_rename,
1325 .create = ceph_create,
1326 .atomic_open = ceph_atomic_open,
1327 };
1328
1329 const struct dentry_operations ceph_dentry_ops = {
1330 .d_revalidate = ceph_d_revalidate,
1331 .d_release = ceph_d_release,
1332 .d_prune = ceph_d_prune,
1333 };
1334
1335 const struct dentry_operations ceph_snapdir_dentry_ops = {
1336 .d_revalidate = ceph_snapdir_d_revalidate,
1337 .d_release = ceph_d_release,
1338 };
1339
1340 const struct dentry_operations ceph_snap_dentry_ops = {
1341 .d_release = ceph_d_release,
1342 .d_prune = ceph_d_prune,
1343 };