ceph: handle racing calls to ceph_init_dentry
fs/ceph/dir.c
#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */
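
/*
 * Illustrative sketch (not part of the build): the two request shapes
 * described above, using helpers that appear later in this file.  A
 * strict inode op fills in r_inode only; a dentry op fills in r_dentry,
 * giving the MDS a single path component relative to the parent's ino.
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR,
 *				       USE_ANY_MDS);
 *	req->r_inode = inode;		  (strict inode op, no path)
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP,
 *				       USE_ANY_MDS);
 *	req->r_dentry = dget(dentry);	  (one component from parent dir)
 */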

const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;

/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (dentry->d_parent == NULL ||   /* nfs fh_to_dentry */
	    ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
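
/*
 * Usage sketch: because ceph_init_dentry() resolves races under
 * d_lock (the loser simply frees its allocation above), callers such
 * as ceph_lookup() below can invoke it unconditionally:
 *
 *	err = ceph_init_dentry(dentry);
 *	if (err < 0)
 *		return ERR_PTR(err);
 *	di = ceph_dentry(dentry);	  (same di on both sides of a race)
 */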

/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}
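
/*
 * Example (sketch): ceph_make_fpos(), defined in super.h, is assumed
 * here to be the inverse of the two helpers above, packing the frag
 * into the high 32 bits of f_pos:
 *
 *	loff_t pos = ceph_make_fpos(frag, off);
 *	BUG_ON(fpos_frag(pos) != frag);
 *	BUG_ON(fpos_off(pos) != off);
 */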

/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * I_COMPLETE indicates we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *filp,
			    void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct dentry *parent = filp->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
	     last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (filp->f_pos == 2 || last == NULL ||
	    filp->f_pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (!d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    filp->f_pos <= di->offset)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     filp->f_pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	filp->f_pos = di->offset;
	err = filldir(dirent, dentry->d_name.name,
		      dentry->d_name.len, di->offset,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12);

	if (last) {
		if (err < 0) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = di->offset;
		} else {
			dput(last);
		}
	}
	last = dentry;

	if (err < 0)
		goto out;

	filp->f_pos++;

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
		dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}
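
/*
 * Sketch of the intended calling pattern (see ceph_readdir() below):
 * -EAGAIN means I_COMPLETE was lost mid-scan, and the caller falls
 * back to a synchronous, MDS-backed readdir; any other return is
 * final:
 *
 *	err = __dcache_readdir(filp, dirent, filldir);
 *	if (err != -EAGAIN)
 *		return err;
 *	... proceed with a normal readdir ...
 */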

/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}

static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct ceph_file_info *fi = filp->private_data;
	struct inode *inode = filp->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(filp->f_pos);
	int off = fpos_off(filp->f_pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;
	const int max_entries = fsc->mount_options->max_readdir;
	const int max_bytes = fsc->mount_options->max_readdir_bytes;

	dout("readdir %p filp %p frag %u off %u\n", inode, filp, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (filp->f_pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = ci->i_release_count;

		dout("readdir off 0 -> '.'\n");
		if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0),
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 1;
		off = 1;
	}
	if (filp->f_pos == 1) {
		ino_t ino = filp->f_dentry->d_parent->d_inode->i_ino;
		dout("readdir off 1 -> '..'\n");
		if (filldir(dirent, "..", 2, ceph_make_fpos(0, 1),
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12) < 0)
			return 0;
		filp->f_pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&inode->i_lock);
	if ((filp->f_pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		spin_unlock(&inode->i_lock);
		err = __dcache_readdir(filp, dirent, filldir);
		if (err != -EAGAIN)
			return err;
	} else {
		spin_unlock(&inode->i_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		/* requery frag tree, as the frag topology may have changed */
		frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(filp->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
		req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
		req->r_num_caps = max_entries + 1;
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate\n");
			fi->dir_release_count--;    /* preclude I_COMPLETE */
		}

		/* note next offset and last dentry name */
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			rinfo = &req->r_reply_info;
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		u64 pos = ceph_make_fpos(frag, off);
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (filldir(dirent,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    pos,
			    ceph_translate_ino(inode->i_sb, ino), ftype) < 0) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		filp->f_pos = pos + 1;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		filp->f_pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&inode->i_lock);
	if (ci->i_release_count == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		/* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
		ci->i_max_offset = filp->f_pos;
	}
	spin_unlock(&inode->i_lock);

	dout("readdir %p filp %p done.\n", inode, filp);
	return 0;
}
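
/*
 * Worked example (hypothetical frag values f0 < f1): f_pos advances
 * across a fragmented directory as
 *
 *	frag f0:  make_fpos(f0, 2), make_fpos(f0, 3), ...
 *	frag f1:  make_fpos(f1, 0), make_fpos(f1, 1), ...
 *
 * Offsets 0 and 1 are reserved for "." and ".." in the first frag
 * only, which is why reset_readdir() below restarts next_offset at 2.
 */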

static void reset_readdir(struct ceph_file_info *fi)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	fi->next_offset = 2;  /* compensate for . and .. */
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}

static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int origin)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = offset;
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fpos_frag(old_offset) ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi);
		}

		/* bump dir_release_count if we did a forward seek */
		if (offset > old_offset)
			fi->dir_release_count--;
	}
	mutex_unlock(&inode->i_mutex);
	return retval;
}

/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode;

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}

/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}
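
/*
 * Example (sketch): how a caller consumes the three possible results
 * (ERR_PTR, a spliced dentry, or NULL meaning "keep the dentry the
 * VFS passed in"), as ceph_lookup() does below:
 *
 *	dentry = ceph_finish_lookup(req, dentry, err);
 *	ceph_mdsc_put_request(req);
 *	return dentry;		  (NULL tells the VFS to use its own)
 */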

static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}

/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  struct nameidata *nd)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* open (but not create!) intent? */
	if (nd &&
	    (nd->flags & LOOKUP_OPEN) &&
	    (nd->flags & LOOKUP_CONTINUE) == 0 &&  /* only open last component */
	    !(nd->intent.open.flags & O_CREAT)) {
		int mode = nd->intent.open.create_mode & ~current->fs->umask;
		return ceph_lookup_open(dir, dentry, nd, mode, 1);
	}

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&dir->i_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    (ci->i_ceph_flags & CEPH_I_COMPLETE) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&dir->i_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&dir->i_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}

/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, NULL);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}

static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      int mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%o rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_create(struct inode *dir, struct dentry *dentry, int mode,
		       struct nameidata *nd)
{
	dout("create in dir %p dentry %p name '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	if (nd) {
		BUG_ON((nd->flags & LOOKUP_OPEN) == 0);
		dentry = ceph_lookup_open(dir, dentry, nd, mode, 0);
		/* hrm, what should i do here if we get aliased? */
		if (IS_ERR(dentry))
			return PTR_ERR(dentry);
		return 0;
	}

	/* fall back to mknod */
	return ceph_mknod(dir, dentry, (mode & ~S_IFMT) | S_IFREG, 0);
}

static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
	if (err)
		d_drop(dentry);
	return err;
}

static int ceph_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%o\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (err < 0)
		d_drop(dentry);
	return err;
}

static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&inode->i_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&inode->i_lock);
	return drop;
}
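
/*
 * Worked example (hypothetical cap state): for a file with
 * i_nlink == 1 that is still open for read, so that
 * __ceph_caps_wanted() returns CEPH_CAP_FILE_RD, the mask becomes
 *
 *	drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL
 *	       | ~(CEPH_CAP_FILE_RD | CEPH_CAP_PIN);
 *
 * i.e. everything except PIN and the caps the open file still wants.
 */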

/*
 * rmdir and unlink differ only in the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = ((dentry->d_inode->i_mode & S_IFMT) == S_IFDIR) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else
		goto out;
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}

static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_i_clear(new_dir, CEPH_I_COMPLETE);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}

/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di && di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_cap_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_cap_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}
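
/*
 * Timing sketch (hypothetical numbers): for a 30s lease granted at
 * t=0, with d_time at the expiry and lease_renew_after roughly at the
 * halfway point per the comment above:
 *
 *	t = 10s: valid, no renewal traffic
 *	t = 20s: still valid, but one CEPH_MDS_LEASE_RENEW is sent
 *	t = 35s: invalid; the caller falls back to dir_lease_is_valid()
 */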

/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&dir->i_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&dir->i_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}

/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *dir;

	if (nd && nd->flags & LOOKUP_RCU)
		return -ECHILD;

	dir = dentry->d_parent->d_inode;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		goto out_touch;
	}
	if (dentry->d_inode && ceph_snap(dentry->d_inode) == CEPH_SNAPDIR)
		goto out_touch;

	if (dentry_lease_is_valid(dentry) ||
	    dir_lease_is_valid(dir, dentry))
		goto out_touch;

	dout("d_revalidate %p invalid\n", dentry);
	d_drop(dentry);
	return 0;
out_touch:
	ceph_dentry_lru_touch(dentry);
	return 1;
}

/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	if (di) {
		ceph_dentry_lru_del(dentry);
		if (di->lease_session)
			ceph_put_mds_session(di->lease_session);
		kmem_cache_free(ceph_dentry_cachep, di);
		dentry->d_fsdata = NULL;
	}
}

static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     struct nameidata *nd)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}

/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file->f_dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				 "entries:   %20lld\n"
				 " files:    %20lld\n"
				 " subdirs:  %20lld\n"
				 "rentries:  %20lld\n"
				 " rfiles:   %20lld\n"
				 " rsubdirs: %20lld\n"
				 "rbytes:    %20lld\n"
				 "rctime:    %10ld.%09ld\n",
				 ci->i_files + ci->i_subdirs,
				 ci->i_files,
				 ci->i_subdirs,
				 ci->i_rfiles + ci->i_rsubdirs,
				 ci->i_rfiles,
				 ci->i_rsubdirs,
				 ci->i_rbytes,
				 (long)ci->i_rctime.tv_sec,
				 (long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}
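
/*
 * Usage sketch: with the client mounted '-o dirstat', an ordinary
 * read(2) on a directory fd returns the rstats text built above:
 *
 *	fd = open("/mnt/ceph/some/dir", O_RDONLY);
 *	n = read(fd, buf, sizeof(buf));	  ("entries: ...", "rbytes: ...")
 *
 * Without the mount option, reading a directory fails with -EISDIR,
 * as on most local filesystems.
 */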

/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, int datasync)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);
		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		spin_lock(&ci->i_unsafe_lock);
		ceph_mdsc_put_request(req);

		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	return ret;
}

/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_add_tail(&di->lru, &mdsc->dentry_lru);
		mdsc->num_dentry++;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_move_tail(&di->lru, &mdsc->dentry_lru);
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	if (di) {
		mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
		spin_lock(&mdsc->dentry_lru_lock);
		list_del_init(&di->lru);
		mdsc->num_dentry--;
		spin_unlock(&mdsc->dentry_lru_lock);
	}
}

/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct dentry *dn)
{
	struct inode *dir = dn->d_parent->d_inode;
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}
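
/*
 * Example (sketch): a directory whose layout selects, say,
 * CEPH_STR_HASH_RJENKINS hashes by name content,
 *
 *	hash = ceph_dentry_hash(dn);
 *	      == ceph_str_hash(CEPH_STR_HASH_RJENKINS,
 *			       dn->d_name.name, dn->d_name.len);
 *
 * while the default (0 or CEPH_STR_HASH_LINUX) just reuses the hash
 * the dcache already computed in dn->d_name.hash.
 */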

const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.readdir = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
};