#include <linux/ceph/ceph_debug.h>

#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "super.h"
#include "mds_client.h"

/*
 * Directory operations: readdir, lookup, create, link, unlink,
 * rename, etc.
 */

/*
 * Ceph MDS operations are specified in terms of a base ino and
 * relative path.  Thus, the client can specify an operation on a
 * specific inode (e.g., a getattr due to fstat(2)), or as a path
 * relative to, say, the root directory.
 *
 * Normally, we limit ourselves to strict inode ops (no path component)
 * or dentry operations (a single path component relative to an ino).  The
 * exception to this is open_root_dentry(), which will open the mount
 * point by name.
 */
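/*
 * For example (illustrative only): fstat(2) on an open file becomes a
 * getattr on that inode alone, while unlink("a") in directory D becomes
 * an unlink op on (D's ino, "a").
 */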
const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;
/*
 * Initialize ceph dentry state.
 */
int ceph_init_dentry(struct dentry *dentry)
{
	struct ceph_dentry_info *di;

	if (dentry->d_fsdata)
		return 0;

	di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
	if (!di)
		return -ENOMEM;          /* oh well */

	spin_lock(&dentry->d_lock);
	if (dentry->d_fsdata) {
		/* lost a race */
		kmem_cache_free(ceph_dentry_cachep, di);
		goto out_unlock;
	}

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
		d_set_d_op(dentry, &ceph_dentry_ops);
	else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
		d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
	else
		d_set_d_op(dentry, &ceph_snap_dentry_ops);

	di->dentry = dentry;
	di->lease_session = NULL;
	dentry->d_time = jiffies;
	/* avoid reordering d_fsdata setup so that the check above is safe */
	smp_mb();
	dentry->d_fsdata = di;
	ceph_dentry_lru_add(dentry);
out_unlock:
	spin_unlock(&dentry->d_lock);
	return 0;
}
struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
	struct inode *inode = NULL;

	if (!dentry)
		return NULL;

	spin_lock(&dentry->d_lock);
	if (!IS_ROOT(dentry)) {
		inode = dentry->d_parent->d_inode;
		ihold(inode);
	}
	spin_unlock(&dentry->d_lock);
	return inode;
}
/*
 * for readdir, we encode the directory frag and offset within that
 * frag into f_pos.
 */
static unsigned fpos_frag(loff_t p)
{
	return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
	return p & 0xffffffff;
}

static int fpos_cmp(loff_t l, loff_t r)
{
	int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
	if (v)
		return v;
	return (int)(fpos_off(l) - fpos_off(r));
}
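/*
 * Worked example (illustrative, assuming ceph_make_fpos() packs the
 * frag into the high 32 bits as the inverse of the helpers above):
 *
 *	loff_t pos = ceph_make_fpos(0x2a000000, 5);
 *	fpos_frag(pos);		-> 0x2a000000 (high 32 bits)
 *	fpos_off(pos);		-> 5          (low 32 bits)
 */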
/*
 * When possible, we try to satisfy a readdir by peeking at the
 * dcache.  We make this work by carefully ordering dentries on
 * d_u.d_child when we initially get results back from the MDS, and
 * falling back to a "normal" sync readdir if any dentries in the dir
 * are dropped.
 *
 * Complete dir indicates that we have all dentries in the dir.  It is
 * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
 * the MDS if/when the directory is modified).
 */
static int __dcache_readdir(struct file *file,  struct dir_context *ctx,
			    u32 shared_gen)
{
	struct ceph_file_info *fi = file->private_data;
	struct dentry *parent = file->f_dentry;
	struct inode *dir = parent->d_inode;
	struct list_head *p;
	struct dentry *dentry, *last;
	struct ceph_dentry_info *di;
	int err = 0;

	/* claim ref on last dentry we returned */
	last = fi->dentry;
	fi->dentry = NULL;

	dout("__dcache_readdir %p v%u at %llu (last %p)\n",
	     dir, shared_gen, ctx->pos, last);

	spin_lock(&parent->d_lock);

	/* start at beginning? */
	if (ctx->pos == 2 || last == NULL ||
	    ctx->pos < ceph_dentry(last)->offset) {
		if (list_empty(&parent->d_subdirs))
			goto out_unlock;
		p = parent->d_subdirs.prev;
		dout(" initial p %p/%p\n", p->prev, p->next);
	} else {
		p = last->d_u.d_child.prev;
	}

more:
	dentry = list_entry(p, struct dentry, d_u.d_child);
	di = ceph_dentry(dentry);
	while (1) {
		dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
		     d_unhashed(dentry) ? "!hashed" : "hashed",
		     parent->d_subdirs.prev, parent->d_subdirs.next);
		if (p == &parent->d_subdirs) {
			fi->flags |= CEPH_F_ATEND;
			goto out_unlock;
		}
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (di->lease_shared_gen == shared_gen &&
		    !d_unhashed(dentry) && dentry->d_inode &&
		    ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
		    ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
		    fpos_cmp(ctx->pos, di->offset) <= 0)
			break;
		dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, di->offset,
		     ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
		     !dentry->d_inode ? " null" : "");
		spin_unlock(&dentry->d_lock);
		p = p->prev;
		dentry = list_entry(p, struct dentry, d_u.d_child);
		di = ceph_dentry(dentry);
	}

	dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);

	dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
	     dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
	ctx->pos = di->offset;
	if (!dir_emit(ctx, dentry->d_name.name,
		      dentry->d_name.len,
		      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
		      dentry->d_inode->i_mode >> 12)) {
		if (last) {
			/* remember our position */
			fi->dentry = last;
			fi->next_offset = fpos_off(di->offset);
		}
		dput(dentry);
		return 0;
	}

	ctx->pos = di->offset + 1;

	if (last)
		dput(last);
	last = dentry;

	/* make sure a dentry wasn't dropped while we didn't have parent lock */
	if (!ceph_dir_is_complete(dir)) {
		dout(" lost dir complete on %p; falling back to mds\n", dir);
		err = -EAGAIN;
		goto out;
	}

	spin_lock(&parent->d_lock);
	p = p->prev;	/* advance to next dentry */
	goto more;

out_unlock:
	spin_unlock(&parent->d_lock);
out:
	if (last)
		dput(last);
	return err;
}
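/*
 * Note: the -EAGAIN above signals ceph_readdir() below that the dcache
 * walk raced with a dentry drop (dir completeness was lost); the caller
 * then falls back to a normal MDS readdir from the current position.
 */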
/*
 * make note of the last dentry we read, so we can
 * continue at the same lexicographical point,
 * regardless of what dir changes take place on the
 * server.
 */
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
			    int len)
{
	kfree(fi->last_name);
	fi->last_name = kmalloc(len+1, GFP_NOFS);
	if (!fi->last_name)
		return -ENOMEM;
	memcpy(fi->last_name, name, len);
	fi->last_name[len] = 0;
	dout("note_last_dentry '%s'\n", fi->last_name);
	return 0;
}
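/*
 * The name saved here becomes req->r_path2 of the next READDIR request
 * in ceph_readdir(), so the MDS resumes listing after this entry even
 * if the directory changed in the meantime.
 */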
static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	unsigned frag = fpos_frag(ctx->pos);
	int off = fpos_off(ctx->pos);
	int err;
	u32 ftype;
	struct ceph_mds_reply_info_parsed *rinfo;

	dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
	if (fi->flags & CEPH_F_ATEND)
		return 0;

	/* always start with . and .. */
	if (ctx->pos == 0) {
		/* note dir version at start of readdir so we can tell
		 * if any dentries get dropped */
		fi->dir_release_count = atomic_read(&ci->i_release_count);

		dout("readdir off 0 -> '.'\n");
		if (!dir_emit(ctx, ".", 1,
			    ceph_translate_ino(inode->i_sb, inode->i_ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 1;
		off = 1;
	}
	if (ctx->pos == 1) {
		ino_t ino = parent_ino(file->f_dentry);
		dout("readdir off 1 -> '..'\n");
		if (!dir_emit(ctx, "..", 2,
			    ceph_translate_ino(inode->i_sb, ino),
			    inode->i_mode >> 12))
			return 0;
		ctx->pos = 2;
		off = 2;
	}

	/* can we use the dcache? */
	spin_lock(&ci->i_ceph_lock);
	if ((ctx->pos == 2 || fi->dentry) &&
	    !ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
	    ceph_snap(inode) != CEPH_SNAPDIR &&
	    __ceph_dir_is_complete(ci) &&
	    __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
		u32 shared_gen = ci->i_shared_gen;
		spin_unlock(&ci->i_ceph_lock);
		err = __dcache_readdir(file, ctx, shared_gen);
		if (err != -EAGAIN)
			return err;
		frag = fpos_frag(ctx->pos);
		off = fpos_off(ctx->pos);
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
	if (fi->dentry) {
		err = note_last_dentry(fi, fi->dentry->d_name.name,
				       fi->dentry->d_name.len);
		if (err)
			return err;
		dput(fi->dentry);
		fi->dentry = NULL;
	}

	/* proceed with a normal readdir */

more:
	/* do we have the correct frag content buffered? */
	if (fi->frag != frag || fi->last_readdir == NULL) {
		struct ceph_mds_request *req;
		int op = ceph_snap(inode) == CEPH_SNAPDIR ?
			CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;

		/* discard old result, if any */
		if (fi->last_readdir) {
			ceph_mdsc_put_request(fi->last_readdir);
			fi->last_readdir = NULL;
		}

		dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
		     ceph_vinop(inode), frag, fi->last_name);
		req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
		if (IS_ERR(req))
			return PTR_ERR(req);
		err = ceph_alloc_readdir_reply_buffer(req, inode);
		if (err) {
			ceph_mdsc_put_request(req);
			return err;
		}
		req->r_inode = inode;
		ihold(inode);
		req->r_dentry = dget(file->f_dentry);
		/* hints to request -> mds selection code */
		req->r_direct_mode = USE_AUTH_MDS;
		req->r_direct_hash = ceph_frag_value(frag);
		req->r_direct_is_hash = true;
		req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
		req->r_readdir_offset = fi->next_offset;
		req->r_args.readdir.frag = cpu_to_le32(frag);
		err = ceph_mdsc_do_request(mdsc, NULL, req);
		if (err < 0) {
			ceph_mdsc_put_request(req);
			return err;
		}
		dout("readdir got and parsed readdir result=%d"
		     " on frag %x, end=%d, complete=%d\n", err, frag,
		     (int)req->r_reply_info.dir_end,
		     (int)req->r_reply_info.dir_complete);

		if (!req->r_did_prepopulate) {
			dout("readdir !did_prepopulate");
			/* preclude from marking dir complete */
			fi->dir_release_count--;
		}

		/* note next offset and last dentry name */
		rinfo = &req->r_reply_info;
		if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
			frag = le32_to_cpu(rinfo->dir_dir->frag);
			if (ceph_frag_is_leftmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
			off = fi->next_offset;
		}
		fi->frag = frag;
		fi->offset = fi->next_offset;
		fi->last_readdir = req;

		if (req->r_reply_info.dir_end) {
			kfree(fi->last_name);
			fi->last_name = NULL;
			if (ceph_frag_is_rightmost(frag))
				fi->next_offset = 2;
			else
				fi->next_offset = 0;
		} else {
			err = note_last_dentry(fi,
				       rinfo->dir_dname[rinfo->dir_nr-1],
				       rinfo->dir_dname_len[rinfo->dir_nr-1]);
			if (err)
				return err;
			fi->next_offset += rinfo->dir_nr;
		}
	}

	rinfo = &fi->last_readdir->r_reply_info;
	dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
	     rinfo->dir_nr, off, fi->offset);

	ctx->pos = ceph_make_fpos(frag, off);
	while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
		struct ceph_mds_reply_inode *in =
			rinfo->dir_in[off - fi->offset].in;
		struct ceph_vino vino;
		ino_t ino;

		dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
		     off, off - fi->offset, rinfo->dir_nr, ctx->pos,
		     rinfo->dir_dname_len[off - fi->offset],
		     rinfo->dir_dname[off - fi->offset], in);
		BUG_ON(!in);
		ftype = le32_to_cpu(in->mode) >> 12;
		vino.ino = le64_to_cpu(in->ino);
		vino.snap = le64_to_cpu(in->snapid);
		ino = ceph_vino_to_ino(vino);
		if (!dir_emit(ctx,
			    rinfo->dir_dname[off - fi->offset],
			    rinfo->dir_dname_len[off - fi->offset],
			    ceph_translate_ino(inode->i_sb, ino), ftype)) {
			dout("filldir stopping us...\n");
			return 0;
		}
		off++;
		ctx->pos++;
	}

	if (fi->last_name) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
		goto more;
	}

	/* more frags? */
	if (!ceph_frag_is_rightmost(frag)) {
		frag = ceph_frag_next(frag);
		off = 0;
		ctx->pos = ceph_make_fpos(frag, off);
		dout("readdir next frag is %x\n", frag);
		goto more;
	}
	fi->flags |= CEPH_F_ATEND;

	/*
	 * if dir_release_count still matches the dir, no dentries
	 * were released during the whole readdir, and we should have
	 * the complete dir contents in our cache.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
		dout(" marking %p complete\n", inode);
		__ceph_dir_set_complete(ci, fi->dir_release_count);
		ci->i_max_offset = ctx->pos;
	}
	spin_unlock(&ci->i_ceph_lock);

	dout("readdir %p file %p done.\n", inode, file);
	return 0;
}
static void reset_readdir(struct ceph_file_info *fi, unsigned frag)
{
	if (fi->last_readdir) {
		ceph_mdsc_put_request(fi->last_readdir);
		fi->last_readdir = NULL;
	}
	kfree(fi->last_name);
	fi->last_name = NULL;
	if (ceph_frag_is_leftmost(frag))
		fi->next_offset = 2;  /* compensate for . and .. */
	else
		fi->next_offset = 0;
	if (fi->dentry) {
		dput(fi->dentry);
		fi->dentry = NULL;
	}
	fi->flags &= ~CEPH_F_ATEND;
}
static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file->f_mapping->host;
	loff_t old_offset = ceph_make_fpos(fi->frag, fi->next_offset);
	loff_t retval;

	mutex_lock(&inode->i_mutex);
	retval = -EINVAL;
	switch (whence) {
	case SEEK_END:
		offset += inode->i_size + 2;   /* FIXME */
		break;
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		break;
	default:
		goto out;
	}

	if (offset >= 0) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
			fi->flags &= ~CEPH_F_ATEND;
		}
		retval = offset;

		/*
		 * discard buffered readdir content on seekdir(0), or
		 * seek to new frag, or seek prior to current chunk.
		 */
		if (offset == 0 ||
		    fpos_frag(offset) != fi->frag ||
		    fpos_off(offset) < fi->offset) {
			dout("dir_llseek dropping %p content\n", file);
			reset_readdir(fi, fpos_frag(offset));
		}

		/* bump dir_release_count if we did a forward seek */
		if (fpos_cmp(offset, old_offset) > 0)
			fi->dir_release_count--;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
/*
 * Handle lookups for the hidden .snap directory.
 */
int ceph_handle_snapdir(struct ceph_mds_request *req,
			struct dentry *dentry, int err)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
	struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */

	/* .snap dir? */
	if (err == -ENOENT &&
	    ceph_snap(parent) == CEPH_NOSNAP &&
	    strcmp(dentry->d_name.name,
		   fsc->mount_options->snapdir_name) == 0) {
		struct inode *inode = ceph_get_snapdir(parent);
		dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
		     dentry, dentry->d_name.len, dentry->d_name.name, inode);
		BUG_ON(!d_unhashed(dentry));
		d_add(dentry, inode);
		err = 0;
	}
	return err;
}
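/*
 * Example (illustrative): looking up "dir/.snap" on a mount using the
 * default snapdir name gets -ENOENT from the MDS, since the snapdir is
 * not a real server-side dentry; the branch above splices in the
 * locally fabricated snapdir inode and clears the error.
 */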
/*
 * Figure out final result of a lookup/open request.
 *
 * Mainly, make sure we return the final req->r_dentry (if it already
 * existed) in place of the original VFS-provided dentry when they
 * differ.
 *
 * Gracefully handle the case where the MDS replies with -ENOENT and
 * no trace (which it may do, at its discretion, e.g., if it doesn't
 * care to issue a lease on the negative dentry).
 */
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
				  struct dentry *dentry, int err)
{
	if (err == -ENOENT) {
		/* no trace? */
		err = 0;
		if (!req->r_reply_info.head->is_dentry) {
			dout("ENOENT and no trace, dentry %p inode %p\n",
			     dentry, dentry->d_inode);
			if (dentry->d_inode) {
				d_drop(dentry);
				err = -ENOENT;
			} else {
				d_add(dentry, NULL);
			}
		}
	}
	if (err)
		dentry = ERR_PTR(err);
	else if (dentry != req->r_dentry)
		dentry = dget(req->r_dentry);   /* we got spliced */
	else
		dentry = NULL;
	return dentry;
}
static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
	return ceph_ino(inode) == CEPH_INO_ROOT &&
		strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}
/*
 * Look up a single dir entry.  If there is a lookup intent, inform
 * the MDS so that it gets our 'caps wanted' value in a single op.
 */
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int op;
	int err;

	dout("lookup %p dentry %p '%.*s'\n",
	     dir, dentry, dentry->d_name.len, dentry->d_name.name);

	if (dentry->d_name.len > NAME_MAX)
		return ERR_PTR(-ENAMETOOLONG);

	err = ceph_init_dentry(dentry);
	if (err < 0)
		return ERR_PTR(err);

	/* can we conclude ENOENT locally? */
	if (dentry->d_inode == NULL) {
		struct ceph_inode_info *ci = ceph_inode(dir);
		struct ceph_dentry_info *di = ceph_dentry(dentry);

		spin_lock(&ci->i_ceph_lock);
		dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
		if (strncmp(dentry->d_name.name,
			    fsc->mount_options->snapdir_name,
			    dentry->d_name.len) &&
		    !is_root_ceph_dentry(dir, dentry) &&
		    __ceph_dir_is_complete(ci) &&
		    (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
			spin_unlock(&ci->i_ceph_lock);
			dout(" dir %p complete, -ENOENT\n", dir);
			d_add(dentry, NULL);
			di->lease_shared_gen = ci->i_shared_gen;
			return NULL;
		}
		spin_unlock(&ci->i_ceph_lock);
	}

	op = ceph_snap(dir) == CEPH_SNAPDIR ?
		CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
	req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	/* we only need inode linkage */
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_locked_dir = dir;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	err = ceph_handle_snapdir(req, dentry, err);
	dentry = ceph_finish_lookup(req, dentry, err);
	ceph_mdsc_put_request(req);  /* will dput(dentry) */
	dout("lookup result=%p\n", dentry);
	return dentry;
}
/*
 * If we do a create but get no trace back from the MDS, follow up with
 * a lookup (the VFS expects us to link up the provided dentry).
 */
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
	struct dentry *result = ceph_lookup(dir, dentry, 0);

	if (result && !IS_ERR(result)) {
		/*
		 * We created the item, then did a lookup, and found
		 * it was already linked to another inode we already
		 * had in our cache (and thus got spliced).  Link our
		 * dentry to that inode, but don't hash it, just in
		 * case the VFS wants to dereference it.
		 */
		BUG_ON(!result->d_inode);
		d_instantiate(dentry, result->d_inode);
		return 0;
	}
	return PTR_ERR(result);
}
static int ceph_mknod(struct inode *dir, struct dentry *dentry,
		      umode_t mode, dev_t rdev)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
	     dir, dentry, mode, rdev);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mknod.mode = cpu_to_le32(mode);
	req->r_args.mknod.rdev = cpu_to_le32(rdev);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}
static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
		       bool excl)
{
	return ceph_mknod(dir, dentry, mode, 0);
}
static int ceph_symlink(struct inode *dir, struct dentry *dentry,
			const char *dest)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_path2 = kstrdup(dest, GFP_NOFS);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}
static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* mkdir .snap/foo is a MKSNAP */
		op = CEPH_MDS_OP_MKSNAP;
		dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
		     dentry->d_name.len, dentry->d_name.name, dentry);
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
		op = CEPH_MDS_OP_MKDIR;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_args.mkdir.mode = cpu_to_le32(mode);
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);
	ceph_mdsc_put_request(req);
out:
	if (!err)
		ceph_init_acl(dentry, dentry->d_inode, dir);
	else
		d_drop(dentry);
	return err;
}
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
		     struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(dir) != CEPH_NOSNAP)
		return -EROFS;

	dout("link in dir %p old_dentry %p dentry %p\n", dir,
	     old_dentry, dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		d_drop(dentry);
		return PTR_ERR(req);
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_SHARED on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (err) {
		d_drop(dentry);
	} else if (!req->r_reply_info.head->is_dentry) {
		ihold(old_dentry->d_inode);
		d_instantiate(dentry, old_dentry->d_inode);
	}
	ceph_mdsc_put_request(req);
	return err;
}
/*
 * For a soon-to-be unlinked file, drop the AUTH_RDCACHE caps.  If it
 * looks like the link count will hit 0, drop any other caps (other
 * than PIN) we don't specifically want (due to the file still being
 * open).
 */
static int drop_caps_for_unlink(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;

	spin_lock(&ci->i_ceph_lock);
	if (inode->i_nlink == 1) {
		drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
		ci->i_ceph_flags |= CEPH_I_NODELAY;
	}
	spin_unlock(&ci->i_ceph_lock);
	return drop;
}
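/*
 * Worked example (illustrative): for a file with i_nlink == 1 that is
 * still open for read (wanted == CEPH_CAP_PIN|CEPH_CAP_FILE_RD), the
 * mask becomes the LINK caps plus everything outside wanted|PIN, i.e.
 * we offer to drop all caps except PIN and FILE_RD.
 */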
/*
 * rmdir and unlink differ only by the metadata op code
 */
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = dentry->d_inode;
	struct ceph_mds_request *req;
	int err = -EROFS;
	int op;

	if (ceph_snap(dir) == CEPH_SNAPDIR) {
		/* rmdir .snap/foo is RMSNAP */
		dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
		     dentry->d_name.name, dentry);
		op = CEPH_MDS_OP_RMSNAP;
	} else if (ceph_snap(dir) == CEPH_NOSNAP) {
		dout("unlink/rmdir dir %p dn %p inode %p\n",
		     dir, dentry, inode);
		op = S_ISDIR(dentry->d_inode->i_mode) ?
			CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
	} else {
		goto out;
	}
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	req->r_locked_dir = dir;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_inode_drop = drop_caps_for_unlink(inode);
	err = ceph_mdsc_do_request(mdsc, dir, req);
	if (!err && !req->r_reply_info.head->is_dentry)
		d_delete(dentry);
	ceph_mdsc_put_request(req);
out:
	return err;
}
static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
		       struct inode *new_dir, struct dentry *new_dentry)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(old_dir) != ceph_snap(new_dir))
		return -EXDEV;
	if (ceph_snap(old_dir) != CEPH_NOSNAP ||
	    ceph_snap(new_dir) != CEPH_NOSNAP)
		return -EROFS;
	dout("rename dir %p dentry %p to dir %p dentry %p\n",
	     old_dir, old_dentry, new_dir, new_dentry);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_dentry = dget(new_dentry);
	req->r_num_caps = 2;
	req->r_old_dentry = dget(old_dentry);
	req->r_old_dentry_dir = old_dir;
	req->r_locked_dir = new_dir;
	req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
	req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
	req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
	/* release LINK_RDCACHE on source inode (mds will lock it) */
	req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
	if (new_dentry->d_inode)
		req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
	err = ceph_mdsc_do_request(mdsc, old_dir, req);
	if (!err && !req->r_reply_info.head->is_dentry) {
		/*
		 * Normally d_move() is done by fill_trace (called by
		 * do_request, above).  If there is no trace, we need
		 * to do it here.
		 */

		/* d_move screws up d_subdirs order */
		ceph_dir_clear_complete(new_dir);

		d_move(old_dentry, new_dentry);

		/* ensure target dentry is invalidated, despite
		   rehashing bug in vfs_rename_dir */
		ceph_invalidate_dentry_lease(new_dentry);
	}
	ceph_mdsc_put_request(req);
	return err;
}
/*
 * Ensure a dentry lease will no longer revalidate.
 */
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_time = jiffies;
	ceph_dentry(dentry)->lease_shared_gen = 0;
	spin_unlock(&dentry->d_lock);
}
/*
 * Check if dentry lease is valid.  If not, delete the lease.  Try to
 * renew if the lease is more than half up.
 */
static int dentry_lease_is_valid(struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *s;
	int valid = 0;
	u32 gen;
	unsigned long ttl;
	struct ceph_mds_session *session = NULL;
	struct inode *dir = NULL;
	u32 seq = 0;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (di->lease_session) {
		s = di->lease_session;
		spin_lock(&s->s_gen_ttl_lock);
		gen = s->s_cap_gen;
		ttl = s->s_cap_ttl;
		spin_unlock(&s->s_gen_ttl_lock);

		if (di->lease_gen == gen &&
		    time_before(jiffies, dentry->d_time) &&
		    time_before(jiffies, ttl)) {
			valid = 1;
			if (di->lease_renew_after &&
			    time_after(jiffies, di->lease_renew_after)) {
				/* we should renew */
				dir = dentry->d_parent->d_inode;
				session = ceph_get_mds_session(s);
				seq = di->lease_seq;
				di->lease_renew_after = 0;
				di->lease_renew_from = jiffies;
			}
		}
	}
	spin_unlock(&dentry->d_lock);

	if (session) {
		ceph_mdsc_lease_send_msg(session, dir, dentry,
					 CEPH_MDS_LEASE_RENEW, seq);
		ceph_put_mds_session(session);
	}
	dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
	return valid;
}
/*
 * Check if directory-wide content lease/cap is valid.
 */
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int valid = 0;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_shared_gen == di->lease_shared_gen)
		valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
	spin_unlock(&ci->i_ceph_lock);
	dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
	     dir, (unsigned)ci->i_shared_gen, dentry,
	     (unsigned)di->lease_shared_gen, valid);
	return valid;
}
/*
 * Check if cached dentry can be trusted.
 */
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
	int valid = 0;
	struct inode *dir;

	if (flags & LOOKUP_RCU)
		return -ECHILD;

	dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
	     dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
	     ceph_dentry(dentry)->offset);

	dir = ceph_get_dentry_parent_inode(dentry);

	/* always trust cached snapped dentries, snapdir dentry */
	if (ceph_snap(dir) != CEPH_NOSNAP) {
		dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
		     dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
		valid = 1;
	} else if (dentry->d_inode &&
		   ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
		valid = 1;
	} else if (dentry_lease_is_valid(dentry) ||
		   dir_lease_is_valid(dir, dentry)) {
		if (dentry->d_inode)
			valid = ceph_is_any_caps(dentry->d_inode);
		else
			valid = 1;
	}

	dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
	if (valid) {
		ceph_dentry_lru_touch(dentry);
	} else {
		ceph_dir_clear_complete(dir);
		d_drop(dentry);
	}
	iput(dir);
	return valid;
}
/*
 * Release our ceph_dentry_info.
 */
static void ceph_d_release(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	dout("d_release %p\n", dentry);
	ceph_dentry_lru_del(dentry);
	if (di->lease_session)
		ceph_put_mds_session(di->lease_session);
	kmem_cache_free(ceph_dentry_cachep, di);
	dentry->d_fsdata = NULL;
}
static int ceph_snapdir_d_revalidate(struct dentry *dentry,
				     unsigned int flags)
{
	/*
	 * Eventually, we'll want to revalidate snapped metadata
	 * too... probably...
	 */
	return 1;
}
/*
 * When the VFS prunes a dentry from the cache, we need to clear the
 * complete flag on the parent directory.
 *
 * Called under dentry->d_lock.
 */
static void ceph_d_prune(struct dentry *dentry)
{
	dout("ceph_d_prune %p\n", dentry);

	/* do we have a valid parent? */
	if (IS_ROOT(dentry))
		return;

	/* if we are not hashed, we don't affect dir's completeness */
	if (d_unhashed(dentry))
		return;

	/*
	 * we hold d_lock, so d_parent is stable, and d_fsdata is never
	 * cleared until d_release
	 */
	ceph_dir_clear_complete(dentry->d_parent->d_inode);
}
/*
 * read() on a dir.  This weird interface hack only works if mounted
 * with '-o dirstat'.
 */
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
			     loff_t *ppos)
{
	struct ceph_file_info *cf = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	int left;
	const int bufsize = 1024;

	if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
		return -EISDIR;

	if (!cf->dir_info) {
		cf->dir_info = kmalloc(bufsize, GFP_NOFS);
		if (!cf->dir_info)
			return -ENOMEM;
		cf->dir_info_len =
			snprintf(cf->dir_info, bufsize,
				"entries:   %20lld\n"
				" files:    %20lld\n"
				" subdirs:  %20lld\n"
				"rentries:  %20lld\n"
				" rfiles:   %20lld\n"
				" rsubdirs: %20lld\n"
				"rbytes:    %20lld\n"
				"rctime:    %10ld.%09ld\n",
				ci->i_files + ci->i_subdirs,
				ci->i_files,
				ci->i_subdirs,
				ci->i_rfiles + ci->i_rsubdirs,
				ci->i_rfiles,
				ci->i_rsubdirs,
				ci->i_rbytes,
				(long)ci->i_rctime.tv_sec,
				(long)ci->i_rctime.tv_nsec);
	}

	if (*ppos >= cf->dir_info_len)
		return 0;
	size = min_t(unsigned, size, cf->dir_info_len-*ppos);
	left = copy_to_user(buf, cf->dir_info + *ppos, size);
	if (left == size)
		return -EFAULT;
	*ppos += (size - left);
	return size - left;
}
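/*
 * Usage note (illustrative): on a mount created with "-o dirstat",
 * reading a directory as if it were a file, e.g. "cat /mnt/mydir",
 * returns the formatted rstat summary built above.
 */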
/*
 * an fsync() on a dir will wait for any uncommitted directory
 * operations to commit.
 */
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
			  int datasync)
{
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct list_head *head = &ci->i_unsafe_dirops;
	struct ceph_mds_request *req;
	u64 last_tid;
	int ret = 0;

	dout("dir_fsync %p\n", inode);
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;
	mutex_lock(&inode->i_mutex);

	spin_lock(&ci->i_unsafe_lock);
	if (list_empty(head))
		goto out;

	req = list_entry(head->prev,
			 struct ceph_mds_request, r_unsafe_dir_item);
	last_tid = req->r_tid;

	do {
		ceph_mdsc_get_request(req);
		spin_unlock(&ci->i_unsafe_lock);

		dout("dir_fsync %p wait on tid %llu (until %llu)\n",
		     inode, req->r_tid, last_tid);
		if (req->r_timeout) {
			ret = wait_for_completion_timeout(
				&req->r_safe_completion, req->r_timeout);
			if (ret > 0)
				ret = 0;
			else if (ret == 0)
				ret = -EIO;  /* timed out */
		} else {
			wait_for_completion(&req->r_safe_completion);
		}
		ceph_mdsc_put_request(req);

		spin_lock(&ci->i_unsafe_lock);
		if (ret || list_empty(head))
			break;
		req = list_entry(head->next,
				 struct ceph_mds_request, r_unsafe_dir_item);
	} while (req->r_tid < last_tid);
out:
	spin_unlock(&ci->i_unsafe_lock);
	mutex_unlock(&inode->i_mutex);

	return ret;
}
/*
 * We maintain a private dentry LRU.
 *
 * FIXME: this needs to be changed to a per-mds lru to be useful.
 */
void ceph_dentry_lru_add(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_add_tail(&di->lru, &mdsc->dentry_lru);
	mdsc->num_dentry++;
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_touch(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
	     dn->d_name.len, dn->d_name.name, di->offset);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_move_tail(&di->lru, &mdsc->dentry_lru);
	spin_unlock(&mdsc->dentry_lru_lock);
}

void ceph_dentry_lru_del(struct dentry *dn)
{
	struct ceph_dentry_info *di = ceph_dentry(dn);
	struct ceph_mds_client *mdsc;

	dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
	     dn->d_name.len, dn->d_name.name);
	mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
	spin_lock(&mdsc->dentry_lru_lock);
	list_del_init(&di->lru);
	mdsc->num_dentry--;
	spin_unlock(&mdsc->dentry_lru_lock);
}
/*
 * Return name hash for a given dentry.  This is dependent on
 * the parent directory's hash function.
 */
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
	struct ceph_inode_info *dci = ceph_inode(dir);

	switch (dci->i_dir_layout.dl_dir_hash) {
	case 0:	/* for backward compat */
	case CEPH_STR_HASH_LINUX:
		return dn->d_name.hash;

	default:
		return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
				     dn->d_name.name, dn->d_name.len);
	}
}
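/*
 * Example (illustrative): a directory whose layout selects
 * CEPH_STR_HASH_RJENKINS hashes names via ceph_str_hash(), matching the
 * frag placement the MDS computed; the default case above simply reuses
 * the VFS-computed d_name.hash.
 */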
const struct file_operations ceph_dir_fops = {
	.read = ceph_read_dir,
	.iterate = ceph_readdir,
	.llseek = ceph_dir_llseek,
	.open = ceph_open,
	.release = ceph_release,
	.unlocked_ioctl = ceph_ioctl,
	.fsync = ceph_dir_fsync,
};

const struct inode_operations ceph_dir_iops = {
	.lookup = ceph_lookup,
	.permission = ceph_permission,
	.getattr = ceph_getattr,
	.setattr = ceph_setattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
	.set_acl = ceph_set_acl,
	.mknod = ceph_mknod,
	.symlink = ceph_symlink,
	.mkdir = ceph_mkdir,
	.link = ceph_link,
	.unlink = ceph_unlink,
	.rmdir = ceph_unlink,
	.rename = ceph_rename,
	.create = ceph_create,
	.atomic_open = ceph_atomic_open,
};

const struct dentry_operations ceph_dentry_ops = {
	.d_revalidate = ceph_d_revalidate,
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};

const struct dentry_operations ceph_snapdir_dentry_ops = {
	.d_revalidate = ceph_snapdir_d_revalidate,
	.d_release = ceph_d_release,
};

const struct dentry_operations ceph_snap_dentry_ops = {
	.d_release = ceph_d_release,
	.d_prune = ceph_d_prune,
};