#include <linux/ceph/ceph_debug.h>

#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/utsname.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
/*
 * A cluster of MDS (metadata server) daemons is responsible for
 * managing the file system namespace (the directory hierarchy and
 * inodes) and for coordinating shared access to storage.  Metadata is
 * partitioned hierarchically across a number of servers, and that
 * partition varies over time as the cluster adjusts the distribution
 * in order to balance load.
 *
 * The MDS client is primarily responsible for managing synchronous
 * metadata requests for operations like open, unlink, and so forth.
 * If there is an MDS failure, we find out about it when we (possibly
 * request and) receive a new MDS map, and can resubmit affected
 * requests.
 *
 * For the most part, though, we take advantage of a lossless
 * communications channel to the MDS, and do not need to worry about
 * timing out or resubmitting requests.
 *
 * We maintain a stateful "session" with each MDS we interact with.
 * Within each session, we send periodic heartbeat messages to ensure
 * any capabilities or leases we have been issued remain valid.  If
 * the session times out and goes stale, our leases and capabilities
 * are no longer valid.
 */
struct ceph_reconnect_state {
	struct ceph_pagelist *pagelist;
};

static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head);

static const struct ceph_connection_operations mds_con_ops;
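
/*
 * mds reply parsing
 *
 * Note: the ceph_decode_*_safe() and ceph_decode_need() helpers from
 * <linux/ceph/decode.h> bounds-check the reply buffer and jump to the
 * supplied label ("bad" below) when the message is too short, so each
 * parser needs only a single error path.
 */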
/*
 * parse individual inode info
 */
static int parse_reply_info_in(void **p, void *end,
			       struct ceph_mds_reply_info_in *info,
			       u64 features)
{
	int err = -EIO;

	info->in = *p;
	*p += sizeof(struct ceph_mds_reply_inode) +
		sizeof(*info->in->fragtree.splits) *
		le32_to_cpu(info->in->fragtree.nsplits);

	ceph_decode_32_safe(p, end, info->symlink_len, bad);
	ceph_decode_need(p, end, info->symlink_len, bad);
	info->symlink = *p;
	*p += info->symlink_len;

	if (features & CEPH_FEATURE_DIRLAYOUTHASH)
		ceph_decode_copy_safe(p, end, &info->dir_layout,
				      sizeof(info->dir_layout), bad);
	else
		memset(&info->dir_layout, 0, sizeof(info->dir_layout));

	ceph_decode_32_safe(p, end, info->xattr_len, bad);
	ceph_decode_need(p, end, info->xattr_len, bad);
	info->xattr_data = *p;
	*p += info->xattr_len;

	if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
		ceph_decode_64_safe(p, end, info->inline_version, bad);
		ceph_decode_32_safe(p, end, info->inline_len, bad);
		ceph_decode_need(p, end, info->inline_len, bad);
		info->inline_data = *p;
		*p += info->inline_len;
	} else {
		info->inline_version = CEPH_INLINE_NONE;
	}

	return 0;
bad:
	return err;
}
/*
 * parse a normal reply, which may contain a (dir+)dentry and/or a
 * target inode.
 */
static int parse_reply_info_trace(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	int err;

	if (info->head->is_dentry) {
		err = parse_reply_info_in(p, end, &info->diri, features);
		if (err < 0)
			goto out_bad;

		if (unlikely(*p + sizeof(*info->dirfrag) > end))
			goto bad;
		info->dirfrag = *p;
		*p += sizeof(*info->dirfrag) +
			sizeof(u32)*le32_to_cpu(info->dirfrag->ndist);
		if (unlikely(*p > end))
			goto bad;

		ceph_decode_32_safe(p, end, info->dname_len, bad);
		ceph_decode_need(p, end, info->dname_len, bad);
		info->dname = *p;
		*p += info->dname_len;
		info->dlease = *p;
		*p += sizeof(*info->dlease);
	}

	if (info->head->is_target) {
		err = parse_reply_info_in(p, end, &info->targeti, features);
		if (err < 0)
			goto out_bad;
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing mds trace %d\n", err);
	return err;
}
/*
 * parse readdir results
 */
static int parse_reply_info_dir(void **p, void *end,
				struct ceph_mds_reply_info_parsed *info,
				u64 features)
{
	u32 num, i = 0;
	int err;

	info->dir_dir = *p;
	if (*p + sizeof(*info->dir_dir) > end)
		goto bad;
	*p += sizeof(*info->dir_dir) +
		sizeof(u32)*le32_to_cpu(info->dir_dir->ndist);
	if (*p > end)
		goto bad;

	ceph_decode_need(p, end, sizeof(num) + 2, bad);
	num = ceph_decode_32(p);
	info->dir_end = ceph_decode_8(p);
	info->dir_complete = ceph_decode_8(p);
	if (num == 0)
		goto done;

	BUG_ON(!info->dir_in);
	info->dir_dname = (void *)(info->dir_in + num);
	info->dir_dname_len = (void *)(info->dir_dname + num);
	info->dir_dlease = (void *)(info->dir_dname_len + num);
	if ((unsigned long)(info->dir_dlease + num) >
	    (unsigned long)info->dir_in + info->dir_buf_size) {
		pr_err("dir contents are larger than expected\n");
		WARN_ON(1);
		goto bad;
	}

	info->dir_nr = num;
	while (num) {
		/* dentry */
		ceph_decode_need(p, end, sizeof(u32)*2, bad);
		info->dir_dname_len[i] = ceph_decode_32(p);
		ceph_decode_need(p, end, info->dir_dname_len[i], bad);
		info->dir_dname[i] = *p;
		*p += info->dir_dname_len[i];
		dout("parsed dir dname '%.*s'\n", info->dir_dname_len[i],
		     info->dir_dname[i]);
		info->dir_dlease[i] = *p;
		*p += sizeof(struct ceph_mds_reply_lease);

		/* inode */
		err = parse_reply_info_in(p, end, &info->dir_in[i], features);
		if (err < 0)
			goto out_bad;
		i++;
		num--;
	}

done:
	if (*p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("problem parsing dir contents %d\n", err);
	return err;
}
/*
 * parse fcntl F_GETLK results
 */
static int parse_reply_info_filelock(void **p, void *end,
				     struct ceph_mds_reply_info_parsed *info,
				     u64 features)
{
	if (*p + sizeof(*info->filelock_reply) > end)
		goto bad;

	info->filelock_reply = *p;
	*p += sizeof(*info->filelock_reply);

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}
/*
 * parse create results
 */
static int parse_reply_info_create(void **p, void *end,
				   struct ceph_mds_reply_info_parsed *info,
				   u64 features)
{
	if (features & CEPH_FEATURE_REPLY_CREATE_INODE) {
		if (*p == end) {
			info->has_create_ino = false;
		} else {
			info->has_create_ino = true;
			info->ino = ceph_decode_64(p);
		}
	}

	if (unlikely(*p != end))
		goto bad;
	return 0;

bad:
	return -EIO;
}
/*
 * parse extra results
 */
static int parse_reply_info_extra(void **p, void *end,
				  struct ceph_mds_reply_info_parsed *info,
				  u64 features)
{
	if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
		return parse_reply_info_filelock(p, end, info, features);
	else if (info->head->op == CEPH_MDS_OP_READDIR ||
		 info->head->op == CEPH_MDS_OP_LSSNAP)
		return parse_reply_info_dir(p, end, info, features);
	else if (info->head->op == CEPH_MDS_OP_CREATE)
		return parse_reply_info_create(p, end, info, features);
	else
		return -EIO;
}
/*
 * parse entire mds reply
 */
static int parse_reply_info(struct ceph_msg *msg,
			    struct ceph_mds_reply_info_parsed *info,
			    u64 features)
{
	void *p, *end;
	u32 len;
	int err;

	info->head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
	end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);

	/* trace */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_trace(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* extra */
	ceph_decode_32_safe(&p, end, len, bad);
	if (len > 0) {
		ceph_decode_need(&p, end, len, bad);
		err = parse_reply_info_extra(&p, p+len, info, features);
		if (err < 0)
			goto out_bad;
	}

	/* snap blob */
	ceph_decode_32_safe(&p, end, len, bad);
	info->snapblob_len = len;
	info->snapblob = p;
	p += len;

	if (p != end)
		goto bad;
	return 0;

bad:
	err = -EIO;
out_bad:
	pr_err("mds parse_reply err %d\n", err);
	return err;
}
static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
{
	if (!info->dir_in)
		return;
	free_pages((unsigned long)info->dir_in, get_order(info->dir_buf_size));
}
const char *ceph_session_state_name(int s)
{
	switch (s) {
	case CEPH_MDS_SESSION_NEW: return "new";
	case CEPH_MDS_SESSION_OPENING: return "opening";
	case CEPH_MDS_SESSION_OPEN: return "open";
	case CEPH_MDS_SESSION_HUNG: return "hung";
	case CEPH_MDS_SESSION_CLOSING: return "closing";
	case CEPH_MDS_SESSION_RESTARTING: return "restarting";
	case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
	default: return "???";
	}
}
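
/*
 * Session reference counting: get_session() bumps s_ref with
 * atomic_inc_not_zero() so that a session whose last reference is
 * concurrently being dropped is not revived; callers must check for a
 * NULL return.
 */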
static struct ceph_mds_session *get_session(struct ceph_mds_session *s)
{
	if (atomic_inc_not_zero(&s->s_ref)) {
		dout("mdsc get_session %p %d -> %d\n", s,
		     atomic_read(&s->s_ref)-1, atomic_read(&s->s_ref));
		return s;
	} else {
		dout("mdsc get_session %p 0 -- FAIL", s);
		return NULL;
	}
}
void ceph_put_mds_session(struct ceph_mds_session *s)
{
	dout("mdsc put_session %p %d -> %d\n", s,
	     atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
	if (atomic_dec_and_test(&s->s_ref)) {
		if (s->s_auth.authorizer)
			ceph_auth_destroy_authorizer(
				s->s_mdsc->fsc->client->monc.auth,
				s->s_auth.authorizer);
		kfree(s);
	}
}
/*
 * called under mdsc->mutex
 */
struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
						   int mds)
{
	struct ceph_mds_session *session;

	if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL)
		return NULL;
	session = mdsc->sessions[mds];
	dout("lookup_mds_session %p %d\n", session,
	     atomic_read(&session->s_ref));
	get_session(session);
	return session;
}
static bool __have_session(struct ceph_mds_client *mdsc, int mds)
{
	if (mds >= mdsc->max_sessions)
		return false;
	return mdsc->sessions[mds];
}
static int __verify_registered_session(struct ceph_mds_client *mdsc,
				       struct ceph_mds_session *s)
{
	if (s->s_mds >= mdsc->max_sessions ||
	    mdsc->sessions[s->s_mds] != s)
		return -ENOENT;
	return 0;
}
/*
 * create+register a new session for given mds.
 * called under mdsc->mutex.
 */
static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
						 int mds)
{
	struct ceph_mds_session *s;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return ERR_PTR(-EINVAL);

	s = kzalloc(sizeof(*s), GFP_NOFS);
	if (!s)
		return ERR_PTR(-ENOMEM);
	s->s_mdsc = mdsc;
	s->s_mds = mds;
	s->s_state = CEPH_MDS_SESSION_NEW;

	mutex_init(&s->s_mutex);

	ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);

	spin_lock_init(&s->s_gen_ttl_lock);

	s->s_cap_ttl = jiffies - 1;

	spin_lock_init(&s->s_cap_lock);
	s->s_renew_requested = 0;

	INIT_LIST_HEAD(&s->s_caps);

	atomic_set(&s->s_ref, 1);
	INIT_LIST_HEAD(&s->s_waiting);
	INIT_LIST_HEAD(&s->s_unsafe);
	s->s_num_cap_releases = 0;
	s->s_cap_reconnect = 0;
	s->s_cap_iterator = NULL;
	INIT_LIST_HEAD(&s->s_cap_releases);
	INIT_LIST_HEAD(&s->s_cap_releases_done);
	INIT_LIST_HEAD(&s->s_cap_flushing);
	INIT_LIST_HEAD(&s->s_cap_snaps_flushing);

	dout("register_session mds%d\n", mds);
	if (mds >= mdsc->max_sessions) {
		int newmax = 1 << get_count_order(mds+1);
		struct ceph_mds_session **sa;

		dout("register_session realloc to %d\n", newmax);
		sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
		if (sa == NULL)
			goto fail_realloc;
		if (mdsc->sessions) {
			memcpy(sa, mdsc->sessions,
			       mdsc->max_sessions * sizeof(void *));
			kfree(mdsc->sessions);
		}
		mdsc->sessions = sa;
		mdsc->max_sessions = newmax;
	}
	mdsc->sessions[mds] = s;
	atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */

	ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	return s;

fail_realloc:
	kfree(s);
	return ERR_PTR(-ENOMEM);
}
/*
 * called under mdsc->mutex
 */
static void __unregister_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *s)
{
	dout("__unregister_session mds%d %p\n", s->s_mds, s);
	BUG_ON(mdsc->sessions[s->s_mds] != s);
	mdsc->sessions[s->s_mds] = NULL;
	ceph_con_close(&s->s_con);
	ceph_put_mds_session(s);
}
/*
 * drop session refs in request.
 *
 * should be last request ref, or hold mdsc->mutex
 */
static void put_request_session(struct ceph_mds_request *req)
{
	if (req->r_session) {
		ceph_put_mds_session(req->r_session);
		req->r_session = NULL;
	}
}
void ceph_mdsc_release_request(struct kref *kref)
{
	struct ceph_mds_request *req = container_of(kref,
						    struct ceph_mds_request,
						    r_kref);
	destroy_reply_info(&req->r_reply_info);
	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);
	if (req->r_inode) {
		ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
		iput(req->r_inode);
	}
	if (req->r_locked_dir)
		ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	iput(req->r_target_inode);
	if (req->r_dentry)
		dput(req->r_dentry);
	if (req->r_old_dentry)
		dput(req->r_old_dentry);
	if (req->r_old_dentry_dir) {
		/*
		 * track (and drop pins for) r_old_dentry_dir
		 * separately, since r_old_dentry's d_parent may have
		 * changed between the dir mutex being dropped and
		 * this request being freed.
		 */
		ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);
		iput(req->r_old_dentry_dir);
	}
	kfree(req->r_path1);
	kfree(req->r_path2);
	if (req->r_pagelist)
		ceph_pagelist_release(req->r_pagelist);
	put_request_session(req);
	ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
	kfree(req);
}
/*
 * lookup request, bump ref if found.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc,
						 u64 tid)
{
	struct ceph_mds_request *req;
	struct rb_node *n = mdsc->request_tree.rb_node;

	while (n) {
		req = rb_entry(n, struct ceph_mds_request, r_node);
		if (tid < req->r_tid)
			n = n->rb_left;
		else if (tid > req->r_tid)
			n = n->rb_right;
		else {
			ceph_mdsc_get_request(req);
			return req;
		}
	}
	return NULL;
}
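
/*
 * In-flight requests live in an rb-tree keyed by tid, which keeps
 * lookup, insertion, and oldest-tid queries O(log n).
 */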
static void __insert_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *new)
{
	struct rb_node **p = &mdsc->request_tree.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_mds_request *req = NULL;

	while (*p) {
		parent = *p;
		req = rb_entry(parent, struct ceph_mds_request, r_node);
		if (new->r_tid < req->r_tid)
			p = &(*p)->rb_left;
		else if (new->r_tid > req->r_tid)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->r_node, parent, p);
	rb_insert_color(&new->r_node, &mdsc->request_tree);
}
/*
 * Register an in-flight request, and assign a tid.  Link to directory
 * we are modifying (if any).
 *
 * Called under mdsc->mutex.
 */
static void __register_request(struct ceph_mds_client *mdsc,
			       struct ceph_mds_request *req,
			       struct inode *dir)
{
	req->r_tid = ++mdsc->last_tid;
	if (req->r_num_caps)
		ceph_reserve_caps(mdsc, &req->r_caps_reservation,
				  req->r_num_caps);
	dout("__register_request %p tid %lld\n", req, req->r_tid);
	ceph_mdsc_get_request(req);
	__insert_request(mdsc, req);

	req->r_uid = current_fsuid();
	req->r_gid = current_fsgid();

	if (dir) {
		struct ceph_inode_info *ci = ceph_inode(dir);

		ihold(dir);
		spin_lock(&ci->i_unsafe_lock);
		req->r_unsafe_dir = dir;
		list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
		spin_unlock(&ci->i_unsafe_lock);
	}
}
static void __unregister_request(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	dout("__unregister_request %p tid %lld\n", req, req->r_tid);
	rb_erase(&req->r_node, &mdsc->request_tree);
	RB_CLEAR_NODE(&req->r_node);

	if (req->r_unsafe_dir) {
		struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);

		spin_lock(&ci->i_unsafe_lock);
		list_del_init(&req->r_unsafe_dir_item);
		spin_unlock(&ci->i_unsafe_lock);

		iput(req->r_unsafe_dir);
		req->r_unsafe_dir = NULL;
	}

	complete_all(&req->r_safe_completion);

	ceph_mdsc_put_request(req);
}
/*
 * Choose mds to send request to next.  If there is a hint set in the
 * request (e.g., due to a prior forward hint from the mds), use that.
 * Otherwise, consult frag tree and/or caps to identify the
 * appropriate mds.  If all else fails, choose randomly.
 *
 * Called under mdsc->mutex.
 */
static struct dentry *get_nonsnap_parent(struct dentry *dentry)
{
	/*
	 * we don't need to worry about protecting the d_parent access
	 * here because we never rename inside the snapped namespace
	 * except to resplice to another snapdir, and either the old or new
	 * result is a valid result.
	 */
	while (!IS_ROOT(dentry) && ceph_snap(dentry->d_inode) != CEPH_NOSNAP)
		dentry = dentry->d_parent;
	return dentry;
}
static int __choose_mds(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	int mode = req->r_direct_mode;
	int mds = -1;
	u32 hash = req->r_direct_hash;
	bool is_hash = req->r_direct_is_hash;

	/*
	 * is there a specific mds we should try?  ignore hint if we have
	 * no session and the mds is not up (active or recovering).
	 */
	if (req->r_resend_mds >= 0 &&
	    (__have_session(mdsc, req->r_resend_mds) ||
	     ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
		dout("choose_mds using resend_mds mds%d\n",
		     req->r_resend_mds);
		return req->r_resend_mds;
	}

	if (mode == USE_RANDOM_MDS)
		goto random;

	inode = NULL;
	if (req->r_inode) {
		inode = req->r_inode;
	} else if (req->r_dentry) {
		/* ignore race with rename; old or new d_parent is okay */
		struct dentry *parent = req->r_dentry->d_parent;
		struct inode *dir = parent->d_inode;

		if (dir->i_sb != mdsc->fsc->sb) {
			/* not this fs! */
			inode = req->r_dentry->d_inode;
		} else if (ceph_snap(dir) != CEPH_NOSNAP) {
			/* direct snapped/virtual snapdir requests
			 * based on parent dir inode */
			struct dentry *dn = get_nonsnap_parent(parent);
			inode = dn->d_inode;
			dout("__choose_mds using nonsnap parent %p\n", inode);
		} else {
			/* dentry target */
			inode = req->r_dentry->d_inode;
			if (!inode || mode == USE_AUTH_MDS) {
				/* dir + name */
				inode = dir;
				hash = ceph_dentry_hash(dir, req->r_dentry);
				is_hash = true;
			}
		}
	}

	dout("__choose_mds %p is_hash=%d (%d) mode %d\n", inode, (int)is_hash,
	     (int)hash, mode);
	if (!inode)
		goto random;
	ci = ceph_inode(inode);

	if (is_hash && S_ISDIR(inode->i_mode)) {
		struct ceph_inode_frag frag;
		int found;

		ceph_choose_frag(ci, hash, &frag, &found);
		if (found) {
			if (mode == USE_ANY_MDS && frag.ndist > 0) {
				u8 r;

				/* choose a random replica */
				get_random_bytes(&r, 1);
				r %= frag.ndist;
				mds = frag.dist[r];
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (%d/%d)\n",
				     inode, ceph_vinop(inode),
				     frag.frag, mds, (int)r, frag.ndist);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					return mds;
			}

			/* since this file/dir wasn't known to be
			 * replicated, then we want to look for the
			 * authoritative mds. */
			mode = USE_AUTH_MDS;
			if (frag.mds >= 0) {
				/* choose auth mds */
				mds = frag.mds;
				dout("choose_mds %p %llx.%llx "
				     "frag %u mds%d (auth)\n",
				     inode, ceph_vinop(inode), frag.frag, mds);
				if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
				    CEPH_MDS_STATE_ACTIVE)
					return mds;
			}
		}
	}

	spin_lock(&ci->i_ceph_lock);
	cap = NULL;
	if (mode == USE_AUTH_MDS)
		cap = ci->i_auth_cap;
	if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
		cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
	if (!cap) {
		spin_unlock(&ci->i_ceph_lock);
		goto random;
	}
	mds = cap->session->s_mds;
	dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
	     inode, ceph_vinop(inode), mds,
	     cap == ci->i_auth_cap ? "auth " : "", cap);
	spin_unlock(&ci->i_ceph_lock);
	return mds;

random:
	mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
	dout("choose_mds chose random mds%d\n", mds);
	return mds;
}
static struct ceph_msg *create_session_msg(u32 op, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
			   false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(op);
	h->seq = cpu_to_le64(seq);

	return msg;
}
/*
 * session message, specialization for CEPH_SESSION_REQUEST_OPEN
 * to include additional client metadata fields.
 */
static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_session_head *h;
	int i;
	int metadata_bytes = 0;
	int metadata_key_count = 0;
	struct ceph_options *opt = mdsc->fsc->client->options;
	void *p;

	const char* metadata[3][2] = {
		{"hostname", utsname()->nodename},
		{"entity_id", opt->name ? opt->name : ""},
		{NULL, NULL}
	};

	/* Calculate serialized length of metadata */
	metadata_bytes = 4;  /* map length */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		metadata_bytes += 8 + strlen(metadata[i][0]) +
			strlen(metadata[i][1]);
		metadata_key_count++;
	}

	/* Allocate the message */
	msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + metadata_bytes,
			   GFP_NOFS, false);
	if (!msg) {
		pr_err("create_session_msg ENOMEM creating msg\n");
		return NULL;
	}
	h = msg->front.iov_base;
	h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
	h->seq = cpu_to_le64(seq);

	/*
	 * Serialize client metadata into waiting buffer space, using
	 * the format that userspace expects for map<string, string>
	 *
	 * ClientSession messages with metadata are v2
	 */
	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.compat_version = cpu_to_le16(1);

	/* The write pointer, following the session_head structure */
	p = msg->front.iov_base + sizeof(*h);

	/* Number of entries in the map */
	ceph_encode_32(&p, metadata_key_count);

	/* Two length-prefixed strings for each entry in the map */
	for (i = 0; metadata[i][0] != NULL; ++i) {
		size_t const key_len = strlen(metadata[i][0]);
		size_t const val_len = strlen(metadata[i][1]);

		ceph_encode_32(&p, key_len);
		memcpy(p, metadata[i][0], key_len);
		p += key_len;
		ceph_encode_32(&p, val_len);
		memcpy(p, metadata[i][1], val_len);
		p += val_len;
	}

	return msg;
}
/*
 * send session open request.
 *
 * called under mdsc->mutex
 */
static int __open_session(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int mstate;
	int mds = session->s_mds;

	/* wait for mds to go active? */
	mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
	dout("open_session to mds%d (%s)\n", mds,
	     ceph_mds_state_name(mstate));
	session->s_state = CEPH_MDS_SESSION_OPENING;
	session->s_renew_requested = jiffies;

	/* send connect message */
	msg = create_session_open_msg(mdsc, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
/*
 * open sessions for any export targets for the given mds
 *
 * called under mdsc->mutex
 */
static struct ceph_mds_session *
__open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	session = __ceph_lookup_mds_session(mdsc, target);
	if (!session) {
		session = register_session(mdsc, target);
		if (IS_ERR(session))
			return session;
	}
	if (session->s_state == CEPH_MDS_SESSION_NEW ||
	    session->s_state == CEPH_MDS_SESSION_CLOSING)
		__open_session(mdsc, session);

	return session;
}
struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
{
	struct ceph_mds_session *session;

	dout("open_export_target_session to mds%d\n", target);

	mutex_lock(&mdsc->mutex);
	session = __open_export_target_session(mdsc, target);
	mutex_unlock(&mdsc->mutex);

	return session;
}
static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
					  struct ceph_mds_session *session)
{
	struct ceph_mds_info *mi;
	struct ceph_mds_session *ts;
	int i, mds = session->s_mds;

	if (mds >= mdsc->mdsmap->m_max_mds)
		return;

	mi = &mdsc->mdsmap->m_info[mds];
	dout("open_export_target_sessions for mds%d (%d targets)\n",
	     session->s_mds, mi->num_export_targets);

	for (i = 0; i < mi->num_export_targets; i++) {
		ts = __open_export_target_session(mdsc, mi->export_targets[i]);
		if (!IS_ERR(ts))
			ceph_put_mds_session(ts);
	}
}
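
/* exported wrapper: takes mdsc->mutex around the helper above */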
void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
					   struct ceph_mds_session *session)
{
	mutex_lock(&mdsc->mutex);
	__open_export_target_sessions(mdsc, session);
	mutex_unlock(&mdsc->mutex);
}
/*
 * Free preallocated cap messages assigned to this session
 */
static void cleanup_cap_releases(struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	spin_lock(&session->s_cap_lock);
	while (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
	}
	while (!list_empty(&session->s_cap_releases_done)) {
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
	}
	spin_unlock(&session->s_cap_lock);
}
/*
 * Helper to safely iterate over all caps associated with a session, with
 * special care taken to handle a racing __ceph_remove_cap().
 *
 * Caller must hold session s_mutex.
 */
static int iterate_session_caps(struct ceph_mds_session *session,
				int (*cb)(struct inode *, struct ceph_cap *,
					  void *), void *arg)
{
	struct list_head *p;
	struct ceph_cap *cap;
	struct inode *inode, *last_inode = NULL;
	struct ceph_cap *old_cap = NULL;
	int ret;

	dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
	spin_lock(&session->s_cap_lock);
	p = session->s_caps.next;
	while (p != &session->s_caps) {
		cap = list_entry(p, struct ceph_cap, session_caps);
		inode = igrab(&cap->ci->vfs_inode);
		if (!inode) {
			p = p->next;
			continue;
		}
		session->s_cap_iterator = cap;
		spin_unlock(&session->s_cap_lock);

		if (last_inode) {
			iput(last_inode);
			last_inode = NULL;
		}
		if (old_cap) {
			ceph_put_cap(session->s_mdsc, old_cap);
			old_cap = NULL;
		}

		ret = cb(inode, cap, arg);
		last_inode = inode;

		spin_lock(&session->s_cap_lock);
		p = p->next;
		if (cap->ci == NULL) {
			dout("iterate_session_caps finishing cap %p removal\n",
			     cap);
			BUG_ON(cap->session != session);
			list_del_init(&cap->session_caps);
			session->s_nr_caps--;
			cap->session = NULL;
			old_cap = cap;  /* put_cap it w/o locks held */
		}
		if (ret < 0)
			goto out;
	}
	ret = 0;
out:
	session->s_cap_iterator = NULL;
	spin_unlock(&session->s_cap_lock);

	if (last_inode)
		iput(last_inode);
	if (old_cap)
		ceph_put_cap(session->s_mdsc, old_cap);

	return ret;
}
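
/*
 * Callback for iterate_session_caps(): drop the cap and, if that was
 * the inode's last real cap, discard any dirty/flushing state it still
 * had against this session.
 */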
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
				  void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int drop = 0;

	dout("removing cap %p, ci is %p, inode is %p\n",
	     cap, ci, &ci->vfs_inode);
	spin_lock(&ci->i_ceph_lock);
	__ceph_remove_cap(cap, false);
	if (!__ceph_is_any_real_caps(ci)) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(inode->i_sb)->mdsc;

		spin_lock(&mdsc->cap_dirty_lock);
		if (!list_empty(&ci->i_dirty_item)) {
			pr_info(" dropping dirty %s state for %p %lld\n",
				ceph_cap_string(ci->i_dirty_caps),
				inode, ceph_ino(inode));
			ci->i_dirty_caps = 0;
			list_del_init(&ci->i_dirty_item);
			drop = 1;
		}
		if (!list_empty(&ci->i_flushing_item)) {
			pr_info(" dropping dirty+flushing %s state for %p %lld\n",
				ceph_cap_string(ci->i_flushing_caps),
				inode, ceph_ino(inode));
			ci->i_flushing_caps = 0;
			list_del_init(&ci->i_flushing_item);
			mdsc->num_cap_flushing--;
			drop = 1;
		}
		if (drop && ci->i_wrbuffer_ref) {
			pr_info(" dropping dirty data for %p %lld\n",
				inode, ceph_ino(inode));
			ci->i_wrbuffer_ref = 0;
			ci->i_wrbuffer_ref_head = 0;
			drop++;
		}
		spin_unlock(&mdsc->cap_dirty_lock);
	}
	spin_unlock(&ci->i_ceph_lock);
	while (drop--)
		iput(inode);
	return 0;
}
/*
 * caller must hold session s_mutex
 */
static void remove_session_caps(struct ceph_mds_session *session)
{
	dout("remove_session_caps on %p\n", session);
	iterate_session_caps(session, remove_session_caps_cb, NULL);

	spin_lock(&session->s_cap_lock);
	if (session->s_nr_caps > 0) {
		struct super_block *sb = session->s_mdsc->fsc->sb;
		struct inode *inode;
		struct ceph_cap *cap, *prev = NULL;
		struct ceph_vino vino;
		/*
		 * iterate_session_caps() skips inodes that are being
		 * deleted, we need to wait until deletions are complete.
		 * __wait_on_freeing_inode() is designed for the job,
		 * but it is not exported, so use lookup inode function
		 * to access it.
		 */
		while (!list_empty(&session->s_caps)) {
			cap = list_entry(session->s_caps.next,
					 struct ceph_cap, session_caps);
			if (cap == prev)
				break;
			prev = cap;
			vino = cap->ci->i_vino;
			spin_unlock(&session->s_cap_lock);

			inode = ceph_find_inode(sb, vino);
			iput(inode);

			spin_lock(&session->s_cap_lock);
		}
	}
	spin_unlock(&session->s_cap_lock);

	BUG_ON(session->s_nr_caps > 0);
	BUG_ON(!list_empty(&session->s_cap_flushing));
	cleanup_cap_releases(session);
}
/*
 * wake up any threads waiting on this session's caps.  if the cap is
 * old (didn't get renewed on the client reconnect), remove it now.
 *
 * caller must hold s_mutex.
 */
static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
			      void *arg)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	wake_up_all(&ci->i_cap_wq);
	if (arg) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_wanted_max_size = 0;
		ci->i_requested_max_size = 0;
		spin_unlock(&ci->i_ceph_lock);
	}
	return 0;
}
static void wake_up_session_caps(struct ceph_mds_session *session,
				 int reconnect)
{
	dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
	iterate_session_caps(session, wake_up_session_cb,
			     (void *)(unsigned long)reconnect);
}
/*
 * Send periodic message to MDS renewing all currently held caps.  The
 * ack will reset the expiration for all caps from this session.
 *
 * caller holds s_mutex
 */
static int send_renew_caps(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	int state;

	if (time_after_eq(jiffies, session->s_cap_ttl) &&
	    time_after_eq(session->s_cap_ttl, session->s_renew_requested))
		pr_info("mds%d caps stale\n", session->s_mds);
	session->s_renew_requested = jiffies;

	/* do not try to renew caps until a recovering mds has reconnected
	 * with its clients. */
	state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
	if (state < CEPH_MDS_STATE_RECONNECT) {
		dout("send_renew_caps ignoring mds%d (%s)\n",
		     session->s_mds, ceph_mds_state_name(state));
		return 0;
	}

	dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
	     ceph_mds_state_name(state));
	msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
				 ++session->s_renew_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session, u64 seq)
{
	struct ceph_msg *msg;

	dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state), seq);
	msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
/*
 * Note new cap ttl, and any transition from stale -> not stale (fresh?).
 *
 * Called under session->s_mutex
 */
static void renewed_caps(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session, int is_renew)
{
	int was_stale;
	int wake = 0;

	spin_lock(&session->s_cap_lock);
	was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);

	session->s_cap_ttl = session->s_renew_requested +
		mdsc->mdsmap->m_session_timeout*HZ;

	if (was_stale) {
		if (time_before(jiffies, session->s_cap_ttl)) {
			pr_info("mds%d caps renewed\n", session->s_mds);
			wake = 1;
		} else {
			pr_info("mds%d caps still stale\n", session->s_mds);
		}
	}
	dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
	     session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
	     time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
	spin_unlock(&session->s_cap_lock);

	if (wake)
		wake_up_session_caps(session, 0);
}
/*
 * send a session close request
 */
static int request_close_session(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("request_close_session mds%d state %s seq %lld\n",
	     session->s_mds, ceph_session_state_name(session->s_state),
	     session->s_seq);
	msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(&session->s_con, msg);
	return 0;
}
/*
 * Called with s_mutex held.
 */
static int __close_session(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session)
{
	if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
		return 0;
	session->s_state = CEPH_MDS_SESSION_CLOSING;
	return request_close_session(mdsc, session);
}
/*
 * Trim old(er) caps.
 *
 * Because we can't cache an inode without one or more caps, we do
 * this indirectly: if a cap is unused, we prune its aliases, at which
 * point the inode will hopefully get dropped too.
 *
 * Yes, this is a bit sloppy.  Our only real goal here is to respond to
 * memory pressure from the MDS, though, so it needn't be perfect.
 */
static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
{
	struct ceph_mds_session *session = arg;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int used, wanted, oissued, mine;

	if (session->s_trim_caps <= 0)
		return -1;

	spin_lock(&ci->i_ceph_lock);
	mine = cap->issued | cap->implemented;
	used = __ceph_caps_used(ci);
	wanted = __ceph_caps_file_wanted(ci);
	oissued = __ceph_caps_issued_other(ci, cap);

	dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
	     inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
	     ceph_cap_string(used), ceph_cap_string(wanted));
	if (cap == ci->i_auth_cap) {
		if (ci->i_dirty_caps | ci->i_flushing_caps)
			goto out;
		if ((used | wanted) & CEPH_CAP_ANY_WR)
			goto out;
	}
	if ((used | wanted) & ~oissued & mine)
		goto out;   /* we need these caps */

	session->s_trim_caps--;
	if (oissued) {
		/* we aren't the only cap.. just remove us */
		__ceph_remove_cap(cap, true);
	} else {
		/* try to drop referring dentries */
		spin_unlock(&ci->i_ceph_lock);
		d_prune_aliases(inode);
		dout("trim_caps_cb %p cap %p pruned, count now %d\n",
		     inode, cap, atomic_read(&inode->i_count));
		return 0;
	}

out:
	spin_unlock(&ci->i_ceph_lock);
	return 0;
}
/*
 * Trim session cap count down to some max number.
 */
static int trim_caps(struct ceph_mds_client *mdsc,
		     struct ceph_mds_session *session,
		     int max_caps)
{
	int trim_caps = session->s_nr_caps - max_caps;

	dout("trim_caps mds%d start: %d / %d, trim %d\n",
	     session->s_mds, session->s_nr_caps, max_caps, trim_caps);
	if (trim_caps > 0) {
		session->s_trim_caps = trim_caps;
		iterate_session_caps(session, trim_caps_cb, session);
		dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
		     session->s_mds, session->s_nr_caps, max_caps,
		     trim_caps - session->s_trim_caps);
		session->s_trim_caps = 0;
	}

	ceph_add_cap_releases(mdsc, session);
	ceph_send_cap_releases(mdsc, session);
	return 0;
}
/*
 * Allocate cap_release messages.  If there is a partially full message
 * in the queue, try to allocate enough to cover its remainder, so that
 * we can send it immediately.
 *
 * Called under s_mutex.
 */
int ceph_add_cap_releases(struct ceph_mds_client *mdsc,
			  struct ceph_mds_session *session)
{
	struct ceph_msg *msg, *partial = NULL;
	struct ceph_mds_cap_release *head;
	int err = -ENOMEM;
	int extra = mdsc->fsc->mount_options->cap_release_safety;
	int num;

	dout("add_cap_releases %p mds%d extra %d\n", session, session->s_mds,
	     extra);

	spin_lock(&session->s_cap_lock);

	if (!list_empty(&session->s_cap_releases)) {
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg,
				       list_head);
		head = msg->front.iov_base;
		num = le32_to_cpu(head->num);
		if (num) {
			dout(" partial %p with (%d/%d)\n", msg, num,
			     (int)CEPH_CAPS_PER_RELEASE);
			extra += CEPH_CAPS_PER_RELEASE - num;
			partial = msg;
		}
	}
	while (session->s_num_cap_releases < session->s_nr_caps + extra) {
		spin_unlock(&session->s_cap_lock);
		msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE, PAGE_CACHE_SIZE,
				   GFP_NOFS, false);
		if (!msg)
			goto out_unlocked;
		dout("add_cap_releases %p msg %p now %d\n", session, msg,
		     (int)msg->front.iov_len);
		head = msg->front.iov_base;
		head->num = cpu_to_le32(0);
		msg->front.iov_len = sizeof(*head);
		spin_lock(&session->s_cap_lock);
		list_add(&msg->list_head, &session->s_cap_releases);
		session->s_num_cap_releases += CEPH_CAPS_PER_RELEASE;
	}

	if (partial) {
		head = partial->front.iov_base;
		num = le32_to_cpu(head->num);
		dout(" queueing partial %p with %d/%d\n", partial, num,
		     (int)CEPH_CAPS_PER_RELEASE);
		list_move_tail(&partial->list_head,
			       &session->s_cap_releases_done);
		session->s_num_cap_releases -= CEPH_CAPS_PER_RELEASE - num;
	}
	err = 0;
	spin_unlock(&session->s_cap_lock);
out_unlocked:
	return err;
}
/*
 * flush all dirty inode data to disk.
 *
 * returns true if we've flushed through want_flush_seq
 */
static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
{
	int mds, ret = 1;

	dout("check_cap_flush want %lld\n", want_flush_seq);
	mutex_lock(&mdsc->mutex);
	for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
		struct ceph_mds_session *session = mdsc->sessions[mds];

		if (!session)
			continue;
		get_session(session);
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&session->s_mutex);
		if (!list_empty(&session->s_cap_flushing)) {
			struct ceph_inode_info *ci =
				list_entry(session->s_cap_flushing.next,
					   struct ceph_inode_info,
					   i_flushing_item);
			struct inode *inode = &ci->vfs_inode;

			spin_lock(&ci->i_ceph_lock);
			if (ci->i_cap_flush_seq <= want_flush_seq) {
				dout("check_cap_flush still flushing %p "
				     "seq %lld <= %lld to mds%d\n", inode,
				     ci->i_cap_flush_seq, want_flush_seq,
				     session->s_mds);
				ret = 0;
			}
			spin_unlock(&ci->i_ceph_lock);
		}
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);

		if (!ret)
			return ret;
		mutex_lock(&mdsc->mutex);
	}

	mutex_unlock(&mdsc->mutex);
	dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
	return ret;
}
/*
 * called under s_mutex
 */
void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
			    struct ceph_mds_session *session)
{
	struct ceph_msg *msg;

	dout("send_cap_releases mds%d\n", session->s_mds);
	spin_lock(&session->s_cap_lock);
	while (!list_empty(&session->s_cap_releases_done)) {
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);
		spin_unlock(&session->s_cap_lock);
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
		ceph_con_send(&session->s_con, msg);
		spin_lock(&session->s_cap_lock);
	}
	spin_unlock(&session->s_cap_lock);
}
static void discard_cap_releases(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session)
{
	struct ceph_msg *msg;
	struct ceph_mds_cap_release *head;
	unsigned num;

	dout("discard_cap_releases mds%d\n", session->s_mds);

	if (!list_empty(&session->s_cap_releases)) {
		/* zero out the in-progress message */
		msg = list_first_entry(&session->s_cap_releases,
				       struct ceph_msg, list_head);
		head = msg->front.iov_base;
		num = le32_to_cpu(head->num);
		dout("discard_cap_releases mds%d %p %u\n",
		     session->s_mds, msg, num);
		head->num = cpu_to_le32(0);
		msg->front.iov_len = sizeof(*head);
		session->s_num_cap_releases += num;
	}

	/* requeue completed messages */
	while (!list_empty(&session->s_cap_releases_done)) {
		msg = list_first_entry(&session->s_cap_releases_done,
				       struct ceph_msg, list_head);
		list_del_init(&msg->list_head);

		head = msg->front.iov_base;
		num = le32_to_cpu(head->num);
		dout("discard_cap_releases mds%d %p %u\n", session->s_mds, msg,
		     num);
		session->s_num_cap_releases += num;
		head->num = cpu_to_le32(0);
		msg->front.iov_len = sizeof(*head);
		list_add(&msg->list_head, &session->s_cap_releases);
	}
}
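
/*
 * Size the readdir reply buffer from the directory's i_files +
 * i_subdirs estimate (clamped to the max_readdir mount option),
 * falling back to smaller page orders if the allocation fails.
 */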
int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
				    struct inode *dir)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
	size_t size = sizeof(*rinfo->dir_in) + sizeof(*rinfo->dir_dname_len) +
		      sizeof(*rinfo->dir_dname) + sizeof(*rinfo->dir_dlease);
	int order, num_entries;

	spin_lock(&ci->i_ceph_lock);
	num_entries = ci->i_files + ci->i_subdirs;
	spin_unlock(&ci->i_ceph_lock);
	num_entries = max(num_entries, 1);
	num_entries = min(num_entries, opt->max_readdir);

	order = get_order(size * num_entries);
	while (order >= 0) {
		rinfo->dir_in = (void*)__get_free_pages(GFP_NOFS | __GFP_NOWARN,
							order);
		if (rinfo->dir_in)
			break;
		order--;
	}
	if (!rinfo->dir_in)
		return -ENOMEM;

	num_entries = (PAGE_SIZE << order) / size;
	num_entries = min(num_entries, opt->max_readdir);

	rinfo->dir_buf_size = PAGE_SIZE << order;
	req->r_num_caps = num_entries + 1;
	req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
	req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
	return 0;
}
/*
 * Create an mds request.
 */
struct ceph_mds_request *
ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
{
	struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS);

	if (!req)
		return ERR_PTR(-ENOMEM);

	mutex_init(&req->r_fill_mutex);
	req->r_mdsc = mdsc;
	req->r_started = jiffies;
	req->r_resend_mds = -1;
	INIT_LIST_HEAD(&req->r_unsafe_dir_item);
	req->r_fmode = -1;
	kref_init(&req->r_kref);
	INIT_LIST_HEAD(&req->r_wait);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	req->r_stamp = CURRENT_TIME;

	req->r_op = op;
	req->r_direct_mode = mode;
	return req;
}
/*
 * return oldest (lowest) request, tid in request tree, 0 if none.
 *
 * called under mdsc->mutex.
 */
static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
{
	if (RB_EMPTY_ROOT(&mdsc->request_tree))
		return NULL;
	return rb_entry(rb_first(&mdsc->request_tree),
			struct ceph_mds_request, r_node);
}
static u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_request *req = __get_oldest_req(mdsc);

	if (req)
		return req->r_tid;
	return 0;
}
/*
 * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
 * on build_path_from_dentry in fs/cifs/dir.c.
 *
 * If @stop_on_nosnap, generate path relative to the first non-snapped
 * inode.
 *
 * Encode hidden .snap dirs as a double /, i.e.
 *   foo/.snap/bar -> foo//bar
 */
char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
			   int stop_on_nosnap)
{
	struct dentry *temp;
	char *path;
	int len, pos;
	unsigned seq;

	if (dentry == NULL)
		return ERR_PTR(-EINVAL);

retry:
	len = 0;
	seq = read_seqbegin(&rename_lock);
	for (temp = dentry; !IS_ROOT(temp);) {
		struct inode *inode = temp->d_inode;
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR)
			len++;  /* slash only */
		else if (stop_on_nosnap && inode &&
			 ceph_snap(inode) == CEPH_NOSNAP)
			break;
		else
			len += 1 + temp->d_name.len;
		temp = temp->d_parent;
	}
	if (len)
		len--;  /* no leading '/' */

	path = kmalloc(len+1, GFP_NOFS);
	if (path == NULL)
		return ERR_PTR(-ENOMEM);
	pos = len;
	path[pos] = 0;	/* trailing null */
	for (temp = dentry; !IS_ROOT(temp) && pos != 0; ) {
		struct inode *inode;

		spin_lock(&temp->d_lock);
		inode = temp->d_inode;
		if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
			dout("build_path path+%d: %p SNAPDIR\n",
			     pos, temp);
		} else if (stop_on_nosnap && inode &&
			   ceph_snap(inode) == CEPH_NOSNAP) {
			spin_unlock(&temp->d_lock);
			break;
		} else {
			pos -= temp->d_name.len;
			if (pos < 0) {
				spin_unlock(&temp->d_lock);
				break;
			}
			strncpy(path + pos, temp->d_name.name,
				temp->d_name.len);
		}
		spin_unlock(&temp->d_lock);
		if (pos)
			path[--pos] = '/';
		temp = temp->d_parent;
	}
	if (pos != 0 || read_seqretry(&rename_lock, seq)) {
		pr_err("build_path did not end path lookup where "
		       "expected, namelen is %d, pos is %d\n", len, pos);
		/* presumably this is only possible if racing with a
		   rename of one of the parent directories (we can not
		   lock the dentries above us to prevent this, but
		   retrying should be harmless) */
		kfree(path);
		goto retry;
	}

	*base = ceph_ino(temp->d_inode);
	*plen = len;
	dout("build_path on %p %d built %llx '%.*s'\n",
	     dentry, d_count(dentry), *base, len, path);
	return path;
}
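
/*
 * Express a dentry either as a parent-ino + name pair (the common,
 * non-snapped case) or as a full path built by ceph_mdsc_build_path().
 */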
static int build_dentry_path(struct dentry *dentry,
			     const char **ppath, int *ppathlen, u64 *pino,
			     int *pfreepath)
{
	char *path;

	if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(dentry->d_parent->d_inode);
		*ppath = dentry->d_name.name;
		*ppathlen = dentry->d_name.len;
		return 0;
	}
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}
static int build_inode_path(struct inode *inode,
			    const char **ppath, int *ppathlen, u64 *pino,
			    int *pfreepath)
{
	struct dentry *dentry;
	char *path;

	if (ceph_snap(inode) == CEPH_NOSNAP) {
		*pino = ceph_ino(inode);
		*ppathlen = 0;
		return 0;
	}
	dentry = d_find_alias(inode);
	path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
	dput(dentry);
	if (IS_ERR(path))
		return PTR_ERR(path);
	*ppath = path;
	*pfreepath = 1;
	return 0;
}
/*
 * request arguments may be specified via an inode *, a dentry *, or
 * an explicit ino+path.
 */
static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
				 const char *rpath, u64 rino,
				 const char **ppath, int *pathlen,
				 u64 *ino, int *freepath)
{
	int r = 0;

	if (rinode) {
		r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
		dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
		     ceph_snap(rinode));
	} else if (rdentry) {
		r = build_dentry_path(rdentry, ppath, pathlen, ino, freepath);
		dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
		     *ppath);
	} else if (rpath || rino) {
		*ino = rino;
		*ppath = rpath;
		*pathlen = rpath ? strlen(rpath) : 0;
		dout(" path %.*s\n", *pathlen, rpath);
	}

	return r;
}
/*
 * called under mdsc->mutex
 */
static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
					       struct ceph_mds_request *req,
					       int mds)
{
	struct ceph_msg *msg;
	struct ceph_mds_request_head *head;
	const char *path1 = NULL;
	const char *path2 = NULL;
	u64 ino1 = 0, ino2 = 0;
	int pathlen1 = 0, pathlen2 = 0;
	int freepath1 = 0, freepath2 = 0;
	int len;
	u16 releases;
	void *p, *end;
	int ret;

	ret = set_request_path_attr(req->r_inode, req->r_dentry,
				    req->r_path1, req->r_ino1.ino,
				    &path1, &pathlen1, &ino1, &freepath1);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out;
	}

	ret = set_request_path_attr(NULL, req->r_old_dentry,
				    req->r_path2, req->r_ino2.ino,
				    &path2, &pathlen2, &ino2, &freepath2);
	if (ret < 0) {
		msg = ERR_PTR(ret);
		goto out_free1;
	}

	len = sizeof(*head) +
		pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
		sizeof(struct timespec);

	/* calculate (max) length for cap releases */
	len += sizeof(struct ceph_mds_request_release) *
		(!!req->r_inode_drop + !!req->r_dentry_drop +
		 !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
	if (req->r_dentry_drop)
		len += req->r_dentry->d_name.len;
	if (req->r_old_dentry_drop)
		len += req->r_old_dentry->d_name.len;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_REQUEST, len, GFP_NOFS, false);
	if (!msg) {
		msg = ERR_PTR(-ENOMEM);
		goto out_free2;
	}

	msg->hdr.version = cpu_to_le16(2);
	msg->hdr.tid = cpu_to_le64(req->r_tid);

	head = msg->front.iov_base;
	p = msg->front.iov_base + sizeof(*head);
	end = msg->front.iov_base + msg->front.iov_len;

	head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
	head->op = cpu_to_le32(req->r_op);
	head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
	head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
	head->args = req->r_args;

	ceph_encode_filepath(&p, end, ino1, path1);
	ceph_encode_filepath(&p, end, ino2, path2);

	/* make note of release offset, in case we need to replay */
	req->r_request_release_offset = p - msg->front.iov_base;

	/* cap releases */
	releases = 0;
	if (req->r_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_inode ? req->r_inode : req->r_dentry->d_inode,
		      mds, req->r_inode_drop, req->r_inode_unless, 0);
	if (req->r_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_dentry,
		       mds, req->r_dentry_drop, req->r_dentry_unless);
	if (req->r_old_dentry_drop)
		releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
		       mds, req->r_old_dentry_drop, req->r_old_dentry_unless);
	if (req->r_old_inode_drop)
		releases += ceph_encode_inode_release(&p,
		      req->r_old_dentry->d_inode,
		      mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
	head->num_releases = cpu_to_le16(releases);

	/* time stamp */
	ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp));

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);

	if (req->r_pagelist) {
		struct ceph_pagelist *pagelist = req->r_pagelist;
		atomic_inc(&pagelist->refcnt);
		ceph_msg_data_add_pagelist(msg, pagelist);
		msg->hdr.data_len = cpu_to_le32(pagelist->length);
	} else {
		msg->hdr.data_len = 0;
	}

	msg->hdr.data_off = cpu_to_le16(0);

out_free2:
	if (freepath2)
		kfree((char *)path2);
out_free1:
	if (freepath1)
		kfree((char *)path1);
out:
	return msg;
}
/*
 * called under mdsc->mutex if error, under no mutex if
 * success.
 */
static void complete_request(struct ceph_mds_client *mdsc,
			     struct ceph_mds_request *req)
{
	if (req->r_callback)
		req->r_callback(mdsc, req);
	else
		complete_all(&req->r_completion);
}
/*
 * called under mdsc->mutex
 */
static int __prepare_send_request(struct ceph_mds_client *mdsc,
				  struct ceph_mds_request *req,
				  int mds)
{
	struct ceph_mds_request_head *rhead;
	struct ceph_msg *msg;
	int flags = 0;

	req->r_attempts++;
	if (req->r_inode) {
		struct ceph_cap *cap =
			ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);

		if (cap)
			req->r_sent_on_mseq = cap->mseq;
		else
			req->r_sent_on_mseq = -1;
	}
	dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
	     req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);

	if (req->r_got_unsafe) {
		void *p;
		/*
		 * Replay.  Do not regenerate message (and rebuild
		 * paths, etc.); just use the original message.
		 * Rebuilding paths will break for renames because
		 * d_move mangles the src name.
		 */
		msg = req->r_request;
		rhead = msg->front.iov_base;

		flags = le32_to_cpu(rhead->flags);
		flags |= CEPH_MDS_FLAG_REPLAY;
		rhead->flags = cpu_to_le32(flags);

		if (req->r_target_inode)
			rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));

		rhead->num_retry = req->r_attempts - 1;

		/* remove cap/dentry releases from message */
		rhead->num_releases = 0;

		/* time stamp */
		p = msg->front.iov_base + req->r_request_release_offset;
		ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp));

		msg->front.iov_len = p - msg->front.iov_base;
		msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
		return 0;
	}

	if (req->r_request) {
		ceph_msg_put(req->r_request);
		req->r_request = NULL;
	}
	msg = create_request_message(mdsc, req, mds);
	if (IS_ERR(msg)) {
		req->r_err = PTR_ERR(msg);
		complete_request(mdsc, req);
		return PTR_ERR(msg);
	}
	req->r_request = msg;

	rhead = msg->front.iov_base;
	rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
	if (req->r_got_unsafe)
		flags |= CEPH_MDS_FLAG_REPLAY;
	if (req->r_locked_dir)
		flags |= CEPH_MDS_FLAG_WANT_DENTRY;
	rhead->flags = cpu_to_le32(flags);
	rhead->num_fwd = req->r_num_fwd;
	rhead->num_retry = req->r_attempts - 1;
	rhead->ino = 0;

	dout(" r_locked_dir = %p\n", req->r_locked_dir);
	return 0;
}
/*
 * send request, or put it on the appropriate wait list.
 */
static int __do_request(struct ceph_mds_client *mdsc,
			struct ceph_mds_request *req)
{
	struct ceph_mds_session *session = NULL;
	int mds = -1;
	int err = -EAGAIN;

	if (req->r_err || req->r_got_result) {
		if (req->r_aborted)
			__unregister_request(mdsc, req);
		goto out;
	}

	if (req->r_timeout &&
	    time_after_eq(jiffies, req->r_started + req->r_timeout)) {
		dout("do_request timed out\n");
		err = -EIO;
		goto finish;
	}

	put_request_session(req);

	mds = __choose_mds(mdsc, req);
	if (mds < 0 ||
	    ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
		dout("do_request no mds or not active, waiting for map\n");
		list_add(&req->r_wait, &mdsc->waiting_for_map);
		goto out;
	}

	/* get, open session */
	session = __ceph_lookup_mds_session(mdsc, mds);
	if (!session) {
		session = register_session(mdsc, mds);
		if (IS_ERR(session)) {
			err = PTR_ERR(session);
			goto finish;
		}
	}
	req->r_session = get_session(session);

	dout("do_request mds%d session %p state %s\n", mds, session,
	     ceph_session_state_name(session->s_state));
	if (session->s_state != CEPH_MDS_SESSION_OPEN &&
	    session->s_state != CEPH_MDS_SESSION_HUNG) {
		if (session->s_state == CEPH_MDS_SESSION_NEW ||
		    session->s_state == CEPH_MDS_SESSION_CLOSING)
			__open_session(mdsc, session);
		list_add(&req->r_wait, &session->s_waiting);
		goto out_session;
	}

	/* send request */
	req->r_resend_mds = -1;   /* forget any previous mds hint */

	if (req->r_request_started == 0)   /* note request start time */
		req->r_request_started = jiffies;

	err = __prepare_send_request(mdsc, req, mds);
	if (!err) {
		ceph_msg_get(req->r_request);
		ceph_con_send(&session->s_con, req->r_request);
	}

out_session:
	ceph_put_mds_session(session);
out:
	return err;

finish:
	req->r_err = err;
	complete_request(mdsc, req);
	goto out;
}
/*
 * called under mdsc->mutex
 */
static void __wake_requests(struct ceph_mds_client *mdsc,
			    struct list_head *head)
{
	struct ceph_mds_request *req;
	LIST_HEAD(tmp_list);

	list_splice_init(head, &tmp_list);

	while (!list_empty(&tmp_list)) {
		req = list_entry(tmp_list.next,
				 struct ceph_mds_request, r_wait);
		list_del_init(&req->r_wait);
		dout(" wake request %p tid %llu\n", req, req->r_tid);
		__do_request(mdsc, req);
	}
}
/*
 * Wake up threads with requests pending for @mds, so that they can
 * resubmit their requests to a possibly different mds.
 */
static void kick_requests(struct ceph_mds_client *mdsc, int mds)
{
	struct ceph_mds_request *req;
	struct rb_node *p = rb_first(&mdsc->request_tree);

	dout("kick_requests mds%d\n", mds);
	while (p) {
		req = rb_entry(p, struct ceph_mds_request, r_node);
		p = rb_next(p);
		if (req->r_got_unsafe)
			continue;
		if (req->r_session &&
		    req->r_session->s_mds == mds) {
			dout(" kicking tid %llu\n", req->r_tid);
			list_del_init(&req->r_wait);
			__do_request(mdsc, req);
		}
	}
}
void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc,
			      struct ceph_mds_request *req)
{
	dout("submit_request on %p\n", req);
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, NULL);
	__do_request(mdsc, req);
	mutex_unlock(&mdsc->mutex);
}
/*
 * Synchronously perform an mds request.  Take care of all of the
 * session setup, forwarding, retry details.
 */
int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
			 struct inode *dir,
			 struct ceph_mds_request *req)
{
	int err;

	dout("do_request on %p\n", req);

	/* take CAP_PIN refs for r_inode, r_locked_dir, r_old_dentry */
	if (req->r_inode)
		ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
	if (req->r_locked_dir)
		ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN);
	if (req->r_old_dentry_dir)
		ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
				  CEPH_CAP_PIN);

	/* issue */
	mutex_lock(&mdsc->mutex);
	__register_request(mdsc, req, dir);
	__do_request(mdsc, req);

	if (req->r_err) {
		err = req->r_err;
		__unregister_request(mdsc, req);
		dout("do_request early error %d\n", err);
		goto out;
	}

	/* wait */
	mutex_unlock(&mdsc->mutex);
	dout("do_request waiting\n");
	if (req->r_timeout) {
		err = (long)wait_for_completion_killable_timeout(
			&req->r_completion, req->r_timeout);
		if (err == 0)
			err = -EIO;
	} else if (req->r_wait_for_completion) {
		err = req->r_wait_for_completion(mdsc, req);
	} else {
		err = wait_for_completion_killable(&req->r_completion);
	}
	dout("do_request waited, got %d\n", err);
	mutex_lock(&mdsc->mutex);

	/* only abort if we didn't race with a real reply */
	if (req->r_got_result) {
		err = le32_to_cpu(req->r_reply_info.head->result);
	} else if (err < 0) {
		dout("aborted request %lld with %d\n", req->r_tid, err);

		/*
		 * ensure we aren't running concurrently with
		 * ceph_fill_trace or ceph_readdir_prepopulate, which
		 * rely on locks (dir mutex) held by our caller.
		 */
		mutex_lock(&req->r_fill_mutex);
		req->r_err = err;
		req->r_aborted = true;
		mutex_unlock(&req->r_fill_mutex);

		if (req->r_locked_dir &&
		    (req->r_op & CEPH_MDS_OP_WRITE))
			ceph_invalidate_dir_request(req);
	} else {
		err = req->r_err;
	}

out:
	mutex_unlock(&mdsc->mutex);
	dout("do_request %p done, result %d\n", req, err);
	return err;
}
/*
 * Invalidate dir's completeness, dentry lease state on an aborted MDS
 * namespace request.
 */
void ceph_invalidate_dir_request(struct ceph_mds_request *req)
{
	struct inode *inode = req->r_locked_dir;

	dout("invalidate_dir_request %p (complete, lease(s))\n", inode);

	ceph_dir_clear_complete(inode);
	if (req->r_dentry)
		ceph_invalidate_dentry_lease(req->r_dentry);
	if (req->r_old_dentry)
		ceph_invalidate_dentry_lease(req->r_old_dentry);
}
/*
 * Handle mds reply.
 *
 * We take the session mutex and parse and process the reply immediately.
 * This preserves the logical ordering of replies, capabilities, etc., sent
 * by the MDS as they are applied to our local cache.
 */
static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_mds_request *req;
	struct ceph_mds_reply_head *head = msg->front.iov_base;
	struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
	u64 tid;
	int err, result;
	int mds = session->s_mds;

	if (msg->front.iov_len < sizeof(*head)) {
		pr_err("mdsc_handle_reply got corrupt (short) reply\n");
		ceph_msg_dump(msg);
		return;
	}

	/* get request, session */
	tid = le64_to_cpu(msg->hdr.tid);
	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
	if (!req) {
		dout("handle_reply on unknown tid %llu\n", tid);
		mutex_unlock(&mdsc->mutex);
		return;
	}
	dout("handle_reply %p\n", req);

	/* correct session? */
	if (req->r_session != session) {
		pr_err("mdsc_handle_reply got %llu on session mds%d"
		       " not mds%d\n", tid, session->s_mds,
		       req->r_session ? req->r_session->s_mds : -1);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	/* dup? */
	if ((req->r_got_unsafe && !head->safe) ||
	    (req->r_got_safe && head->safe)) {
		pr_warn("got a dup %s reply on %llu from mds%d\n",
			head->safe ? "safe" : "unsafe", tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	if (req->r_got_safe && !head->safe) {
		pr_warn("got unsafe after safe on %llu from mds%d\n",
			tid, mds);
		mutex_unlock(&mdsc->mutex);
		goto out;
	}

	result = le32_to_cpu(head->result);

	/*
	 * Handle an ESTALE
	 * if we're not talking to the authority, send to them
	 * if the authority has changed while we weren't looking,
	 * send to new authority
	 * Otherwise we just have to return an ESTALE
	 */
	if (result == -ESTALE) {
		dout("got ESTALE on request %llu", req->r_tid);
		req->r_resend_mds = -1;
		if (req->r_direct_mode != USE_AUTH_MDS) {
			dout("not using auth, setting for that now");
			req->r_direct_mode = USE_AUTH_MDS;
			__do_request(mdsc, req);
			mutex_unlock(&mdsc->mutex);
			goto out;
		} else {
			int mds = __choose_mds(mdsc, req);
			if (mds >= 0 && mds != req->r_session->s_mds) {
				dout("but auth changed, so resending");
				__do_request(mdsc, req);
				mutex_unlock(&mdsc->mutex);
				goto out;
			}
		}
		dout("have to return ESTALE on request %llu", req->r_tid);
	}

	if (head->safe) {
		req->r_got_safe = true;
		__unregister_request(mdsc, req);

		if (req->r_got_unsafe) {
			/*
			 * We already handled the unsafe response, now do the
			 * cleanup.  No need to examine the response; the MDS
			 * doesn't include any result info in the safe
			 * response.  And even if it did, there is nothing
			 * useful we could do with a revised return value.
			 */
			dout("got safe reply %llu, mds%d\n", tid, mds);
			list_del_init(&req->r_unsafe_item);

			/* last unsafe request during umount? */
			if (mdsc->stopping && !__get_oldest_req(mdsc))
				complete_all(&mdsc->safe_umount_waiters);
			mutex_unlock(&mdsc->mutex);
			goto out;
		}
	} else {
		req->r_got_unsafe = true;
		list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
	}

	dout("handle_reply tid %lld result %d\n", tid, result);
	rinfo = &req->r_reply_info;
	err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);
	if (err < 0) {
		pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
		ceph_msg_dump(msg);
		goto out_err;
	}

	/* snap trace */
	if (rinfo->snapblob_len) {
		down_write(&mdsc->snap_rwsem);
		ceph_update_snap_trace(mdsc, rinfo->snapblob,
				       rinfo->snapblob + rinfo->snapblob_len,
				       le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
		downgrade_write(&mdsc->snap_rwsem);
	} else {
		down_read(&mdsc->snap_rwsem);
	}

	/* insert trace into our cache */
	mutex_lock(&req->r_fill_mutex);
	err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session);
	if (err == 0) {
		if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
				    req->r_op == CEPH_MDS_OP_LSSNAP))
			ceph_readdir_prepopulate(req, req->r_session);
		ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
	}
	mutex_unlock(&req->r_fill_mutex);

	up_read(&mdsc->snap_rwsem);
out_err:
	mutex_lock(&mdsc->mutex);
	if (!req->r_aborted) {
		if (err) {
			req->r_err = err;
		} else {
			req->r_reply = ceph_msg_get(msg);
			req->r_got_result = true;
		}
	} else {
		dout("reply arrived after request %lld was aborted\n", tid);
	}
	mutex_unlock(&mdsc->mutex);

	ceph_add_cap_releases(mdsc, req->r_session);
	mutex_unlock(&session->s_mutex);

	/* kick calling process */
	complete_request(mdsc, req);
out:
	ceph_mdsc_put_request(req);
	return;
}
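/*
 * Summary (added for clarity): for a write op the MDS may reply twice.
 * The first, "unsafe" reply means the change is applied but only
 * journaled; the request is parked on s_unsafe until the later "safe"
 * reply confirms durability, at which point it is unregistered and,
 * during unmount, safe_umount_waiters may be completed.
 */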
/*
 * handle mds notification that our request has been forwarded.
 */
static void handle_forward(struct ceph_mds_client *mdsc,
			   struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	u32 next_mds;
	u32 fwd_seq;
	int err = -EINVAL;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;

	ceph_decode_need(&p, end, 2*sizeof(u32), bad);
	next_mds = ceph_decode_32(&p);
	fwd_seq = ceph_decode_32(&p);

	mutex_lock(&mdsc->mutex);
	req = __lookup_request(mdsc, tid);
	if (!req) {
		dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
		goto out;  /* dup reply? */
	}

	if (req->r_aborted) {
		dout("forward tid %llu aborted, unregistering\n", tid);
		__unregister_request(mdsc, req);
	} else if (fwd_seq <= req->r_num_fwd) {
		dout("forward tid %llu to mds%d - old seq %d <= %d\n",
		     tid, next_mds, req->r_num_fwd, fwd_seq);
	} else {
		/* resend. forward race not possible; mds would drop */
		dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
		BUG_ON(req->r_got_result);
		req->r_num_fwd = fwd_seq;
		req->r_resend_mds = next_mds;
		put_request_session(req);
		__do_request(mdsc, req);
	}
	ceph_mdsc_put_request(req);
out:
	mutex_unlock(&mdsc->mutex);
	return;

bad:
	pr_err("mdsc_handle_forward decode error err=%d\n", err);
}
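/*
 * Note (illustrative): r_num_fwd makes forward notifications
 * idempotent -- a forward whose fwd_seq is not newer than what we have
 * already seen is ignored above, so a request bounced between MDSs is
 * re-targeted at most once per hop.
 */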
/*
 * handle a mds session control message
 */
static void handle_session(struct ceph_mds_session *session,
			   struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	u32 op;
	u64 seq;
	int mds = session->s_mds;
	struct ceph_mds_session_head *h = msg->front.iov_base;
	int wake = 0;

	/* decode */
	if (msg->front.iov_len != sizeof(*h))
		goto bad;
	op = le32_to_cpu(h->op);
	seq = le64_to_cpu(h->seq);

	mutex_lock(&mdsc->mutex);
	if (op == CEPH_SESSION_CLOSE)
		__unregister_session(mdsc, session);
	/* FIXME: this ttl calculation is generous */
	session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
	mutex_unlock(&mdsc->mutex);

	mutex_lock(&session->s_mutex);

	dout("handle_session mds%d %s %p state %s seq %llu\n",
	     mds, ceph_session_op_name(op), session,
	     ceph_session_state_name(session->s_state), seq);

	if (session->s_state == CEPH_MDS_SESSION_HUNG) {
		session->s_state = CEPH_MDS_SESSION_OPEN;
		pr_info("mds%d came back\n", session->s_mds);
	}

	switch (op) {
	case CEPH_SESSION_OPEN:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect success\n", session->s_mds);
		session->s_state = CEPH_MDS_SESSION_OPEN;
		renewed_caps(mdsc, session, 0);
		wake = 1;
		if (mdsc->stopping)
			__close_session(mdsc, session);
		break;

	case CEPH_SESSION_RENEWCAPS:
		if (session->s_renew_seq == seq)
			renewed_caps(mdsc, session, 1);
		break;

	case CEPH_SESSION_CLOSE:
		if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
			pr_info("mds%d reconnect denied\n", session->s_mds);
		remove_session_caps(session);
		wake = 2; /* for good measure */
		wake_up_all(&mdsc->session_close_wq);
		break;

	case CEPH_SESSION_STALE:
		pr_info("mds%d caps went stale, renewing\n",
			session->s_mds);
		spin_lock(&session->s_gen_ttl_lock);
		session->s_cap_gen++;
		session->s_cap_ttl = jiffies - 1;
		spin_unlock(&session->s_gen_ttl_lock);
		send_renew_caps(mdsc, session);
		break;

	case CEPH_SESSION_RECALL_STATE:
		trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
		break;

	case CEPH_SESSION_FLUSHMSG:
		send_flushmsg_ack(mdsc, session, seq);
		break;

	default:
		pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
		WARN_ON(1);
	}

	mutex_unlock(&session->s_mutex);
	if (wake) {
		mutex_lock(&mdsc->mutex);
		__wake_requests(mdsc, &session->s_waiting);
		if (wake == 2)
			kick_requests(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
	}
	return;

bad:
	pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
	       (int)msg->front.iov_len);
	ceph_msg_dump(msg);
	return;
}
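/*
 * Note (illustrative): the wake values above encode how much to kick
 * after s_mutex is dropped.  wake == 1 (session opened) only wakes
 * requests parked on s_waiting; wake == 2 (session closed) also kicks
 * requests already assigned to this mds so they can be resent
 * elsewhere.
 */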
/*
 * called under session->mutex.
 */
static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_mds_request *req, *nreq;
	int err;

	dout("replay_unsafe_requests mds%d\n", session->s_mds);

	mutex_lock(&mdsc->mutex);
	list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) {
		err = __prepare_send_request(mdsc, req, session->s_mds);
		if (!err) {
			ceph_msg_get(req->r_request);
			ceph_con_send(&session->s_con, req->r_request);
		}
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * Encode information about a cap for a reconnect with the MDS.
 */
static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
			  void *arg)
{
	union {
		struct ceph_mds_cap_reconnect v2;
		struct ceph_mds_cap_reconnect_v1 v1;
	} rec;
	size_t reclen;
	struct ceph_inode_info *ci;
	struct ceph_reconnect_state *recon_state = arg;
	struct ceph_pagelist *pagelist = recon_state->pagelist;
	char *path;
	int pathlen, err;
	u64 pathbase;
	struct dentry *dentry;

	ci = cap->ci;

	dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
	     inode, ceph_vinop(inode), cap, cap->cap_id,
	     ceph_cap_string(cap->issued));
	err = ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
	if (err)
		return err;

	dentry = d_find_alias(inode);
	if (dentry) {
		path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase, 0);
		if (IS_ERR(path)) {
			err = PTR_ERR(path);
			goto out_dput;
		}
	} else {
		path = NULL;
		pathlen = 0;
		pathbase = 0;
	}
	err = ceph_pagelist_encode_string(pagelist, path, pathlen);
	if (err)
		goto out_free;

	spin_lock(&ci->i_ceph_lock);
	cap->seq = 0;        /* reset cap seq */
	cap->issue_seq = 0;  /* and issue_seq */
	cap->mseq = 0;       /* and migrate_seq */
	cap->cap_gen = cap->session->s_cap_gen;

	if (recon_state->flock) {
		rec.v2.cap_id = cpu_to_le64(cap->cap_id);
		rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v2.issued = cpu_to_le32(cap->issued);
		rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v2.pathbase = cpu_to_le64(pathbase);
		rec.v2.flock_len = 0;
		reclen = sizeof(rec.v2);
	} else {
		rec.v1.cap_id = cpu_to_le64(cap->cap_id);
		rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
		rec.v1.issued = cpu_to_le32(cap->issued);
		rec.v1.size = cpu_to_le64(inode->i_size);
		ceph_encode_timespec(&rec.v1.mtime, &inode->i_mtime);
		ceph_encode_timespec(&rec.v1.atime, &inode->i_atime);
		rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
		rec.v1.pathbase = cpu_to_le64(pathbase);
		reclen = sizeof(rec.v1);
	}
	spin_unlock(&ci->i_ceph_lock);

	if (recon_state->flock) {
		int num_fcntl_locks, num_flock_locks;
		struct ceph_filelock *flocks;

encode_again:
		spin_lock(&inode->i_lock);
		ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
		spin_unlock(&inode->i_lock);
		flocks = kmalloc((num_fcntl_locks+num_flock_locks) *
				 sizeof(struct ceph_filelock), GFP_NOFS);
		if (!flocks) {
			err = -ENOMEM;
			goto out_free;
		}
		spin_lock(&inode->i_lock);
		err = ceph_encode_locks_to_buffer(inode, flocks,
						  num_fcntl_locks,
						  num_flock_locks);
		spin_unlock(&inode->i_lock);
		if (err) {
			kfree(flocks);
			if (err == -ENOSPC)
				goto encode_again;
			goto out_free;
		}
		/*
		 * number of encoded locks is stable, so copy to pagelist
		 */
		rec.v2.flock_len = cpu_to_le32(2*sizeof(u32) +
				    (num_fcntl_locks+num_flock_locks) *
				    sizeof(struct ceph_filelock));
		err = ceph_pagelist_append(pagelist, &rec, reclen);
		if (!err)
			err = ceph_locks_to_pagelist(flocks, pagelist,
						     num_fcntl_locks,
						     num_flock_locks);
		kfree(flocks);
	} else {
		err = ceph_pagelist_append(pagelist, &rec, reclen);
	}

	recon_state->nr_caps++;
out_free:
	kfree(path);
out_dput:
	dput(dentry);
	return err;
}
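/*
 * Sketch of the per-cap record emitted above (derived from the code,
 * for reference): each cap contributes
 *
 *	__le64	ino
 *	string	path (as built by ceph_mdsc_build_path())
 *	struct ceph_mds_cap_reconnect    (v2, flock-capable peers,
 *					  followed by the encoded
 *					  fcntl/flock locks), or
 *	struct ceph_mds_cap_reconnect_v1 (legacy peers, no locks)
 */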
/*
 * If an MDS fails and recovers, clients need to reconnect in order to
 * reestablish shared state.  This includes all caps issued through
 * this session _and_ the snap_realm hierarchy.  Because it's not
 * clear which snap realms the mds cares about, we send everything we
 * know about.. that ensures we'll then get any new info the
 * recovering MDS might have.
 *
 * This is a relatively heavyweight operation, but it's rare.
 *
 * called with mdsc->mutex held.
 */
static void send_mds_reconnect(struct ceph_mds_client *mdsc,
			       struct ceph_mds_session *session)
{
	struct ceph_msg *reply;
	struct rb_node *p;
	int mds = session->s_mds;
	int err = -ENOMEM;
	int s_nr_caps;
	struct ceph_pagelist *pagelist;
	struct ceph_reconnect_state recon_state;

	pr_info("mds%d reconnect start\n", mds);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		goto fail_nopagelist;
	ceph_pagelist_init(pagelist);

	reply = ceph_msg_new(CEPH_MSG_CLIENT_RECONNECT, 0, GFP_NOFS, false);
	if (!reply)
		goto fail_nomsg;

	mutex_lock(&session->s_mutex);
	session->s_state = CEPH_MDS_SESSION_RECONNECTING;
	session->s_seq = 0;

	dout("session %p state %s\n", session,
	     ceph_session_state_name(session->s_state));

	spin_lock(&session->s_gen_ttl_lock);
	session->s_cap_gen++;
	spin_unlock(&session->s_gen_ttl_lock);

	spin_lock(&session->s_cap_lock);
	/*
	 * notify __ceph_remove_cap() that we are composing cap reconnect.
	 * If a cap get released before being added to the cap reconnect,
	 * __ceph_remove_cap() should skip queuing cap release.
	 */
	session->s_cap_reconnect = 1;
	/* drop old cap expires; we're about to reestablish that state */
	discard_cap_releases(mdsc, session);
	spin_unlock(&session->s_cap_lock);

	/* trim unused caps to reduce MDS's cache rejoin time */
	shrink_dcache_parent(mdsc->fsc->sb->s_root);

	ceph_con_close(&session->s_con);
	ceph_con_open(&session->s_con,
		      CEPH_ENTITY_TYPE_MDS, mds,
		      ceph_mdsmap_get_addr(mdsc->mdsmap, mds));

	/* replay unsafe requests */
	replay_unsafe_requests(mdsc, session);

	down_read(&mdsc->snap_rwsem);

	/* traverse this session's caps */
	s_nr_caps = session->s_nr_caps;
	err = ceph_pagelist_encode_32(pagelist, s_nr_caps);
	if (err)
		goto fail;

	recon_state.nr_caps = 0;
	recon_state.pagelist = pagelist;
	recon_state.flock = session->s_con.peer_features & CEPH_FEATURE_FLOCK;
	err = iterate_session_caps(session, encode_caps_cb, &recon_state);
	if (err < 0)
		goto fail;

	spin_lock(&session->s_cap_lock);
	session->s_cap_reconnect = 0;
	spin_unlock(&session->s_cap_lock);

	/*
	 * snaprealms.  we provide mds with the ino, seq (version), and
	 * parent for all of our realms.  If the mds has any newer info,
	 * it will tell us.
	 */
	for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
		struct ceph_snap_realm *realm =
			rb_entry(p, struct ceph_snap_realm, node);
		struct ceph_mds_snaprealm_reconnect sr_rec;

		dout(" adding snap realm %llx seq %lld parent %llx\n",
		     realm->ino, realm->seq, realm->parent_ino);
		sr_rec.ino = cpu_to_le64(realm->ino);
		sr_rec.seq = cpu_to_le64(realm->seq);
		sr_rec.parent = cpu_to_le64(realm->parent_ino);
		err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
		if (err)
			goto fail;
	}

	if (recon_state.flock)
		reply->hdr.version = cpu_to_le16(2);

	/* raced with cap release? */
	if (s_nr_caps != recon_state.nr_caps) {
		struct page *page = list_first_entry(&pagelist->head,
						     struct page, lru);
		__le32 *addr = kmap_atomic(page);
		*addr = cpu_to_le32(recon_state.nr_caps);
		kunmap_atomic(addr);
	}

	reply->hdr.data_len = cpu_to_le32(pagelist->length);
	ceph_msg_data_add_pagelist(reply, pagelist);
	ceph_con_send(&session->s_con, reply);

	mutex_unlock(&session->s_mutex);

	mutex_lock(&mdsc->mutex);
	__wake_requests(mdsc, &session->s_waiting);
	mutex_unlock(&mdsc->mutex);

	up_read(&mdsc->snap_rwsem);
	return;

fail:
	ceph_msg_put(reply);
	up_read(&mdsc->snap_rwsem);
	mutex_unlock(&session->s_mutex);
fail_nomsg:
	ceph_pagelist_release(pagelist);
fail_nopagelist:
	pr_err("error %d preparing reconnect for mds%d\n", err, mds);
	return;
}
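/*
 * Sketch of the reconnect payload assembled above (for reference):
 *
 *	__le32	nr_caps   (patched in place if a cap release races)
 *	nr_caps cap records, as encoded by encode_caps_cb()
 *	one struct ceph_mds_snaprealm_reconnect per known snap realm
 *
 * The message header version is bumped to 2 when the peer supports
 * CEPH_FEATURE_FLOCK, so the MDS knows to expect v2 cap records.
 */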
/*
 * compare old and new mdsmaps, kicking requests
 * and closing out old connections as necessary
 *
 * called under mdsc->mutex.
 */
static void check_new_map(struct ceph_mds_client *mdsc,
			  struct ceph_mdsmap *newmap,
			  struct ceph_mdsmap *oldmap)
{
	int i;
	int oldstate, newstate;
	struct ceph_mds_session *s;

	dout("check_new_map new %u old %u\n",
	     newmap->m_epoch, oldmap->m_epoch);

	for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i] == NULL)
			continue;
		s = mdsc->sessions[i];
		oldstate = ceph_mdsmap_get_state(oldmap, i);
		newstate = ceph_mdsmap_get_state(newmap, i);

		dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
		     i, ceph_mds_state_name(oldstate),
		     ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
		     ceph_mds_state_name(newstate),
		     ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
		     ceph_session_state_name(s->s_state));

		if (i >= newmap->m_max_mds ||
		    memcmp(ceph_mdsmap_get_addr(oldmap, i),
			   ceph_mdsmap_get_addr(newmap, i),
			   sizeof(struct ceph_entity_addr))) {
			if (s->s_state == CEPH_MDS_SESSION_OPENING) {
				/* the session never opened, just close it
				 * out now */
				__wake_requests(mdsc, &s->s_waiting);
				__unregister_session(mdsc, s);
			} else {
				/* just close it */
				mutex_unlock(&mdsc->mutex);
				mutex_lock(&s->s_mutex);
				mutex_lock(&mdsc->mutex);
				ceph_con_close(&s->s_con);
				mutex_unlock(&s->s_mutex);
				s->s_state = CEPH_MDS_SESSION_RESTARTING;
			}

			/* kick any requests waiting on the recovering mds */
			kick_requests(mdsc, i);
		} else if (oldstate == newstate) {
			continue;  /* nothing new with this mds */
		}

		/*
		 * send reconnect?
		 */
		if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
		    newstate >= CEPH_MDS_STATE_RECONNECT) {
			mutex_unlock(&mdsc->mutex);
			send_mds_reconnect(mdsc, s);
			mutex_lock(&mdsc->mutex);
		}

		/*
		 * kick request on any mds that has gone active.
		 */
		if (oldstate < CEPH_MDS_STATE_ACTIVE &&
		    newstate >= CEPH_MDS_STATE_ACTIVE) {
			if (oldstate != CEPH_MDS_STATE_CREATING &&
			    oldstate != CEPH_MDS_STATE_STARTING)
				pr_info("mds%d recovery completed\n", s->s_mds);
			kick_requests(mdsc, i);
			ceph_kick_flushing_caps(mdsc, s);
			wake_up_session_caps(s, 1);
		}
	}

	for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) {
		s = mdsc->sessions[i];
		if (!s)
			continue;
		if (!ceph_mdsmap_is_laggy(newmap, i))
			continue;
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG ||
		    s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout(" connecting to export targets of laggy mds%d\n",
			     i);
			__open_export_target_sessions(mdsc, s);
		}
	}
}
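/*
 * Note (illustrative): check_new_map() makes two passes.  The first
 * compares each existing session against the new map, closing or
 * restarting sessions whose MDS address changed and reconnecting or
 * kicking those that recovered; the second opens sessions to the
 * export targets of any MDS the new map marks laggy, so metadata can
 * still be reached while that rank catches up.
 */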
/*
 * caller must hold session s_mutex, dentry->d_lock
 */
void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);

	ceph_put_mds_session(di->lease_session);
	di->lease_session = NULL;
}
static void handle_lease(struct ceph_mds_client *mdsc,
			 struct ceph_mds_session *session,
			 struct ceph_msg *msg)
{
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct dentry *parent, *dentry;
	struct ceph_dentry_info *di;
	int mds = session->s_mds;
	struct ceph_mds_lease *h = msg->front.iov_base;
	u32 seq;
	struct ceph_vino vino;
	struct qstr dname;
	int release = 0;

	dout("handle_lease from mds%d\n", mds);

	/* decode */
	if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
		goto bad;
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	seq = le32_to_cpu(h->seq);
	dname.name = (void *)h + sizeof(*h) + sizeof(u32);
	dname.len = msg->front.iov_len - sizeof(*h) - sizeof(u32);
	if (dname.len != get_unaligned_le32(h+1))
		goto bad;

	/* lookup inode */
	inode = ceph_find_inode(sb, vino);
	dout("handle_lease %s, ino %llx %p %.*s\n",
	     ceph_lease_op_name(h->action), vino.ino, inode,
	     dname.len, dname.name);

	mutex_lock(&session->s_mutex);
	session->s_seq++;

	if (inode == NULL) {
		dout("handle_lease no inode %llx\n", vino.ino);
		goto release;
	}

	/* dentry */
	parent = d_find_alias(inode);
	if (!parent) {
		dout("no parent dentry on inode %p\n", inode);
		WARN_ON(1);
		goto release;  /* hrm... */
	}
	dname.hash = full_name_hash(dname.name, dname.len);
	dentry = d_lookup(parent, &dname);
	dput(parent);
	if (!dentry)
		goto release;

	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	switch (h->action) {
	case CEPH_MDS_LEASE_REVOKE:
		if (di->lease_session == session) {
			if (ceph_seq_cmp(di->lease_seq, seq) > 0)
				h->seq = cpu_to_le32(di->lease_seq);
			__ceph_mdsc_drop_dentry_lease(dentry);
		}
		release = 1;
		break;

	case CEPH_MDS_LEASE_RENEW:
		if (di->lease_session == session &&
		    di->lease_gen == session->s_cap_gen &&
		    di->lease_renew_from &&
		    di->lease_renew_after == 0) {
			unsigned long duration =
				le32_to_cpu(h->duration_ms) * HZ / 1000;

			di->lease_seq = seq;
			dentry->d_time = di->lease_renew_from + duration;
			di->lease_renew_after = di->lease_renew_from +
				(duration >> 1);
			di->lease_renew_from = 0;
		}
		break;
	}
	spin_unlock(&dentry->d_lock);
	dput(dentry);

	if (!release)
		goto out;

release:
	/* let's just reuse the same message */
	h->action = CEPH_MDS_LEASE_REVOKE_ACK;
	ceph_msg_get(msg);
	ceph_con_send(&session->s_con, msg);

out:
	iput(inode);
	mutex_unlock(&session->s_mutex);
	return;

bad:
	pr_err("corrupt lease message\n");
	ceph_msg_dump(msg);
}
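/*
 * Note (illustrative): on a REVOKE we ack with the highest lease seq
 * we were issued (see the ceph_seq_cmp() above), which lets the MDS
 * tell whether the ack also covers a lease it granted after sending
 * the revoke.
 */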
void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
			      struct inode *inode,
			      struct dentry *dentry, char action,
			      u32 seq)
{
	struct ceph_msg *msg;
	struct ceph_mds_lease *lease;
	int len = sizeof(*lease) + sizeof(u32);
	int dnamelen = 0;

	dout("lease_send_msg inode %p dentry %p %s to mds%d\n",
	     inode, dentry, ceph_lease_op_name(action), session->s_mds);
	dnamelen = dentry->d_name.len;
	len += dnamelen;

	msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
	if (!msg)
		return;
	lease = msg->front.iov_base;
	lease->action = action;
	lease->ino = cpu_to_le64(ceph_vino(inode).ino);
	lease->first = lease->last = cpu_to_le64(ceph_vino(inode).snap);
	lease->seq = cpu_to_le32(seq);
	put_unaligned_le32(dnamelen, lease + 1);
	memcpy((void *)(lease + 1) + 4, dentry->d_name.name, dnamelen);

	/*
	 * if this is a preemptive lease RELEASE, no need to
	 * flush request stream, since the actual request will
	 * soon follow.
	 */
	msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);

	ceph_con_send(&session->s_con, msg);
}
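/*
 * Sketch of the lease message built above (for reference):
 *
 *	struct ceph_mds_lease	(action, ino, first/last snap, seq)
 *	__le32 dname_len
 *	dname_len bytes of dentry name
 */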
/*
 * Preemptively release a lease we expect to invalidate anyway.
 * Pass @inode always, @dentry is optional.
 */
void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode,
			     struct dentry *dentry)
{
	struct ceph_dentry_info *di;
	struct ceph_mds_session *session;
	u32 seq;

	BUG_ON(inode == NULL);
	BUG_ON(dentry == NULL);

	/* is dentry lease valid? */
	spin_lock(&dentry->d_lock);
	di = ceph_dentry(dentry);
	if (!di || !di->lease_session ||
	    di->lease_session->s_mds < 0 ||
	    di->lease_gen != di->lease_session->s_cap_gen ||
	    !time_before(jiffies, dentry->d_time)) {
		dout("lease_release inode %p dentry %p -- "
		     "no lease\n",
		     inode, dentry);
		spin_unlock(&dentry->d_lock);
		return;
	}

	/* we do have a lease on this dentry; note mds and seq */
	session = ceph_get_mds_session(di->lease_session);
	seq = di->lease_seq;
	__ceph_mdsc_drop_dentry_lease(dentry);
	spin_unlock(&dentry->d_lock);

	dout("lease_release inode %p dentry %p to mds%d\n",
	     inode, dentry, session->s_mds);
	ceph_mdsc_lease_send_msg(session, inode, dentry,
				 CEPH_MDS_LEASE_RELEASE, seq);
	ceph_put_mds_session(session);
}
/*
 * drop all leases (and dentry refs) in preparation for umount
 */
static void drop_leases(struct ceph_mds_client *mdsc)
{
	int i;

	dout("drop_leases\n");
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (!s)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&s->s_mutex);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);
}
/*
 * delayed work -- periodically trim expired leases, renew caps with mds
 */
static void schedule_delayed(struct ceph_mds_client *mdsc)
{
	int delay = 5;
	unsigned hz = round_jiffies_relative(HZ * delay);
	schedule_delayed_work(&mdsc->delayed_work, hz);
}
static void delayed_work(struct work_struct *work)
{
	int i;
	struct ceph_mds_client *mdsc =
		container_of(work, struct ceph_mds_client, delayed_work.work);
	int renew_interval;
	int renew_caps;

	dout("mdsc delayed_work\n");
	ceph_check_delayed_caps(mdsc);

	mutex_lock(&mdsc->mutex);
	renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
	renew_caps = time_after_eq(jiffies, HZ*renew_interval +
				   mdsc->last_renew_caps);
	if (renew_caps)
		mdsc->last_renew_caps = jiffies;

	for (i = 0; i < mdsc->max_sessions; i++) {
		struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
		if (s == NULL)
			continue;
		if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
			dout("resending session close request for mds%d\n",
			     s->s_mds);
			request_close_session(mdsc, s);
			ceph_put_mds_session(s);
			continue;
		}
		if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
			if (s->s_state == CEPH_MDS_SESSION_OPEN) {
				s->s_state = CEPH_MDS_SESSION_HUNG;
				pr_info("mds%d hung\n", s->s_mds);
			}
		}
		if (s->s_state < CEPH_MDS_SESSION_OPEN) {
			/* this mds is failed or recovering, just wait */
			ceph_put_mds_session(s);
			continue;
		}
		mutex_unlock(&mdsc->mutex);

		mutex_lock(&s->s_mutex);
		if (renew_caps)
			send_renew_caps(mdsc, s);
		else
			ceph_con_keepalive(&s->s_con);
		ceph_add_cap_releases(mdsc, s);
		if (s->s_state == CEPH_MDS_SESSION_OPEN ||
		    s->s_state == CEPH_MDS_SESSION_HUNG)
			ceph_send_cap_releases(mdsc, s);
		mutex_unlock(&s->s_mutex);
		ceph_put_mds_session(s);

		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	schedule_delayed(mdsc);
}
int ceph_mdsc_init(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc;

	mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
	if (!mdsc)
		return -ENOMEM;
	mdsc->fsc = fsc;
	fsc->mdsc = mdsc;
	mutex_init(&mdsc->mutex);
	mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
	if (mdsc->mdsmap == NULL) {
		kfree(mdsc);
		return -ENOMEM;
	}

	init_completion(&mdsc->safe_umount_waiters);
	init_waitqueue_head(&mdsc->session_close_wq);
	INIT_LIST_HEAD(&mdsc->waiting_for_map);
	mdsc->sessions = NULL;
	mdsc->max_sessions = 0;
	mdsc->stopping = 0;
	init_rwsem(&mdsc->snap_rwsem);
	mdsc->snap_realms = RB_ROOT;
	INIT_LIST_HEAD(&mdsc->snap_empty);
	spin_lock_init(&mdsc->snap_empty_lock);
	mdsc->last_tid = 0;
	mdsc->request_tree = RB_ROOT;
	INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
	mdsc->last_renew_caps = jiffies;
	INIT_LIST_HEAD(&mdsc->cap_delay_list);
	spin_lock_init(&mdsc->cap_delay_lock);
	INIT_LIST_HEAD(&mdsc->snap_flush_list);
	spin_lock_init(&mdsc->snap_flush_lock);
	mdsc->cap_flush_seq = 0;
	INIT_LIST_HEAD(&mdsc->cap_dirty);
	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
	mdsc->num_cap_flushing = 0;
	spin_lock_init(&mdsc->cap_dirty_lock);
	init_waitqueue_head(&mdsc->cap_flushing_wq);
	spin_lock_init(&mdsc->dentry_lru_lock);
	INIT_LIST_HEAD(&mdsc->dentry_lru);

	ceph_caps_init(mdsc);
	ceph_adjust_min_caps(mdsc, fsc->min_caps);

	return 0;
}
/*
 * Wait for safe replies on open mds requests.  If we time out, drop
 * all requests from the tree to avoid dangling dentry refs.
 */
static void wait_requests(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_request *req;
	struct ceph_fs_client *fsc = mdsc->fsc;

	mutex_lock(&mdsc->mutex);
	if (__get_oldest_req(mdsc)) {
		mutex_unlock(&mdsc->mutex);

		dout("wait_requests waiting for requests\n");
		wait_for_completion_timeout(&mdsc->safe_umount_waiters,
				    fsc->client->options->mount_timeout * HZ);

		/* tear down remaining requests */
		mutex_lock(&mdsc->mutex);
		while ((req = __get_oldest_req(mdsc))) {
			dout("wait_requests timed out on tid %llu\n",
			     req->r_tid);
			__unregister_request(mdsc, req);
		}
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_requests done\n");
}
/*
 * called before mount is ro, and before dentries are torn down.
 * (hmm, does this still race with new lookups?)
 */
void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
{
	dout("pre_umount\n");
	mdsc->stopping = 1;

	drop_leases(mdsc);
	ceph_flush_dirty_caps(mdsc);
	wait_requests(mdsc);

	/*
	 * wait for reply handlers to drop their request refs and
	 * their inode/dcache refs
	 */
	ceph_msgr_flush();
}
/*
 * wait for all write mds requests to flush.
 */
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
	struct ceph_mds_request *req = NULL, *nextreq;
	struct rb_node *n;

	mutex_lock(&mdsc->mutex);
	dout("wait_unsafe_requests want %lld\n", want_tid);
restart:
	req = __get_oldest_req(mdsc);
	while (req && req->r_tid <= want_tid) {
		/* find next request */
		n = rb_next(&req->r_node);
		if (n)
			nextreq = rb_entry(n, struct ceph_mds_request, r_node);
		else
			nextreq = NULL;
		if ((req->r_op & CEPH_MDS_OP_WRITE)) {
			/* write op */
			ceph_mdsc_get_request(req);
			if (nextreq)
				ceph_mdsc_get_request(nextreq);
			mutex_unlock(&mdsc->mutex);
			dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
			     req->r_tid, want_tid);
			wait_for_completion(&req->r_safe_completion);
			mutex_lock(&mdsc->mutex);
			ceph_mdsc_put_request(req);
			if (!nextreq)
				break;  /* next dne before, so we're done! */
			if (RB_EMPTY_NODE(&nextreq->r_node)) {
				/* next request was removed from tree */
				ceph_mdsc_put_request(nextreq);
				goto restart;
			}
			ceph_mdsc_put_request(nextreq);  /* won't go away */
		}
		req = nextreq;
	}
	mutex_unlock(&mdsc->mutex);
	dout("wait_unsafe_requests done\n");
}
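/*
 * Note (illustrative): nextreq is pinned with a reference before
 * mdsc->mutex is dropped for the wait; if it was unregistered in the
 * meantime its rb_node is empty and the walk restarts from the oldest
 * request instead of following a stale pointer.
 */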
void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
{
	u64 want_tid, want_flush;

	if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
		return;

	dout("sync\n");
	mutex_lock(&mdsc->mutex);
	want_tid = mdsc->last_tid;
	want_flush = mdsc->cap_flush_seq;
	mutex_unlock(&mdsc->mutex);
	dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);

	ceph_flush_dirty_caps(mdsc);

	wait_unsafe_requests(mdsc, want_tid);
	wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
}
/*
 * true if all sessions are closed, or we force unmount
 */
static bool done_closing_sessions(struct ceph_mds_client *mdsc)
{
	int i, n = 0;

	if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
		return true;

	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++)
		if (mdsc->sessions[i])
			n++;
	mutex_unlock(&mdsc->mutex);
	return n == 0;
}
/*
 * called after sb is ro.
 */
void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
{
	struct ceph_mds_session *session;
	int i;
	struct ceph_fs_client *fsc = mdsc->fsc;
	unsigned long timeout = fsc->client->options->mount_timeout * HZ;

	dout("close_sessions\n");

	/* close sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		session = __ceph_lookup_mds_session(mdsc, i);
		if (!session)
			continue;
		mutex_unlock(&mdsc->mutex);
		mutex_lock(&session->s_mutex);
		__close_session(mdsc, session);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		mutex_lock(&mdsc->mutex);
	}
	mutex_unlock(&mdsc->mutex);

	dout("waiting for sessions to close\n");
	wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc),
			   timeout);

	/* tear down remaining sessions */
	mutex_lock(&mdsc->mutex);
	for (i = 0; i < mdsc->max_sessions; i++) {
		if (mdsc->sessions[i]) {
			session = get_session(mdsc->sessions[i]);
			__unregister_session(mdsc, session);
			mutex_unlock(&mdsc->mutex);
			mutex_lock(&session->s_mutex);
			remove_session_caps(session);
			mutex_unlock(&session->s_mutex);
			ceph_put_mds_session(session);
			mutex_lock(&mdsc->mutex);
		}
	}
	WARN_ON(!list_empty(&mdsc->cap_delay_list));
	mutex_unlock(&mdsc->mutex);

	ceph_cleanup_empty_realms(mdsc);

	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */

	dout("stopped\n");
}
static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
	if (mdsc->mdsmap)
		ceph_mdsmap_destroy(mdsc->mdsmap);
	kfree(mdsc->sessions);
	ceph_caps_finalize(mdsc);
}
void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	dout("mdsc_destroy %p\n", mdsc);
	ceph_mdsc_stop(mdsc);

	/* flush out any connection work with references to us */
	ceph_msgr_flush();

	fsc->mdsc = NULL;
	kfree(mdsc);
	dout("mdsc_destroy %p done\n", mdsc);
}
/*
 * handle mds map update.
 */
void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
{
	u32 epoch;
	u32 maplen;
	void *p = msg->front.iov_base;
	void *end = p + msg->front.iov_len;
	struct ceph_mdsmap *newmap, *oldmap;
	struct ceph_fsid fsid;
	int err = -EINVAL;

	ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
		return;
	epoch = ceph_decode_32(&p);
	maplen = ceph_decode_32(&p);
	dout("handle_map epoch %u len %d\n", epoch, (int)maplen);

	/* do we need it? */
	ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch);
	mutex_lock(&mdsc->mutex);
	if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
		dout("handle_map epoch %u <= our %u\n",
		     epoch, mdsc->mdsmap->m_epoch);
		mutex_unlock(&mdsc->mutex);
		return;
	}

	newmap = ceph_mdsmap_decode(&p, end);
	if (IS_ERR(newmap)) {
		err = PTR_ERR(newmap);
		goto bad_unlock;
	}

	/* swap into place */
	if (mdsc->mdsmap) {
		oldmap = mdsc->mdsmap;
		mdsc->mdsmap = newmap;
		check_new_map(mdsc, newmap, oldmap);
		ceph_mdsmap_destroy(oldmap);
	} else {
		mdsc->mdsmap = newmap;  /* first mds map */
	}
	mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size;

	__wake_requests(mdsc, &mdsc->waiting_for_map);

	mutex_unlock(&mdsc->mutex);
	schedule_delayed(mdsc);
	return;

bad_unlock:
	mutex_unlock(&mdsc->mutex);
bad:
	pr_err("error decoding mdsmap %d\n", err);
	return;
}
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	if (get_session(s)) {
		dout("mdsc con_get %p ok (%d)\n", s, atomic_read(&s->s_ref));
		return con;
	}
	dout("mdsc con_get %p FAIL\n", s);
	return NULL;
}

static void con_put(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;

	dout("mdsc con_put %p (%d)\n", s, atomic_read(&s->s_ref) - 1);
	ceph_put_mds_session(s);
}
/*
 * if the client is unresponsive for long enough, the mds will kill
 * the session entirely.
 */
static void peer_reset(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;

	pr_warn("mds%d closed our session\n", s->s_mds);
	send_mds_reconnect(mdsc, s);
}
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	int type = le16_to_cpu(msg->hdr.type);

	mutex_lock(&mdsc->mutex);
	if (__verify_registered_session(mdsc, s) < 0) {
		mutex_unlock(&mdsc->mutex);
		goto out;
	}
	mutex_unlock(&mdsc->mutex);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_map(mdsc, msg);
		break;
	case CEPH_MSG_CLIENT_SESSION:
		handle_session(s, msg);
		break;
	case CEPH_MSG_CLIENT_REPLY:
		handle_reply(s, msg);
		break;
	case CEPH_MSG_CLIENT_REQUEST_FORWARD:
		handle_forward(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_CAPS:
		ceph_handle_caps(s, msg);
		break;
	case CEPH_MSG_CLIENT_SNAP:
		ceph_handle_snap(mdsc, s, msg);
		break;
	case CEPH_MSG_CLIENT_LEASE:
		handle_lease(mdsc, s, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
out:
	ceph_msg_put(msg);
}
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
					int *proto, int force_new)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
	struct ceph_auth_handshake *auth = &s->s_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(ac, auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}
static int verify_authorizer_reply(struct ceph_connection *con, int len)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len);
}
static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_mds_client *mdsc = s->s_mdsc;
	struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);

	return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
}
static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
				struct ceph_msg_header *hdr, int *skip)
{
	struct ceph_msg *msg;
	int type = (int) le16_to_cpu(hdr->type);
	int front_len = (int) le32_to_cpu(hdr->front_len);

	if (con->in_msg)
		return con->in_msg;

	*skip = 0;
	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
	if (!msg) {
		pr_err("unable to allocate msg type %d len %d\n",
		       type, front_len);
		return NULL;
	}

	return msg;
}
static int sign_message(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int check_message_signature(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mds_session *s = con->private;
	struct ceph_auth_handshake *auth = &s->s_auth;

	return ceph_auth_check_message_signature(auth, msg);
}
static const struct ceph_connection_operations mds_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.peer_reset = peer_reset,
	.alloc_msg = mds_alloc_msg,
	.sign_message = sign_message,
	.check_message_signature = check_message_signature,
};