#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/sched.h>

#include <linux/ceph/mon_client.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/debugfs.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>

/*
 * Interact with the Ceph monitor cluster.  Handle requests for new map
 * versions, and periodically resend as needed.  Also implement
 * statfs() and umount().
 *
 * A small cluster of Ceph "monitors" is responsible for managing critical
 * cluster configuration and state information.  An odd number (e.g., 3, 5)
 * of cmon daemons use a modified version of the Paxos part-time parliament
 * algorithm to manage the MDS map (mds cluster membership), OSD map, and
 * list of clients who have mounted the file system.
 *
 * We maintain an open, active session with a monitor at all times in order to
 * receive timely MDSMap updates.  We periodically send a keepalive byte on the
 * TCP socket to ensure we detect a failure.  If the connection does break, we
 * randomly hunt for a new monitor.  Once the connection is reestablished, we
 * resend any outstanding requests.
 */

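/*
 * A rough sketch of how this client is normally driven (simplified; in
 * the kernel this sequence is handled by ceph_create_client() and
 * __ceph_open_session() in libceph, and error handling is omitted):
 *
 *	ceph_monc_init(&client->monc, client);
 *	ceph_monc_open_session(&client->monc);    - pick a mon, authenticate
 *	ceph_monc_wait_osdmap(&client->monc, epoch, timeout);
 *	...
 *	ceph_monc_stop(&client->monc);            - on umount/shutdown
 */
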
static const struct ceph_connection_operations mon_con_ops;

static int __validate_auth(struct ceph_mon_client *monc);

/*
 * Decode a monmap blob (e.g., during mount).
 */
struct ceph_monmap *ceph_monmap_decode(void *p, void *end)
{
	struct ceph_monmap *m = NULL;
	int i, err = -EINVAL;
	struct ceph_fsid fsid;
	u32 epoch, num_mon;
	u16 version;
	u32 len;

	ceph_decode_32_safe(&p, end, len, bad);
	ceph_decode_need(&p, end, len, bad);

	dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p));

	ceph_decode_16_safe(&p, end, version, bad);

	ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad);
	ceph_decode_copy(&p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(&p);

	num_mon = ceph_decode_32(&p);
	ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad);

	if (num_mon >= CEPH_MAX_MON)
		goto bad;
	m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS);
	if (m == NULL)
		return ERR_PTR(-ENOMEM);
	m->fsid = fsid;
	m->epoch = epoch;
	m->num_mon = num_mon;
	ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0]));
	for (i = 0; i < num_mon; i++)
		ceph_decode_addr(&m->mon_inst[i].addr);

	dout("monmap_decode epoch %d, num_mon %d\n", m->epoch,
	     m->num_mon);
	for (i = 0; i < m->num_mon; i++)
		dout("monmap_decode mon%d is %s\n", i,
		     ceph_pr_addr(&m->mon_inst[i].addr.in_addr));
	return m;

bad:
	dout("monmap_decode failed with %d\n", err);
	kfree(m);
	return ERR_PTR(err);
}

/*
 * return true if *addr is included in the monmap.
 */
int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr)
{
	int i;

	for (i = 0; i < m->num_mon; i++)
		if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0)
			return 1;
	return 0;
}

/*
 * Send an auth request.
 */
static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len)
{
	monc->pending_auth = 1;
	monc->m_auth->front.iov_len = len;
	monc->m_auth->hdr.front_len = cpu_to_le32(len);
	ceph_msg_revoke(monc->m_auth);
	ceph_msg_get(monc->m_auth);  /* keep our ref */
	ceph_con_send(&monc->con, monc->m_auth);
}

/*
 * Close monitor session, if any.
 */
static void __close_session(struct ceph_mon_client *monc)
{
	dout("__close_session closing mon%d\n", monc->cur_mon);
	ceph_msg_revoke(monc->m_auth);
	ceph_msg_revoke_incoming(monc->m_auth_reply);
	ceph_msg_revoke(monc->m_subscribe);
	ceph_msg_revoke_incoming(monc->m_subscribe_ack);
	ceph_con_close(&monc->con);

	monc->pending_auth = 0;
	ceph_auth_reset(monc->auth);
}

/*
 * Pick a new monitor at random and set cur_mon.  If we are repicking
 * (i.e. cur_mon is already set), be sure to pick a different one.
 */
static void pick_new_mon(struct ceph_mon_client *monc)
{
	int old_mon = monc->cur_mon;

	BUG_ON(monc->monmap->num_mon < 1);

	if (monc->monmap->num_mon == 1) {
		monc->cur_mon = 0;
	} else {
		int max = monc->monmap->num_mon;
		int o = -1;
		int n;

		if (monc->cur_mon >= 0) {
			if (monc->cur_mon < monc->monmap->num_mon)
				o = monc->cur_mon;
			if (o >= 0)
				max--;
		}

		n = prandom_u32() % max;
		if (o >= 0 && n >= o)
			n++;

		monc->cur_mon = n;
	}

	dout("%s mon%d -> mon%d out of %d mons\n", __func__, old_mon,
	     monc->cur_mon, monc->monmap->num_mon);
}

/*
 * Open a session with a new monitor.
 */
static void __open_session(struct ceph_mon_client *monc)
{
	int ret;

	pick_new_mon(monc);

	monc->hunting = true;
	if (monc->had_a_connection) {
		monc->hunt_mult *= CEPH_MONC_HUNT_BACKOFF;
		if (monc->hunt_mult > CEPH_MONC_HUNT_MAX_MULT)
			monc->hunt_mult = CEPH_MONC_HUNT_MAX_MULT;
	}

	monc->sub_renew_after = jiffies; /* i.e., expired */
	monc->sub_renew_sent = 0;

	dout("%s opening mon%d\n", __func__, monc->cur_mon);
	ceph_con_open(&monc->con, CEPH_ENTITY_TYPE_MON, monc->cur_mon,
		      &monc->monmap->mon_inst[monc->cur_mon].addr);

	/*
	 * send an initial keepalive to ensure our timestamp is valid
	 * by the time we are in an OPENED state
	 */
	ceph_con_keepalive(&monc->con);

	/* initiate authentication handshake */
	ret = ceph_auth_build_hello(monc->auth,
				    monc->m_auth->front.iov_base,
				    monc->m_auth->front_alloc_len);
	BUG_ON(ret <= 0);
	__send_prepared_auth_request(monc, ret);
}

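/*
 * Tear down the current session and immediately start hunting for a
 * (possibly different) monitor to replace it.
 */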
static void reopen_session(struct ceph_mon_client *monc)
{
	if (!monc->hunting)
		pr_info("mon%d %s session lost, hunting for new mon\n",
		    monc->cur_mon, ceph_pr_addr(&monc->con.peer_addr.in_addr));

	__close_session(monc);
	__open_session(monc);
}

/*
 * Reschedule delayed work timer.
 */
static void __schedule_delayed(struct ceph_mon_client *monc)
{
	unsigned long delay;

	if (monc->hunting)
		delay = CEPH_MONC_HUNT_INTERVAL * monc->hunt_mult;
	else
		delay = CEPH_MONC_PING_INTERVAL;

	dout("__schedule_delayed after %lu\n", delay);
	mod_delayed_work(system_wq, &monc->delayed_work,
			 round_jiffies_relative(delay));
}

const char *ceph_sub_str[] = {
	[CEPH_SUB_MDSMAP] = "mdsmap",
	[CEPH_SUB_MONMAP] = "monmap",
	[CEPH_SUB_OSDMAP] = "osdmap",
};

/*
 * Send subscribe request for one or more maps, according to
 * monc->subs.
 */
static void __send_subscribe(struct ceph_mon_client *monc)
{
	struct ceph_msg *msg = monc->m_subscribe;
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	int num = 0;
	int i;

	dout("%s sent %lu\n", __func__, monc->sub_renew_sent);

	BUG_ON(monc->cur_mon < 0);

	if (!monc->sub_renew_sent)
		monc->sub_renew_sent = jiffies | 1; /* never 0 */

	msg->hdr.version = cpu_to_le16(2);

	for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
		if (monc->subs[i].want)
			num++;
	}
	BUG_ON(num < 1); /* monmap sub is always there */
	ceph_encode_32(&p, num);
	for (i = 0; i < ARRAY_SIZE(monc->subs); i++) {
		char buf[32];
		int len;

		if (!monc->subs[i].want)
			continue;

		len = sprintf(buf, "%s", ceph_sub_str[i]);
		if (i == CEPH_SUB_MDSMAP &&
		    monc->fs_cluster_id != CEPH_FS_CLUSTER_ID_NONE)
			len += sprintf(buf + len, ".%d", monc->fs_cluster_id);

		dout("%s %s start %llu flags 0x%x\n", __func__, buf,
		     le64_to_cpu(monc->subs[i].item.start),
		     monc->subs[i].item.flags);
		ceph_encode_string(&p, end, buf, len);
		memcpy(p, &monc->subs[i].item, sizeof(monc->subs[i].item));
		p += sizeof(monc->subs[i].item);
	}

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	ceph_msg_revoke(msg);
	ceph_con_send(&monc->con, ceph_msg_get(msg));
}

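/*
 * Handle a subscribe ack: record when the subscription should be
 * renewed (roughly halfway through the duration granted by the
 * monitor).
 */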
static void handle_subscribe_ack(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	unsigned int seconds;
	struct ceph_mon_subscribe_ack *h = msg->front.iov_base;

	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	seconds = le32_to_cpu(h->duration);

	mutex_lock(&monc->mutex);
	if (monc->sub_renew_sent) {
		monc->sub_renew_after = monc->sub_renew_sent +
					    (seconds >> 1) * HZ - 1;
		dout("%s sent %lu duration %d renew after %lu\n", __func__,
		     monc->sub_renew_sent, seconds, monc->sub_renew_after);
		monc->sub_renew_sent = 0;
	} else {
		dout("%s sent %lu renew after %lu, ignoring\n", __func__,
		     monc->sub_renew_sent, monc->sub_renew_after);
	}
	mutex_unlock(&monc->mutex);
	return;
bad:
	pr_err("got corrupt subscribe-ack msg\n");
	ceph_msg_dump(msg);
}

/*
 * Register interest in a map
 *
 * @sub: one of CEPH_SUB_*
 * @epoch: X for "every map since X", or 0 for "just the latest"
 */
static bool __ceph_monc_want_map(struct ceph_mon_client *monc, int sub,
				 u32 epoch, bool continuous)
{
	__le64 start = cpu_to_le64(epoch);
	u8 flags = !continuous ? CEPH_SUBSCRIBE_ONETIME : 0;

	dout("%s %s epoch %u continuous %d\n", __func__, ceph_sub_str[sub],
	     epoch, continuous);

	if (monc->subs[sub].want &&
	    monc->subs[sub].item.start == start &&
	    monc->subs[sub].item.flags == flags)
		return false;

	monc->subs[sub].item.start = start;
	monc->subs[sub].item.flags = flags;
	monc->subs[sub].want = true;

	return true;
}

bool ceph_monc_want_map(struct ceph_mon_client *monc, int sub, u32 epoch,
			bool continuous)
{
	bool need_request;

	mutex_lock(&monc->mutex);
	need_request = __ceph_monc_want_map(monc, sub, epoch, continuous);
	mutex_unlock(&monc->mutex);

	return need_request;
}
EXPORT_SYMBOL(ceph_monc_want_map);

/*
 * Keep track of which maps we have
 *
 * @sub: one of CEPH_SUB_*
 */
static void __ceph_monc_got_map(struct ceph_mon_client *monc, int sub,
				u32 epoch)
{
	dout("%s %s epoch %u\n", __func__, ceph_sub_str[sub], epoch);

	if (monc->subs[sub].want) {
		if (monc->subs[sub].item.flags & CEPH_SUBSCRIBE_ONETIME)
			monc->subs[sub].want = false;
		else
			monc->subs[sub].item.start = cpu_to_le64(epoch + 1);
	}

	monc->subs[sub].have = epoch;
}

void ceph_monc_got_map(struct ceph_mon_client *monc, int sub, u32 epoch)
{
	mutex_lock(&monc->mutex);
	__ceph_monc_got_map(monc, sub, epoch);
	mutex_unlock(&monc->mutex);
}
EXPORT_SYMBOL(ceph_monc_got_map);

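/*
 * Immediately (re)send the subscribe request for the currently wanted
 * maps.  Callers use this when their subscription needs change outside
 * of the normal renewal cycle.
 */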
void ceph_monc_renew_subs(struct ceph_mon_client *monc)
{
	mutex_lock(&monc->mutex);
	__send_subscribe(monc);
	mutex_unlock(&monc->mutex);
}
EXPORT_SYMBOL(ceph_monc_renew_subs);

/*
 * Wait for an osdmap with a given epoch.
 *
 * @epoch: epoch to wait for
 * @timeout: in jiffies, 0 means "wait forever"
 */
int ceph_monc_wait_osdmap(struct ceph_mon_client *monc, u32 epoch,
			  unsigned long timeout)
{
	unsigned long started = jiffies;
	long ret;

	mutex_lock(&monc->mutex);
	while (monc->subs[CEPH_SUB_OSDMAP].have < epoch) {
		mutex_unlock(&monc->mutex);

		if (timeout && time_after_eq(jiffies, started + timeout))
			return -ETIMEDOUT;

		ret = wait_event_interruptible_timeout(monc->client->auth_wq,
				monc->subs[CEPH_SUB_OSDMAP].have >= epoch,
				ceph_timeout_jiffies(timeout));
		if (ret < 0)
			return ret;

		mutex_lock(&monc->mutex);
	}

	mutex_unlock(&monc->mutex);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_wait_osdmap);

/*
 * Open a session with a random monitor.  Request monmap and osdmap,
 * which are waited upon in __ceph_open_session().
 */
int ceph_monc_open_session(struct ceph_mon_client *monc)
{
	mutex_lock(&monc->mutex);
	__ceph_monc_want_map(monc, CEPH_SUB_MONMAP, 0, true);
	__ceph_monc_want_map(monc, CEPH_SUB_OSDMAP, 0, false);
	__open_session(monc);
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_open_session);

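/*
 * Handle an incoming mon map: decode it, verify the fsid, and replace
 * our current monmap with the new one.
 */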
static void ceph_monc_handle_map(struct ceph_mon_client *monc,
				 struct ceph_msg *msg)
{
	struct ceph_client *client = monc->client;
	struct ceph_monmap *monmap = NULL, *old = monc->monmap;
	void *p, *end;

	mutex_lock(&monc->mutex);

	dout("handle_monmap\n");
	p = msg->front.iov_base;
	end = p + msg->front.iov_len;

	monmap = ceph_monmap_decode(p, end);
	if (IS_ERR(monmap)) {
		pr_err("problem decoding monmap, %d\n",
		       (int)PTR_ERR(monmap));
		goto out;
	}

	if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) {
		kfree(monmap);
		goto out;
	}

	client->monc.monmap = monmap;
	kfree(old);

	__ceph_monc_got_map(monc, CEPH_SUB_MONMAP, monc->monmap->epoch);
	client->have_fsid = true;

out:
	mutex_unlock(&monc->mutex);
	wake_up_all(&client->auth_wq);
}

/*
 * generic requests (currently statfs, mon_get_version)
 */
DEFINE_RB_FUNCS(generic_request, struct ceph_mon_generic_request, tid, node)

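/*
 * Generic request lifecycle: alloc_generic_request() creates a
 * refcounted request, register_generic_request() assigns a tid and
 * inserts it into the tree, send_generic_request() queues it on the
 * monitor connection, and the reply handler fills in the result and
 * completes it.  finish/cancel drop the tree's reference; the caller's
 * reference is dropped with put_generic_request().
 */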
static void release_generic_request(struct kref *kref)
{
	struct ceph_mon_generic_request *req =
		container_of(kref, struct ceph_mon_generic_request, kref);

	dout("%s greq %p request %p reply %p\n", __func__, req, req->request,
	     req->reply);
	WARN_ON(!RB_EMPTY_NODE(&req->node));

	if (req->reply)
		ceph_msg_put(req->reply);
	if (req->request)
		ceph_msg_put(req->request);

	kfree(req);
}

static void put_generic_request(struct ceph_mon_generic_request *req)
{
	if (req)
		kref_put(&req->kref, release_generic_request);
}

static void get_generic_request(struct ceph_mon_generic_request *req)
{
	kref_get(&req->kref);
}

static struct ceph_mon_generic_request *
alloc_generic_request(struct ceph_mon_client *monc, gfp_t gfp)
{
	struct ceph_mon_generic_request *req;

	req = kzalloc(sizeof(*req), gfp);
	if (!req)
		return NULL;

	req->monc = monc;
	kref_init(&req->kref);
	RB_CLEAR_NODE(&req->node);
	init_completion(&req->completion);

	dout("%s greq %p\n", __func__, req);
	return req;
}

static void register_generic_request(struct ceph_mon_generic_request *req)
{
	struct ceph_mon_client *monc = req->monc;

	WARN_ON(req->tid);

	get_generic_request(req);
	req->tid = ++monc->last_tid;
	insert_generic_request(&monc->generic_request_tree, req);
}

static void send_generic_request(struct ceph_mon_client *monc,
				 struct ceph_mon_generic_request *req)
{
	WARN_ON(!req->tid);

	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
	req->request->hdr.tid = cpu_to_le64(req->tid);
	ceph_con_send(&monc->con, ceph_msg_get(req->request));
}

static void __finish_generic_request(struct ceph_mon_generic_request *req)
{
	struct ceph_mon_client *monc = req->monc;

	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
	erase_generic_request(&monc->generic_request_tree, req);

	ceph_msg_revoke(req->request);
	ceph_msg_revoke_incoming(req->reply);
}

static void finish_generic_request(struct ceph_mon_generic_request *req)
{
	__finish_generic_request(req);
	put_generic_request(req);
}

static void complete_generic_request(struct ceph_mon_generic_request *req)
{
	if (req->complete_cb)
		req->complete_cb(req);
	else
		complete_all(&req->completion);
	put_generic_request(req);
}

void cancel_generic_request(struct ceph_mon_generic_request *req)
{
	struct ceph_mon_client *monc = req->monc;
	struct ceph_mon_generic_request *lookup_req;

	dout("%s greq %p tid %llu\n", __func__, req, req->tid);

	mutex_lock(&monc->mutex);
	lookup_req = lookup_generic_request(&monc->generic_request_tree,
					    req->tid);
	if (lookup_req) {
		WARN_ON(lookup_req != req);
		finish_generic_request(req);
	}

	mutex_unlock(&monc->mutex);
}

static int wait_generic_request(struct ceph_mon_generic_request *req)
{
	int ret;

	dout("%s greq %p tid %llu\n", __func__, req, req->tid);
	ret = wait_for_completion_interruptible(&req->completion);
	if (ret)
		cancel_generic_request(req);
	else
		ret = req->result; /* completed */

	return ret;
}

static struct ceph_msg *get_generic_reply(struct ceph_connection *con,
					  struct ceph_msg_header *hdr,
					  int *skip)
{
	struct ceph_mon_client *monc = con->private;
	struct ceph_mon_generic_request *req;
	u64 tid = le64_to_cpu(hdr->tid);
	struct ceph_msg *m;

	mutex_lock(&monc->mutex);
	req = lookup_generic_request(&monc->generic_request_tree, tid);
	if (!req) {
		dout("get_generic_reply %lld dne\n", tid);
		*skip = 1;
		m = NULL;
	} else {
		dout("get_generic_reply %lld got %p\n", tid, req->reply);
		*skip = 0;
		m = ceph_msg_get(req->reply);
		/*
		 * we don't need to track the connection reading into
		 * this reply because we only have one open connection
		 * at a time, ever.
		 */
	}
	mutex_unlock(&monc->mutex);
	return m;
}

/*
 * statfs
 */
static void handle_statfs_reply(struct ceph_mon_client *monc,
				struct ceph_msg *msg)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs_reply *reply = msg->front.iov_base;
	u64 tid = le64_to_cpu(msg->hdr.tid);

	dout("%s msg %p tid %llu\n", __func__, msg, tid);

	if (msg->front.iov_len != sizeof(*reply))
		goto bad;

	mutex_lock(&monc->mutex);
	req = lookup_generic_request(&monc->generic_request_tree, tid);
	if (!req) {
		mutex_unlock(&monc->mutex);
		return;
	}

	req->result = 0;
	*req->u.st = reply->st; /* struct */
	__finish_generic_request(req);
	mutex_unlock(&monc->mutex);

	complete_generic_request(req);
	return;

bad:
	pr_err("corrupt statfs reply, tid %llu\n", tid);
	ceph_msg_dump(msg);
}

/*
 * Do a synchronous statfs().
 */
int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
{
	struct ceph_mon_generic_request *req;
	struct ceph_mon_statfs *h;
	int ret = -ENOMEM;

	req = alloc_generic_request(monc, GFP_NOFS);
	if (!req)
		goto out;

	req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS,
				    true);
	if (!req->request)
		goto out;

	req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 64, GFP_NOFS, true);
	if (!req->reply)
		goto out;

	req->u.st = buf;

	mutex_lock(&monc->mutex);
	register_generic_request(req);
	/* fill out request */
	h = req->request->front.iov_base;
	h->monhdr.have_version = 0;
	h->monhdr.session_mon = cpu_to_le16(-1);
	h->monhdr.session_mon_tid = 0;
	h->fsid = monc->monmap->fsid;
	send_generic_request(monc, req);
	mutex_unlock(&monc->mutex);

	ret = wait_generic_request(req);
out:
	put_generic_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_monc_do_statfs);

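/*
 * mon_get_version: ask the monitor for the newest epoch of a given map
 * ("mdsmap", "osdmap" or "monmap").
 */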
static void handle_get_version_reply(struct ceph_mon_client *monc,
				     struct ceph_msg *msg)
{
	struct ceph_mon_generic_request *req;
	u64 tid = le64_to_cpu(msg->hdr.tid);
	void *p = msg->front.iov_base;
	void *end = p + msg->front_alloc_len;
	u64 handle;

	dout("%s msg %p tid %llu\n", __func__, msg, tid);

	ceph_decode_need(&p, end, 2*sizeof(u64), bad);
	handle = ceph_decode_64(&p);
	if (tid != 0 && tid != handle)
		goto bad;

	mutex_lock(&monc->mutex);
	req = lookup_generic_request(&monc->generic_request_tree, handle);
	if (!req) {
		mutex_unlock(&monc->mutex);
		return;
	}

	req->result = 0;
	req->u.newest = ceph_decode_64(&p);
	__finish_generic_request(req);
	mutex_unlock(&monc->mutex);

	complete_generic_request(req);
	return;

bad:
	pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
	ceph_msg_dump(msg);
}

static struct ceph_mon_generic_request *
__ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
			ceph_monc_callback_t cb, u64 private_data)
{
	struct ceph_mon_generic_request *req;

	req = alloc_generic_request(monc, GFP_NOIO);
	if (!req)
		goto err_put_req;

	req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
				    sizeof(u64) + sizeof(u32) + strlen(what),
				    GFP_NOIO, true);
	if (!req->request)
		goto err_put_req;

	req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 32, GFP_NOIO,
				  true);
	if (!req->reply)
		goto err_put_req;

	req->complete_cb = cb;
	req->private_data = private_data;

	mutex_lock(&monc->mutex);
	register_generic_request(req);
	{
		void *p = req->request->front.iov_base;
		void *const end = p + req->request->front_alloc_len;

		ceph_encode_64(&p, req->tid); /* handle */
		ceph_encode_string(&p, end, what, strlen(what));
		WARN_ON(p != end);
	}
	send_generic_request(monc, req);
	mutex_unlock(&monc->mutex);

	return req;

err_put_req:
	put_generic_request(req);
	return ERR_PTR(-ENOMEM);
}

/*
 * Send MMonGetVersion and wait for the reply.
 *
 * @what: one of "mdsmap", "osdmap" or "monmap"
 */
int ceph_monc_get_version(struct ceph_mon_client *monc, const char *what,
			  u64 *newest)
{
	struct ceph_mon_generic_request *req;
	int ret;

	req = __ceph_monc_get_version(monc, what, NULL, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = wait_generic_request(req);
	if (!ret)
		*newest = req->u.newest;

	put_generic_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_monc_get_version);

/*
 * Send MMonGetVersion; the provided callback is invoked with the result.
 *
 * @what: one of "mdsmap", "osdmap" or "monmap"
 */
int ceph_monc_get_version_async(struct ceph_mon_client *monc, const char *what,
				ceph_monc_callback_t cb, u64 private_data)
{
	struct ceph_mon_generic_request *req;

	req = __ceph_monc_get_version(monc, what, cb, private_data);
	if (IS_ERR(req))
		return PTR_ERR(req);

	put_generic_request(req);
	return 0;
}
EXPORT_SYMBOL(ceph_monc_get_version_async);

/*
 * Resend pending generic requests.
 */
static void __resend_generic_request(struct ceph_mon_client *monc)
{
	struct ceph_mon_generic_request *req;
	struct rb_node *p;

	for (p = rb_first(&monc->generic_request_tree); p; p = rb_next(p)) {
		req = rb_entry(p, struct ceph_mon_generic_request, node);
		ceph_msg_revoke(req->request);
		ceph_msg_revoke_incoming(req->reply);
		ceph_con_send(&monc->con, ceph_msg_get(req->request));
	}
}

/*
 * Delayed work.  If we haven't mounted yet, retry.  Otherwise,
 * renew/retry subscription as needed (in case it is timing out, or we
 * got an ENOMEM).  And keep the monitor connection alive.
 */
static void delayed_work(struct work_struct *work)
{
	struct ceph_mon_client *monc =
		container_of(work, struct ceph_mon_client, delayed_work.work);

	dout("monc delayed_work\n");
	mutex_lock(&monc->mutex);
	if (monc->hunting) {
		dout("%s continuing hunt\n", __func__);
		reopen_session(monc);
	} else {
		int is_auth = ceph_auth_is_authenticated(monc->auth);
		if (ceph_con_keepalive_expired(&monc->con,
					       CEPH_MONC_PING_TIMEOUT)) {
			dout("monc keepalive timeout\n");
			is_auth = 0;
			reopen_session(monc);
		}

		if (!monc->hunting) {
			ceph_con_keepalive(&monc->con);
			__validate_auth(monc);
		}

		if (is_auth) {
			unsigned long now = jiffies;

			dout("%s renew subs? now %lu renew after %lu\n",
			     __func__, now, monc->sub_renew_after);
			if (time_after_eq(now, monc->sub_renew_after))
				__send_subscribe(monc);
		}
	}
	__schedule_delayed(monc);
	mutex_unlock(&monc->mutex);
}

/*
 * On startup, we build a temporary monmap populated with the IPs
 * provided by mount(2).
 */
static int build_initial_monmap(struct ceph_mon_client *monc)
{
	struct ceph_options *opt = monc->client->options;
	struct ceph_entity_addr *mon_addr = opt->mon_addr;
	int num_mon = opt->num_mon;
	int i;

	/* build initial monmap */
	monc->monmap = kzalloc(sizeof(*monc->monmap) +
			       num_mon*sizeof(monc->monmap->mon_inst[0]),
			       GFP_KERNEL);
	if (!monc->monmap)
		return -ENOMEM;
	for (i = 0; i < num_mon; i++) {
		monc->monmap->mon_inst[i].addr = mon_addr[i];
		monc->monmap->mon_inst[i].addr.nonce = 0;
		monc->monmap->mon_inst[i].name.type =
			CEPH_ENTITY_TYPE_MON;
		monc->monmap->mon_inst[i].name.num = cpu_to_le64(i);
	}
	monc->monmap->num_mon = num_mon;
	return 0;
}

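/*
 * Initialize the mon client: build the initial monmap from the mount
 * options, set up the auth handler, and preallocate the messages we
 * will need for the monitor session.
 */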
int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl)
{
	int err = 0;

	dout("init\n");
	memset(monc, 0, sizeof(*monc));
	monc->client = cl;
	monc->monmap = NULL;
	mutex_init(&monc->mutex);

	err = build_initial_monmap(monc);
	if (err)
		goto out;

	/* connection */
	/* authentication */
	monc->auth = ceph_auth_init(cl->options->name,
				    cl->options->key);
	if (IS_ERR(monc->auth)) {
		err = PTR_ERR(monc->auth);
		goto out_monmap;
	}
	monc->auth->want_keys =
		CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON |
		CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS;

	/* msgs */
	err = -ENOMEM;
	monc->m_subscribe_ack = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE_ACK,
				     sizeof(struct ceph_mon_subscribe_ack),
				     GFP_NOFS, true);
	if (!monc->m_subscribe_ack)
		goto out_auth;

	monc->m_subscribe = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 128, GFP_NOFS,
					 true);
	if (!monc->m_subscribe)
		goto out_subscribe_ack;

	monc->m_auth_reply = ceph_msg_new(CEPH_MSG_AUTH_REPLY, 4096, GFP_NOFS,
					  true);
	if (!monc->m_auth_reply)
		goto out_subscribe;

	monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, GFP_NOFS, true);
	monc->pending_auth = 0;
	if (!monc->m_auth)
		goto out_auth_reply;

	ceph_con_init(&monc->con, monc, &mon_con_ops,
		      &monc->client->msgr);

	monc->cur_mon = -1;
	monc->had_a_connection = false;
	monc->hunt_mult = 1;

	INIT_DELAYED_WORK(&monc->delayed_work, delayed_work);
	monc->generic_request_tree = RB_ROOT;
	monc->last_tid = 0;

	monc->fs_cluster_id = CEPH_FS_CLUSTER_ID_NONE;

	return 0;

out_auth_reply:
	ceph_msg_put(monc->m_auth_reply);
out_subscribe:
	ceph_msg_put(monc->m_subscribe);
out_subscribe_ack:
	ceph_msg_put(monc->m_subscribe_ack);
out_auth:
	ceph_auth_destroy(monc->auth);
out_monmap:
	kfree(monc->monmap);
out:
	return err;
}
EXPORT_SYMBOL(ceph_monc_init);

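/*
 * Shut down the mon client: stop the delayed work, close the session,
 * and release the preallocated messages and the auth handler.
 */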
void ceph_monc_stop(struct ceph_mon_client *monc)
{
	dout("stop\n");
	cancel_delayed_work_sync(&monc->delayed_work);

	mutex_lock(&monc->mutex);
	__close_session(monc);
	monc->cur_mon = -1;
	mutex_unlock(&monc->mutex);

	/*
	 * flush msgr queue before we destroy ourselves to ensure that:
	 *  - any work that references our embedded con is finished.
	 *  - any osd_client or other work that may reference an authorizer
	 *    finishes before we shut down the auth subsystem.
	 */
	ceph_msgr_flush();

	ceph_auth_destroy(monc->auth);

	WARN_ON(!RB_EMPTY_ROOT(&monc->generic_request_tree));

	ceph_msg_put(monc->m_auth);
	ceph_msg_put(monc->m_auth_reply);
	ceph_msg_put(monc->m_subscribe);
	ceph_msg_put(monc->m_subscribe_ack);

	kfree(monc->monmap);
}
EXPORT_SYMBOL(ceph_monc_stop);

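/*
 * A connection attempt succeeded: leave hunting mode and relax the
 * hunt backoff multiplier.
 */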
static void finish_hunting(struct ceph_mon_client *monc)
{
	if (monc->hunting) {
		dout("%s found mon%d\n", __func__, monc->cur_mon);
		monc->hunting = false;
		monc->had_a_connection = true;
		monc->hunt_mult /= 2; /* reduce by 50% */
		if (monc->hunt_mult < 1)
			monc->hunt_mult = 1;
	}
}

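/*
 * Handle an auth reply: either continue the handshake with another
 * auth request, or, once authenticated, send our subscriptions and
 * resend any outstanding generic requests.
 */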
static void handle_auth_reply(struct ceph_mon_client *monc,
			      struct ceph_msg *msg)
{
	int ret;
	int was_auth = 0;

	mutex_lock(&monc->mutex);
	was_auth = ceph_auth_is_authenticated(monc->auth);
	monc->pending_auth = 0;
	ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base,
				     msg->front.iov_len,
				     monc->m_auth->front.iov_base,
				     monc->m_auth->front_alloc_len);
	if (ret > 0) {
		__send_prepared_auth_request(monc, ret);
		goto out;
	}

	finish_hunting(monc);

	if (ret < 0) {
		monc->client->auth_err = ret;
	} else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) {
		dout("authenticated, starting session\n");

		monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT;
		monc->client->msgr.inst.name.num =
					cpu_to_le64(monc->auth->global_id);

		__send_subscribe(monc);
		__resend_generic_request(monc);

		pr_info("mon%d %s session established\n", monc->cur_mon,
			ceph_pr_addr(&monc->con.peer_addr.in_addr));
	}

out:
	mutex_unlock(&monc->mutex);
	if (monc->client->auth_err < 0)
		wake_up_all(&monc->client->auth_wq);
}

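/*
 * Rebuild and resend authentication state if the auth handler says it
 * is needed (e.g. to renew authentication tickets), unless an auth
 * request is already pending.
 */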
static int __validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	if (monc->pending_auth)
		return 0;

	ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base,
		monc->m_auth->front_alloc_len);
	if (ret <= 0)
		return ret; /* either an error, or no need to authenticate */
	__send_prepared_auth_request(monc, ret);
	return 0;
}

int ceph_monc_validate_auth(struct ceph_mon_client *monc)
{
	int ret;

	mutex_lock(&monc->mutex);
	ret = __validate_auth(monc);
	mutex_unlock(&monc->mutex);
	return ret;
}
EXPORT_SYMBOL(ceph_monc_validate_auth);

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(msg->hdr.type);

	if (!monc)
		return;

	switch (type) {
	case CEPH_MSG_AUTH_REPLY:
		handle_auth_reply(monc, msg);
		break;

	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		handle_subscribe_ack(monc, msg);
		break;

	case CEPH_MSG_STATFS_REPLY:
		handle_statfs_reply(monc, msg);
		break;

	case CEPH_MSG_MON_GET_VERSION_REPLY:
		handle_get_version_reply(monc, msg);
		break;

	case CEPH_MSG_MON_MAP:
		ceph_monc_handle_map(monc, msg);
		break;

	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(&monc->client->osdc, msg);
		break;

	default:
		/* can the chained handler handle it? */
		if (monc->client->extra_mon_dispatch &&
		    monc->client->extra_mon_dispatch(monc->client, msg) == 0)
			break;

		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}
	ceph_msg_put(msg);
}

/*
 * Allocate memory for incoming message
 */
static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
				      struct ceph_msg_header *hdr,
				      int *skip)
{
	struct ceph_mon_client *monc = con->private;
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	struct ceph_msg *m = NULL;

	*skip = 0;

	switch (type) {
	case CEPH_MSG_MON_SUBSCRIBE_ACK:
		m = ceph_msg_get(monc->m_subscribe_ack);
		break;
	case CEPH_MSG_STATFS_REPLY:
		return get_generic_reply(con, hdr, skip);
	case CEPH_MSG_AUTH_REPLY:
		m = ceph_msg_get(monc->m_auth_reply);
		break;
	case CEPH_MSG_MON_GET_VERSION_REPLY:
		if (le64_to_cpu(hdr->tid) != 0)
			return get_generic_reply(con, hdr, skip);

		/*
		 * Older OSDs don't set reply tid even if the original
		 * request had a non-zero tid.  Work around this weirdness
		 * by falling through to the allocate case.
		 */
	case CEPH_MSG_MON_MAP:
	case CEPH_MSG_MDS_MAP:
	case CEPH_MSG_OSD_MAP:
		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
		if (!m)
			return NULL;	/* ENOMEM--return skip == 0 */
		break;
	}

	if (!m) {
		pr_info("alloc_msg unknown type %d\n", type);
		*skip = 1;
	} else if (front_len > m->front_alloc_len) {
		pr_warn("mon_alloc_msg front %d > prealloc %d (%u#%llu)\n",
			front_len, m->front_alloc_len,
			(unsigned int)con->peer_name.type,
			le64_to_cpu(con->peer_name.num));
		ceph_msg_put(m);
		m = ceph_msg_new(type, front_len, GFP_NOFS, false);
	}

	return m;
}

/*
 * If the monitor connection resets, pick a new monitor and resubmit
 * any pending requests.
 */
static void mon_fault(struct ceph_connection *con)
{
	struct ceph_mon_client *monc = con->private;

	mutex_lock(&monc->mutex);
	dout("%s mon%d\n", __func__, monc->cur_mon);
	if (monc->cur_mon >= 0) {
		if (!monc->hunting) {
			dout("%s hunting for new mon\n", __func__);
			reopen_session(monc);
			__schedule_delayed(monc);
		} else {
			dout("%s already hunting\n", __func__);
		}
	}
	mutex_unlock(&monc->mutex);
}

/*
 * We can ignore refcounting on the connection struct, as all references
 * will come from the messenger workqueue, which is drained prior to
 * mon_client destruction.
 */
static struct ceph_connection *con_get(struct ceph_connection *con)
{
	return con;
}

static void con_put(struct ceph_connection *con)
{
}

static const struct ceph_connection_operations mon_con_ops = {
	.get = con_get,
	.put = con_put,
	.dispatch = dispatch,
	.fault = mon_fault,
	.alloc_msg = mon_alloc_msg,
};