Commit | Line | Data |
---|---|---|
ba75bb98 SW |
1 | #include "ceph_debug.h" |
2 | ||
3 | #include <linux/types.h> | |
4 | #include <linux/random.h> | |
5 | #include <linux/sched.h> | |
6 | ||
7 | #include "mon_client.h" | |
8 | #include "super.h" | |
4e7a5dcd | 9 | #include "auth.h" |
ba75bb98 SW |
10 | #include "decode.h" |
11 | ||
12 | /* | |
13 | * Interact with Ceph monitor cluster. Handle requests for new map | |
14 | * versions, and periodically resend as needed. Also implement | |
15 | * statfs() and umount(). | |
16 | * | |
17 | * A small cluster of Ceph "monitors" is responsible for managing critical |
18 | * cluster configuration and state information. An odd number (e.g., 3, 5) | |
19 | * of cmon daemons use a modified version of the Paxos part-time parliament | |
20 | * algorithm to manage the MDS map (mds cluster membership), OSD map, and | |
21 | * list of clients who have mounted the file system. | |
22 | * | |
23 | * We maintain an open, active session with a monitor at all times in order to | |
24 | * receive timely MDSMap updates. We periodically send a keepalive byte on the | |
25 | * TCP socket to ensure we detect a failure. If the connection does break, we | |
26 | * randomly hunt for a new monitor. Once the connection is reestablished, we | |
27 | * resend any outstanding requests. | |
28 | */ | |
29 | ||
30 | static const struct ceph_connection_operations mon_con_ops; |
31 | ||
9bd2e6f8 SW |
32 | static int __validate_auth(struct ceph_mon_client *monc); |
33 | ||
ba75bb98 SW |
34 | /* |
35 | * Decode a monmap blob (e.g., during mount). | |
36 | */ | |
37 | struct ceph_monmap *ceph_monmap_decode(void *p, void *end) | |
38 | { | |
39 | struct ceph_monmap *m = NULL; | |
40 | int i, err = -EINVAL; | |
41 | struct ceph_fsid fsid; | |
42 | u32 epoch, num_mon; | |
43 | u16 version; | |
4e7a5dcd SW |
44 | u32 len; |
45 | ||
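| /* the ceph_decode_*_safe helpers bail out to the 'bad' label if the buffer is too short */ |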
46 | ceph_decode_32_safe(&p, end, len, bad); | |
47 | ceph_decode_need(&p, end, len, bad); | |
ba75bb98 SW |
48 | |
49 | dout("monmap_decode %p %p len %d\n", p, end, (int)(end-p)); | |
50 | ||
51 | ceph_decode_16_safe(&p, end, version, bad); | |
52 | ||
53 | ceph_decode_need(&p, end, sizeof(fsid) + 2*sizeof(u32), bad); | |
54 | ceph_decode_copy(&p, &fsid, sizeof(fsid)); | |
c89136ea | 55 | epoch = ceph_decode_32(&p); |
ba75bb98 | 56 | |
c89136ea | 57 | num_mon = ceph_decode_32(&p); |
ba75bb98 SW |
58 | ceph_decode_need(&p, end, num_mon*sizeof(m->mon_inst[0]), bad); |
59 | ||
60 | if (num_mon >= CEPH_MAX_MON) | |
61 | goto bad; | |
62 | m = kmalloc(sizeof(*m) + sizeof(m->mon_inst[0])*num_mon, GFP_NOFS); | |
63 | if (m == NULL) | |
64 | return ERR_PTR(-ENOMEM); | |
65 | m->fsid = fsid; | |
66 | m->epoch = epoch; | |
67 | m->num_mon = num_mon; | |
68 | ceph_decode_copy(&p, m->mon_inst, num_mon*sizeof(m->mon_inst[0])); | |
63f2d211 SW |
69 | for (i = 0; i < num_mon; i++) |
70 | ceph_decode_addr(&m->mon_inst[i].addr); | |
ba75bb98 | 71 | |
ba75bb98 SW |
72 | dout("monmap_decode epoch %d, num_mon %d\n", m->epoch, |
73 | m->num_mon); | |
74 | for (i = 0; i < m->num_mon; i++) | |
75 | dout("monmap_decode mon%d is %s\n", i, | |
76 | pr_addr(&m->mon_inst[i].addr.in_addr)); | |
77 | return m; | |
78 | ||
79 | bad: | |
80 | dout("monmap_decode failed with %d\n", err); | |
81 | kfree(m); | |
82 | return ERR_PTR(err); | |
83 | } | |
84 | ||
85 | /* | |
86 | * return true if *addr is included in the monmap. | |
87 | */ | |
88 | int ceph_monmap_contains(struct ceph_monmap *m, struct ceph_entity_addr *addr) | |
89 | { | |
90 | int i; | |
91 | ||
92 | for (i = 0; i < m->num_mon; i++) | |
103e2d3a | 93 | if (memcmp(addr, &m->mon_inst[i].addr, sizeof(*addr)) == 0) |
ba75bb98 SW |
94 | return 1; |
95 | return 0; | |
96 | } | |
97 | ||
98 | /* | |
99 | * Close monitor session, if any. | |
100 | */ | |
101 | static void __close_session(struct ceph_mon_client *monc) | |
102 | { | |
103 | if (monc->con) { | |
104 | dout("__close_session closing mon%d\n", monc->cur_mon); | |
4e7a5dcd | 105 | ceph_con_revoke(monc->con, monc->m_auth); |
ba75bb98 SW |
106 | ceph_con_close(monc->con); |
107 | monc->cur_mon = -1; | |
9bd2e6f8 | 108 | monc->pending_auth = 0; |
4e7a5dcd | 109 | ceph_auth_reset(monc->auth); |
ba75bb98 SW |
110 | } |
111 | } | |
112 | ||
113 | /* | |
114 | * Open a session with a (new) monitor. | |
115 | */ | |
116 | static int __open_session(struct ceph_mon_client *monc) | |
117 | { | |
118 | u8 r; /* unsigned, so the modulo below cannot yield a negative index */ |
4e7a5dcd | 119 | int ret; |
ba75bb98 SW |
120 | |
121 | if (monc->cur_mon < 0) { | |
122 | get_random_bytes(&r, 1); | |
123 | monc->cur_mon = r % monc->monmap->num_mon; | |
124 | dout("open_session num=%d r=%d -> mon%d\n", | |
125 | monc->monmap->num_mon, r, monc->cur_mon); | |
126 | monc->sub_sent = 0; | |
127 | monc->sub_renew_after = jiffies; /* i.e., expired */ | |
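| /* collapse a previously sent osdmap request (2) back to "wanted" (1) so it is re-sent on the new session */ |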
128 | monc->want_next_osdmap = !!monc->want_next_osdmap; | |
129 | ||
130 | dout("open_session mon%d opening\n", monc->cur_mon); | |
131 | monc->con->peer_name.type = CEPH_ENTITY_TYPE_MON; | |
132 | monc->con->peer_name.num = cpu_to_le64(monc->cur_mon); | |
133 | ceph_con_open(monc->con, | |
134 | &monc->monmap->mon_inst[monc->cur_mon].addr); | |
4e7a5dcd SW |
135 | |
136 | /* initiate authentication handshake */ |
137 | ret = ceph_auth_build_hello(monc->auth, | |
138 | monc->m_auth->front.iov_base, | |
139 | monc->m_auth->front_max); | |
140 | monc->m_auth->front.iov_len = ret; | |
141 | monc->m_auth->hdr.front_len = cpu_to_le32(ret); | |
142 | ceph_msg_get(monc->m_auth); /* keep our ref */ | |
143 | ceph_con_send(monc->con, monc->m_auth); | |
ba75bb98 SW |
144 | } else { |
145 | dout("open_session mon%d already open\n", monc->cur_mon); | |
146 | } | |
147 | return 0; | |
148 | } | |
149 | ||
150 | static bool __sub_expired(struct ceph_mon_client *monc) | |
151 | { | |
152 | return time_after_eq(jiffies, monc->sub_renew_after); | |
153 | } | |
154 | ||
155 | /* | |
156 | * Reschedule delayed work timer. | |
157 | */ | |
158 | static void __schedule_delayed(struct ceph_mon_client *monc) | |
159 | { | |
160 | unsigned delay; | |
161 | ||
4e7a5dcd | 162 | if (monc->cur_mon < 0 || __sub_expired(monc)) |
ba75bb98 SW |
163 | delay = 10 * HZ; |
164 | else | |
165 | delay = 20 * HZ; | |
166 | dout("__schedule_delayed after %u\n", delay); | |
167 | schedule_delayed_work(&monc->delayed_work, delay); | |
168 | } | |
169 | ||
170 | /* | |
171 | * Send subscribe request for mdsmap and/or osdmap. | |
172 | */ | |
173 | static void __send_subscribe(struct ceph_mon_client *monc) | |
174 | { | |
175 | dout("__send_subscribe sub_sent=%u exp=%u want_osd=%d\n", | |
176 | (unsigned)monc->sub_sent, __sub_expired(monc), | |
177 | monc->want_next_osdmap); | |
178 | if ((__sub_expired(monc) && !monc->sub_sent) || | |
179 | monc->want_next_osdmap == 1) { | |
180 | struct ceph_msg *msg; | |
181 | struct ceph_mon_subscribe_item *i; | |
182 | void *p, *end; | |
183 | ||
4e7a5dcd | 184 | msg = ceph_msg_new(CEPH_MSG_MON_SUBSCRIBE, 96, 0, 0, NULL); |
ba75bb98 SW |
185 | if (!msg) |
186 | return; | |
187 | ||
188 | p = msg->front.iov_base; | |
189 | end = p + msg->front.iov_len; | |
190 | ||
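| /* body layout: u32 item count, then (name string, ceph_mon_subscribe_item) pairs */ |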
191 | dout("__send_subscribe to 'mdsmap' %u+\n", | |
192 | (unsigned)monc->have_mdsmap); | |
193 | if (monc->want_next_osdmap) { | |
194 | dout("__send_subscribe to 'osdmap' %u\n", | |
195 | (unsigned)monc->have_osdmap); | |
4e7a5dcd | 196 | ceph_encode_32(&p, 3); |
ba75bb98 SW |
197 | ceph_encode_string(&p, end, "osdmap", 6); |
198 | i = p; | |
199 | i->have = cpu_to_le64(monc->have_osdmap); | |
200 | i->onetime = 1; | |
201 | p += sizeof(*i); | |
202 | monc->want_next_osdmap = 2; /* requested */ | |
203 | } else { | |
4e7a5dcd | 204 | ceph_encode_32(&p, 2); |
ba75bb98 SW |
205 | } |
206 | ceph_encode_string(&p, end, "mdsmap", 6); | |
207 | i = p; | |
208 | i->have = cpu_to_le64(monc->have_mdsmap); | |
209 | i->onetime = 0; | |
210 | p += sizeof(*i); | |
4e7a5dcd SW |
211 | ceph_encode_string(&p, end, "monmap", 6); |
212 | i = p; | |
213 | i->have = 0; | |
214 | i->onetime = 0; | |
215 | p += sizeof(*i); | |
ba75bb98 SW |
216 | |
217 | msg->front.iov_len = p - msg->front.iov_base; | |
218 | msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); | |
219 | ceph_con_send(monc->con, msg); | |
220 | ||
221 | monc->sub_sent = jiffies | 1; /* never 0 */ | |
222 | } | |
223 | } | |
224 | ||
225 | static void handle_subscribe_ack(struct ceph_mon_client *monc, | |
226 | struct ceph_msg *msg) | |
227 | { | |
228 | unsigned seconds; | |
07bd10fb SW |
229 | struct ceph_mon_subscribe_ack *h = msg->front.iov_base; |
230 | ||
231 | if (msg->front.iov_len < sizeof(*h)) | |
232 | goto bad; | |
233 | seconds = le32_to_cpu(h->duration); | |
ba75bb98 | 234 | |
ba75bb98 SW |
235 | mutex_lock(&monc->mutex); |
236 | if (monc->hunting) { | |
237 | pr_info("mon%d %s session established\n", | |
238 | monc->cur_mon, pr_addr(&monc->con->peer_addr.in_addr)); | |
239 | monc->hunting = false; | |
240 | } | |
241 | dout("handle_subscribe_ack after %d seconds\n", seconds); | |
0656d11b | 242 | monc->sub_renew_after = monc->sub_sent + (seconds >> 1)*HZ - 1; |
ba75bb98 SW |
243 | monc->sub_sent = 0; |
244 | mutex_unlock(&monc->mutex); | |
245 | return; | |
246 | bad: | |
247 | pr_err("got corrupt subscribe-ack msg\n"); | |
9ec7cab1 | 248 | ceph_msg_dump(msg); |
ba75bb98 SW |
249 | } |
250 | ||
251 | /* | |
252 | * Keep track of which maps we have | |
253 | */ | |
254 | int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 got) | |
255 | { | |
256 | mutex_lock(&monc->mutex); | |
257 | monc->have_mdsmap = got; | |
258 | mutex_unlock(&monc->mutex); | |
259 | return 0; | |
260 | } | |
261 | ||
262 | int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 got) | |
263 | { | |
264 | mutex_lock(&monc->mutex); | |
265 | monc->have_osdmap = got; | |
266 | monc->want_next_osdmap = 0; | |
267 | mutex_unlock(&monc->mutex); | |
268 | return 0; | |
269 | } | |
270 | ||
271 | /* | |
272 | * Register interest in the next osdmap | |
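| * (want_next_osdmap: 0 = not wanted, 1 = want the next map, 2 = request already sent; see __send_subscribe) |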
273 | */ | |
274 | void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc) | |
275 | { | |
276 | dout("request_next_osdmap have %u\n", monc->have_osdmap); | |
277 | mutex_lock(&monc->mutex); | |
278 | if (!monc->want_next_osdmap) | |
279 | monc->want_next_osdmap = 1; | |
280 | if (monc->want_next_osdmap < 2) | |
281 | __send_subscribe(monc); | |
282 | mutex_unlock(&monc->mutex); | |
283 | } | |
284 | ||
4e7a5dcd | 285 | /* |
50b885b9 | 286 | * Open a session with a monitor, allocating the messenger connection on first use. |
4e7a5dcd SW |
287 | */ |
288 | int ceph_monc_open_session(struct ceph_mon_client *monc) | |
ba75bb98 SW |
289 | { |
290 | if (!monc->con) { | |
291 | monc->con = kmalloc(sizeof(*monc->con), GFP_KERNEL); | |
292 | if (!monc->con) | |
293 | return -ENOMEM; | |
294 | ceph_con_init(monc->client->msgr, monc->con); | |
295 | monc->con->private = monc; | |
296 | monc->con->ops = &mon_con_ops; | |
297 | } | |
298 | ||
299 | mutex_lock(&monc->mutex); | |
4e7a5dcd | 300 | __open_session(monc); |
ba75bb98 SW |
301 | __schedule_delayed(monc); |
302 | mutex_unlock(&monc->mutex); | |
303 | return 0; | |
304 | } | |
305 | ||
4e7a5dcd SW |
306 | /* |
307 | * The monitor responds with a mount ack to indicate mount success. The |
308 | * included client ticket allows the client to talk to MDSs and OSDs. | |
309 | */ | |
0743304d SW |
310 | static void ceph_monc_handle_map(struct ceph_mon_client *monc, |
311 | struct ceph_msg *msg) | |
4e7a5dcd SW |
312 | { |
313 | struct ceph_client *client = monc->client; | |
314 | struct ceph_monmap *monmap = NULL, *old = monc->monmap; | |
315 | void *p, *end; | |
316 | ||
317 | mutex_lock(&monc->mutex); | |
318 | ||
319 | dout("handle_monmap\n"); | |
320 | p = msg->front.iov_base; | |
321 | end = p + msg->front.iov_len; | |
322 | ||
323 | monmap = ceph_monmap_decode(p, end); | |
324 | if (IS_ERR(monmap)) { | |
325 | pr_err("problem decoding monmap, %d\n", | |
326 | (int)PTR_ERR(monmap)); | |
d4a780ce | 327 | goto out; |
4e7a5dcd | 328 | } |
0743304d SW |
329 | |
330 | if (ceph_check_fsid(monc->client, &monmap->fsid) < 0) { | |
4e7a5dcd | 331 | kfree(monmap); |
d4a780ce | 332 | goto out; |
4e7a5dcd SW |
333 | } |
334 | ||
335 | client->monc.monmap = monmap; | |
4e7a5dcd SW |
336 | kfree(old); |
337 | ||
d4a780ce | 338 | out: |
4e7a5dcd | 339 | mutex_unlock(&monc->mutex); |
9bd2e6f8 | 340 | wake_up(&client->auth_wq); |
4e7a5dcd SW |
341 | } |
342 | ||
ba75bb98 SW |
343 | /* |
344 | * statfs | |
345 | */ | |
346 | static void handle_statfs_reply(struct ceph_mon_client *monc, | |
347 | struct ceph_msg *msg) | |
348 | { | |
349 | struct ceph_mon_statfs_request *req; | |
350 | struct ceph_mon_statfs_reply *reply = msg->front.iov_base; | |
351 | u64 tid; | |
352 | ||
353 | if (msg->front.iov_len != sizeof(*reply)) | |
354 | goto bad; | |
6df058c0 | 355 | tid = le64_to_cpu(msg->hdr.tid); |
ba75bb98 SW |
356 | dout("handle_statfs_reply %p tid %llu\n", msg, tid); |
357 | ||
358 | mutex_lock(&monc->mutex); | |
359 | req = radix_tree_lookup(&monc->statfs_request_tree, tid); | |
360 | if (req) { | |
361 | *req->buf = reply->st; | |
362 | req->result = 0; | |
363 | } | |
364 | mutex_unlock(&monc->mutex); | |
365 | if (req) | |
366 | complete(&req->completion); | |
367 | return; | |
368 | ||
369 | bad: | |
370 | pr_err("corrupt statfs reply, no tid\n"); | |
9ec7cab1 | 371 | ceph_msg_dump(msg); |
ba75bb98 SW |
372 | } |
373 | ||
374 | /* | |
375 | * (re)send a statfs request | |
376 | */ | |
377 | static int send_statfs(struct ceph_mon_client *monc, | |
378 | struct ceph_mon_statfs_request *req) | |
379 | { | |
380 | struct ceph_msg *msg; | |
381 | struct ceph_mon_statfs *h; | |
ba75bb98 SW |
382 | |
383 | dout("send_statfs tid %llu\n", req->tid); | |
ba75bb98 SW |
384 | msg = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), 0, 0, NULL); |
385 | if (IS_ERR(msg)) | |
386 | return PTR_ERR(msg); | |
387 | req->request = msg; | |
6df058c0 | 388 | msg->hdr.tid = cpu_to_le64(req->tid); |
ba75bb98 | 389 | h = msg->front.iov_base; |
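| /* generic mon request header; session_mon = -1 presumably lets any monitor service the request */ |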
13e38c8a SW |
390 | h->monhdr.have_version = 0; |
391 | h->monhdr.session_mon = cpu_to_le16(-1); | |
392 | h->monhdr.session_mon_tid = 0; | |
ba75bb98 | 393 | h->fsid = monc->monmap->fsid; |
ba75bb98 SW |
394 | ceph_con_send(monc->con, msg); |
395 | return 0; | |
396 | } | |
397 | ||
398 | /* | |
399 | * Do a synchronous statfs(). | |
400 | */ | |
401 | int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf) | |
402 | { | |
403 | struct ceph_mon_statfs_request req; | |
404 | int err; | |
405 | ||
406 | req.buf = buf; | |
407 | init_completion(&req.completion); | |
408 | ||
409 | /* allocate memory for reply */ | |
410 | err = ceph_msgpool_resv(&monc->msgpool_statfs_reply, 1); | |
411 | if (err) | |
412 | return err; | |
413 | ||
414 | /* register request */ | |
415 | mutex_lock(&monc->mutex); | |
416 | req.tid = ++monc->last_tid; | |
417 | req.last_attempt = jiffies; | |
418 | req.delay = BASE_DELAY_INTERVAL; | |
419 | if (radix_tree_insert(&monc->statfs_request_tree, req.tid, &req) < 0) { | |
420 | mutex_unlock(&monc->mutex); | |
421 | pr_err("ENOMEM in do_statfs\n"); | |
422 | return -ENOMEM; | |
423 | } | |
424 | monc->num_statfs_requests++; | |
425 | mutex_unlock(&monc->mutex); | |
426 | ||
427 | /* send request and wait */ | |
428 | err = send_statfs(monc, &req); | |
429 | if (!err) | |
430 | err = wait_for_completion_interruptible(&req.completion); | |
431 | ||
432 | mutex_lock(&monc->mutex); | |
433 | radix_tree_delete(&monc->statfs_request_tree, req.tid); | |
434 | monc->num_statfs_requests--; | |
435 | ceph_msgpool_resv(&monc->msgpool_statfs_reply, -1); | |
436 | mutex_unlock(&monc->mutex); | |
437 | ||
438 | if (!err) | |
439 | err = req.result; | |
440 | return err; | |
441 | } | |
442 | ||
443 | /* | |
444 | * Resend pending statfs requests. | |
445 | */ | |
446 | static void __resend_statfs(struct ceph_mon_client *monc) | |
447 | { | |
448 | u64 next_tid = 0; | |
449 | int got; | |
450 | int did = 0; | |
451 | struct ceph_mon_statfs_request *req; | |
452 | ||
453 | while (1) { | |
454 | got = radix_tree_gang_lookup(&monc->statfs_request_tree, | |
455 | (void **)&req, | |
456 | next_tid, 1); | |
457 | if (got == 0) | |
458 | break; | |
459 | did++; | |
460 | next_tid = req->tid + 1; | |
461 | ||
462 | send_statfs(monc, req); | |
463 | } | |
464 | } | |
465 | ||
466 | /* | |
467 | * Delayed work. If we haven't mounted yet, retry. Otherwise, | |
468 | * renew/retry subscription as needed (in case it is timing out, or we | |
469 | * got an ENOMEM). And keep the monitor connection alive. | |
470 | */ | |
471 | static void delayed_work(struct work_struct *work) | |
472 | { | |
473 | struct ceph_mon_client *monc = | |
474 | container_of(work, struct ceph_mon_client, delayed_work.work); | |
475 | ||
476 | dout("monc delayed_work\n"); | |
477 | mutex_lock(&monc->mutex); | |
4e7a5dcd SW |
478 | if (monc->hunting) { |
479 | __close_session(monc); | |
480 | __open_session(monc); /* continue hunting */ | |
ba75bb98 | 481 | } else { |
4e7a5dcd | 482 | ceph_con_keepalive(monc->con); |
9bd2e6f8 SW |
483 | mutex_unlock(&monc->mutex); |
484 | ||
485 | __validate_auth(monc); | |
486 | ||
487 | mutex_lock(&monc->mutex); | |
4e7a5dcd SW |
488 | if (monc->auth->ops->is_authenticated(monc->auth)) |
489 | __send_subscribe(monc); | |
ba75bb98 | 490 | } |
ba75bb98 SW |
491 | __schedule_delayed(monc); |
492 | mutex_unlock(&monc->mutex); | |
493 | } | |
494 | ||
6b805185 SW |
495 | /* |
496 | * On startup, we build a temporary monmap populated with the IPs | |
497 | * provided by mount(2). | |
498 | */ | |
499 | static int build_initial_monmap(struct ceph_mon_client *monc) | |
500 | { | |
501 | struct ceph_mount_args *args = monc->client->mount_args; | |
502 | struct ceph_entity_addr *mon_addr = args->mon_addr; | |
503 | int num_mon = args->num_mon; | |
504 | int i; | |
505 | ||
506 | /* build initial monmap */ | |
507 | monc->monmap = kzalloc(sizeof(*monc->monmap) + | |
508 | num_mon*sizeof(monc->monmap->mon_inst[0]), | |
509 | GFP_KERNEL); | |
510 | if (!monc->monmap) | |
511 | return -ENOMEM; | |
512 | for (i = 0; i < num_mon; i++) { | |
513 | monc->monmap->mon_inst[i].addr = mon_addr[i]; | |
6b805185 SW |
514 | monc->monmap->mon_inst[i].addr.nonce = 0; |
515 | monc->monmap->mon_inst[i].name.type = | |
516 | CEPH_ENTITY_TYPE_MON; | |
517 | monc->monmap->mon_inst[i].name.num = cpu_to_le64(i); | |
518 | } | |
519 | monc->monmap->num_mon = num_mon; | |
4e7a5dcd | 520 | monc->have_fsid = false; |
6b805185 SW |
521 | |
522 | /* release addr memory */ | |
523 | kfree(args->mon_addr); | |
524 | args->mon_addr = NULL; | |
525 | args->num_mon = 0; | |
526 | return 0; | |
527 | } | |
528 | ||
ba75bb98 SW |
529 | int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl) |
530 | { | |
531 | int err = 0; | |
532 | ||
533 | dout("init\n"); | |
534 | memset(monc, 0, sizeof(*monc)); | |
535 | monc->client = cl; | |
536 | monc->monmap = NULL; | |
537 | mutex_init(&monc->mutex); | |
538 | ||
6b805185 SW |
539 | err = build_initial_monmap(monc); |
540 | if (err) | |
541 | goto out; | |
542 | ||
ba75bb98 SW |
543 | monc->con = NULL; |
544 | ||
4e7a5dcd SW |
545 | /* authentication */ |
546 | monc->auth = ceph_auth_init(cl->mount_args->name, | |
547 | cl->mount_args->secret); | |
548 | if (IS_ERR(monc->auth)) | |
549 | return PTR_ERR(monc->auth); | |
550 | monc->auth->want_keys = | |
551 | CEPH_ENTITY_TYPE_AUTH | CEPH_ENTITY_TYPE_MON | | |
552 | CEPH_ENTITY_TYPE_OSD | CEPH_ENTITY_TYPE_MDS; | |
553 | ||
ba75bb98 | 554 | /* msg pools */ |
07bd10fb SW |
555 | err = ceph_msgpool_init(&monc->msgpool_subscribe_ack, |
556 | sizeof(struct ceph_mon_subscribe_ack), 1, false); | |
ba75bb98 | 557 | if (err < 0) |
4e7a5dcd | 558 | goto out_monmap; |
ba75bb98 SW |
559 | err = ceph_msgpool_init(&monc->msgpool_statfs_reply, |
560 | sizeof(struct ceph_mon_statfs_reply), 0, false); | |
561 | if (err < 0) | |
4e7a5dcd SW |
562 | goto out_pool1; |
563 | err = ceph_msgpool_init(&monc->msgpool_auth_reply, 4096, 1, false); | |
564 | if (err < 0) | |
565 | goto out_pool2; | |
566 | ||
567 | monc->m_auth = ceph_msg_new(CEPH_MSG_AUTH, 4096, 0, 0, NULL); | |
9bd2e6f8 | 568 | monc->pending_auth = 0; |
4e7a5dcd SW |
569 | if (IS_ERR(monc->m_auth)) { |
570 | err = PTR_ERR(monc->m_auth); | |
571 | monc->m_auth = NULL; | |
572 | goto out_pool3; | |
573 | } | |
ba75bb98 SW |
574 | |
575 | monc->cur_mon = -1; | |
4e7a5dcd | 576 | monc->hunting = true; |
ba75bb98 SW |
577 | monc->sub_renew_after = jiffies; |
578 | monc->sub_sent = 0; | |
579 | ||
580 | INIT_DELAYED_WORK(&monc->delayed_work, delayed_work); | |
581 | INIT_RADIX_TREE(&monc->statfs_request_tree, GFP_NOFS); | |
582 | monc->num_statfs_requests = 0; | |
583 | monc->last_tid = 0; | |
584 | ||
585 | monc->have_mdsmap = 0; | |
586 | monc->have_osdmap = 0; | |
587 | monc->want_next_osdmap = 1; | |
4e7a5dcd SW |
588 | return 0; |
589 | ||
590 | out_pool3: | |
591 | ceph_msgpool_destroy(&monc->msgpool_auth_reply); | |
592 | out_pool2: | |
593 | ceph_msgpool_destroy(&monc->msgpool_subscribe_ack); | |
594 | out_pool1: | |
595 | ceph_msgpool_destroy(&monc->msgpool_statfs_reply); | |
596 | out_monmap: | |
597 | kfree(monc->monmap); | |
ba75bb98 SW |
598 | out: |
599 | return err; | |
600 | } | |
601 | ||
602 | void ceph_monc_stop(struct ceph_mon_client *monc) | |
603 | { | |
604 | dout("stop\n"); | |
605 | cancel_delayed_work_sync(&monc->delayed_work); | |
606 | ||
607 | mutex_lock(&monc->mutex); | |
608 | __close_session(monc); | |
609 | if (monc->con) { | |
610 | monc->con->private = NULL; | |
611 | monc->con->ops->put(monc->con); | |
612 | monc->con = NULL; | |
613 | } | |
614 | mutex_unlock(&monc->mutex); | |
615 | ||
4e7a5dcd SW |
616 | ceph_auth_destroy(monc->auth); |
617 | ||
618 | ceph_msg_put(monc->m_auth); | |
ba75bb98 SW |
619 | ceph_msgpool_destroy(&monc->msgpool_subscribe_ack); |
620 | ceph_msgpool_destroy(&monc->msgpool_statfs_reply); | |
4e7a5dcd | 621 | ceph_msgpool_destroy(&monc->msgpool_auth_reply); |
ba75bb98 SW |
622 | |
623 | kfree(monc->monmap); | |
624 | } | |
625 | ||
9bd2e6f8 SW |
626 | static void __send_prepared_auth_request(struct ceph_mon_client *monc, int len) |
627 | { | |
628 | monc->pending_auth = 1; | |
629 | monc->m_auth->front.iov_len = len; | |
630 | monc->m_auth->hdr.front_len = cpu_to_le32(len); | |
631 | ceph_msg_get(monc->m_auth); /* keep our ref */ | |
632 | ceph_con_send(monc->con, monc->m_auth); | |
633 | } | |
634 | ||
ba75bb98 | 635 | |
4e7a5dcd SW |
636 | static void handle_auth_reply(struct ceph_mon_client *monc, |
637 | struct ceph_msg *msg) | |
638 | { | |
639 | int ret; | |
640 | ||
641 | mutex_lock(&monc->mutex); | |
9bd2e6f8 | 642 | monc->pending_auth = 0; |
4e7a5dcd SW |
643 | ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base, |
644 | msg->front.iov_len, | |
645 | monc->m_auth->front.iov_base, | |
646 | monc->m_auth->front_max); | |
647 | if (ret < 0) { | |
9bd2e6f8 SW |
648 | monc->client->auth_err = ret; |
649 | wake_up(&monc->client->auth_wq); | |
4e7a5dcd | 650 | } else if (ret > 0) { |
9bd2e6f8 | 651 | __send_prepared_auth_request(monc, ret); |
4e7a5dcd SW |
652 | } else if (monc->auth->ops->is_authenticated(monc->auth)) { |
653 | dout("authenticated, starting session\n"); | |
0743304d SW |
654 | |
655 | monc->client->msgr->inst.name.type = CEPH_ENTITY_TYPE_CLIENT; | |
656 | monc->client->msgr->inst.name.num = monc->auth->global_id; | |
657 | ||
4e7a5dcd SW |
658 | __send_subscribe(monc); |
659 | __resend_statfs(monc); | |
660 | } | |
661 | mutex_unlock(&monc->mutex); | |
662 | } | |
663 | ||
9bd2e6f8 SW |
664 | static int __validate_auth(struct ceph_mon_client *monc) |
665 | { | |
666 | int ret; | |
667 | ||
668 | if (monc->pending_auth) | |
669 | return 0; | |
670 | ||
671 | ret = ceph_build_auth(monc->auth, monc->m_auth->front.iov_base, | |
672 | monc->m_auth->front_max); | |
673 | if (ret <= 0) | |
674 | return ret; /* either an error, or no need to authenticate */ | |
675 | __send_prepared_auth_request(monc, ret); | |
676 | return 0; | |
677 | } | |
678 | ||
679 | int ceph_monc_validate_auth(struct ceph_mon_client *monc) | |
680 | { | |
681 | int ret; | |
682 | ||
683 | mutex_lock(&monc->mutex); | |
684 | ret = __validate_auth(monc); | |
685 | mutex_unlock(&monc->mutex); | |
686 | return ret; | |
687 | } | |
688 | ||
ba75bb98 SW |
689 | /* |
690 | * handle incoming message | |
691 | */ | |
692 | static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) | |
693 | { | |
694 | struct ceph_mon_client *monc = con->private; | |
695 | int type = le16_to_cpu(msg->hdr.type); | |
696 | ||
697 | if (!monc) | |
698 | return; | |
699 | ||
700 | switch (type) { | |
4e7a5dcd SW |
701 | case CEPH_MSG_AUTH_REPLY: |
702 | handle_auth_reply(monc, msg); | |
ba75bb98 SW |
703 | break; |
704 | ||
705 | case CEPH_MSG_MON_SUBSCRIBE_ACK: | |
706 | handle_subscribe_ack(monc, msg); | |
707 | break; | |
708 | ||
709 | case CEPH_MSG_STATFS_REPLY: | |
710 | handle_statfs_reply(monc, msg); | |
711 | break; | |
712 | ||
4e7a5dcd SW |
713 | case CEPH_MSG_MON_MAP: |
714 | ceph_monc_handle_map(monc, msg); | |
715 | break; | |
716 | ||
ba75bb98 SW |
717 | case CEPH_MSG_MDS_MAP: |
718 | ceph_mdsc_handle_map(&monc->client->mdsc, msg); | |
719 | break; | |
720 | ||
721 | case CEPH_MSG_OSD_MAP: | |
722 | ceph_osdc_handle_map(&monc->client->osdc, msg); | |
723 | break; | |
724 | ||
725 | default: | |
726 | pr_err("received unknown message type %d %s\n", type, | |
727 | ceph_msg_type_name(type)); | |
728 | } | |
729 | ceph_msg_put(msg); | |
730 | } | |
731 | ||
732 | /* | |
733 | * Allocate memory for incoming message | |
734 | */ | |
735 | static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, | |
2450418c YS |
736 | struct ceph_msg_header *hdr, |
737 | int *skip) | |
ba75bb98 SW |
738 | { |
739 | struct ceph_mon_client *monc = con->private; | |
740 | int type = le16_to_cpu(hdr->type); | |
2450418c YS |
741 | int front_len = le32_to_cpu(hdr->front_len); |
742 | struct ceph_msg *m; | |
ba75bb98 | 743 | |
2450418c | 744 | *skip = 0; |
0547a9b3 | 745 | |
ba75bb98 | 746 | switch (type) { |
ba75bb98 | 747 | case CEPH_MSG_MON_SUBSCRIBE_ACK: |
2450418c YS |
748 | m = ceph_msgpool_get(&monc->msgpool_subscribe_ack, front_len); |
749 | break; | |
ba75bb98 | 750 | case CEPH_MSG_STATFS_REPLY: |
2450418c YS |
751 | m = ceph_msgpool_get(&monc->msgpool_statfs_reply, front_len); |
752 | break; | |
4e7a5dcd | 753 | case CEPH_MSG_AUTH_REPLY: |
2450418c YS |
754 | m = ceph_msgpool_get(&monc->msgpool_auth_reply, front_len); |
755 | break; | |
756 | default: | |
757 | return NULL; | |
ba75bb98 | 758 | } |
2450418c YS |
759 | |
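| /* nothing available from the pool: set *skip so the messenger discards this incoming message */ |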
760 | if (!m) | |
761 | *skip = 1; | |
762 | ||
763 | return m; | |
ba75bb98 SW |
764 | } |
765 | ||
766 | /* | |
767 | * If the monitor connection resets, pick a new monitor and resubmit | |
768 | * any pending requests. | |
769 | */ | |
770 | static void mon_fault(struct ceph_connection *con) | |
771 | { | |
772 | struct ceph_mon_client *monc = con->private; | |
773 | ||
774 | if (!monc) | |
775 | return; | |
776 | ||
777 | dout("mon_fault\n"); | |
778 | mutex_lock(&monc->mutex); | |
779 | if (!con->private) | |
780 | goto out; | |
781 | ||
782 | if (monc->con && !monc->hunting) | |
783 | pr_info("mon%d %s session lost, " | |
784 | "hunting for new mon\n", monc->cur_mon, | |
785 | pr_addr(&monc->con->peer_addr.in_addr)); | |
786 | ||
787 | __close_session(monc); | |
788 | if (!monc->hunting) { | |
789 | /* start hunting */ | |
790 | monc->hunting = true; | |
4e7a5dcd | 791 | __open_session(monc); |
ba75bb98 SW |
792 | } else { |
793 | /* already hunting, let's wait a bit */ | |
794 | __schedule_delayed(monc); | |
795 | } | |
796 | out: | |
797 | mutex_unlock(&monc->mutex); | |
798 | } | |
799 | ||
800 | static const struct ceph_connection_operations mon_con_ops = { |
801 | .get = ceph_con_get, | |
802 | .put = ceph_con_put, | |
803 | .dispatch = dispatch, | |
804 | .fault = mon_fault, | |
805 | .alloc_msg = mon_alloc_msg, | |
ba75bb98 | 806 | }; |